hexsha stringlengths 40 40 | size int64 4 996k | ext stringclasses 8
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 996k | avg_line_length float64 1.33 58.2k | max_line_length int64 2 323k | alphanum_fraction float64 0 0.97 | content_no_comment stringlengths 0 946k | is_comment_constant_removed bool 2
classes | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f7f572baae6462e177d7aaf6b1e4878d7035945f | 19 | py | Python | torrentool/__init__.py | erikdejonge/torrentool | e2670fc1ba8a5356726b9ba91f287414c921e626 | [
"BSD-3-Clause"
] | null | null | null | torrentool/__init__.py | erikdejonge/torrentool | e2670fc1ba8a5356726b9ba91f287414c921e626 | [
"BSD-3-Clause"
] | null | null | null | torrentool/__init__.py | erikdejonge/torrentool | e2670fc1ba8a5356726b9ba91f287414c921e626 | [
"BSD-3-Clause"
] | null | null | null | VERSION = (1, 0, 2) | 19 | 19 | 0.526316 | VERSION = (1, 0, 2) | true | true |
f7f572efec2d623a7fcbb9aa147367db2515b481 | 6,497 | py | Python | KaSaAn/functions/snapshot_visualizer_subcomponent.py | hmedina/KaSaAn | 83e4e31ff0e0062762aacfbc65bbdd290808bb51 | [
"MIT"
] | 1 | 2020-05-11T14:31:54.000Z | 2020-05-11T14:31:54.000Z | KaSaAn/functions/snapshot_visualizer_subcomponent.py | hmedina/KaSaAn | 83e4e31ff0e0062762aacfbc65bbdd290808bb51 | [
"MIT"
] | 4 | 2017-08-31T11:16:08.000Z | 2020-07-10T22:31:45.000Z | KaSaAn/functions/snapshot_visualizer_subcomponent.py | hmedina/KaSaAn | 83e4e31ff0e0062762aacfbc65bbdd290808bb51 | [
"MIT"
] | 2 | 2018-02-06T20:53:26.000Z | 2019-05-11T18:05:38.000Z | #! /usr/bin/env python3
import ast
import squarify
import warnings
import networkx as nx
import matplotlib as mpl
import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
from networkx.drawing.nx_agraph import graphviz_layout
from typing import List, Tuple
from ..core import KappaSnapshot, KappaComplex, KappaAgent
from .snapshot_visualizer_patchwork import colorize_agents
def render_complexes_as_plain_graph(snapshot_file_name: str, sizes_requested: List[int], highlight_patterns: List[str],
                                    color_scheme_file_name: str, node_size: int, edge_width: float,
                                    fig_size: Tuple[float, float], print_distro: bool) -> List[plt.figure]:
    """Take a KappaSnapshot, get complexes of a given size, render them as plain graphs, optionally highlighting
    certain patterns. See file under `KaSaAn.scripts` for usage.

    :param snapshot_file_name: path of the Kappa snapshot file to read
    :param sizes_requested: complex sizes to visualize; when empty, the largest complex(es) are used instead
    :param highlight_patterns: agent expressions; one additional figure is produced per pattern, coloring only
        the agents matching that pattern
    :param color_scheme_file_name: file containing a python dictionary literal mapping agent names to colors;
        when not given, a palette is generated via ``colorize_agents``
    :param node_size: node size forwarded to ``networkx.draw``
    :param edge_width: edge width forwarded to ``networkx.draw``
    :param fig_size: width and height of each produced figure, in inches
    :param print_distro: when True, print the snapshot's size distribution to standard output
    :return: list of figures: first the all-agent figure, then one figure per requested highlight pattern
    """
    snapshot = KappaSnapshot(snapshot_file_name)
    if print_distro:
        print("Snapshot's distribution, size:abundance\n" + str(snapshot.get_size_distribution()))
    snapshot_agents = snapshot.get_agent_types_present()
    # get list of complexes to visualize, with abundances and sizes for weighting
    compl_list: List[KappaComplex] = []
    abund_list: List[int] = []
    sizes_list: List[int] = []
    if sizes_requested:
        # of requested sizes, get present ones; warn user if some requested not present
        sizes_to_view = list(set(snapshot.get_all_sizes()).intersection(sizes_requested))
        sizes_absent = list(set(sizes_requested).difference(set(snapshot.get_all_sizes())))
        if sizes_absent:
            warnings.warn('Requested size(s) <' + str(sizes_absent) + '> not found in size distribution, skipped.')
        if sizes_to_view:
            # render larger sizes first so they get laid out into the larger treemap rectangles
            sizes_to_view.sort(reverse=True)
            for query_size in sizes_to_view:
                comps, abuns = zip(*snapshot.get_complexes_of_size(query_size))
                compl_list.extend(comps)
                abund_list.extend(abuns)
                sizes_list.extend([query_size] * len(abuns))
        else:
            raise ValueError('Snapshot did not contain any of the requested sizes.')
    else:
        # default behavior: visualize the largest complex(es) present in the snapshot
        compl_list, abund_list = zip(*snapshot.get_largest_complexes())
        sizes_list = [compl_list[0].get_size_of_complex()] * len(compl_list)
    # read or define color scheme
    # for user-defined coloring schemes, read the dictionary from a file, convert keys to KappaAgent
    if color_scheme_file_name:
        color_scheme = {}
        with open(color_scheme_file_name, 'r') as cs_file:
            coloring_scheme_raw = ast.literal_eval(cs_file.read())
            for key, value in coloring_scheme_raw.items():
                color_scheme[KappaAgent(key)] = value
    else:
        color_scheme = colorize_agents(snapshot_agents)
    # using squarify to define axis locations based on complex sizes
    fig_width = 1
    fig_height = 1
    fig_origin_x = 0
    fig_origin_y = 0
    norm_sizes = squarify.normalize_sizes(sizes_list, fig_width, fig_height)
    axis_rects: List[dict] = squarify.padded_squarify(norm_sizes, fig_origin_x, fig_origin_y, fig_width, fig_height)
    # for each complex, get the networkx graph and define node positions
    compl_graphs = [compl.to_networkx() for compl in compl_list]
    node_positions = [graphviz_layout(compl_graph, prog='sfdp') for compl_graph in compl_graphs]
    plotting_data = list(zip(compl_graphs, abund_list, axis_rects, node_positions))
    # figure list construction
    fig_list: List[plt.figure] = []
    # construct the all-agent figure
    fig_all = plt.figure(figsize=fig_size)
    for comp, abund, rect, npos in plotting_data:
        ax = fig_all.add_axes([rect['x'], rect['y'], rect['dx'], rect['dy']])
        # negative pad pulls the abundance label into the axes area
        ax.set_title(label='#: ' + str(abund), pad=-2*mpl.rcParams['axes.titlepad'])
        # try to assign color to nodes based on color scheme
        axis_color_list = []
        for node in comp.nodes.data():
            agent_name = node[1]['kappa'].get_agent_name()
            try:
                axis_color_list.append(color_scheme[KappaAgent(agent_name)])
            except KeyError as k_e:
                raise ValueError('Complex contains agent <' + agent_name + '> not found in coloring palette.') from k_e
        nx.draw(comp, pos=npos, ax=ax, node_color=axis_color_list, with_labels=False,
                node_size=node_size, width=edge_width)
    # construct the all-agent legend
    legend_entries = []
    for agent in snapshot_agents:
        patch_label = agent.get_agent_name()
        # agents absent from a user-supplied palette get a fully transparent legend patch
        patch_color = color_scheme[agent] if agent in color_scheme else '#00000000'
        legend_entries.append(mpatches.Patch(label=patch_label, color=patch_color))
    fig_all.legend(handles=legend_entries)
    fig_list.append(fig_all)
    # construct the pattern-specific figures
    if highlight_patterns:
        for string_pattern in highlight_patterns:
            kappa_query = KappaAgent(string_pattern)
            fig_patt = plt.figure(figsize=fig_size)
            for comp, abund, rect, npos in plotting_data:
                ax = fig_patt.add_axes([rect['x'], rect['y'], rect['dx'], rect['dy']])
                ax.set_title(label='#: ' + str(abund), pad=-2 * mpl.rcParams['axes.titlepad'])
                # try to assign color to nodes based on color scheme and user-supplied pattern
                axis_color_list = []
                for node in comp.nodes.data():
                    node_agent = node[1]['kappa']
                    try:
                        if kappa_query in node_agent:
                            axis_color_list.append(color_scheme[KappaAgent(kappa_query.get_agent_name())])
                        else:
                            # agents not matching the pattern are drawn fully transparent
                            axis_color_list.append('#00000000')
                    except KeyError as k_e:
                        raise ValueError('Complex contains agent <' + node_agent.get_agent_name() +
                                         '> not found in supplied palette.') from k_e
                nx.draw(comp, pos=npos, ax=ax, node_color=axis_color_list, with_labels=False,
                        node_size=node_size, width=edge_width)
            patch_label = str(kappa_query)
            patch_color = color_scheme[KappaAgent(kappa_query.get_agent_name())]
            legend_entry = mpatches.Patch(label=patch_label, color=patch_color)
            fig_patt.legend(handles=[legend_entry])
            fig_list.append(fig_patt)
    return fig_list
| 53.694215 | 119 | 0.658766 |
import ast
import squarify
import warnings
import networkx as nx
import matplotlib as mpl
import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
from networkx.drawing.nx_agraph import graphviz_layout
from typing import List, Tuple
from ..core import KappaSnapshot, KappaComplex, KappaAgent
from .snapshot_visualizer_patchwork import colorize_agents
def render_complexes_as_plain_graph(snapshot_file_name: str, sizes_requested: List[int], highlight_patterns: List[str],
color_scheme_file_name: str, node_size: int, edge_width: float,
fig_size: Tuple[float, float], print_distro: bool) -> List[plt.figure]:
snapshot = KappaSnapshot(snapshot_file_name)
if print_distro:
print("Snapshot's distribution, size:abundance\n" + str(snapshot.get_size_distribution()))
snapshot_agents = snapshot.get_agent_types_present()
# get list of complexes to visualize, with abundances and sizes for weighting
compl_list: List[KappaComplex] = []
abund_list: List[int] = []
sizes_list: List[int] = []
if sizes_requested:
# of requested sizes, get present ones; warn user if some requested not present
sizes_to_view = list(set(snapshot.get_all_sizes()).intersection(sizes_requested))
sizes_absent = list(set(sizes_requested).difference(set(snapshot.get_all_sizes())))
if sizes_absent:
warnings.warn('Requested size(s) <' + str(sizes_absent) + '> not found in size distribution, skipped.')
if sizes_to_view:
sizes_to_view.sort(reverse=True)
for query_size in sizes_to_view:
comps, abuns = zip(*snapshot.get_complexes_of_size(query_size))
compl_list.extend(comps)
abund_list.extend(abuns)
sizes_list.extend([query_size] * len(abuns))
else:
raise ValueError('Snapshot did not contain any of the requested sizes.')
else:
compl_list, abund_list = zip(*snapshot.get_largest_complexes())
sizes_list = [compl_list[0].get_size_of_complex()] * len(compl_list)
# read or define color scheme
# for user-defined coloring schemes, read the dictionary from a file, convert keys to KappaAgent
if color_scheme_file_name:
color_scheme = {}
with open(color_scheme_file_name, 'r') as cs_file:
coloring_scheme_raw = ast.literal_eval(cs_file.read())
for key, value in coloring_scheme_raw.items():
color_scheme[KappaAgent(key)] = value
else:
color_scheme = colorize_agents(snapshot_agents)
# using squarify to define axis locations based on complex sizes
fig_width = 1
fig_height = 1
fig_origin_x = 0
fig_origin_y = 0
norm_sizes = squarify.normalize_sizes(sizes_list, fig_width, fig_height)
axis_rects: List[dict] = squarify.padded_squarify(norm_sizes, fig_origin_x, fig_origin_y, fig_width, fig_height)
# for each complex, get the networkx graph and define node positions
compl_graphs = [compl.to_networkx() for compl in compl_list]
node_positions = [graphviz_layout(compl_graph, prog='sfdp') for compl_graph in compl_graphs]
plotting_data = list(zip(compl_graphs, abund_list, axis_rects, node_positions))
# figure list construction
fig_list: List[plt.figure] = []
# construct the all-agent figure
fig_all = plt.figure(figsize=fig_size)
for comp, abund, rect, npos in plotting_data:
ax = fig_all.add_axes([rect['x'], rect['y'], rect['dx'], rect['dy']])
ax.set_title(label='
# try to assign color to nodes based on color scheme
axis_color_list = []
for node in comp.nodes.data():
agent_name = node[1]['kappa'].get_agent_name()
try:
axis_color_list.append(color_scheme[KappaAgent(agent_name)])
except KeyError as k_e:
raise ValueError('Complex contains agent <' + agent_name + '> not found in coloring palette.') from k_e
nx.draw(comp, pos=npos, ax=ax, node_color=axis_color_list, with_labels=False,
node_size=node_size, width=edge_width)
# construct the all-agent legend
legend_entries = []
for agent in snapshot_agents:
patch_label = agent.get_agent_name()
patch_color = color_scheme[agent] if agent in color_scheme else '
legend_entries.append(mpatches.Patch(label=patch_label, color=patch_color))
fig_all.legend(handles=legend_entries)
fig_list.append(fig_all)
# construct the patter-specific figures
if highlight_patterns:
for string_pattern in highlight_patterns:
kappa_query = KappaAgent(string_pattern)
fig_patt = plt.figure(figsize=fig_size)
for comp, abund, rect, npos in plotting_data:
ax = fig_patt.add_axes([rect['x'], rect['y'], rect['dx'], rect['dy']])
ax.set_title(label='
# try to assign color to nodes based on color scheme and user-supplied pattern
axis_color_list = []
for node in comp.nodes.data():
node_agent = node[1]['kappa']
try:
if kappa_query in node_agent:
axis_color_list.append(color_scheme[KappaAgent(kappa_query.get_agent_name())])
else:
axis_color_list.append('
except KeyError as k_e:
raise ValueError('Complex contains agent <' + node_agent.get_agent_name() +
'> not found in supplied palette.') from k_e
nx.draw(comp, pos=npos, ax=ax, node_color=axis_color_list, with_labels=False,
node_size=node_size, width=edge_width)
patch_label = str(kappa_query)
patch_color = color_scheme[KappaAgent(kappa_query.get_agent_name())]
legend_entry = mpatches.Patch(label=patch_label, color=patch_color)
fig_patt.legend(handles=[legend_entry])
fig_list.append(fig_patt)
return fig_list
| true | true |
f7f5736590d965ae539f000b45a5a34df6fc363d | 3,816 | py | Python | python/ccxt/async_support/okex.py | 15210770501/ccxt | 4a682d92a23ccb768234dd7c0557a8ce4ccf8118 | [
"MIT"
] | null | null | null | python/ccxt/async_support/okex.py | 15210770501/ccxt | 4a682d92a23ccb768234dd7c0557a8ce4ccf8118 | [
"MIT"
] | null | null | null | python/ccxt/async_support/okex.py | 15210770501/ccxt | 4a682d92a23ccb768234dd7c0557a8ce4ccf8118 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.async_support.okcoinusd import okcoinusd
class okex (okcoinusd):
    """ccxt adapter for the OKEX exchange; inherits most behavior from the
    okcoinusd base exchange and overrides URLs, fee schedule, and ticker
    fetching. NOTE: per the header comment, this file is generated — changes
    here will be overwritten by the ccxt code generator.
    """

    def describe(self):
        # Overlay OKEX-specific settings on top of the okcoinusd defaults.
        return self.deep_extend(super(okex, self).describe(), {
            'id': 'okex',
            'name': 'OKEX',
            'countries': ['CN', 'US'],
            'has': {
                'CORS': False,
                'futures': True,
                'fetchTickers': True,
            },
            'urls': {
                'logo': 'https://user-images.githubusercontent.com/1294454/32552768-0d6dd3c6-c4a6-11e7-90f8-c043b64756a7.jpg',
                'api': {
                    'web': 'https://www.okex.com/v2',
                    'public': 'https://www.okex.com/api',
                    'private': 'https://www.okex.com/api',
                },
                'www': 'https://www.okex.com',
                'doc': 'https://github.com/okcoin-okex/API-docs-OKEx.com',
                'fees': 'https://www.okex.com/pages/products/fees.html',
            },
            # Exchange tickers that collide with other projects' symbols are
            # remapped to their unified ccxt currency codes here.
            'commonCurrencies': {
                'FAIR': 'FairGame',
                'HOT': 'Hydro Protocol',
                'HSR': 'HC',
                'MAG': 'Maggie',
                'YOYO': 'YOYOW',
            },
            'options': {
                'fetchTickersMethod': 'fetch_tickers_from_api',
            },
        })

    def calculate_fee(self, symbol, type, side, amount, price, takerOrMaker='taker', params={}):
        """Compute the trading fee for an order on a known market.

        Sells are charged in the quote currency (rate * amount * price);
        buys are charged in the base currency (rate * amount).
        NOTE(review): the mutable default ``params={}`` is shared across calls;
        it is unused here, but the signature follows the common ccxt shape.
        """
        market = self.markets[symbol]
        key = 'quote'
        rate = market[takerOrMaker]
        cost = float(self.cost_to_precision(symbol, amount * rate))
        if side == 'sell':
            cost *= price
        else:
            key = 'base'
        return {
            'type': takerOrMaker,
            'currency': market[key],
            'rate': rate,
            'cost': float(self.fee_to_precision(symbol, cost)),
        }

    async def fetch_markets(self):
        """Fetch markets from the base exchange, then override the fee rates
        with OKEX's schedule (worst case of the volume-tiered fees)."""
        markets = await super(okex, self).fetch_markets()
        # TODO: they have a new fee schedule as of Feb 7
        # the new fees are progressive and depend on 30-day traded volume
        # the following is the worst case
        for i in range(0, len(markets)):
            if markets[i]['spot']:
                markets[i]['maker'] = 0.0015
                markets[i]['taker'] = 0.0020
            else:
                markets[i]['maker'] = 0.0003
                markets[i]['taker'] = 0.0005
        return markets

    async def fetch_tickers_from_api(self, symbols=None, params={}):
        """Fetch all tickers via the public REST API; the response-level
        timestamp is stamped onto every parsed ticker."""
        await self.load_markets()
        request = {}
        response = await self.publicGetTickers(self.extend(request, params))
        tickers = response['tickers']
        timestamp = int(response['date']) * 1000
        result = {}
        for i in range(0, len(tickers)):
            ticker = tickers[i]
            ticker = self.parse_ticker(self.extend(tickers[i], {'timestamp': timestamp}))
            symbol = ticker['symbol']
            result[symbol] = ticker
        return result

    async def fetch_tickers_from_web(self, symbols=None, params={}):
        """Fetch all tickers via the website's v2 endpoint (alternative to the
        public API; selected through options['fetchTickersMethod'])."""
        await self.load_markets()
        request = {}
        response = await self.webGetSpotMarketsTickers(self.extend(request, params))
        tickers = response['data']
        result = {}
        for i in range(0, len(tickers)):
            ticker = self.parse_ticker(tickers[i])
            symbol = ticker['symbol']
            result[symbol] = ticker
        return result

    async def fetch_tickers(self, symbols=None, params={}):
        """Dispatch to the configured ticker-fetching implementation
        (options['fetchTickersMethod'], API by default)."""
        method = self.options['fetchTickersMethod']
        response = await getattr(self, method)(symbols, params)
        return response
| 36.692308 | 126 | 0.527778 |
rt.okcoinusd import okcoinusd
class okex (okcoinusd):
def describe(self):
return self.deep_extend(super(okex, self).describe(), {
'id': 'okex',
'name': 'OKEX',
'countries': ['CN', 'US'],
'has': {
'CORS': False,
'futures': True,
'fetchTickers': True,
},
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/32552768-0d6dd3c6-c4a6-11e7-90f8-c043b64756a7.jpg',
'api': {
'web': 'https://www.okex.com/v2',
'public': 'https://www.okex.com/api',
'private': 'https://www.okex.com/api',
},
'www': 'https://www.okex.com',
'doc': 'https://github.com/okcoin-okex/API-docs-OKEx.com',
'fees': 'https://www.okex.com/pages/products/fees.html',
},
'commonCurrencies': {
'FAIR': 'FairGame',
'HOT': 'Hydro Protocol',
'HSR': 'HC',
'MAG': 'Maggie',
'YOYO': 'YOYOW',
},
'options': {
'fetchTickersMethod': 'fetch_tickers_from_api',
},
})
def calculate_fee(self, symbol, type, side, amount, price, takerOrMaker='taker', params={}):
market = self.markets[symbol]
key = 'quote'
rate = market[takerOrMaker]
cost = float(self.cost_to_precision(symbol, amount * rate))
if side == 'sell':
cost *= price
else:
key = 'base'
return {
'type': takerOrMaker,
'currency': market[key],
'rate': rate,
'cost': float(self.fee_to_precision(symbol, cost)),
}
async def fetch_markets(self):
markets = await super(okex, self).fetch_markets()
for i in range(0, len(markets)):
if markets[i]['spot']:
markets[i]['maker'] = 0.0015
markets[i]['taker'] = 0.0020
else:
markets[i]['maker'] = 0.0003
markets[i]['taker'] = 0.0005
return markets
async def fetch_tickers_from_api(self, symbols=None, params={}):
await self.load_markets()
request = {}
response = await self.publicGetTickers(self.extend(request, params))
tickers = response['tickers']
timestamp = int(response['date']) * 1000
result = {}
for i in range(0, len(tickers)):
ticker = tickers[i]
ticker = self.parse_ticker(self.extend(tickers[i], {'timestamp': timestamp}))
symbol = ticker['symbol']
result[symbol] = ticker
return result
async def fetch_tickers_from_web(self, symbols=None, params={}):
await self.load_markets()
request = {}
response = await self.webGetSpotMarketsTickers(self.extend(request, params))
tickers = response['data']
result = {}
for i in range(0, len(tickers)):
ticker = self.parse_ticker(tickers[i])
symbol = ticker['symbol']
result[symbol] = ticker
return result
async def fetch_tickers(self, symbols=None, params={}):
method = self.options['fetchTickersMethod']
response = await getattr(self, method)(symbols, params)
return response
| true | true |
f7f5749e96191da7b79aa26f88ab3c8cbd56f24b | 10,761 | py | Python | doc/sphinxext/gallery_generator.py | EwoutH/seaborn | 630c08d7bd1c2a39362eda2389e8822357057775 | [
"BSD-3-Clause"
] | null | null | null | doc/sphinxext/gallery_generator.py | EwoutH/seaborn | 630c08d7bd1c2a39362eda2389e8822357057775 | [
"BSD-3-Clause"
] | null | null | null | doc/sphinxext/gallery_generator.py | EwoutH/seaborn | 630c08d7bd1c2a39362eda2389e8822357057775 | [
"BSD-3-Clause"
] | null | null | null | """
Sphinx plugin to run example scripts and create a gallery page.
Lightly modified from the mpld3 project.
"""
import os
import os.path as op
import re
import glob
import token
import tokenize
import shutil
import warnings
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt # noqa: E402
# Python 3 has no execfile
def execfile(filename, globals=None, locals=None):
    """Python-3 replacement for the removed ``execfile`` builtin: compile and
    execute *filename* in the given *globals*/*locals* namespaces."""
    with open(filename, "rb") as fp:
        source = fp.read()
    code = compile(source, filename, 'exec')
    exec(code, globals, locals)
RST_TEMPLATE = """
.. currentmodule:: seaborn
.. _{sphinx_tag}:
{docstring}
.. image:: {img_file}
**seaborn components used:** {components}
.. raw:: html
<div class="col-md-9">
.. literalinclude:: {fname}
:lines: {end_line}-
.. raw:: html
</div>
"""
INDEX_TEMPLATE = """
.. raw:: html
<style type="text/css">
.figure {{
position: relative;
float: left;
margin: 10px;
width: 180px;
height: 200px;
}}
.figure img {{
position: absolute;
display: inline;
left: 0;
width: 170px;
height: 170px;
opacity:1.0;
filter:alpha(opacity=100); /* For IE8 and earlier */
}}
.figure:hover img {{
-webkit-filter: blur(3px);
-moz-filter: blur(3px);
-o-filter: blur(3px);
-ms-filter: blur(3px);
filter: blur(3px);
opacity:1.0;
filter:alpha(opacity=100); /* For IE8 and earlier */
}}
.figure span {{
position: absolute;
display: inline;
left: 0;
width: 170px;
height: 170px;
background: #000;
color: #fff;
visibility: hidden;
opacity: 0;
z-index: 100;
}}
.figure p {{
position: absolute;
top: 45%;
width: 170px;
font-size: 110%;
}}
.figure:hover span {{
visibility: visible;
opacity: .4;
}}
.caption {{
position: absolute;
width: 180px;
top: 170px;
text-align: center !important;
}}
</style>
.. _{sphinx_tag}:
Example gallery
===============
{toctree}
{contents}
.. raw:: html
<div style="clear: both"></div>
"""
def create_thumbnail(infile, thumbfile,
                     width=275, height=275,
                     cx=0.5, cy=0.5, border=4):
    """Crop a ``width`` x ``height`` thumbnail out of *infile*, centered at the
    fractional image position (cx, cy), and save it to *thumbfile*.

    :param infile: path of the source image to crop
    :param thumbfile: path the thumbnail PNG is written to
    :param width: thumbnail width, in pixels
    :param height: thumbnail height, in pixels
    :param cx: horizontal crop center as a fraction of image width (0-1)
    :param cy: vertical crop center as a fraction of image height (0-1)
    :param border: thickness in pixels of the black frame drawn on the crop
    :return: the matplotlib figure used to render the thumbnail
    """
    im = matplotlib.image.imread(infile)
    rows, cols = im.shape[:2]
    # Clamp the crop origin at 0: a center too close to the image edge would
    # otherwise yield a negative slice start, which numpy interprets as
    # "count from the end" and silently crops the wrong region.
    x0 = max(0, int(cx * cols - .5 * width))
    y0 = max(0, int(cy * rows - .5 * height))
    xslice = slice(x0, x0 + width)
    yslice = slice(y0, y0 + height)
    thumb = im[yslice, xslice]
    # Draw a black frame around the crop (RGB channels only, alpha untouched).
    thumb[:border, :, :3] = thumb[-border:, :, :3] = 0
    thumb[:, :border, :3] = thumb[:, -border:, :3] = 0

    dpi = 100
    fig = plt.figure(figsize=(width / dpi, height / dpi), dpi=dpi)

    ax = fig.add_axes([0, 0, 1, 1], aspect='auto',
                      frameon=False, xticks=[], yticks=[])
    if all(thumb.shape):
        ax.imshow(thumb, aspect='auto', resample=True,
                  interpolation='bilinear')
    else:
        # An empty crop (zero-sized dimension) produces a blank thumbnail.
        warnings.warn(
            f"Bad thumbnail crop. {thumbfile} will be empty."
        )
    fig.savefig(thumbfile, dpi=dpi)
    return fig
def indent(s, N=4):
    """Return *s* with every line after the first prefixed by N spaces."""
    separator = '\n' + ' ' * N
    return separator.join(s.split('\n'))
class ExampleGenerator:
    """Tools for generating an example page from a file.

    Wraps a single example script: extracts its module docstring, executes it
    to produce a figure (only when the output RST is missing or stale), and
    exposes the derived names/paths/snippets the gallery builder needs.
    """
    def __init__(self, filename, target_dir):
        self.filename = filename
        self.target_dir = target_dir
        self.thumbloc = .5, .5  # default thumbnail crop center; may be overridden by a `_thumb:` docstring line
        self.extract_docstring()
        with open(filename) as fid:
            self.filetext = fid.read()
        outfilename = op.join(target_dir, self.rstfilename)
        # Only actually run it if the output RST file doesn't
        # exist or it was modified less recently than the example
        file_mtime = op.getmtime(filename)
        if not op.exists(outfilename) or op.getmtime(outfilename) < file_mtime:
            self.exec_file()
        else:
            print(f"skipping {self.filename}")
    @property
    def dirname(self):
        """Directory containing the example script."""
        return op.split(self.filename)[0]
    @property
    def fname(self):
        """Basename of the example script (with extension)."""
        return op.split(self.filename)[1]
    @property
    def modulename(self):
        """Example name: basename without extension."""
        return op.splitext(self.fname)[0]
    @property
    def pyfilename(self):
        """Name of the copied example script."""
        return self.modulename + '.py'
    @property
    def rstfilename(self):
        """Name of the generated RST page."""
        return self.modulename + ".rst"
    @property
    def htmlfilename(self):
        """Name of the built HTML page."""
        return self.modulename + '.html'
    @property
    def pngfilename(self):
        """Relative path of the rendered figure image."""
        pngfile = self.modulename + '.png'
        return "_images/" + pngfile
    @property
    def thumbfilename(self):
        """Name of the thumbnail image."""
        pngfile = self.modulename + '_thumb.png'
        return pngfile
    @property
    def sphinxtag(self):
        """Sphinx cross-reference label for the page."""
        return self.modulename
    @property
    def pagetitle(self):
        """First line of the example's docstring, used as the page title."""
        return self.docstring.strip().split('\n')[0].strip()
    @property
    def plotfunc(self):
        """Name of the main seaborn entry point used by the example
        (first `sns.*plot`, `sns.*map`, or `sns.*Grid` call found)."""
        match = re.search(r"sns\.(.+plot)\(", self.filetext)
        if match:
            return match.group(1)
        match = re.search(r"sns\.(.+map)\(", self.filetext)
        if match:
            return match.group(1)
        match = re.search(r"sns\.(.+Grid)\(", self.filetext)
        if match:
            return match.group(1)
        return ""
    @property
    def components(self):
        """Cross-reference roles for every `sns.<name>(` call in the example;
        capitalized names are assumed to be classes, the rest functions."""
        objects = re.findall(r"sns\.(\w+)\(", self.filetext)
        refs = []
        for obj in objects:
            if obj[0].isupper():
                refs.append(f":class:`{obj}`")
            else:
                refs.append(f":func:`{obj}`")
        return ", ".join(refs)
    def extract_docstring(self):
        """Extract the module-level docstring of the example script.

        Sets ``self.docstring`` (with any `_thumb:` directive line removed),
        ``self.short_desc`` (first paragraph), ``self.end_line`` (first line
        after the docstring, for the literalinclude), and ``self.thumbloc``
        when a `_thumb: <x>, <y>` line is present in the docstring.
        """
        lines = open(self.filename).readlines()
        start_row = 0
        if lines[0].startswith('#!'):
            lines.pop(0)
            start_row = 1
        docstring = ''
        first_par = ''
        line_iter = lines.__iter__()
        tokens = tokenize.generate_tokens(lambda: next(line_iter))
        # Scan tokens until the first significant one; if it is a string,
        # it is the module docstring.
        for tok_type, tok_content, _, (erow, _), _ in tokens:
            tok_type = token.tok_name[tok_type]
            if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):
                continue
            elif tok_type == 'STRING':
                docstring = eval(tok_content)
                # If the docstring is formatted with several paragraphs,
                # extract the first one:
                paragraphs = '\n'.join(line.rstrip()
                                       for line in docstring.split('\n')
                                       ).split('\n\n')
                if len(paragraphs) > 0:
                    first_par = paragraphs[0]
            break
        # Look for an optional thumbnail-crop directive of the form
        # `_thumb: .5, .5` and strip it from the published docstring.
        thumbloc = None
        for i, line in enumerate(docstring.split("\n")):
            m = re.match(r"^_thumb: (\.\d+),\s*(\.\d+)", line)
            if m:
                thumbloc = float(m.group(1)), float(m.group(2))
                break
        if thumbloc is not None:
            self.thumbloc = thumbloc
            docstring = "\n".join([l for l in docstring.split("\n")
                                   if not l.startswith("_thumb")])
        self.docstring = docstring
        self.short_desc = first_par
        self.end_line = erow + 1 + start_row
    def exec_file(self):
        """Execute the example script, save the resulting figure as a PNG,
        and generate its thumbnail via ``create_thumbnail``."""
        print(f"running {self.filename}")
        plt.close('all')
        my_globals = {'pl': plt,
                      'plt': plt}
        execfile(self.filename, my_globals)
        fig = plt.gcf()
        fig.canvas.draw()
        pngfile = op.join(self.target_dir, self.pngfilename)
        thumbfile = op.join("example_thumbs", self.thumbfilename)
        self.html = f"<img src=../{self.pngfilename}>"
        fig.savefig(pngfile, dpi=75, bbox_inches="tight")
        cx, cy = self.thumbloc
        create_thumbnail(pngfile, thumbfile, cx=cx, cy=cy)
    def toctree_entry(self):
        """Return this example's line for the (hidden) index toctree."""
        return f"   ./{op.splitext(self.htmlfilename)[0]}\n\n"
    def contents_entry(self):
        """Return the raw-HTML gallery tile (thumbnail + hover caption)
        for this example on the index page."""
        return (".. raw:: html\n\n"
                "    <div class='figure align-center'>\n"
                "    <a href=./{}>\n"
                "    <img src=../_static/{}>\n"
                "    <span class='figure-label'>\n"
                "    <p>{}</p>\n"
                "    </span>\n"
                "    </a>\n"
                "    </div>\n\n"
                "\n\n"
                "".format(self.htmlfilename,
                          self.thumbfilename,
                          self.plotfunc))
def main(app):
    """Build the example gallery.

    Executes every example script found next to the doc sources, writes one
    RST page plus a thumbnail per example, and generates the gallery index.
    Connected to Sphinx's 'builder-inited' event by :func:`setup`.

    :param app: the Sphinx application object (supplies ``builder.srcdir``)
    """
    static_dir = op.join(app.builder.srcdir, '_static')
    target_dir = op.join(app.builder.srcdir, 'examples')
    image_dir = op.join(app.builder.srcdir, 'examples/_images')
    thumb_dir = op.join(app.builder.srcdir, "example_thumbs")
    source_dir = op.abspath(op.join(app.builder.srcdir, '..', 'examples'))

    # Ensure every output directory exists (idiomatic replacement for the
    # repeated `if not op.exists(...): os.makedirs(...)` checks).
    for directory in (static_dir, target_dir, image_dir, thumb_dir, source_dir):
        os.makedirs(directory, exist_ok=True)

    banner_data = []

    toctree = ("\n\n"
               ".. toctree::\n"
               "   :hidden:\n\n")
    contents = "\n\n"

    # Write individual example files
    for filename in sorted(glob.glob(op.join(source_dir, "*.py"))):

        ex = ExampleGenerator(filename, target_dir)

        banner_data.append({"title": ex.pagetitle,
                            "url": op.join('examples', ex.htmlfilename),
                            "thumb": op.join(ex.thumbfilename)})
        shutil.copyfile(filename, op.join(target_dir, ex.pyfilename))
        output = RST_TEMPLATE.format(sphinx_tag=ex.sphinxtag,
                                     docstring=ex.docstring,
                                     end_line=ex.end_line,
                                     components=ex.components,
                                     fname=ex.pyfilename,
                                     img_file=ex.pngfilename)
        with open(op.join(target_dir, ex.rstfilename), 'w') as f:
            f.write(output)

        toctree += ex.toctree_entry()
        contents += ex.contents_entry()

    # NOTE(review): banner_data is built (and padded below) but not consumed
    # anywhere in this file — presumably used by an external banner/front-page
    # template; confirm before removing.
    if len(banner_data) < 10:
        banner_data = (4 * banner_data)[:10]

    # write index file
    index_file = op.join(target_dir, 'index.rst')
    with open(index_file, 'w') as index:
        index.write(INDEX_TEMPLATE.format(sphinx_tag="example_gallery",
                                          toctree=toctree,
                                          contents=contents))
def setup(app):
    """Sphinx extension entry point: run the gallery build when the
    builder is initialized."""
    app.connect('builder-inited', main)
import os.path as op
import re
import glob
import token
import tokenize
import shutil
import warnings
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
def execfile(filename, globals=None, locals=None):
with open(filename, "rb") as fp:
exec(compile(fp.read(), filename, 'exec'), globals, locals)
RST_TEMPLATE = """
.. currentmodule:: seaborn
.. _{sphinx_tag}:
{docstring}
.. image:: {img_file}
**seaborn components used:** {components}
.. raw:: html
<div class="col-md-9">
.. literalinclude:: {fname}
:lines: {end_line}-
.. raw:: html
</div>
"""
INDEX_TEMPLATE = """
.. raw:: html
<style type="text/css">
.figure {{
position: relative;
float: left;
margin: 10px;
width: 180px;
height: 200px;
}}
.figure img {{
position: absolute;
display: inline;
left: 0;
width: 170px;
height: 170px;
opacity:1.0;
filter:alpha(opacity=100); /* For IE8 and earlier */
}}
.figure:hover img {{
-webkit-filter: blur(3px);
-moz-filter: blur(3px);
-o-filter: blur(3px);
-ms-filter: blur(3px);
filter: blur(3px);
opacity:1.0;
filter:alpha(opacity=100); /* For IE8 and earlier */
}}
.figure span {{
position: absolute;
display: inline;
left: 0;
width: 170px;
height: 170px;
background: #000;
color: #fff;
visibility: hidden;
opacity: 0;
z-index: 100;
}}
.figure p {{
position: absolute;
top: 45%;
width: 170px;
font-size: 110%;
}}
.figure:hover span {{
visibility: visible;
opacity: .4;
}}
.caption {{
position: absolute;
width: 180px;
top: 170px;
text-align: center !important;
}}
</style>
.. _{sphinx_tag}:
Example gallery
===============
{toctree}
{contents}
.. raw:: html
<div style="clear: both"></div>
"""
def create_thumbnail(infile, thumbfile,
width=275, height=275,
cx=0.5, cy=0.5, border=4):
baseout, extout = op.splitext(thumbfile)
im = matplotlib.image.imread(infile)
rows, cols = im.shape[:2]
x0 = int(cx * cols - .5 * width)
y0 = int(cy * rows - .5 * height)
xslice = slice(x0, x0 + width)
yslice = slice(y0, y0 + height)
thumb = im[yslice, xslice]
thumb[:border, :, :3] = thumb[-border:, :, :3] = 0
thumb[:, :border, :3] = thumb[:, -border:, :3] = 0
dpi = 100
fig = plt.figure(figsize=(width / dpi, height / dpi), dpi=dpi)
ax = fig.add_axes([0, 0, 1, 1], aspect='auto',
frameon=False, xticks=[], yticks=[])
if all(thumb.shape):
ax.imshow(thumb, aspect='auto', resample=True,
interpolation='bilinear')
else:
warnings.warn(
f"Bad thumbnail crop. {thumbfile} will be empty."
)
fig.savefig(thumbfile, dpi=dpi)
return fig
def indent(s, N=4):
return s.replace('\n', '\n' + N * ' ')
class ExampleGenerator:
    """Build the RST page, image and thumbnail for one example script.

    Instantiating the class parses the example's module docstring and, if
    the generated RST page is missing or stale, executes the script to
    regenerate its figure and thumbnail.
    """
    def __init__(self, filename, target_dir):
        self.filename = filename
        self.target_dir = target_dir
        self.thumbloc = .5, .5  # default thumbnail crop centre (cx, cy)
        self.extract_docstring()
        with open(filename) as fid:
            self.filetext = fid.read()

        outfilename = op.join(target_dir, self.rstfilename)

        # Only re-run the example if the output RST file doesn't
        # exist or it was modified less recently than the example.
        file_mtime = op.getmtime(filename)
        if not op.exists(outfilename) or op.getmtime(outfilename) < file_mtime:
            self.exec_file()
        else:
            print(f"skipping {self.filename}")

    @property
    def dirname(self):
        return op.split(self.filename)[0]

    @property
    def fname(self):
        return op.split(self.filename)[1]

    @property
    def modulename(self):
        return op.splitext(self.fname)[0]

    @property
    def pyfilename(self):
        return self.modulename + '.py'

    @property
    def rstfilename(self):
        return self.modulename + ".rst"

    @property
    def htmlfilename(self):
        return self.modulename + '.html'

    @property
    def pngfilename(self):
        pngfile = self.modulename + '.png'
        return "_images/" + pngfile

    @property
    def thumbfilename(self):
        pngfile = self.modulename + '_thumb.png'
        return pngfile

    @property
    def sphinxtag(self):
        return self.modulename

    @property
    def pagetitle(self):
        # First non-blank line of the docstring is the page title.
        return self.docstring.strip().split('\n')[0].strip()

    @property
    def plotfunc(self):
        # Best-effort guess at the main seaborn entry point used by the
        # example: try *plot functions, then *map functions, then *Grid
        # classes; empty string if nothing matches.
        match = re.search(r"sns\.(.+plot)\(", self.filetext)
        if match:
            return match.group(1)
        match = re.search(r"sns\.(.+map)\(", self.filetext)
        if match:
            return match.group(1)
        match = re.search(r"sns\.(.+Grid)\(", self.filetext)
        if match:
            return match.group(1)
        return ""

    @property
    def components(self):
        # Cross-reference every `sns.<name>(` call: capitalised names are
        # documented as classes, the rest as functions.
        objects = re.findall(r"sns\.(\w+)\(", self.filetext)

        refs = []
        for obj in objects:
            if obj[0].isupper():
                refs.append(f":class:`{obj}`")
            else:
                refs.append(f":func:`{obj}`")
        return ", ".join(refs)

    def extract_docstring(self):
        """Extract the module-level docstring and the `_thumb:` directive.

        Sets `self.docstring`, `self.short_desc`, `self.end_line` and,
        when a `_thumb: .x, .y` line is present, `self.thumbloc`.
        """
        lines = open(self.filename).readlines()
        start_row = 0
        # Skip a shebang line, remembering the offset for end_line.
        # (This condition was truncated to `startswith('` in the file,
        # which is a syntax error; restored to check for '#!'.)
        if lines[0].startswith('#!'):
            lines.pop(0)
            start_row = 1

        docstring = ''
        first_par = ''
        line_iter = lines.__iter__()
        tokens = tokenize.generate_tokens(lambda: next(line_iter))
        for tok_type, tok_content, _, (erow, _), _ in tokens:
            tok_type = token.tok_name[tok_type]
            if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):
                continue
            elif tok_type == 'STRING':
                docstring = eval(tok_content)
                # If the docstring is formatted with several paragraphs,
                # extract the first one:
                paragraphs = '\n'.join(line.rstrip()
                                       for line in docstring.split('\n')
                                       ).split('\n\n')
                if len(paragraphs) > 0:
                    first_par = paragraphs[0]
            break

        # Look for an explicit thumbnail crop location, then strip the
        # directive line from the published docstring.
        thumbloc = None
        for i, line in enumerate(docstring.split("\n")):
            m = re.match(r"^_thumb: (\.\d+),\s*(\.\d+)", line)
            if m:
                thumbloc = float(m.group(1)), float(m.group(2))
                break
        if thumbloc is not None:
            self.thumbloc = thumbloc
            docstring = "\n".join([l for l in docstring.split("\n")
                                   if not l.startswith("_thumb")])

        self.docstring = docstring
        self.short_desc = first_par
        self.end_line = erow + 1 + start_row

    def exec_file(self):
        """Run the example script and save its figure plus thumbnail."""
        print(f"running {self.filename}")

        plt.close('all')
        my_globals = {'pl': plt,
                      'plt': plt}
        execfile(self.filename, my_globals)

        fig = plt.gcf()
        fig.canvas.draw()
        pngfile = op.join(self.target_dir, self.pngfilename)
        thumbfile = op.join("example_thumbs", self.thumbfilename)
        self.html = f"<img src=../{self.pngfilename}>"
        fig.savefig(pngfile, dpi=75, bbox_inches="tight")

        cx, cy = self.thumbloc
        create_thumbnail(pngfile, thumbfile, cx=cx, cy=cy)

    def toctree_entry(self):
        return f"   ./{op.splitext(self.htmlfilename)[0]}\n\n"

    def contents_entry(self):
        # HTML snippet for the gallery page, linking thumbnail to page.
        return (".. raw:: html\n\n"
                "    <div class='figure align-center'>\n"
                "    <a href=./{}>\n"
                "    <img src=../_static/{}>\n"
                "    <span class='figure-label'>\n"
                "    <p>{}</p>\n"
                "    </span>\n"
                "    </a>\n"
                "    </div>\n\n"
                "\n\n"
                "".format(self.htmlfilename,
                          self.thumbfilename,
                          self.plotfunc))
def main(app):
    """Sphinx ``builder-inited`` callback that builds the example gallery.

    Runs every example script found beside the docs source, writes one
    RST page per example plus an ``index.rst`` gallery page, and collects
    banner/thumbnail metadata.
    """
    srcdir = app.builder.srcdir
    static_dir = op.join(srcdir, '_static')
    target_dir = op.join(srcdir, 'examples')
    image_dir = op.join(srcdir, 'examples/_images')
    thumb_dir = op.join(srcdir, "example_thumbs")
    source_dir = op.abspath(op.join(srcdir, '..', 'examples'))

    # Make sure every output directory exists before writing into it.
    for directory in (static_dir, target_dir, image_dir, thumb_dir, source_dir):
        if not op.exists(directory):
            os.makedirs(directory)

    banner_data = []

    toctree = ("\n\n"
               ".. toctree::\n"
               "   :hidden:\n\n")
    contents = "\n\n"

    # Write individual example files
    for filename in sorted(glob.glob(op.join(source_dir, "*.py"))):
        ex = ExampleGenerator(filename, target_dir)

        banner_data.append({"title": ex.pagetitle,
                            "url": op.join('examples', ex.htmlfilename),
                            "thumb": op.join(ex.thumbfilename)})

        # Publish the raw script next to its generated RST page.
        shutil.copyfile(filename, op.join(target_dir, ex.pyfilename))
        output = RST_TEMPLATE.format(sphinx_tag=ex.sphinxtag,
                                     docstring=ex.docstring,
                                     end_line=ex.end_line,
                                     components=ex.components,
                                     fname=ex.pyfilename,
                                     img_file=ex.pngfilename)
        with open(op.join(target_dir, ex.rstfilename), 'w') as f:
            f.write(output)

        toctree += ex.toctree_entry()
        contents += ex.contents_entry()

    # Pad the banner rotation when there are fewer than ten examples.
    if len(banner_data) < 10:
        banner_data = (4 * banner_data)[:10]

    # write index file
    index_file = op.join(target_dir, 'index.rst')
    with open(index_file, 'w') as index:
        index.write(INDEX_TEMPLATE.format(sphinx_tag="example_gallery",
                                          toctree=toctree,
                                          contents=contents))
def setup(app):
    # Sphinx extension entry point: regenerate the gallery every time a
    # builder is initialised.
    app.connect('builder-inited', main)
| true | true |
f7f574ba66c71f974f9f9f8760bb96a83b66813b | 9,175 | py | Python | train_loc.py | VITA-Group/VGAI | c36746383da42b011dddf182303f8c2618d5ca42 | [
"MIT"
] | 3 | 2021-07-11T16:15:17.000Z | 2021-08-24T07:50:10.000Z | train_loc.py | VITA-Group/VGAI | c36746383da42b011dddf182303f8c2618d5ca42 | [
"MIT"
] | null | null | null | train_loc.py | VITA-Group/VGAI | c36746383da42b011dddf182303f8c2618d5ca42 | [
"MIT"
] | 1 | 2022-01-07T14:37:21.000Z | 2022-01-07T14:37:21.000Z | import os
import sys
import time
import glob
import numpy as np
import torch
import logging
import argparse
import torch.nn as nn
import torch.utils
import torch.backends.cudnn as cudnn
import joint_network as models
from dataset_loc import OneHopDataset
from torch.utils.data import Dataset, DataLoader
import shutil
model_names = sorted(name for name in models.__dict__
if name.islower() and not name.startswith('__')
and callable(models.__dict__[name]))
parser = argparse.ArgumentParser("training")
parser.add_argument('arch', metavar='ARCH', default='vis_dagnn',
choices=model_names,
help='model architecture: ' +
' | '.join(model_names) +
' (default: vis_dagnn)')
parser.add_argument('--data', type=str, default='../data', help='location of the data corpus')
parser.add_argument('--batch-size', type=int, default=1, help='batch size')
parser.add_argument('--learning_rate', type=float, default=0.0005, help='init learning rate')
parser.add_argument('--momentum', type=float, default=0.9, help='momentum')
parser.add_argument('--weight_decay', type=float, default=0, help='weight decay')
parser.add_argument('--report_freq', type=float, default=50, help='report frequency')
parser.add_argument('--gpu', type=int, default=0, help='gpu device id')
parser.add_argument('--epochs', type=int, default=5, help='num of training epochs')
parser.add_argument('--model_path', type=str, default='saved_models', help='path to save the model')
parser.add_argument('--drop_path_prob', type=float, default=0.2, help='drop path probability')
parser.add_argument('--seed', type=int, default=0, help='random seed')
parser.add_argument('--decreasing_lr', default='5,10,15', help='decreasing strategy')
parser.add_argument('--K', type=int, default=3, help='filter length')
parser.add_argument('--vinit', type=float, default=3.0, help='maximum intial velocity')
parser.add_argument('--radius', type=float, default=1.5, help='communication radius')
parser.add_argument('--F', type=int, default=24, help='number of feature dimension')
parser.add_argument('--comm-model', default='disk', choices=['disk', 'knn'], help='communication model')
parser.add_argument('--K-neighbor', type=int, default=10, help='number of KNN neighbors')
parser.add_argument('--mode', type=str, default='optimal', choices=['optimal', 'local', 'loc_dagnn', 'vis_dagnn', 'vis_grnn', 'loc_grnn'])
args = parser.parse_args()
def main():
    """Train the joint network on one-hop data, checkpointing each epoch."""
    if not torch.cuda.is_available():
        logging.info('no gpu device available')
        sys.exit(1)

    # Seed all RNGs and pin the GPU for reproducible runs.
    np.random.seed(args.seed)
    torch.cuda.set_device(args.gpu)
    cudnn.benchmark = True
    torch.manual_seed(args.seed)
    cudnn.enabled = True
    torch.cuda.manual_seed(args.seed)
    logging.info('gpu device = %d' % args.gpu)
    logging.info("args = %s", args)

    model = models.__dict__[args.arch](n_vis_out=args.F, K=args.K)
    model = model.cuda()

    # Smooth L1 (Huber) loss for both training and validation.
    # (The original constructed and moved these to the GPU twice;
    # the duplicate assignments have been removed.)
    train_criterion = torch.nn.SmoothL1Loss().cuda()
    criterion = torch.nn.SmoothL1Loss().cuda()

    # Parsed for the LR schedule; the MultiStepLR scheduler itself is
    # currently disabled (see the commented-out lines below).
    decreasing_lr = list(map(int, args.decreasing_lr.split(',')))

    optimizer = torch.optim.Adam(
        model.parameters(),
        lr=args.learning_rate,
        weight_decay=args.weight_decay
        )

    # The dataset pickle name encodes the sampling configuration.
    if args.comm_model == 'disk':
        f_name = '{}_K_{}_n_vis_{}_R_{}_vinit_{}_comm_model_{}.pkl'.format(args.mode, args.K, args.F, args.radius, args.vinit, args.comm_model)
    else:
        f_name = '{}_K_{}_n_vis_{}_vinit_{}_comm_model_{}_K_neighbor_{}.pkl'.format(args.mode, args.K, args.F, args.vinit, args.comm_model, args.K_neighbor)

    drone_dataset = OneHopDataset(f_name=f_name, K=args.K, R=args.radius)

    # 90/10 train/validation split over the (unshuffled) index range.
    num_train = len(drone_dataset)
    indices = list(range(num_train))
    split = int(np.floor(0.9 * num_train))
    train_queue = torch.utils.data.DataLoader(drone_dataset, batch_size=args.batch_size, num_workers=1, sampler=torch.utils.data.sampler.SubsetRandomSampler(indices[:split]), pin_memory=True)
    valid_queue = torch.utils.data.DataLoader(drone_dataset, batch_size=1, num_workers=2, sampler=torch.utils.data.sampler.SubsetRandomSampler(indices[split:num_train]), pin_memory=True)

    print('Training Joint Network')
    #scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=decreasing_lr, gamma=0.1)
    for epoch in range(args.epochs):
        #scheduler.step()
        train_loss = train_joint(train_queue, model, train_criterion, optimizer)
        valid_loss = infer_joint(valid_queue, model, criterion)

        if args.comm_model == 'disk':
            checkpoint_path = 'checkpoint_all_{}_{}_K_{}_n_vis_{}_R_{}_vinit_{}_comm_model_{}.tar'.format(args.mode, args.arch, args.K, args.F, args.radius, args.vinit, args.comm_model)
        else:
            checkpoint_path = 'checkpoint_all_{}_{}_K_{}_n_vis_{}_vinit_{}_comm_model_{}_K_neighbor_{}.tar'.format(args.mode, args.arch, args.K, args.F, args.vinit, args.comm_model, args.K_neighbor)

        # Checkpoint unconditionally after every epoch; the original
        # guarded this with `if True:` and kept an unused
        # `best_valid_loss` assignment, both removed.
        save_checkpoint({
            'epoch': epoch + 1,
            'state_dict': model.state_dict(),
            'loss': valid_loss,
            }, filename=checkpoint_path)

        print('epoch ' + str(epoch) + ' train loss ' + str(train_loss) + ' valid loss ' + str(valid_loss))
def train_joint(train_queue, model, criterion, optimizer):
    """Run one training epoch; returns the sample-weighted mean loss.

    NOTE(review): the 'vis_grnn' and fall-through branches below reference
    `x_img`, whose loading line is commented out -- taking those branches
    would raise NameError. Only the 'loc_*' architectures are usable as
    written; confirm before re-enabling the vis paths.
    """
    objs = AvgrageMeter()
    model.train()
    print('len of train queue = {}'.format(len(train_queue)))
    total_loss = 0
    for step, sample_batched in enumerate(train_queue, 0):
        #x_img = sample_batched['x_img'].float().cuda().squeeze(0)
        x_agg = sample_batched['x_agg'].float().cuda().squeeze(0)
        a_nets = sample_batched['anets'].float().cuda().squeeze(0)
        actions = sample_batched['actions'].float().cuda().squeeze(0)
        # Dispatch on architecture: recurrent models need an explicit
        # initial hidden state; 'loc_dagnn' returns only the prediction.
        if args.arch == 'vis_grnn':
            input_state = torch.from_numpy(np.zeros((x_img.shape[0], args.F))).float().cuda()
            pred_agg, pred = model(x_img, a_nets, input_state)
        elif args.arch == 'loc_dagnn':
            pred = model(x_agg, a_nets)
        elif args.arch == 'loc_grnn':
            input_state = torch.from_numpy(np.zeros((x_agg.shape[0], 6))).float().cuda()
            pred_agg, pred = model(x_agg, a_nets, input_state)
        else:
            pred_agg, pred = model(x_img, a_nets)
        loss = criterion(pred, actions)
        #print('loss = {}'.format(loss))
        total_loss += loss
        # Accumulate then step every iteration (`step % 1` is always 0,
        # so this is effectively per-batch optimisation).
        if step % 1 == 0:
            optimizer.zero_grad()
            total_loss.backward()
            optimizer.step()
            total_loss = 0
        n = pred.size(0)
        objs.update(loss.item(), n)
        if step % args.report_freq == 0:
            print('-----')
            print('train step ' + str(step) + ' loss ' + str(objs.avg))
            #print('pred = {}'.format(pred))
            #print('actions = {}'.format(actions))
            print('-----')
    return objs.avg
def infer_joint(valid_queue, model, criterion):
    """Evaluate `model` on the validation queue.

    Returns the sample-weighted mean loss over the queue.
    """
    objs = AvgrageMeter()
    model.eval()

    # Validation needs no autograd graph; running under no_grad avoids
    # accumulating graph state and reduces memory use.
    with torch.no_grad():
        for step, sample_batched in enumerate(valid_queue, 0):
            # NOTE(review): the 'vis_*' branches below reference `x_img`,
            # whose loading line is commented out -- they would raise
            # NameError if taken. Only the 'loc_*' archs are usable here.
            #x_img = sample_batched['x_img'].float().cuda().squeeze(0)
            x_agg = sample_batched['x_agg'].float().cuda().squeeze(0)
            a_nets = sample_batched['anets'].float().cuda().squeeze(0)
            actions = sample_batched['actions'].float().cuda().squeeze(0)
            if args.arch == 'vis_grnn':
                input_state = torch.from_numpy(np.zeros((x_img.shape[0], args.F))).float().cuda()
                _, pred = model(x_img, a_nets, input_state)
            elif args.arch == 'loc_dagnn':
                pred = model(x_agg, a_nets)
            elif args.arch == 'loc_grnn':
                input_state = torch.from_numpy(np.zeros((x_agg.shape[0], 6))).float().cuda()
                pred_agg, pred = model(x_agg, a_nets, input_state)
            else:
                _, pred = model(x_img, a_nets)
            loss = criterion(pred, actions)

            n = pred.size(0)
            objs.update(loss.item(), n)

            if step % args.report_freq == 0:
                print('-----')
                print('valid step ' + str(step) + ' loss ' + str(objs.avg))
                print('-----')
    return objs.avg
class AvgrageMeter(object):
    """Tracks a running, sample-weighted average of a scalar metric."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear all accumulated statistics."""
        self.avg = 0
        self.sum = 0
        self.cnt = 0

    def update(self, val, n=1):
        """Fold in the value ``val`` observed ``n`` times."""
        self.sum = self.sum + val * n
        self.cnt = self.cnt + n
        self.avg = self.sum / self.cnt
def accuracy(output, target, topk=(1,)):
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0)
res.append(correct_k.mul_(100.0/batch_size))
return res
def save_checkpoint(state, filename='checkpoint.pth.tar'):
    # Serialise the checkpoint dict (epoch / state_dict / loss) to disk.
    torch.save(state, filename)
if __name__ == '__main__':
main()
| 37.757202 | 196 | 0.669537 | import os
import sys
import time
import glob
import numpy as np
import torch
import logging
import argparse
import torch.nn as nn
import torch.utils
import torch.backends.cudnn as cudnn
import joint_network as models
from dataset_loc import OneHopDataset
from torch.utils.data import Dataset, DataLoader
import shutil
model_names = sorted(name for name in models.__dict__
if name.islower() and not name.startswith('__')
and callable(models.__dict__[name]))
parser = argparse.ArgumentParser("training")
parser.add_argument('arch', metavar='ARCH', default='vis_dagnn',
choices=model_names,
help='model architecture: ' +
' | '.join(model_names) +
' (default: vis_dagnn)')
parser.add_argument('--data', type=str, default='../data', help='location of the data corpus')
parser.add_argument('--batch-size', type=int, default=1, help='batch size')
parser.add_argument('--learning_rate', type=float, default=0.0005, help='init learning rate')
parser.add_argument('--momentum', type=float, default=0.9, help='momentum')
parser.add_argument('--weight_decay', type=float, default=0, help='weight decay')
parser.add_argument('--report_freq', type=float, default=50, help='report frequency')
parser.add_argument('--gpu', type=int, default=0, help='gpu device id')
parser.add_argument('--epochs', type=int, default=5, help='num of training epochs')
parser.add_argument('--model_path', type=str, default='saved_models', help='path to save the model')
parser.add_argument('--drop_path_prob', type=float, default=0.2, help='drop path probability')
parser.add_argument('--seed', type=int, default=0, help='random seed')
parser.add_argument('--decreasing_lr', default='5,10,15', help='decreasing strategy')
parser.add_argument('--K', type=int, default=3, help='filter length')
parser.add_argument('--vinit', type=float, default=3.0, help='maximum intial velocity')
parser.add_argument('--radius', type=float, default=1.5, help='communication radius')
parser.add_argument('--F', type=int, default=24, help='number of feature dimension')
parser.add_argument('--comm-model', default='disk', choices=['disk', 'knn'], help='communication model')
parser.add_argument('--K-neighbor', type=int, default=10, help='number of KNN neighbors')
parser.add_argument('--mode', type=str, default='optimal', choices=['optimal', 'local', 'loc_dagnn', 'vis_dagnn', 'vis_grnn', 'loc_grnn'])
args = parser.parse_args()
def main():
if not torch.cuda.is_available():
logging.info('no gpu device available')
sys.exit(1)
np.random.seed(args.seed)
torch.cuda.set_device(args.gpu)
cudnn.benchmark = True
torch.manual_seed(args.seed)
cudnn.enabled=True
torch.cuda.manual_seed(args.seed)
logging.info('gpu device = %d' % args.gpu)
logging.info("args = %s", args)
model = models.__dict__[args.arch](n_vis_out=args.F, K=args.K)
model = model.cuda()
train_criterion = torch.nn.SmoothL1Loss()
criterion = torch.nn.SmoothL1Loss()
train_criterion = train_criterion.cuda()
criterion = criterion.cuda()
train_criterion = train_criterion.cuda()
criterion = criterion.cuda()
decreasing_lr = list(map(int, args.decreasing_lr.split(',')))
optimizer = torch.optim.Adam(
model.parameters(),
lr=args.learning_rate,
weight_decay=args.weight_decay
)
if args.comm_model == 'disk':
f_name = '{}_K_{}_n_vis_{}_R_{}_vinit_{}_comm_model_{}.pkl'.format(args.mode, args.K, args.F, args.radius, args.vinit, args.comm_model)
else:
f_name = '{}_K_{}_n_vis_{}_vinit_{}_comm_model_{}_K_neighbor_{}.pkl'.format(args.mode, args.K, args.F, args.vinit, args.comm_model, args.K_neighbor)
drone_dataset = OneHopDataset(f_name=f_name, K=args.K, R=args.radius)
num_train = len(drone_dataset)
indices = list(range(num_train))
split = int(np.floor(0.9 * num_train))
train_queue = torch.utils.data.DataLoader(drone_dataset,batch_size=args.batch_size, num_workers=1, sampler=torch.utils.data.sampler.SubsetRandomSampler(indices[:split]), pin_memory=True)
valid_queue = torch.utils.data.DataLoader(drone_dataset,batch_size=1, num_workers=2, sampler=torch.utils.data.sampler.SubsetRandomSampler(indices[split:num_train]), pin_memory=True)
print('Training Joint Network')
for epoch in range(args.epochs):
train_loss = train_joint(train_queue, model, train_criterion, optimizer)
valid_loss = infer_joint(valid_queue, model, criterion)
if args.comm_model == 'disk':
checkpoint_path = 'checkpoint_all_{}_{}_K_{}_n_vis_{}_R_{}_vinit_{}_comm_model_{}.tar'.format(args.mode, args.arch, args.K, args.F, args.radius, args.vinit, args.comm_model)
else:
checkpoint_path = 'checkpoint_all_{}_{}_K_{}_n_vis_{}_vinit_{}_comm_model_{}_K_neighbor_{}.tar'.format(args.mode, args.arch, args.K, args.F, args.vinit, args.comm_model, args.K_neighbor)
if True:
save_checkpoint({
'epoch': epoch + 1,
'state_dict': model.state_dict(),
'loss': valid_loss,
}, filename=checkpoint_path)
best_valid_loss = valid_loss
print('epoch ' + str(epoch) + ' train loss ' + str(train_loss) + ' valid loss ' + str(valid_loss))
def train_joint(train_queue, model, criterion, optimizer):
objs = AvgrageMeter()
model.train()
print('len of train queue = {}'.format(len(train_queue)))
total_loss = 0
for step, sample_batched in enumerate(train_queue, 0):
x_agg = sample_batched['x_agg'].float().cuda().squeeze(0)
a_nets = sample_batched['anets'].float().cuda().squeeze(0)
actions = sample_batched['actions'].float().cuda().squeeze(0)
if args.arch == 'vis_grnn':
input_state = torch.from_numpy(np.zeros((x_img.shape[0], args.F))).float().cuda()
pred_agg, pred = model(x_img, a_nets, input_state)
elif args.arch == 'loc_dagnn':
pred = model(x_agg, a_nets)
elif args.arch == 'loc_grnn':
input_state = torch.from_numpy(np.zeros((x_agg.shape[0], 6))).float().cuda()
pred_agg, pred = model(x_agg, a_nets, input_state)
else:
pred_agg, pred = model(x_img, a_nets)
loss = criterion(pred, actions)
total_loss += loss
if step % 1 == 0:
optimizer.zero_grad()
total_loss.backward()
optimizer.step()
total_loss = 0
n = pred.size(0)
objs.update(loss.item(), n)
if step % args.report_freq == 0:
print('-----')
print('train step ' + str(step) + ' loss ' + str(objs.avg))
print('-----')
return objs.avg
def infer_joint(valid_queue, model, criterion):
objs = AvgrageMeter()
model.eval()
for step, sample_batched in enumerate(valid_queue, 0):
x_agg = sample_batched['x_agg'].float().cuda().squeeze(0)
a_nets = sample_batched['anets'].float().cuda().squeeze(0)
actions = sample_batched['actions'].float().cuda().squeeze(0)
if args.arch == 'vis_grnn':
input_state = torch.from_numpy(np.zeros((x_img.shape[0], args.F))).float().cuda()
_, pred = model(x_img, a_nets, input_state)
elif args.arch == 'loc_dagnn':
pred = model(x_agg, a_nets)
elif args.arch == 'loc_grnn':
input_state = torch.from_numpy(np.zeros((x_agg.shape[0], 6))).float().cuda()
pred_agg, pred = model(x_agg, a_nets, input_state)
else:
_, pred = model(x_img, a_nets)
loss = criterion(pred, actions)
n = pred.size(0)
objs.update(loss.item(), n)
if step % args.report_freq == 0:
print('-----')
print('valid step ' + str(step) + ' loss ' + str(objs.avg))
print('-----')
return objs.avg
class AvgrageMeter(object):
def __init__(self):
self.reset()
def reset(self):
self.avg = 0
self.sum = 0
self.cnt = 0
def update(self, val, n=1):
self.sum += val * n
self.cnt += n
self.avg = self.sum / self.cnt
def accuracy(output, target, topk=(1,)):
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0)
res.append(correct_k.mul_(100.0/batch_size))
return res
def save_checkpoint(state, filename='checkpoint.pth.tar'):
torch.save(state, filename)
if __name__ == '__main__':
main()
| true | true |
f7f5754fc2fafcdc2dbb7f43617a7ad530693822 | 1,929 | py | Python | data-mining/model2http-api.py | RoderickLi/python-snippet | 7cc672c455a768864cf38d6bbebdf8337b9c510c | [
"MIT"
] | 6 | 2019-11-06T02:11:43.000Z | 2021-03-21T02:48:10.000Z | data-mining/model2http-api.py | RoderickLi/python-snippet | 7cc672c455a768864cf38d6bbebdf8337b9c510c | [
"MIT"
] | null | null | null | data-mining/model2http-api.py | RoderickLi/python-snippet | 7cc672c455a768864cf38d6bbebdf8337b9c510c | [
"MIT"
] | 2 | 2019-11-06T02:11:44.000Z | 2019-11-06T02:46:00.000Z | import tensorflow as tf
def save_model_to_serving(model, export_version, export_path='model/'):
print(model.input, model.output)
signature = tf.saved_model.signature_def_utils.predict_signature_def(
inputs={'img_input': model.input[0], # mutil input support
'extra_input': model.input[1],
},
outputs={'outputs': model.output}
)
export_path = os.path.join(
tf.compat.as_bytes(export_path),
tf.compat.as_bytes(str(export_version)))
builder = tf.saved_model.builder.SavedModelBuilder(export_path)
legacy_init_op = tf.group(tf.tables_initializer(), name='legacy_init_op')
builder.add_meta_graph_and_variables(
sess=K.get_session(),
tags=[tf.saved_model.tag_constants.SERVING],
signature_def_map={
'serving_default': signature,
},
legacy_init_op=legacy_init_op)
builder.save()
# NOTE(review): `good_naive`, `img_input_1` and `extra_input_1` are not
# defined in this file -- presumably they come from the surrounding
# notebook session; confirm before running this as a standalone script.
save_model_to_serving(good_naive, "1", './save_to_serving') # save to different directory according to the second params indicating version
import requests
import json

# Build the TF-Serving REST predict payload: one instance with both
# named inputs, converted to plain lists for JSON serialisation.
payload = {
  "instances": [
     {
        "img_input": img_input_1.tolist(), #x_val[0][0].tolist(),
        "extra_input": extra_input_1.tolist(), # x_val[1][0].tolist(),
     }
  ]
}

# POST to the serving endpoint and decode the JSON prediction.
url = 'http://example.com/predict'
r = requests.post(url, json=payload)
pred = json.loads(r.content.decode('utf-8'))
pred
| 48.225 | 158 | 0.488336 | import tensorflow as tf
def save_model_to_serving(model, export_version, export_path='model/'):
print(model.input, model.output)
signature = tf.saved_model.signature_def_utils.predict_signature_def(
inputs={'img_input': model.input[0],
'extra_input': model.input[1],
},
outputs={'outputs': model.output}
)
export_path = os.path.join(
tf.compat.as_bytes(export_path),
tf.compat.as_bytes(str(export_version)))
builder = tf.saved_model.builder.SavedModelBuilder(export_path)
legacy_init_op = tf.group(tf.tables_initializer(), name='legacy_init_op')
builder.add_meta_graph_and_variables(
sess=K.get_session(),
tags=[tf.saved_model.tag_constants.SERVING],
signature_def_map={
'serving_default': signature,
},
legacy_init_op=legacy_init_op)
builder.save()
save_model_to_serving(good_naive, "1", './save_to_serving')
import requests
import json
payload = {
"instances": [
{
"img_input": img_input_1.tolist(),
"extra_input": extra_input_1.tolist(),
}
]
}
url = 'http://example.com/predict'
r = requests.post(url, json=payload)
pred = json.loads(r.content.decode('utf-8'))
pred
| true | true |
f7f575a9dc57ea85a1fa0b1f4bc84d180b7b541d | 2,080 | py | Python | addons/mod_watch.py | xGhostBoyx/Kurisu | 4ce89e0914fdde113f5b5c749644af1e59c52193 | [
"Apache-2.0"
] | 3 | 2019-05-24T09:31:04.000Z | 2021-09-07T18:56:29.000Z | addons/mod_watch.py | xGhostBoyx/Kurisu | 4ce89e0914fdde113f5b5c749644af1e59c52193 | [
"Apache-2.0"
] | 1 | 2018-05-31T01:13:49.000Z | 2018-05-31T01:13:49.000Z | addons/mod_watch.py | T3CHNOLOG1C/Zoidbot | 0100816670ad988e51ffbce1d3c2725f8ed69439 | [
"Apache-2.0"
] | 1 | 2019-05-24T07:01:37.000Z | 2019-05-24T07:01:37.000Z | import discord
import json
from discord.ext import commands
from sys import argv
class Modwatch:
"""
User watch management commands.
"""
def __init__(self, bot):
self.bot = bot
print('Addon "{}" loaded'.format(self.__class__.__name__))
@commands.has_permissions(manage_nicknames=True)
@commands.command(pass_context=True)
async def watch(self, ctx, user):
try:
member = ctx.message.mentions[0]
except IndexError:
await self.bot.say("Please mention a user.")
return
self.bot.watching[member.id] = "{}#{}".format(member.name, member.discriminator)
with open("data/watch.json", "w") as f:
json.dump(self.bot.watching, f)
await self.bot.say("{} is being watched.".format(member.mention))
msg = "👀 **Watch**: {} put {} on watch | {}#{}".format(ctx.message.author.mention, member.mention, member.name, member.discriminator)
await self.bot.send_message(self.bot.modlogs_channel, msg)
await self.bot.send_message(self.bot.messagelogs_channel, msg)
@commands.has_permissions(manage_nicknames=True)
@commands.command(pass_context=True)
async def unwatch(self, ctx, user):
try:
member = ctx.message.mentions[0]
except IndexError:
await self.bot.say("Please mention a user.")
return
if member.id not in self.bot.watching:
await self.bot.say("This user was not being watched.")
return
self.bot.watching.pop(member.id)
with open("data/watch.json", "w") as f:
json.dump(self.bot.watching, f)
await self.bot.say("{} is no longer being watched.".format(member.mention))
msg = "❌ **Unwatch**: {} removed {} from watch | {}#{}".format(ctx.message.author.mention, member.mention, member.name, member.discriminator)
await self.bot.send_message(self.bot.modlogs_channel, msg)
await self.bot.send_message(self.bot.messagelogs_channel, msg)
def setup(bot):
    # discord.py extension entry point: register the cog with the bot.
    bot.add_cog(Modwatch(bot))
| 40.784314 | 149 | 0.638462 | import discord
import json
from discord.ext import commands
from sys import argv
class Modwatch:
def __init__(self, bot):
self.bot = bot
print('Addon "{}" loaded'.format(self.__class__.__name__))
@commands.has_permissions(manage_nicknames=True)
@commands.command(pass_context=True)
async def watch(self, ctx, user):
try:
member = ctx.message.mentions[0]
except IndexError:
await self.bot.say("Please mention a user.")
return
self.bot.watching[member.id] = "{}#{}".format(member.name, member.discriminator)
with open("data/watch.json", "w") as f:
json.dump(self.bot.watching, f)
await self.bot.say("{} is being watched.".format(member.mention))
msg = "👀 **Watch**: {} put {} on watch | {}#{}".format(ctx.message.author.mention, member.mention, member.name, member.discriminator)
await self.bot.send_message(self.bot.modlogs_channel, msg)
await self.bot.send_message(self.bot.messagelogs_channel, msg)
@commands.has_permissions(manage_nicknames=True)
@commands.command(pass_context=True)
async def unwatch(self, ctx, user):
try:
member = ctx.message.mentions[0]
except IndexError:
await self.bot.say("Please mention a user.")
return
if member.id not in self.bot.watching:
await self.bot.say("This user was not being watched.")
return
self.bot.watching.pop(member.id)
with open("data/watch.json", "w") as f:
json.dump(self.bot.watching, f)
await self.bot.say("{} is no longer being watched.".format(member.mention))
msg = "❌ **Unwatch**: {} removed {} from watch | {}#{}".format(ctx.message.author.mention, member.mention, member.name, member.discriminator)
await self.bot.send_message(self.bot.modlogs_channel, msg)
await self.bot.send_message(self.bot.messagelogs_channel, msg)
def setup(bot):
bot.add_cog(Modwatch(bot))
| true | true |
f7f57624632a6f3123b1d590975b1dd756279944 | 321 | py | Python | main.py | patrickstocklin/pychallongesearch | b3d523881a8485ab7af3ce5f0f785bb0de3277f0 | [
"MIT"
] | 3 | 2018-09-19T23:49:27.000Z | 2019-06-07T13:58:45.000Z | main.py | patrickstocklin/pychallongesearch | b3d523881a8485ab7af3ce5f0f785bb0de3277f0 | [
"MIT"
] | 1 | 2021-06-01T22:37:56.000Z | 2021-06-01T22:37:56.000Z | main.py | patrickstocklin/pychallongesearch | b3d523881a8485ab7af3ce5f0f785bb0de3277f0 | [
"MIT"
] | 1 | 2019-04-24T10:38:06.000Z | 2019-04-24T10:38:06.000Z | # -*- coding: utf-8 -*-
from pychallongesearch import pychallongesearch as pcs
'''
CONSTANTS
'''
def main():
    # Python 2 entry point: ingest a Challonge bracket into a local
    # Elasticsearch instance.
    print "Starting Main"
    pcsearch = pcs.PyChallongeSearch("127.0.0.1","9200")  # Elasticsearch host, port
    pcsearch.brackets.ingest_bracket();
    # pcsearch.stats.test()  # analysis step, currently disabled
    print "Done"
if __name__ == '__main__':
main() | 17.833333 | 54 | 0.672897 |
from pychallongesearch import pychallongesearch as pcs
'''
CONSTANTS
'''
def main():
print "Starting Main"
pcsearch = pcs.PyChallongeSearch("127.0.0.1","9200")
pcsearch.brackets.ingest_bracket();
print "Done"
if __name__ == '__main__':
main() | false | true |
f7f576b68002f19e0d3f32fe10ae94dfbdd87326 | 514 | py | Python | examples/00_howdoi/static.py | Kitware/trame | 41c4d62e7a6f5dba41fd9305b314c87fa8ed7b6f | [
"Apache-2.0"
] | 42 | 2021-09-24T22:10:32.000Z | 2022-03-30T19:39:25.000Z | examples/00_howdoi/static.py | Kitware/trame | 41c4d62e7a6f5dba41fd9305b314c87fa8ed7b6f | [
"Apache-2.0"
] | 31 | 2021-10-01T21:19:56.000Z | 2022-03-04T00:14:28.000Z | examples/00_howdoi/static.py | Kitware/trame | 41c4d62e7a6f5dba41fd9305b314c87fa8ed7b6f | [
"Apache-2.0"
] | 7 | 2021-11-17T16:12:06.000Z | 2022-03-26T21:08:40.000Z | r"""
Version for trame 1.x - https://github.com/Kitware/trame/blob/release-v1/examples/howdoi/static.py
Delta v1..v2 - https://github.com/Kitware/trame/commit/25ce0c5b46f1f5ae1f0838fe2a539e0b8b0d7f5e
"""
from trame.app import get_server
from trame.ui.vuetify import VAppLayout
# Obtain the shared trame server instance.
server = get_server()

# Static HTML fragment rendered inside the Vuetify app layout.
html = """
<h3>Welcome to trame...</h3>
<div>
  <i>Hello</i> <b>World</b>
</div>
"""

with VAppLayout(server) as layout:
    # Attach the raw HTML as a child of the layout root element.
    layout.root.add_child(html)

if __name__ == "__main__":
    # Launch the trame web server when run as a script.
    server.start()
| 21.416667 | 104 | 0.700389 |
from trame.app import get_server
from trame.ui.vuetify import VAppLayout
server = get_server()
html = """
<h3>Welcome to trame...</h3>
<div>
<i>Hello</i> <b>World</b>
</div>
"""
with VAppLayout(server) as layout:
layout.root.add_child(html)
if __name__ == "__main__":
server.start()
| true | true |
f7f578479b3ba0e098c67c2d23574c822a4d1645 | 10,328 | py | Python | python/GafferTest/StandardSetTest.py | pier-robot/gaffer | 9267f2ba3822b14430d8a283c745261110b0f570 | [
"BSD-3-Clause"
] | null | null | null | python/GafferTest/StandardSetTest.py | pier-robot/gaffer | 9267f2ba3822b14430d8a283c745261110b0f570 | [
"BSD-3-Clause"
] | null | null | null | python/GafferTest/StandardSetTest.py | pier-robot/gaffer | 9267f2ba3822b14430d8a283c745261110b0f570 | [
"BSD-3-Clause"
] | null | null | null | ##########################################################################
#
# Copyright (c) 2011, John Haddon. All rights reserved.
# Copyright (c) 2011-2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import weakref
import gc
import IECore
import Gaffer
import GafferTest
class StandardSetTest( GafferTest.TestCase ) :
def testRunTimeTyped( self ) :
s = Gaffer.StandardSet()
self.assertEqual( s.typeName(), "Gaffer::StandardSet" )
self.assertEqual( s.staticTypeName(), "Gaffer::StandardSet" )
self.assertTrue( s.isInstanceOf( IECore.TypeId.RunTimeTyped ) )
def testBasicMethods( self ) :
s = Gaffer.StandardSet()
self.assertEqual( len( s ), 0 )
self.assertEqual( s.size(), 0 )
n1 = Gaffer.Node()
n2 = Gaffer.Node()
self.assertNotIn( n1, s )
self.assertNotIn( n2, s )
a = s.add( n1 )
self.assertEqual( a, True )
self.assertIn( n1, s )
self.assertTrue( not n1 not in s )
self.assertNotIn( n2, s )
self.assertEqual( len( s ), 1 )
self.assertEqual( s.size(), 1 )
a = s.add( n1 )
self.assertEqual( a, False )
self.assertIn( n1, s )
self.assertTrue( not n1 not in s )
self.assertNotIn( n2, s )
self.assertEqual( len( s ), 1 )
self.assertEqual( s.size(), 1 )
a = s.add( n2 )
self.assertEqual( a, True )
self.assertIn( n1, s )
self.assertIn( n2, s )
self.assertEqual( len( s ), 2 )
self.assertEqual( s.size(), 2 )
a = s.remove( n1 )
self.assertEqual( a, True )
self.assertNotIn( n1, s )
self.assertIn( n2, s )
self.assertEqual( len( s ), 1 )
self.assertEqual( s.size(), 1 )
a = s.remove( n1 )
self.assertEqual( a, False )
self.assertNotIn( n1, s )
self.assertIn( n2, s )
self.assertEqual( len( s ), 1 )
self.assertEqual( s.size(), 1 )
s.clear()
self.assertNotIn( n1, s )
self.assertNotIn( n2, s )
self.assertEqual( len( s ), 0 )
self.assertEqual( s.size(), 0 )
def testGetItem( self ) :
s = Gaffer.StandardSet()
n1 = Gaffer.Node()
n2 = Gaffer.Node()
s.add( n1 )
s.add( n2 )
self.assertTrue( s[0].isSame( n1 ) )
self.assertTrue( s[1].isSame( n2 ) )
self.assertTrue( s[-1].isSame( n2 ) )
self.assertTrue( s[-2].isSame( n1 ) )
self.assertRaises( IndexError, s.__getitem__, 2 )
self.assertRaises( IndexError, s.__getitem__, -3 )
def testMemberOrdering( self ) :
s = Gaffer.StandardSet()
for i in range( 0, 1000 ) :
n = Gaffer.Node()
n.setName( "s" + str( i ) )
s.add( n )
self.assertEqual( len( s ), 1000 )
for i in range( 0, len( s ) ) :
self.assertEqual( s[i].getName(), "s" + str( i ) )
def testLastAdded( self ) :
s = Gaffer.StandardSet()
for i in range( 0, 1000 ) :
n = Gaffer.Node()
s.add( n )
self.assertTrue( s[-1].isSame( n ) )
def testSignals( self ) :
ps = set()
def added( set, member ) :
ps.add( member )
def removed( set, member ) :
ps.remove( member )
s = Gaffer.StandardSet()
s.memberAddedSignal().connect( added, scoped = False )
s.memberRemovedSignal().connect( removed, scoped = False )
n1 = Gaffer.Node()
n2 = Gaffer.Node()
n3 = Gaffer.Node()
s.add( n1 )
s.add( n2 )
s.add( n3 )
self.assertEqual( ps, set( s ) )
s.remove( n1 )
s.remove( n2 )
self.assertEqual( ps, set( s ) )
s.add( n1 )
s.add( n2 )
s.clear()
self.assertEqual( ps, set( s ) )
def testConstructFromSequence( self ) :
n1 = Gaffer.Node()
n2 = Gaffer.Node()
n3 = Gaffer.Node()
s = Gaffer.StandardSet( ( n1, n2 ) )
self.assertIn( n1, s )
self.assertIn( n2, s )
self.assertNotIn( n3, s )
def testAddAndRemoveFromSequence( self ) :
n = ( Gaffer.Node(), Gaffer.Node(), Gaffer.Node() )
s = Gaffer.StandardSet()
s.add( n )
self.assertEqual( set( n ), set( s ) )
s.remove( n )
self.assertEqual( len( s ), 0 )
self.assertEqual( set(), set( s ) )
def testMemberAcceptanceSignals( self ) :
s = Gaffer.StandardSet()
def f( s, m ) :
return m.isInstanceOf( Gaffer.Plug.staticTypeId() )
s.memberAcceptanceSignal().connect( f, scoped = False )
n = Gaffer.Node()
p = Gaffer.Plug()
self.assertRaises( Exception, s.add, n )
self.assertNotIn( n, s )
s.add( p )
self.assertIn( p, s )
def testMembershipQueries( self ) :
members = [ Gaffer.Node(), Gaffer.Node(), Gaffer.Node() ]
notMembers = [ Gaffer.Node(), Gaffer.Node(), Gaffer.Node() ]
s = Gaffer.StandardSet( members )
for m in members :
self.assertIn( m, s )
self.assertTrue( s.contains( m ) )
for m in notMembers :
self.assertNotIn( m, s )
self.assertFalse( s.contains( m ) )
def testIteration( self ) :
members = [ Gaffer.Node(), Gaffer.Node(), Gaffer.Node() ]
s = Gaffer.StandardSet( members )
i = 0
for m in s :
self.assertTrue( m.isSame( members[i] ) )
i += 1
def testRemoveReferenceCounting( self ) :
s = Gaffer.StandardSet()
for i in range( 0, 100 ) :
s.add( IECore.StringData( "hello there!" ) )
def f( s, m ) :
pass
s.memberRemovedSignal().connect( f, scoped = False )
s.clear()
def testAddAndRemoveFromSet( self ) :
ints = [ IECore.IntData( i ) for i in range( 0, 10 ) ]
all = Gaffer.StandardSet( ints )
evens = Gaffer.StandardSet( [ x for x in ints if x.value % 2 == 0 ] )
odds = Gaffer.StandardSet( [ x for x in ints if x.value % 2 == 1 ] )
s = Gaffer.StandardSet()
self.assertEqual( s.add( evens ), len( evens ) )
self.assertEqual( len( s ), len( evens ) )
for e in evens :
self.assertTrue( e in s )
self.assertEqual( s.add( odds ), len( odds ) )
self.assertEqual( len( s ), len( all ) )
for e in all :
self.assertTrue( e in s )
self.assertEqual( s.remove( evens ), len( evens ) )
self.assertEqual( len( s ), len( odds ) )
for e in odds :
self.assertTrue( e in s )
def testSlicing( self ) :
l = [ IECore.IntData( i ) for i in range( 0, 10 ) ]
s = Gaffer.StandardSet( l )
self.assertEqual( l[:], s[:] )
self.assertEqual( l[1:4], s[1:4] )
self.assertEqual( l[:4], s[:4] )
self.assertEqual( l[2:], s[2:] )
self.assertEqual( l[2:-2], s[2:-2] )
self.assertEqual( l[:40], s[:40] )
self.assertEqual( l[1:-20], s[1:-20] )
def testOrphanRemoval( self ) :
p = Gaffer.GraphComponent()
c1 = Gaffer.GraphComponent()
c2 = Gaffer.GraphComponent()
p["c1"] = c1
p["c2"] = c2
s = Gaffer.StandardSet( p.children(), removeOrphans = True )
self.assertTrue( s.getRemoveOrphans() )
self.assertEqual( len( s ), 2 )
self.assertTrue( c1 in s )
self.assertTrue( c2 in s )
p.removeChild( c1 )
self.assertEqual( len( s ), 1 )
self.assertFalse( c1 in s )
self.assertTrue( c2 in s )
p["c1"] = c1
self.assertEqual( len( s ), 1 )
self.assertFalse( c1 in s )
self.assertTrue( c2 in s )
s.add( c1 )
self.assertEqual( len( s ), 2 )
self.assertTrue( c1 in s )
self.assertTrue( c2 in s )
p.removeChild( c1 )
self.assertEqual( len( s ), 1 )
self.assertFalse( c1 in s )
self.assertTrue( c2 in s )
p.removeChild( c2 )
self.assertEqual( len( s ), 0 )
self.assertFalse( c1 in s )
self.assertFalse( c2 in s )
c3 = Gaffer.GraphComponent()
s.add( c3 )
self.assertEqual( len( s ), 1 )
self.assertFalse( c1 in s )
self.assertFalse( c2 in s )
self.assertTrue( c3 in s )
p["c3"] = c3
self.assertEqual( len( s ), 1 )
self.assertFalse( c1 in s )
self.assertFalse( c2 in s )
self.assertTrue( c3 in s )
p.removeChild( c3 )
self.assertEqual( len( s ), 0 )
self.assertFalse( c1 in s )
self.assertFalse( c2 in s )
self.assertFalse( c3 in s )
p["c3"] = c3
s.add( c3 )
self.assertEqual( len( s ), 1 )
self.assertFalse( c1 in s )
self.assertFalse( c2 in s )
self.assertTrue( c3 in s )
s.setRemoveOrphans( False )
self.assertFalse( s.getRemoveOrphans() )
p.removeChild( c3 )
self.assertEqual( len( s ), 1 )
self.assertFalse( c1 in s )
self.assertFalse( c2 in s )
self.assertTrue( c3 in s )
s.setRemoveOrphans( True )
self.assertTrue( s.getRemoveOrphans() )
p.addChild( c3 )
p.removeChild( c3 )
self.assertEqual( len( s ), 0 )
self.assertFalse( c1 in s )
self.assertFalse( c2 in s )
self.assertFalse( c3 in s )
def testNoOrphanRemoval( self ) :
p = Gaffer.GraphComponent()
c1 = Gaffer.GraphComponent()
c2 = Gaffer.GraphComponent()
p["c1"] = c1
p["c2"] = c2
s1 = Gaffer.StandardSet( p.children() )
s2 = Gaffer.StandardSet( p.children(), removeOrphans = False )
self.assertFalse( s1.getRemoveOrphans() )
self.assertFalse( s2.getRemoveOrphans() )
p.removeChild( c1 )
p.removeChild( c2 )
self.assertTrue( c1 in s1 )
self.assertTrue( c2 in s1 )
self.assertTrue( c1 in s2 )
self.assertTrue( c2 in s2 )
if __name__ == "__main__":
unittest.main()
| 24.018605 | 77 | 0.631487 | ) )
self.assertEqual( len( s ), len( all ) )
for e in all :
self.assertTrue( e in s )
self.assertEqual( s.remove( evens ), len( evens ) )
self.assertEqual( len( s ), len( odds ) )
for e in odds :
self.assertTrue( e in s )
def testSlicing( self ) :
l = [ IECore.IntData( i ) for i in range( 0, 10 ) ]
s = Gaffer.StandardSet( l )
self.assertEqual( l[:], s[:] )
self.assertEqual( l[1:4], s[1:4] )
self.assertEqual( l[:4], s[:4] )
self.assertEqual( l[2:], s[2:] )
self.assertEqual( l[2:-2], s[2:-2] )
self.assertEqual( l[:40], s[:40] )
self.assertEqual( l[1:-20], s[1:-20] )
def testOrphanRemoval( self ) :
p = Gaffer.GraphComponent()
c1 = Gaffer.GraphComponent()
c2 = Gaffer.GraphComponent()
p["c1"] = c1
p["c2"] = c2
s = Gaffer.StandardSet( p.children(), removeOrphans = True )
self.assertTrue( s.getRemoveOrphans() )
self.assertEqual( len( s ), 2 )
self.assertTrue( c1 in s )
self.assertTrue( c2 in s )
p.removeChild( c1 )
self.assertEqual( len( s ), 1 )
self.assertFalse( c1 in s )
self.assertTrue( c2 in s )
p["c1"] = c1
self.assertEqual( len( s ), 1 )
self.assertFalse( c1 in s )
self.assertTrue( c2 in s )
s.add( c1 )
self.assertEqual( len( s ), 2 )
self.assertTrue( c1 in s )
self.assertTrue( c2 in s )
p.removeChild( c1 )
self.assertEqual( len( s ), 1 )
self.assertFalse( c1 in s )
self.assertTrue( c2 in s )
p.removeChild( c2 )
self.assertEqual( len( s ), 0 )
self.assertFalse( c1 in s )
self.assertFalse( c2 in s )
c3 = Gaffer.GraphComponent()
s.add( c3 )
self.assertEqual( len( s ), 1 )
self.assertFalse( c1 in s )
self.assertFalse( c2 in s )
self.assertTrue( c3 in s )
p["c3"] = c3
self.assertEqual( len( s ), 1 )
self.assertFalse( c1 in s )
self.assertFalse( c2 in s )
self.assertTrue( c3 in s )
p.removeChild( c3 )
self.assertEqual( len( s ), 0 )
self.assertFalse( c1 in s )
self.assertFalse( c2 in s )
self.assertFalse( c3 in s )
p["c3"] = c3
s.add( c3 )
self.assertEqual( len( s ), 1 )
self.assertFalse( c1 in s )
self.assertFalse( c2 in s )
self.assertTrue( c3 in s )
s.setRemoveOrphans( False )
self.assertFalse( s.getRemoveOrphans() )
p.removeChild( c3 )
self.assertEqual( len( s ), 1 )
self.assertFalse( c1 in s )
self.assertFalse( c2 in s )
self.assertTrue( c3 in s )
s.setRemoveOrphans( True )
self.assertTrue( s.getRemoveOrphans() )
p.addChild( c3 )
p.removeChild( c3 )
self.assertEqual( len( s ), 0 )
self.assertFalse( c1 in s )
self.assertFalse( c2 in s )
self.assertFalse( c3 in s )
def testNoOrphanRemoval( self ) :
p = Gaffer.GraphComponent()
c1 = Gaffer.GraphComponent()
c2 = Gaffer.GraphComponent()
p["c1"] = c1
p["c2"] = c2
s1 = Gaffer.StandardSet( p.children() )
s2 = Gaffer.StandardSet( p.children(), removeOrphans = False )
self.assertFalse( s1.getRemoveOrphans() )
self.assertFalse( s2.getRemoveOrphans() )
p.removeChild( c1 )
p.removeChild( c2 )
self.assertTrue( c1 in s1 )
self.assertTrue( c2 in s1 )
self.assertTrue( c1 in s2 )
self.assertTrue( c2 in s2 )
if __name__ == "__main__":
unittest.main()
| true | true |
f7f578684babacac24d27a28f613944927ffee1c | 6,148 | py | Python | contextlab/utils/layer_misc.py | SHTUPLUS/ContextLab | 4e12f0af9d0640f29c763b915f02de763b577200 | [
"MIT"
] | 41 | 2019-09-28T08:12:34.000Z | 2021-03-17T02:52:17.000Z | contextlab/utils/layer_misc.py | SHTUPLUS/ContextLab | 4e12f0af9d0640f29c763b915f02de763b577200 | [
"MIT"
] | null | null | null | contextlab/utils/layer_misc.py | SHTUPLUS/ContextLab | 4e12f0af9d0640f29c763b915f02de763b577200 | [
"MIT"
] | 4 | 2019-12-24T08:59:35.000Z | 2022-02-26T14:39:23.000Z | import torch
from torch import nn
import torch.nn.functional as F
__all__ = ['ConvBNReLU']
class ConvBNReLU(nn.Module):
def __init__(self,
in_channels,
out_channels,
kernel_size,
norm_layer=nn.BatchNorm2d,
with_relu=True,
stride=1,
padding=0,
dilation=1,
groups=1,
bias=True,
padding_mode='zeros'):
super(ConvBNReLU, self).__init__()
self.conv = nn.Conv2d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
groups=groups,
bias=bias,
padding_mode=padding_mode)
self.bn = norm_layer(out_channels)
self.with_relu = with_relu
if with_relu:
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
if self.with_relu:
x = self.relu(x)
return x
class GraphAdjNetwork(nn.Module):
def __init__(self,
pair_function,
in_channels,
channel_stride):
super(GraphAdjNetwork, self).__init__()
self.pair_function = pair_function
if pair_function == 'embedded_gaussian':
inter_channel = in_channels // channel_stride
self.phi = ConvBNReLU(
in_channels=in_channels,
out_channels=inter_channel,
kernel_size=1,
bias=False,
norm_layer=nn.BatchNorm2d
)
self.theta = ConvBNReLU(
in_channels=in_channels,
out_channels=inter_channel,
kernel_size=1,
bias=False,
norm_layer=nn.BatchNorm2d
)
elif pair_function == 'gaussian':
pass
elif pair_function == 'diff_learnable':
self.learnable_adj_conv = ConvBNReLU(
in_channels=in_channels,
out_channels=1,
kernel_size=1,
bias=False,
norm_layer=nn.BatchNorm2d
)
elif pair_function == 'sum_learnable':
self.learnable_adj_conv = ConvBNReLU(
in_channels=in_channels,
out_channels=1,
kernel_size=1,
bias=False,
norm_layer=nn.BatchNorm2d
)
elif pair_function == 'cat_learnable':
self.learnable_adj_conv = ConvBNReLU(
in_channels=in_channels*2,
out_channels=1,
kernel_size=1,
bias=False,
norm_layer=nn.BatchNorm2d
)
else:
raise NotImplementedError
def forward(self, x):
"""
Args:
x (Tensor):
(B, N, C)
"""
if self.pair_function == 'gaussian':
adj = self.gaussian(x, x.permute(0, 2, 1))
elif self.pair_function == 'embedded_gaussian':
x = x.permute(0, 2, 1).unsqueeze(-1)
x_1 = self.phi(x) # B, C, N, 1
x_2 = self.theta(x) # B, C, N, 1
adj = self.gaussian(
x_1.squeeze(-1).permute(0, 2, 1), x_2.squeeze(-1))
elif self.pair_function == 'diff_learnable':
adj = self.diff_learnable_adj(x.unsqueeze(2), x.unsqueeze(1))
elif self.pair_function == 'sum_learnable':
adj = self.sum_learnable_adj(x.unsqueeze(2), x.unsqueeze(1))
elif self.pair_function == 'cat_learnable':
adj = self.cat_learnable_adj(x.unsqueeze(2), x.unsqueeze(1))
else:
raise NotImplementedError(self.pair_function)
return adj
def gaussian(self, x_1, x_2):
"""
Args:
x_1:
x_2:
Return:
adj: normalized in the last dimenstion
"""
# (B, N, C) X (B, C, N) --> (B, N, N)
adj = torch.bmm(x_1, x_2) # B, N, N
adj = F.softmax(adj, dim=-1) # B, N, N
return adj
def diff_learnable_adj(self, x_1, x_2):
"""
Learnable attention from the difference of the feature
Return:
adj: normalzied at the last dimension
"""
# x1:(B,N,1,C)
# x2:(B,1,N,C)
feature_diff = x_1 - x_2 # (B, N, N, C)
feature_diff = feature_diff.permute(0, 3, 1, 2) # (B, C, N, N)
adj = self.learnable_adj_conv(feature_diff) # (B, 1, N, N)
adj = adj.squeeze(1) # (B, N, N)
# Use the number of nodes as the normalization factor
adj = adj / adj.size(-1) # (B, N, N)
return adj
def sum_learnable_adj(self, x_1, x_2):
"""
Learnable attention from the difference of the feature
Return:
adj: normalzied at the last dimension
"""
# x1:(B,N,1,C)
# x2:(B,1,N,C)
feature_diff = x_1 + x_2 # (B, N, N, C)
feature_diff = feature_diff.permute(0, 3, 1, 2) # (B, C, N, N)
adj = self.learnable_adj_conv(feature_diff) # (B, 1, N, N)
adj = adj.squeeze(1) # (B, N, N)
# Use the number of nodes as the normalization factor
adj = adj / adj.size(-1) # (B, N, N)
return adj
def cat_learnable_adj(self, x_1, x_2):
"""
Learable attention from the concatnation of the features
"""
x_1 = x_1.repeat(1, 1, x_1.size(1), 1) # B, N, N, C
x_2 = x_2.repeat(1, x_2.size(2), 1, 1) # B, N, N, C
feature_cat = torch.cat([x_1, x_2], dim=-1) # B, N, N, 2C
# import pdb; pdb.set_trace()
feature_cat = feature_cat.permute(0, 3, 1, 2) # B, 2C, N, N
adj = self.learnable_adj_conv(feature_cat) # B, 1, N, N
adj = adj.squeeze(1) # (B, N, N)
# Use the number of nodes as the normalization factor
adj = adj / adj.size(-1) # (B, N, N)
return adj
| 33.053763 | 73 | 0.508133 | import torch
from torch import nn
import torch.nn.functional as F
__all__ = ['ConvBNReLU']
class ConvBNReLU(nn.Module):
def __init__(self,
in_channels,
out_channels,
kernel_size,
norm_layer=nn.BatchNorm2d,
with_relu=True,
stride=1,
padding=0,
dilation=1,
groups=1,
bias=True,
padding_mode='zeros'):
super(ConvBNReLU, self).__init__()
self.conv = nn.Conv2d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
groups=groups,
bias=bias,
padding_mode=padding_mode)
self.bn = norm_layer(out_channels)
self.with_relu = with_relu
if with_relu:
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
if self.with_relu:
x = self.relu(x)
return x
class GraphAdjNetwork(nn.Module):
def __init__(self,
pair_function,
in_channels,
channel_stride):
super(GraphAdjNetwork, self).__init__()
self.pair_function = pair_function
if pair_function == 'embedded_gaussian':
inter_channel = in_channels // channel_stride
self.phi = ConvBNReLU(
in_channels=in_channels,
out_channels=inter_channel,
kernel_size=1,
bias=False,
norm_layer=nn.BatchNorm2d
)
self.theta = ConvBNReLU(
in_channels=in_channels,
out_channels=inter_channel,
kernel_size=1,
bias=False,
norm_layer=nn.BatchNorm2d
)
elif pair_function == 'gaussian':
pass
elif pair_function == 'diff_learnable':
self.learnable_adj_conv = ConvBNReLU(
in_channels=in_channels,
out_channels=1,
kernel_size=1,
bias=False,
norm_layer=nn.BatchNorm2d
)
elif pair_function == 'sum_learnable':
self.learnable_adj_conv = ConvBNReLU(
in_channels=in_channels,
out_channels=1,
kernel_size=1,
bias=False,
norm_layer=nn.BatchNorm2d
)
elif pair_function == 'cat_learnable':
self.learnable_adj_conv = ConvBNReLU(
in_channels=in_channels*2,
out_channels=1,
kernel_size=1,
bias=False,
norm_layer=nn.BatchNorm2d
)
else:
raise NotImplementedError
def forward(self, x):
if self.pair_function == 'gaussian':
adj = self.gaussian(x, x.permute(0, 2, 1))
elif self.pair_function == 'embedded_gaussian':
x = x.permute(0, 2, 1).unsqueeze(-1)
x_1 = self.phi(x)
x_2 = self.theta(x)
adj = self.gaussian(
x_1.squeeze(-1).permute(0, 2, 1), x_2.squeeze(-1))
elif self.pair_function == 'diff_learnable':
adj = self.diff_learnable_adj(x.unsqueeze(2), x.unsqueeze(1))
elif self.pair_function == 'sum_learnable':
adj = self.sum_learnable_adj(x.unsqueeze(2), x.unsqueeze(1))
elif self.pair_function == 'cat_learnable':
adj = self.cat_learnable_adj(x.unsqueeze(2), x.unsqueeze(1))
else:
raise NotImplementedError(self.pair_function)
return adj
def gaussian(self, x_1, x_2):
adj = torch.bmm(x_1, x_2)
adj = F.softmax(adj, dim=-1)
return adj
def diff_learnable_adj(self, x_1, x_2):
feature_diff = x_1 - x_2
feature_diff = feature_diff.permute(0, 3, 1, 2)
adj = self.learnable_adj_conv(feature_diff)
adj = adj.squeeze(1)
adj = adj / adj.size(-1)
return adj
def sum_learnable_adj(self, x_1, x_2):
feature_diff = x_1 + x_2
feature_diff = feature_diff.permute(0, 3, 1, 2)
adj = self.learnable_adj_conv(feature_diff)
adj = adj.squeeze(1)
adj = adj / adj.size(-1)
return adj
def cat_learnable_adj(self, x_1, x_2):
x_1 = x_1.repeat(1, 1, x_1.size(1), 1)
x_2 = x_2.repeat(1, x_2.size(2), 1, 1)
feature_cat = torch.cat([x_1, x_2], dim=-1)
feature_cat = feature_cat.permute(0, 3, 1, 2)
adj = self.learnable_adj_conv(feature_cat)
adj = adj.squeeze(1)
adj = adj / adj.size(-1)
return adj
| true | true |
f7f5793ab6dd3269a5cdd6a91d20c88c3b1e74e8 | 345 | py | Python | django-channles-tutorial/chat/routing.py | masa1203/django_mypractice | c3ffef6f19a4dd16e2947f36c88f1f5773b427ce | [
"MIT"
] | null | null | null | django-channles-tutorial/chat/routing.py | masa1203/django_mypractice | c3ffef6f19a4dd16e2947f36c88f1f5773b427ce | [
"MIT"
] | 26 | 2019-12-25T19:26:23.000Z | 2021-09-22T19:11:28.000Z | django-channles-tutorial/chat/routing.py | sawady1203/django_mypractice | c3ffef6f19a4dd16e2947f36c88f1f5773b427ce | [
"MIT"
] | 1 | 2021-06-10T05:14:02.000Z | 2021-06-10T05:14:02.000Z | # chat\routing.py
from django.urls import re_path
from . import consumers
websocket_urlpatterns = [
re_path(r'ws/chat/(?P<room_name>\w+)/$', consumers.ChatConsumer)
]
# ここでre_pathを使う理由がよくわからない
# (Note we use re_path() due to limitations in URLRouter.)
# https://channels.readthedocs.io/en/latest/topics/routing.html#urlrouter
| 26.538462 | 74 | 0.730435 |
from django.urls import re_path
from . import consumers
websocket_urlpatterns = [
re_path(r'ws/chat/(?P<room_name>\w+)/$', consumers.ChatConsumer)
]
| true | true |
f7f579e6488daea2866663f03c95cc4c106a392d | 5,466 | py | Python | tests/test_serialization.py | jaykang920/x2py | b8bd473f94ff4b9576e984cc384f4159ab71278d | [
"MIT"
] | null | null | null | tests/test_serialization.py | jaykang920/x2py | b8bd473f94ff4b9576e984cc384f4159ab71278d | [
"MIT"
] | 1 | 2019-06-05T09:35:09.000Z | 2020-07-02T09:46:46.000Z | tests/test_serialization.py | jaykang920/x2py | b8bd473f94ff4b9576e984cc384f4159ab71278d | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright (c) 2017, 2018 Jae-jun Kang
# See the file LICENSE for details.
import datetime
import sys
import pytest
sys.path.append('..')
import x2py
from x2py.deserializer import Deserializer
from x2py.serializer import Serializer
from test import *
def test_byte():
buffer = bytearray()
s = Serializer(buffer)
d = Deserializer(buffer)
for test_value in [0, 1, 0x07f, 0x080, 0x0ff ]:
s.write_byte(None, test_value)
value = d.read_byte(None)
assert value == test_value
def test_int8():
buffer = bytearray()
s = Serializer(buffer)
d = Deserializer(buffer)
for test_value in [ -128, -1, 0, 1, 127 ]:
s.write_int8(None, test_value)
value = d.read_int8(None)
assert value == test_value
def test_int16():
buffer = bytearray()
s = Serializer(buffer)
d = Deserializer(buffer)
for test_value in [ -32768, -1, 0, 1, 32767 ]:
s.write_int16(None, test_value)
value = d.read_int16(None)
assert value == test_value
def test_int32():
buffer = bytearray()
s = Serializer(buffer)
d = Deserializer(buffer)
for test_value in [ -2147483648, -1, 0, 1, 2147483647 ]:
s.write_int32(None, test_value)
value = d.read_int32(None)
assert value == test_value
def test_int64():
buffer = bytearray()
s = Serializer(buffer)
d = Deserializer(buffer)
for test_value in [ -9223372036854775808, -1, 0, 1, 9223372036854775807 ]:
s.write_int64(None, test_value)
value = d.read_int64(None)
assert value == test_value
def test_float64():
buffer = bytearray()
s = Serializer(buffer)
d = Deserializer(buffer)
for test_value in [ 0.01 ]:
s.write_float64(None, test_value)
value = d.read_float64(None)
assert value == test_value
def test_nonnegative():
s = Serializer()
assert(len(s.buffer) == 0)
s.write_nonnegative(0)
assert(len(s.buffer) == 1)
d = Deserializer(s.buffer)
v, n = d.read_nonnegative()
assert(v == 0)
assert(n == 1)
assert(len(s.buffer) == 1)
s.write_nonnegative(1)
v, n = d.read_nonnegative()
assert(v == 1)
assert(n == 1)
s.write_nonnegative(127)
v, n = d.read_nonnegative()
assert(v == 127)
assert(n == 1)
s.write_nonnegative(128)
v, n = d.read_nonnegative()
assert(v == 128)
assert(n == 2)
def test_string():
if sys.version_info.major >= 3:
strs = ['abcd', '한글']
else:
strs = ['abcd']
for s in strs:
encoded = s.encode('utf-8')
assert Serializer.length_utf8(s) == len(encoded)
buffer = bytearray()
serializer = Serializer(buffer)
serializer.write_string(None, s)
assert bytes(buffer[1:]) == encoded
d = Deserializer(buffer)
decoded = d.read_string(None)
assert decoded == s
assert d.pos == len(buffer)
def test_datetime():
buffer = bytearray()
s = Serializer(buffer)
d = Deserializer(buffer)
for test_value in [ datetime.datetime.now(), datetime.datetime(1969, 12, 31) ]:
s.write_datetime(None, test_value)
value = d.read_datetime(None)
truncated = test_value - datetime.timedelta(microseconds=(test_value.microsecond % 1000))
assert value == truncated
def test_bytes():
buffer = bytearray()
s = Serializer(buffer)
d = Deserializer(buffer)
test_value = b'abcd'
s.write_bytes(None, test_value)
assert len(buffer) == 5
value = d.read_bytes(None)
assert value == test_value
def test_cell():
buffer = bytearray()
s = Serializer(buffer)
d = Deserializer(buffer)
metaprop = MetaProperty(None, MetaProperty.CELL, runtime_type=MyCell1)
test_value = MyCell1()
test_value.foo = 1
s.write_cell(metaprop, test_value)
value = d.read_cell(metaprop)
assert value == test_value
def test_partial_serialization():
s = Serializer(bytearray())
d = Deserializer(s.buffer)
c1 = MyCell1()
c1.foo = 1
c2 = MyCell2()
c2.foo = 1
c2.bar = 'bar'
metaprop1 = MetaProperty(None, MetaProperty.CELL, runtime_type=type(c1))
metaprop2 = MetaProperty(None, MetaProperty.CELL, runtime_type=type(c2))
l2 = Serializer.length_cell(metaprop2, c2)
s.write_cell(metaprop2, c2)
assert l2 == len(s.buffer)
v2 = d.read_cell(metaprop2)
assert c2 == v2
d.buffer = s.buffer = bytearray()
d.pos = 0
l1 = Serializer.length_cell(metaprop1, c2)
s.write_cell(metaprop1, c2)
assert l1 == len(s.buffer)
assert l1 < l2
v1 = d.read_cell(metaprop1)
assert c1 == v1
def test_list():
buffer = bytearray()
s = Serializer(buffer)
d = Deserializer(buffer)
# list(int32)
metaprop = MetaProperty('List', 13, details=[ MetaProperty('None', 5) ])
test_value = [ 1, 2, 3 ]
s.write_list(metaprop, test_value)
value = d.read_list(metaprop)
assert len(value) == len(test_value)
assert value == test_value
def test_map():
buffer = bytearray()
s = Serializer(buffer)
d = Deserializer(buffer)
# map(int32, string)
metaprop = MetaProperty('Map', 14, details=[ MetaProperty(None, 5), MetaProperty(None, 9) ])
test_value = { 1: "one", 2: 'two', 3: 'three' }
s.write_map(metaprop, test_value)
value = d.read_map(metaprop)
assert len(value) == len(test_value)
assert value == test_value
| 26.028571 | 97 | 0.629162 |
import datetime
import sys
import pytest
sys.path.append('..')
import x2py
from x2py.deserializer import Deserializer
from x2py.serializer import Serializer
from test import *
def test_byte():
buffer = bytearray()
s = Serializer(buffer)
d = Deserializer(buffer)
for test_value in [0, 1, 0x07f, 0x080, 0x0ff ]:
s.write_byte(None, test_value)
value = d.read_byte(None)
assert value == test_value
def test_int8():
buffer = bytearray()
s = Serializer(buffer)
d = Deserializer(buffer)
for test_value in [ -128, -1, 0, 1, 127 ]:
s.write_int8(None, test_value)
value = d.read_int8(None)
assert value == test_value
def test_int16():
buffer = bytearray()
s = Serializer(buffer)
d = Deserializer(buffer)
for test_value in [ -32768, -1, 0, 1, 32767 ]:
s.write_int16(None, test_value)
value = d.read_int16(None)
assert value == test_value
def test_int32():
buffer = bytearray()
s = Serializer(buffer)
d = Deserializer(buffer)
for test_value in [ -2147483648, -1, 0, 1, 2147483647 ]:
s.write_int32(None, test_value)
value = d.read_int32(None)
assert value == test_value
def test_int64():
buffer = bytearray()
s = Serializer(buffer)
d = Deserializer(buffer)
for test_value in [ -9223372036854775808, -1, 0, 1, 9223372036854775807 ]:
s.write_int64(None, test_value)
value = d.read_int64(None)
assert value == test_value
def test_float64():
buffer = bytearray()
s = Serializer(buffer)
d = Deserializer(buffer)
for test_value in [ 0.01 ]:
s.write_float64(None, test_value)
value = d.read_float64(None)
assert value == test_value
def test_nonnegative():
s = Serializer()
assert(len(s.buffer) == 0)
s.write_nonnegative(0)
assert(len(s.buffer) == 1)
d = Deserializer(s.buffer)
v, n = d.read_nonnegative()
assert(v == 0)
assert(n == 1)
assert(len(s.buffer) == 1)
s.write_nonnegative(1)
v, n = d.read_nonnegative()
assert(v == 1)
assert(n == 1)
s.write_nonnegative(127)
v, n = d.read_nonnegative()
assert(v == 127)
assert(n == 1)
s.write_nonnegative(128)
v, n = d.read_nonnegative()
assert(v == 128)
assert(n == 2)
def test_string():
if sys.version_info.major >= 3:
strs = ['abcd', '한글']
else:
strs = ['abcd']
for s in strs:
encoded = s.encode('utf-8')
assert Serializer.length_utf8(s) == len(encoded)
buffer = bytearray()
serializer = Serializer(buffer)
serializer.write_string(None, s)
assert bytes(buffer[1:]) == encoded
d = Deserializer(buffer)
decoded = d.read_string(None)
assert decoded == s
assert d.pos == len(buffer)
def test_datetime():
buffer = bytearray()
s = Serializer(buffer)
d = Deserializer(buffer)
for test_value in [ datetime.datetime.now(), datetime.datetime(1969, 12, 31) ]:
s.write_datetime(None, test_value)
value = d.read_datetime(None)
truncated = test_value - datetime.timedelta(microseconds=(test_value.microsecond % 1000))
assert value == truncated
def test_bytes():
buffer = bytearray()
s = Serializer(buffer)
d = Deserializer(buffer)
test_value = b'abcd'
s.write_bytes(None, test_value)
assert len(buffer) == 5
value = d.read_bytes(None)
assert value == test_value
def test_cell():
buffer = bytearray()
s = Serializer(buffer)
d = Deserializer(buffer)
metaprop = MetaProperty(None, MetaProperty.CELL, runtime_type=MyCell1)
test_value = MyCell1()
test_value.foo = 1
s.write_cell(metaprop, test_value)
value = d.read_cell(metaprop)
assert value == test_value
def test_partial_serialization():
s = Serializer(bytearray())
d = Deserializer(s.buffer)
c1 = MyCell1()
c1.foo = 1
c2 = MyCell2()
c2.foo = 1
c2.bar = 'bar'
metaprop1 = MetaProperty(None, MetaProperty.CELL, runtime_type=type(c1))
metaprop2 = MetaProperty(None, MetaProperty.CELL, runtime_type=type(c2))
l2 = Serializer.length_cell(metaprop2, c2)
s.write_cell(metaprop2, c2)
assert l2 == len(s.buffer)
v2 = d.read_cell(metaprop2)
assert c2 == v2
d.buffer = s.buffer = bytearray()
d.pos = 0
l1 = Serializer.length_cell(metaprop1, c2)
s.write_cell(metaprop1, c2)
assert l1 == len(s.buffer)
assert l1 < l2
v1 = d.read_cell(metaprop1)
assert c1 == v1
def test_list():
buffer = bytearray()
s = Serializer(buffer)
d = Deserializer(buffer)
metaprop = MetaProperty('List', 13, details=[ MetaProperty('None', 5) ])
test_value = [ 1, 2, 3 ]
s.write_list(metaprop, test_value)
value = d.read_list(metaprop)
assert len(value) == len(test_value)
assert value == test_value
def test_map():
buffer = bytearray()
s = Serializer(buffer)
d = Deserializer(buffer)
metaprop = MetaProperty('Map', 14, details=[ MetaProperty(None, 5), MetaProperty(None, 9) ])
test_value = { 1: "one", 2: 'two', 3: 'three' }
s.write_map(metaprop, test_value)
value = d.read_map(metaprop)
assert len(value) == len(test_value)
assert value == test_value
| true | true |
f7f57abc2a3d0ab45a5d8414a116e8a23bbed7e7 | 5,944 | py | Python | flask_web_application/Complete_Python3_app/Adafruit_Python_DHT/Adafruit_DHT/Beaglebone_Black.py | NguyenDangLamPhuong/face_recognization | 55c2537c6352eea44b5f8442a50ceb6d873bca62 | [
"MIT"
] | null | null | null | flask_web_application/Complete_Python3_app/Adafruit_Python_DHT/Adafruit_DHT/Beaglebone_Black.py | NguyenDangLamPhuong/face_recognization | 55c2537c6352eea44b5f8442a50ceb6d873bca62 | [
"MIT"
] | null | null | null | flask_web_application/Complete_Python3_app/Adafruit_Python_DHT/Adafruit_DHT/Beaglebone_Black.py | NguyenDangLamPhuong/face_recognization | 55c2537c6352eea44b5f8442a50ceb6d873bca62 | [
"MIT"
] | null | null | null | # Copyright (c) 2014 Adafruit Industries
# Author: Tony DiCola
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import re
from . import common
from . import Beaglebone_Black_Driver as driver
# Define mapping of pin names to GPIO base and number.
# Adapted from Adafruit_BBIO code Beaglebone Black system reference.
pin_to_gpio = {
"P9_11": (0, 30),
"P9_12": (1, 28),
"P9_13": (0, 31),
"P9_14": (1, 18),
"P9_15": (1, 16),
"P9_16": (1, 19),
"P9_17": (0, 5),
"P9_18": (0, 4),
"P9_19": (0, 13),
"P9_20": (0, 12),
"P9_21": (0, 3),
"P9_22": (0, 2),
"P9_23": (1, 17),
"P9_24": (0, 15),
"P9_25": (3, 21),
"P9_26": (0, 14),
"P9_27": (3, 19),
"P9_28": (3, 17),
"P9_29": (3, 15),
"P9_30": (3, 16),
"P9_31": (3, 14),
"P9_41": (0, 20),
"P9_42": (0, 7),
"UART4_RXD": (0, 30),
"UART4_TXD": (0, 31),
"EHRPWM1A": (1, 18),
"EHRPWM1B": (1, 19),
"I2C1_SCL": (0, 5),
"I2C1_SDA": (0, 4),
"I2C2_SCL": (0, 13),
"I2C2_SDA": (0, 12),
"UART2_TXD": (0, 3),
"UART2_RXD": (0, 2),
"UART1_TXD": (0, 15),
"UART1_RXD": (0, 14),
"SPI1_CS0": (3, 17),
"SPI1_D0": (3, 15),
"SPI1_D1": (3, 16),
"SPI1_SCLK": (3, 14),
"CLKOUT2": (0, 20),
"30": (0, 30),
"60": (1, 28),
"31": (0, 31),
"50": (1, 18),
"48": (1, 16),
"51": (1, 19),
"5": (0, 5),
"4": (0, 4),
"13": (0, 13),
"12": (0, 12),
"3": (0, 3),
"2": (0, 2),
"49": (1, 17),
"15": (0, 15),
"117": (3, 21),
"14": (0, 14),
"115": (3, 19),
"113": (3, 17),
"111": (3, 15),
"112": (3, 16),
"110": (3, 14),
"20": (0, 20),
"7": (0, 7),
"P8_3": (1, 6),
"P8_4": (1, 7),
"P8_5": (1, 2),
"P8_6": (1, 3),
"P8_7": (2, 2),
"P8_8": (2, 3),
"P8_9": (2, 5),
"P8_10": (2, 4),
"P8_11": (1, 13),
"P8_12": (1, 12),
"P8_13": (0, 23),
"P8_14": (0, 26),
"P8_15": (1, 15),
"P8_16": (1, 14),
"P8_17": (0, 27),
"P8_18": (2, 1),
"P8_19": (0, 22),
"P8_20": (1, 31),
"P8_21": (1, 30),
"P8_22": (1, 5),
"P8_23": (1, 4),
"P8_24": (1, 1),
"P8_25": (1, 0),
"P8_26": (1, 29),
"P8_27": (2, 22),
"P8_28": (2, 24),
"P8_29": (2, 23),
"P8_30": (2, 25),
"P8_31": (0, 10),
"P8_32": (0, 11),
"P8_33": (0, 9),
"P8_34": (2, 17),
"P8_35": (0, 8),
"P8_36": (2, 16),
"P8_37": (2, 14),
"P8_38": (2, 15),
"P8_39": (2, 12),
"P8_40": (2, 13),
"P8_41": (2, 10),
"P8_42": (2, 11),
"P8_43": (2, 8),
"P8_44": (2, 9),
"P8_45": (2, 6),
"P8_46": (2, 7),
"TIMER4": (2, 2),
"TIMER7": (2, 3),
"TIMER5": (2, 5),
"TIMER6": (2, 4),
"EHRPWM2B": (0, 23),
"EHRPWM2A": (0, 22),
"UART5_CTSN": (0, 10),
"UART5_RTSN": (0, 11),
"UART4_RTSN": (0, 9),
"UART3_RTSN": (2, 17),
"UART4_CTSN": (0, 8),
"UART3_CTSN": (2, 16),
"UART5_TXD": (2, 14),
"UART5_RXD": (2, 15),
"38": (1, 6),
"39": (1, 7),
"34": (1, 2),
"35": (1, 3),
"66": (2, 2),
"67": (2, 3),
"69": (2, 5),
"68": (2, 4),
"45": (1, 13),
"44": (1, 12),
"23": (0, 23),
"26": (0, 26),
"47": (1, 15),
"46": (1, 14),
"27": (0, 27),
"65": (2, 1),
"22": (0, 22),
"63": (1, 31),
"62": (1, 30),
"37": (1, 5),
"36": (1, 4),
"33": (1, 1),
"32": (1, 0),
"61": (1, 29),
"86": (2, 22),
"88": (2, 24),
"87": (2, 23),
"89": (2, 25),
"10": (0, 10),
"11": (0, 11),
"9": (0, 9),
"81": (2, 17),
"8": (0, 8),
"80": (2, 16),
"78": (2, 14),
"79": (2, 15),
"76": (2, 12),
"77": (2, 13),
"74": (2, 10),
"75": (2, 11),
"72": (2, 8),
"73": (2, 9),
"70": (2, 6),
"71": (2, 7)
}
def read(sensor, pin):
# Validate GPIO and map it to GPIO base and number.
gpio = pin_to_gpio.get(str(pin).upper(), None)
if gpio is None:
# Couldn't find in mapping, check if pin looks like GPIO<base>_<number>
match = re.match('GPIO([0123])_(\d+)', pin, re.IGNORECASE)
if match is not None:
gpio = (int(match.group(1)), int(match.group(2)))
if gpio is None or gpio[0] < 0 or gpio[0] > 3 or gpio[1] < 0 or gpio[1] > 31:
raise ValueError('Pin must be a valid GPIO identifier like P9_12 or GPIO1_28.')
# Get a reading from C driver code.
result, humidity, temp = driver.read(sensor, gpio[0], gpio[1])
if result in common.TRANSIENT_ERRORS:
# Signal no result could be obtained, but the caller can retry.
return (None, None)
elif result == common.DHT_ERROR_GPIO:
raise RuntimeError('Error accessing GPIO. Make sure program is run as root with sudo!')
elif result != common.DHT_SUCCESS:
# Some kind of error occured.
raise RuntimeError('Error calling DHT test driver read: {0}'.format(result))
return (humidity, temp)
| 27.266055 | 95 | 0.495121 |
import re
from . import common
from . import Beaglebone_Black_Driver as driver
pin_to_gpio = {
"P9_11": (0, 30),
"P9_12": (1, 28),
"P9_13": (0, 31),
"P9_14": (1, 18),
"P9_15": (1, 16),
"P9_16": (1, 19),
"P9_17": (0, 5),
"P9_18": (0, 4),
"P9_19": (0, 13),
"P9_20": (0, 12),
"P9_21": (0, 3),
"P9_22": (0, 2),
"P9_23": (1, 17),
"P9_24": (0, 15),
"P9_25": (3, 21),
"P9_26": (0, 14),
"P9_27": (3, 19),
"P9_28": (3, 17),
"P9_29": (3, 15),
"P9_30": (3, 16),
"P9_31": (3, 14),
"P9_41": (0, 20),
"P9_42": (0, 7),
"UART4_RXD": (0, 30),
"UART4_TXD": (0, 31),
"EHRPWM1A": (1, 18),
"EHRPWM1B": (1, 19),
"I2C1_SCL": (0, 5),
"I2C1_SDA": (0, 4),
"I2C2_SCL": (0, 13),
"I2C2_SDA": (0, 12),
"UART2_TXD": (0, 3),
"UART2_RXD": (0, 2),
"UART1_TXD": (0, 15),
"UART1_RXD": (0, 14),
"SPI1_CS0": (3, 17),
"SPI1_D0": (3, 15),
"SPI1_D1": (3, 16),
"SPI1_SCLK": (3, 14),
"CLKOUT2": (0, 20),
"30": (0, 30),
"60": (1, 28),
"31": (0, 31),
"50": (1, 18),
"48": (1, 16),
"51": (1, 19),
"5": (0, 5),
"4": (0, 4),
"13": (0, 13),
"12": (0, 12),
"3": (0, 3),
"2": (0, 2),
"49": (1, 17),
"15": (0, 15),
"117": (3, 21),
"14": (0, 14),
"115": (3, 19),
"113": (3, 17),
"111": (3, 15),
"112": (3, 16),
"110": (3, 14),
"20": (0, 20),
"7": (0, 7),
"P8_3": (1, 6),
"P8_4": (1, 7),
"P8_5": (1, 2),
"P8_6": (1, 3),
"P8_7": (2, 2),
"P8_8": (2, 3),
"P8_9": (2, 5),
"P8_10": (2, 4),
"P8_11": (1, 13),
"P8_12": (1, 12),
"P8_13": (0, 23),
"P8_14": (0, 26),
"P8_15": (1, 15),
"P8_16": (1, 14),
"P8_17": (0, 27),
"P8_18": (2, 1),
"P8_19": (0, 22),
"P8_20": (1, 31),
"P8_21": (1, 30),
"P8_22": (1, 5),
"P8_23": (1, 4),
"P8_24": (1, 1),
"P8_25": (1, 0),
"P8_26": (1, 29),
"P8_27": (2, 22),
"P8_28": (2, 24),
"P8_29": (2, 23),
"P8_30": (2, 25),
"P8_31": (0, 10),
"P8_32": (0, 11),
"P8_33": (0, 9),
"P8_34": (2, 17),
"P8_35": (0, 8),
"P8_36": (2, 16),
"P8_37": (2, 14),
"P8_38": (2, 15),
"P8_39": (2, 12),
"P8_40": (2, 13),
"P8_41": (2, 10),
"P8_42": (2, 11),
"P8_43": (2, 8),
"P8_44": (2, 9),
"P8_45": (2, 6),
"P8_46": (2, 7),
"TIMER4": (2, 2),
"TIMER7": (2, 3),
"TIMER5": (2, 5),
"TIMER6": (2, 4),
"EHRPWM2B": (0, 23),
"EHRPWM2A": (0, 22),
"UART5_CTSN": (0, 10),
"UART5_RTSN": (0, 11),
"UART4_RTSN": (0, 9),
"UART3_RTSN": (2, 17),
"UART4_CTSN": (0, 8),
"UART3_CTSN": (2, 16),
"UART5_TXD": (2, 14),
"UART5_RXD": (2, 15),
"38": (1, 6),
"39": (1, 7),
"34": (1, 2),
"35": (1, 3),
"66": (2, 2),
"67": (2, 3),
"69": (2, 5),
"68": (2, 4),
"45": (1, 13),
"44": (1, 12),
"23": (0, 23),
"26": (0, 26),
"47": (1, 15),
"46": (1, 14),
"27": (0, 27),
"65": (2, 1),
"22": (0, 22),
"63": (1, 31),
"62": (1, 30),
"37": (1, 5),
"36": (1, 4),
"33": (1, 1),
"32": (1, 0),
"61": (1, 29),
"86": (2, 22),
"88": (2, 24),
"87": (2, 23),
"89": (2, 25),
"10": (0, 10),
"11": (0, 11),
"9": (0, 9),
"81": (2, 17),
"8": (0, 8),
"80": (2, 16),
"78": (2, 14),
"79": (2, 15),
"76": (2, 12),
"77": (2, 13),
"74": (2, 10),
"75": (2, 11),
"72": (2, 8),
"73": (2, 9),
"70": (2, 6),
"71": (2, 7)
}
def read(sensor, pin):
gpio = pin_to_gpio.get(str(pin).upper(), None)
if gpio is None:
match = re.match('GPIO([0123])_(\d+)', pin, re.IGNORECASE)
if match is not None:
gpio = (int(match.group(1)), int(match.group(2)))
if gpio is None or gpio[0] < 0 or gpio[0] > 3 or gpio[1] < 0 or gpio[1] > 31:
raise ValueError('Pin must be a valid GPIO identifier like P9_12 or GPIO1_28.')
# Get a reading from C driver code.
result, humidity, temp = driver.read(sensor, gpio[0], gpio[1])
if result in common.TRANSIENT_ERRORS:
# Signal no result could be obtained, but the caller can retry.
return (None, None)
elif result == common.DHT_ERROR_GPIO:
raise RuntimeError('Error accessing GPIO. Make sure program is run as root with sudo!')
elif result != common.DHT_SUCCESS:
# Some kind of error occured.
raise RuntimeError('Error calling DHT test driver read: {0}'.format(result))
return (humidity, temp)
| true | true |
f7f57b4c32e312280ee8b38d7dfed7a7d52abfab | 4,903 | py | Python | 101-sutter-county/final-results/parse_sutter.py | worace/california-2016-election-precinct-maps | 39e9a6e797aca1b5b5f5129294807dfadb5a795d | [
"MIT"
] | 82 | 2016-12-30T02:07:31.000Z | 2022-02-26T00:39:38.000Z | 101-sutter-county/final-results/parse_sutter.py | worace/california-2016-election-precinct-maps | 39e9a6e797aca1b5b5f5129294807dfadb5a795d | [
"MIT"
] | 3 | 2017-01-16T19:12:31.000Z | 2017-04-03T03:07:29.000Z | 101-sutter-county/final-results/parse_sutter.py | worace/california-2016-election-precinct-maps | 39e9a6e797aca1b5b5f5129294807dfadb5a795d | [
"MIT"
] | 29 | 2017-01-02T08:45:30.000Z | 2021-11-17T15:19:31.000Z | import sys
import os
import re
import csv
import pandas as pd
import numpy as np
from subprocess import call
fips = '101'
call(['pdftotext','-layout',sys.argv[1]])
pages = []
currentPage = ''
pageNo = 0
if not os.path.exists('results/'):
os.makedirs('results/')
with open(sys.argv[1].replace('pdf','txt')) as file:
filename = '%s.csv' % sys.argv[1].replace('.pdf','')
out = open(filename,'w')
csvwriter = csv.writer(out)
# remove the top of each page (some number of lines after the )
resultstring = re.sub(r'(Vote-By-Mail Ballot Totals ?|All-Mail-Ballot Totals ?)\n? *(\d+)',r'\1 \2','%s' % file.read(),flags=re.MULTILINE)
resultstring = re.sub(r'()(.*\n)+?(.+? \d{4} |.+? Vote-By-Mail Ballot Totals +|.+? All-Mail-Ballot Totals +)',r'\3',resultstring,flags=re.MULTILINE)
# remove the bottom with precinct totals
resultstring = re.sub(r'(\n\nPrecinct Totals)(.*\n)+',r'',resultstring,flags=re.MULTILINE)
print resultstring
# resultstring = re.sub(r' +Turnout \(%\)\n +',',',resultstring,flags=re.MULTILINE)
# remove insufficient turnout line, while keeping numbers scattered through it
# resultstring = re.sub(r'\*+|\s+Insufficient\n +|\n +| +Turnout\s+| +to +Protect |Voter +Privacy\n +| +\*+ *',' ',resultstring,flags=re.MULTILINE)
# replace multiple spaces with a comma
resultstring = re.sub(r' {2,}(?!\n)',',',resultstring,flags=re.MULTILINE)
# put number and name in separate columns
resultstring = re.sub(r'(.+? )(\d{4},|- .+?,)',r'\1,\2',resultstring)
results = resultstring.split('\n')
# cut off the last leftover line
final_results = results[0:-1];
# column_names = ['pct16','pctnum','reg','ballots','turnout','pres_stein','pres_clinton','pres_lariva','pres_trump','pres_johnson','ussenate_sanchez','ussenate_harris']
# column_names = ['pct16','pctnum','reg','ballots','turnout','prop51_yes','prop51_no','prop52_yes','prop52_no','prop53_yes','prop53_no','prop54_yes','prop54_no']
# column_names = ['pct16','pctnum','reg','ballots','turnout','prop55_yes','prop55_no','prop56_yes','prop56_no','prop57_yes','prop57_no','prop58_yes','prop58_no']
# column_names = ['pct16','pctnum','reg','ballots','turnout','prop59_yes','prop59_no','prop60_yes','prop60_no','prop61_yes','prop61_no','prop62_yes','prop62_no']
# column_names = ['pct16','pctnum','reg','ballots','turnout','prop63_yes','prop63_no','prop64_yes','prop64_no','prop65_yes','prop65_no','prop66_yes','prop66_no']
column_names = ['pct16','pctnum','reg','ballots','turnout','prop67_yes','prop67_no']
csvwriter.writerow(column_names)
# print final_results[2191]
results_array = []
for row in final_results:
row_array = row.split(',')[0:len(column_names)]
row_array[1] = '%s-%s' % (fips,row_array[1])
results_array.append(row_array)
csvwriter.writerow(row_array)
out.close()
# out.write('pct16,reg,ballots,turnout,pres_stein,pres_clinton,pres_lariva,pres_trump,pres_johnson,ussen_sanchez,ussen_harris,?\n')
# column_types = [agate.Text(),agate.Number(),agate.Number(),agate.Number(),agate.Number(),agate.Number(),agate.Number(),agate.Number(),agate.Number(),agate.Number(),agate.Number()]
# result_table = agate.Table.from_csv(results_array,column_names,column_types)
# result_table.to_csv('results/agate_output.csv')
result_table = pd.read_csv(filename,dtype={'pct16':str})
by_pct = result_table.groupby(['pct16']).agg({
# 'pctnum':'max','reg':'min','ballots':'min','turnout':'min','pres_stein':'sum','pres_clinton':'sum','pres_lariva':'sum','pres_trump':'sum','pres_johnson':'sum','ussenate_sanchez':'sum','ussenate_harris':'sum'
# 'pctnum':'max','reg':'min','ballots':'min','turnout':'min','prop51_yes':'sum','prop51_no':'sum','prop52_yes':'sum','prop52_no':'sum','prop53_yes':'sum','prop53_no':'sum','prop54_yes':'sum','prop54_no':'sum'
# 'pctnum':'max','reg':'min','ballots':'min','turnout':'min','prop55_yes':'sum','prop55_no':'sum','prop56_yes':'sum','prop56_no':'sum','prop57_yes':'sum','prop57_no':'sum','prop58_yes':'sum','prop58_no':'sum'
# 'pctnum':'max','reg':'min','ballots':'min','turnout':'min','prop59_yes':'sum','prop59_no':'sum','prop60_yes':'sum','prop60_no':'sum','prop61_yes':'sum','prop61_no':'sum','prop62_yes':'sum','prop62_no':'sum'
# 'pctnum':'max','reg':'min','ballots':'min','turnout':'min','prop63_yes':'sum','prop63_no':'sum','prop64_yes':'sum','prop64_no':'sum','prop65_yes':'sum','prop65_no':'sum','prop66_yes':'sum','prop66_no':'sum'
'pctnum':'max','reg':'min','ballots':'min','turnout':'min','prop67_yes':'sum','prop67_no':'sum'
})
by_pct[column_names[1:2] + column_names[5:]].to_csv('results/%s-results.csv' % sys.argv[1].replace('.pdf',''))
# print resultstring
# results = [re.findall(r'(?: *)(\d{7})(?:(\n)(.*\n){2,3} *)(Total.+)',resultstring,flags=re.MULTILINE)]
# for x in range(0,len(results[0])):
# out.write('%s,%s\n' % (results[0][x][0],re.sub(r' +',',',''.join(results[0][x][len(results[0][x])-1])))) | 61.2875 | 212 | 0.674485 | import sys
import os
import re
import csv
import pandas as pd
import numpy as np
from subprocess import call
fips = '101'
call(['pdftotext','-layout',sys.argv[1]])
pages = []
currentPage = ''
pageNo = 0
if not os.path.exists('results/'):
os.makedirs('results/')
with open(sys.argv[1].replace('pdf','txt')) as file:
filename = '%s.csv' % sys.argv[1].replace('.pdf','')
out = open(filename,'w')
csvwriter = csv.writer(out)
resultstring = re.sub(r'(Vote-By-Mail Ballot Totals ?|All-Mail-Ballot Totals ?)\n? *(\d+)',r'\1 \2','%s' % file.read(),flags=re.MULTILINE)
resultstring = re.sub(r'()(.*\n)+?(.+? \d{4} |.+? Vote-By-Mail Ballot Totals +|.+? All-Mail-Ballot Totals +)',r'\3',resultstring,flags=re.MULTILINE)
resultstring = re.sub(r'(\n\nPrecinct Totals)(.*\n)+',r'',resultstring,flags=re.MULTILINE)
print resultstring
resultstring = re.sub(r' {2,}(?!\n)',',',resultstring,flags=re.MULTILINE)
resultstring = re.sub(r'(.+? )(\d{4},|- .+?,)',r'\1,\2',resultstring)
results = resultstring.split('\n')
final_results = results[0:-1];
column_names = ['pct16','pctnum','reg','ballots','turnout','prop67_yes','prop67_no']
csvwriter.writerow(column_names)
results_array = []
for row in final_results:
row_array = row.split(',')[0:len(column_names)]
row_array[1] = '%s-%s' % (fips,row_array[1])
results_array.append(row_array)
csvwriter.writerow(row_array)
out.close()
result_table = pd.read_csv(filename,dtype={'pct16':str})
by_pct = result_table.groupby(['pct16']).agg({
'pctnum':'max','reg':'min','ballots':'min','turnout':'min','prop67_yes':'sum','prop67_no':'sum'
})
by_pct[column_names[1:2] + column_names[5:]].to_csv('results/%s-results.csv' % sys.argv[1].replace('.pdf',''))
| false | true |
f7f57b4cf9e07d363a5f52022fb6ab77136ae4e1 | 29,817 | py | Python | tests/git_cl_test.py | yetu/repotools | 1f52004a33ee27f539bb4c831b8e8a37751550a8 | [
"BSD-3-Clause"
] | null | null | null | tests/git_cl_test.py | yetu/repotools | 1f52004a33ee27f539bb4c831b8e8a37751550a8 | [
"BSD-3-Clause"
] | null | null | null | tests/git_cl_test.py | yetu/repotools | 1f52004a33ee27f539bb4c831b8e8a37751550a8 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unit tests for git_cl.py."""
import os
import StringIO
import stat
import sys
import unittest
import re
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from testing_support.auto_stub import TestCase
import git_cl
import git_common
import subprocess2
import presubmit_support
class PresubmitMock(object):
def __init__(self, *args, **kwargs):
self.reviewers = []
@staticmethod
def should_continue():
return True
class RietveldMock(object):
def __init__(self, *args, **kwargs):
pass
@staticmethod
def get_description(issue):
return 'Issue: %d' % issue
@staticmethod
def get_issue_properties(_issue, _messages):
return {
'reviewers': ['joe@chromium.org', 'john@chromium.org'],
'messages': [
{
'approval': True,
'sender': 'john@chromium.org',
},
],
}
class WatchlistsMock(object):
def __init__(self, _):
pass
@staticmethod
def GetWatchersForPaths(_):
return ['joe@example.com']
class CodereviewSettingsFileMock(object):
def __init__(self):
pass
# pylint: disable=R0201
def read(self):
return ("CODE_REVIEW_SERVER: gerrit.chromium.org\n" +
"GERRIT_HOST: gerrit.chromium.org\n" +
"GERRIT_PORT: 29418\n")
class TestGitCl(TestCase):
def setUp(self):
super(TestGitCl, self).setUp()
self.calls = []
self._calls_done = 0
self.mock(subprocess2, 'call', self._mocked_call)
self.mock(subprocess2, 'check_call', self._mocked_call)
self.mock(subprocess2, 'check_output', self._mocked_call)
self.mock(subprocess2, 'communicate', self._mocked_call)
self.mock(subprocess2, 'Popen', self._mocked_call)
self.mock(git_common, 'get_or_create_merge_base',
lambda *a: (
self._mocked_call(['get_or_create_merge_base']+list(a))))
self.mock(git_cl, 'FindCodereviewSettingsFile', lambda: '')
self.mock(git_cl, 'ask_for_data', self._mocked_call)
self.mock(git_cl.breakpad, 'post', self._mocked_call)
self.mock(git_cl.breakpad, 'SendStack', self._mocked_call)
self.mock(git_cl.presubmit_support, 'DoPresubmitChecks', PresubmitMock)
self.mock(git_cl.rietveld, 'Rietveld', RietveldMock)
self.mock(git_cl.rietveld, 'CachingRietveld', RietveldMock)
self.mock(git_cl.upload, 'RealMain', self.fail)
self.mock(git_cl.watchlists, 'Watchlists', WatchlistsMock)
# It's important to reset settings to not have inter-tests interference.
git_cl.settings = None
def tearDown(self):
if not self.has_failed():
self.assertEquals([], self.calls)
super(TestGitCl, self).tearDown()
def _mocked_call(self, *args, **_kwargs):
self.assertTrue(
self.calls,
'@%d Expected: <Missing> Actual: %r' % (self._calls_done, args))
expected_args, result = self.calls.pop(0)
# Also logs otherwise it could get caught in a try/finally and be hard to
# diagnose.
if expected_args != args:
msg = '@%d Expected: %r Actual: %r' % (
self._calls_done, expected_args, args)
git_cl.logging.error(msg)
self.fail(msg)
self._calls_done += 1
return result
@classmethod
def _upload_calls(cls, similarity, find_copies, private):
return (cls._git_base_calls(similarity, find_copies) +
cls._git_upload_calls(private))
@classmethod
def _upload_no_rev_calls(cls, similarity, find_copies):
return (cls._git_base_calls(similarity, find_copies) +
cls._git_upload_no_rev_calls())
@classmethod
def _git_base_calls(cls, similarity, find_copies):
if similarity is None:
similarity = '50'
similarity_call = ((['git', 'config', '--int', '--get',
'branch.master.git-cl-similarity'],), '')
else:
similarity_call = ((['git', 'config', '--int',
'branch.master.git-cl-similarity', similarity],), '')
if find_copies is None:
find_copies = True
find_copies_call = ((['git', 'config', '--int', '--get',
'branch.master.git-find-copies'],), '')
else:
val = str(int(find_copies))
find_copies_call = ((['git', 'config', '--int',
'branch.master.git-find-copies', val],), '')
if find_copies:
stat_call = ((['git', 'diff', '--no-ext-diff', '--stat',
'--find-copies-harder', '-l100000', '-C'+similarity,
'fake_ancestor_sha', 'HEAD'],), '+dat')
else:
stat_call = ((['git', 'diff', '--no-ext-diff', '--stat',
'-M'+similarity, 'fake_ancestor_sha', 'HEAD'],), '+dat')
return [
((['git', 'config', 'rietveld.autoupdate'],), ''),
((['git', 'config', 'rietveld.server'],),
'codereview.example.com'),
((['git', 'symbolic-ref', 'HEAD'],), 'master'),
similarity_call,
((['git', 'symbolic-ref', 'HEAD'],), 'master'),
find_copies_call,
((['git', 'update-index', '--refresh', '-q'],), ''),
((['git', 'diff-index', '--name-status', 'HEAD'],), ''),
((['git', 'symbolic-ref', 'HEAD'],), 'master'),
((['git', 'config', 'branch.master.merge'],), 'master'),
((['git', 'config', 'branch.master.remote'],), 'origin'),
((['get_or_create_merge_base', 'master', 'master'],),
'fake_ancestor_sha'),
] + cls._git_sanity_checks('fake_ancestor_sha', 'master') + [
((['git', 'rev-parse', '--show-cdup'],), ''),
((['git', 'rev-parse', 'HEAD'],), '12345'),
((['git', 'diff', '--name-status', '--no-renames', '-r',
'fake_ancestor_sha...', '.'],),
'M\t.gitignore\n'),
((['git', 'config', 'branch.master.rietveldissue'],), ''),
((['git', 'config', 'branch.master.rietveldpatchset'],),
''),
((['git', 'log', '--pretty=format:%s%n%n%b',
'fake_ancestor_sha...'],),
'foo'),
((['git', 'config', 'user.email'],), 'me@example.com'),
stat_call,
((['git', 'config', 'gerrit.host'],), ''),
((['git', 'log', '--pretty=format:%s\n\n%b',
'fake_ancestor_sha..HEAD'],),
'desc\n'),
((['git', 'config', 'rietveld.bug-prefix'],), ''),
]
@classmethod
def _git_upload_no_rev_calls(cls):
return [
((['git', 'config', 'core.editor'],), ''),
]
@classmethod
def _git_upload_calls(cls, private):
if private:
cc_call = []
private_call = []
else:
cc_call = [((['git', 'config', 'rietveld.cc'],), '')]
private_call = [
((['git', 'config', 'rietveld.private'],), '')]
return [
((['git', 'config', 'core.editor'],), ''),
] + cc_call + private_call + [
((['git', 'config', 'branch.master.base-url'],), ''),
((['git',
'config', '--local', '--get-regexp', '^svn-remote\\.'],),
(('', None), 0)),
((['git', 'rev-parse', '--show-cdup'],), ''),
((['git', 'svn', 'info'],), ''),
((['git', 'config', 'rietveld.project'],), ''),
((['git',
'config', 'branch.master.rietveldissue', '1'],), ''),
((['git', 'config', 'branch.master.rietveldserver',
'https://codereview.example.com'],), ''),
((['git',
'config', 'branch.master.rietveldpatchset', '2'],), ''),
((['git', 'rev-parse', 'HEAD'],), 'hash'),
((['git', 'symbolic-ref', 'HEAD'],), 'hash'),
((['git',
'config', 'branch.hash.last-upload-hash', 'hash'],), ''),
]
@staticmethod
def _git_sanity_checks(diff_base, working_branch):
fake_ancestor = 'fake_ancestor'
fake_cl = 'fake_cl_for_patch'
return [
# Calls to verify branch point is ancestor
((['git',
'rev-parse', '--verify', diff_base],), fake_ancestor),
((['git',
'merge-base', fake_ancestor, 'HEAD'],), fake_ancestor),
((['git',
'rev-list', '^' + fake_ancestor, 'HEAD'],), fake_cl),
# Mock a config miss (error code 1)
((['git',
'config', 'gitcl.remotebranch'],), (('', None), 1)),
# Call to GetRemoteBranch()
((['git',
'config', 'branch.%s.merge' % working_branch],),
'refs/heads/master'),
((['git',
'config', 'branch.%s.remote' % working_branch],), 'origin'),
((['git', 'rev-list', '^' + fake_ancestor,
'refs/remotes/origin/master'],), ''),
]
@classmethod
def _dcommit_calls_1(cls):
return [
((['git',
'config', '--local', '--get-regexp', '^svn-remote\\.'],),
((('svn-remote.svn.url svn://svn.chromium.org/chrome\n'
'svn-remote.svn.fetch trunk/src:refs/remotes/origin/master'),
None),
0)),
((['git', 'config', 'rietveld.autoupdate'],),
''),
((['git',
'config', 'rietveld.server'],), 'codereview.example.com'),
((['git', 'symbolic-ref', 'HEAD'],), 'refs/heads/working'),
((['git', 'config', '--int', '--get',
'branch.working.git-cl-similarity'],), ''),
((['git', 'symbolic-ref', 'HEAD'],), 'refs/heads/working'),
((['git', 'config', '--int', '--get',
'branch.working.git-find-copies'],), ''),
((['git', 'symbolic-ref', 'HEAD'],), 'refs/heads/working'),
((['git',
'config', 'branch.working.merge'],), 'refs/heads/master'),
((['git', 'config', 'branch.working.remote'],), 'origin'),
((['git', 'config', 'branch.working.merge'],),
'refs/heads/master'),
((['git', 'config', 'branch.working.remote'],), 'origin'),
((['git', 'rev-list', '--merges',
'--grep=^SVN changes up to revision [0-9]*$',
'refs/remotes/origin/master^!'],), ''),
((['git', 'update-index', '--refresh', '-q'],), ''),
((['git', 'diff-index', '--name-status', 'HEAD'],), ''),
((['git', 'rev-list', '^refs/heads/working',
'refs/remotes/origin/master'],),
''),
((['git',
'log', '--grep=^git-svn-id:', '-1', '--pretty=format:%H'],),
'3fc18b62c4966193eb435baabe2d18a3810ec82e'),
((['git',
'rev-list', '^3fc18b62c4966193eb435baabe2d18a3810ec82e',
'refs/remotes/origin/master'],), ''),
((['git',
'merge-base', 'refs/remotes/origin/master', 'HEAD'],),
'fake_ancestor_sha'),
]
@classmethod
def _dcommit_calls_normal(cls):
return [
((['git', 'rev-parse', '--show-cdup'],), ''),
((['git', 'rev-parse', 'HEAD'],),
'00ff397798ea57439712ed7e04ab96e13969ef40'),
((['git',
'diff', '--name-status', '--no-renames', '-r', 'fake_ancestor_sha...',
'.'],),
'M\tPRESUBMIT.py'),
((['git',
'config', 'branch.working.rietveldissue'],), '12345'),
((['git',
'config', 'branch.working.rietveldpatchset'],), '31137'),
((['git', 'config', 'branch.working.rietveldserver'],),
'codereview.example.com'),
((['git', 'config', 'user.email'],), 'author@example.com'),
((['git', 'config', 'rietveld.tree-status-url'],), ''),
]
@classmethod
def _dcommit_calls_bypassed(cls):
return [
((['git',
'config', 'branch.working.rietveldissue'],), '12345'),
((['git', 'config', 'branch.working.rietveldserver'],),
'codereview.example.com'),
((['git', 'config', 'rietveld.tree-status-url'],), ''),
(('GitClHooksBypassedCommit',
'Issue https://codereview.example.com/12345 bypassed hook when '
'committing (tree status was "unset")'), None),
]
@classmethod
def _dcommit_calls_3(cls):
return [
((['git',
'diff', '--no-ext-diff', '--stat', '--find-copies-harder',
'-l100000', '-C50', 'fake_ancestor_sha',
'refs/heads/working'],),
(' PRESUBMIT.py | 2 +-\n'
' 1 files changed, 1 insertions(+), 1 deletions(-)\n')),
(('About to commit; enter to confirm.',), None),
((['git', 'show-ref', '--quiet', '--verify',
'refs/heads/git-cl-commit'],),
(('', None), 0)),
((['git', 'branch', '-D', 'git-cl-commit'],), ''),
((['git', 'show-ref', '--quiet', '--verify',
'refs/heads/git-cl-cherry-pick'],), ''),
((['git', 'rev-parse', '--show-cdup'],), '\n'),
((['git', 'checkout', '-q', '-b', 'git-cl-commit'],), ''),
((['git', 'reset', '--soft', 'fake_ancestor_sha'],), ''),
((['git', 'commit', '-m',
'Issue: 12345\n\nR=john@chromium.org\n\n'
'Review URL: https://codereview.example.com/12345'],),
''),
((['git',
'svn', 'dcommit', '-C50', '--no-rebase', '--rmdir'],),
(('', None), 0)),
((['git', 'checkout', '-q', 'working'],), ''),
((['git', 'branch', '-D', 'git-cl-commit'],), ''),
]
@staticmethod
def _cmd_line(description, args, similarity, find_copies, private):
"""Returns the upload command line passed to upload.RealMain()."""
return [
'upload', '--assume_yes', '--server',
'https://codereview.example.com',
'--message', description
] + args + [
'--cc', 'joe@example.com',
] + (['--private'] if private else []) + [
'--git_similarity', similarity or '50'
] + (['--git_no_find_copies'] if find_copies == False else []) + [
'fake_ancestor_sha', 'HEAD'
]
def _run_reviewer_test(
self,
upload_args,
expected_description,
returned_description,
final_description,
reviewers,
private=False):
"""Generic reviewer test framework."""
try:
similarity = upload_args[upload_args.index('--similarity')+1]
except ValueError:
similarity = None
if '--find-copies' in upload_args:
find_copies = True
elif '--no-find-copies' in upload_args:
find_copies = False
else:
find_copies = None
private = '--private' in upload_args
self.calls = self._upload_calls(similarity, find_copies, private)
def RunEditor(desc, _, **kwargs):
self.assertEquals(
'# Enter a description of the change.\n'
'# This will be displayed on the codereview site.\n'
'# The first line will also be used as the subject of the review.\n'
'#--------------------This line is 72 characters long'
'--------------------\n' +
expected_description,
desc)
return returned_description
self.mock(git_cl.gclient_utils, 'RunEditor', RunEditor)
def check_upload(args):
cmd_line = self._cmd_line(final_description, reviewers, similarity,
find_copies, private)
self.assertEquals(cmd_line, args)
return 1, 2
self.mock(git_cl.upload, 'RealMain', check_upload)
git_cl.main(['upload'] + upload_args)
def test_no_reviewer(self):
self._run_reviewer_test(
[],
'desc\n\nBUG=',
'# Blah blah comment.\ndesc\n\nBUG=',
'desc\n\nBUG=',
[])
def test_keep_similarity(self):
self._run_reviewer_test(
['--similarity', '70'],
'desc\n\nBUG=',
'# Blah blah comment.\ndesc\n\nBUG=',
'desc\n\nBUG=',
[])
def test_keep_find_copies(self):
self._run_reviewer_test(
['--no-find-copies'],
'desc\n\nBUG=',
'# Blah blah comment.\ndesc\n\nBUG=\n',
'desc\n\nBUG=',
[])
def test_private(self):
self._run_reviewer_test(
['--private'],
'desc\n\nBUG=',
'# Blah blah comment.\ndesc\n\nBUG=\n',
'desc\n\nBUG=',
[])
def test_reviewers_cmd_line(self):
# Reviewer is passed as-is
description = 'desc\n\nR=foo@example.com\nBUG='
self._run_reviewer_test(
['-r' 'foo@example.com'],
description,
'\n%s\n' % description,
description,
['--reviewers=foo@example.com'])
def test_reviewer_tbr_overriden(self):
# Reviewer is overriden with TBR
# Also verifies the regexp work without a trailing LF
description = 'Foo Bar\n\nTBR=reviewer@example.com'
self._run_reviewer_test(
['-r' 'foo@example.com'],
'desc\n\nR=foo@example.com\nBUG=',
description.strip('\n'),
description,
['--reviewers=reviewer@example.com'])
def test_reviewer_multiple(self):
# Handles multiple R= or TBR= lines.
description = (
'Foo Bar\nTBR=reviewer@example.com\nBUG=\nR=another@example.com')
self._run_reviewer_test(
[],
'desc\n\nBUG=',
description,
description,
['--reviewers=another@example.com,reviewer@example.com'])
def test_reviewer_send_mail(self):
# --send-mail can be used without -r if R= is used
description = 'Foo Bar\nR=reviewer@example.com'
self._run_reviewer_test(
['--send-mail'],
'desc\n\nBUG=',
description.strip('\n'),
description,
['--reviewers=reviewer@example.com', '--send_mail'])
def test_reviewer_send_mail_no_rev(self):
# Fails without a reviewer.
stdout = StringIO.StringIO()
stderr = StringIO.StringIO()
try:
self.calls = self._upload_no_rev_calls(None, None)
def RunEditor(desc, _, **kwargs):
return desc
self.mock(git_cl.gclient_utils, 'RunEditor', RunEditor)
self.mock(sys, 'stdout', stdout)
self.mock(sys, 'stderr', stderr)
git_cl.main(['upload', '--send-mail'])
self.fail()
except SystemExit:
self.assertEqual(
'Using 50% similarity for rename/copy detection. Override with '
'--similarity.\n',
stdout.getvalue())
self.assertEqual(
'Must specify reviewers to send email.\n', stderr.getvalue())
def test_dcommit(self):
self.calls = (
self._dcommit_calls_1() +
self._git_sanity_checks('fake_ancestor_sha', 'working') +
self._dcommit_calls_normal() +
self._dcommit_calls_3())
git_cl.main(['dcommit'])
def test_dcommit_bypass_hooks(self):
self.calls = (
self._dcommit_calls_1() +
self._dcommit_calls_bypassed() +
self._dcommit_calls_3())
git_cl.main(['dcommit', '--bypass-hooks'])
@classmethod
def _gerrit_base_calls(cls):
return [
((['git', 'config', 'rietveld.autoupdate'],),
''),
((['git',
'config', 'rietveld.server'],), 'codereview.example.com'),
((['git', 'symbolic-ref', 'HEAD'],), 'master'),
((['git', 'config', '--int', '--get',
'branch.master.git-cl-similarity'],), ''),
((['git', 'symbolic-ref', 'HEAD'],), 'master'),
((['git', 'config', '--int', '--get',
'branch.master.git-find-copies'],), ''),
((['git', 'update-index', '--refresh', '-q'],), ''),
((['git', 'diff-index', '--name-status', 'HEAD'],), ''),
((['git', 'symbolic-ref', 'HEAD'],), 'master'),
((['git', 'config', 'branch.master.merge'],), 'master'),
((['git', 'config', 'branch.master.remote'],), 'origin'),
((['get_or_create_merge_base', 'master', 'master'],),
'fake_ancestor_sha'),
] + cls._git_sanity_checks('fake_ancestor_sha', 'master') + [
((['git', 'rev-parse', '--show-cdup'],), ''),
((['git', 'rev-parse', 'HEAD'],), '12345'),
((['git',
'diff', '--name-status', '--no-renames', '-r',
'fake_ancestor_sha...', '.'],),
'M\t.gitignore\n'),
((['git', 'config', 'branch.master.rietveldissue'],), ''),
((['git',
'config', 'branch.master.rietveldpatchset'],), ''),
((['git',
'log', '--pretty=format:%s%n%n%b', 'fake_ancestor_sha...'],),
'foo'),
((['git', 'config', 'user.email'],), 'me@example.com'),
((['git',
'diff', '--no-ext-diff', '--stat', '--find-copies-harder',
'-l100000', '-C50', 'fake_ancestor_sha', 'HEAD'],),
'+dat'),
]
@staticmethod
def _gerrit_upload_calls(description, reviewers):
calls = [
((['git', 'config', 'gerrit.host'],),
'gerrit.example.com'),
((['git', 'log', '--pretty=format:%s\n\n%b',
'fake_ancestor_sha..HEAD'],),
description)
]
if git_cl.CHANGE_ID not in description:
calls += [
((['git', 'log', '--pretty=format:%s\n\n%b',
'fake_ancestor_sha..HEAD'],),
description),
((['git', 'commit', '--amend', '-m', description],),
''),
((['git', 'log', '--pretty=format:%s\n\n%b',
'fake_ancestor_sha..HEAD'],),
description)
]
calls += [
((['git', 'rev-list', 'origin/master..'],), ''),
((['git', 'config', 'rietveld.cc'],), '')
]
receive_pack = '--receive-pack=git receive-pack '
receive_pack += '--cc=joe@example.com' # from watch list
if reviewers:
receive_pack += ' '
receive_pack += ' '.join(
'--reviewer=' + email for email in sorted(reviewers))
receive_pack += ''
calls += [
((['git',
'push', receive_pack, 'origin', 'HEAD:refs/for/master'],),
'')
]
return calls
def _run_gerrit_upload_test(
self,
upload_args,
description,
reviewers):
"""Generic gerrit upload test framework."""
self.calls = self._gerrit_base_calls()
self.calls += self._gerrit_upload_calls(description, reviewers)
git_cl.main(['upload'] + upload_args)
  def test_gerrit_upload_without_change_id(self):
    """No Change-Id in the description: the amend path is expected to run."""
    self._run_gerrit_upload_test(
        [],
        'desc\n\nBUG=\n',
        [])
  def test_gerrit_no_reviewer(self):
    """Description already carries a Change-Id and no reviewers are passed."""
    self._run_gerrit_upload_test(
        [],
        'desc\n\nBUG=\nChange-Id:123456789\n',
        [])
  def test_gerrit_reviewers_cmd_line(self):
    """-r on the command line becomes a --reviewer on the gerrit push."""
    self._run_gerrit_upload_test(
        ['-r', 'foo@example.com'],
        'desc\n\nBUG=\nChange-Id:123456789',
        ['foo@example.com'])
  def test_gerrit_reviewer_multiple(self):
    """Reviewers are harvested from both TBR= and R= lines in the description."""
    self._run_gerrit_upload_test(
        [],
        'desc\nTBR=reviewer@example.com\nBUG=\nR=another@example.com\n'
        'Change-Id:123456789\n',
        ['reviewer@example.com', 'another@example.com'])
  def test_config_gerrit_download_hook(self):
    """`git cl config` on a gerrit repo.

    Verifies that codereview.settings values are written into git config,
    that the commit-msg hook is downloaded into .git/hooks and made
    executable, and that the interactive settings prompts run afterwards.
    The os/path functions inside git_cl are mocked so no filesystem or
    network access happens.
    """
    self.mock(git_cl, 'FindCodereviewSettingsFile', CodereviewSettingsFileMock)
    def ParseCodereviewSettingsContent(content):
      # Canned parse result standing in for the real settings-file parser.
      keyvals = {}
      keyvals['CODE_REVIEW_SERVER'] = 'gerrit.chromium.org'
      keyvals['GERRIT_HOST'] = 'gerrit.chromium.org'
      keyvals['GERRIT_PORT'] = '29418'
      return keyvals
    self.mock(git_cl.gclient_utils, 'ParseCodereviewSettingsContent',
              ParseCodereviewSettingsContent)
    self.mock(git_cl.os, 'access', self._mocked_call)
    self.mock(git_cl.os, 'chmod', self._mocked_call)
    src_dir = os.path.join(os.path.sep, 'usr', 'local', 'src')
    def AbsPath(path):
      # Pretend the checkout lives under src_dir.
      if not path.startswith(os.path.sep):
        return os.path.join(src_dir, path)
      return path
    self.mock(git_cl.os.path, 'abspath', AbsPath)
    commit_msg_path = os.path.join(src_dir, '.git', 'hooks', 'commit-msg')
    def Exists(path):
      # The hook does not exist yet, so DownloadHooks must fetch it.
      if path == commit_msg_path:
        return False
      # others paths, such as /usr/share/locale/....
      return True
    self.mock(git_cl.os.path, 'exists', Exists)
    self.mock(git_cl, 'urlretrieve', self._mocked_call)
    self.mock(git_cl, 'hasSheBang', self._mocked_call)
    self.calls = [
        ((['git', 'config', 'rietveld.autoupdate'],),
         ''),
        ((['git', 'config', 'rietveld.server',
           'gerrit.chromium.org'],), ''),
        ((['git', 'config', '--unset-all', 'rietveld.cc'],), ''),
        ((['git', 'config', '--unset-all',
           'rietveld.private'],), ''),
        ((['git', 'config', '--unset-all',
           'rietveld.tree-status-url'],), ''),
        ((['git', 'config', '--unset-all',
           'rietveld.viewvc-url'],), ''),
        ((['git', 'config', '--unset-all',
           'rietveld.bug-prefix'],), ''),
        ((['git', 'config', '--unset-all',
           'rietveld.cpplint-regex'],), ''),
        ((['git', 'config', '--unset-all',
           'rietveld.cpplint-ignore-regex'],), ''),
        ((['git', 'config', '--unset-all',
           'rietveld.project'],), ''),
        ((['git', 'config', 'gerrit.host',
           'gerrit.chromium.org'],), ''),
        # DownloadHooks(False)
        ((['git', 'config', 'gerrit.host'],),
         'gerrit.chromium.org'),
        ((['git', 'rev-parse', '--show-cdup'],), ''),
        ((commit_msg_path, os.X_OK,), False),
        (('https://gerrit-review.googlesource.com/tools/hooks/commit-msg',
          commit_msg_path,), ''),
        ((commit_msg_path,), True),
        ((commit_msg_path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR,), ''),
        # GetCodereviewSettingsInteractively
        ((['git', 'config', 'rietveld.server'],),
         'gerrit.chromium.org'),
        (('Rietveld server (host[:port]) [https://gerrit.chromium.org]:',),
         ''),
        ((['git', 'config', 'rietveld.cc'],), ''),
        (('CC list:',), ''),
        ((['git', 'config', 'rietveld.private'],), ''),
        (('Private flag (rietveld only):',), ''),
        ((['git', 'config', 'rietveld.tree-status-url'],), ''),
        (('Tree status URL:',), ''),
        ((['git', 'config', 'rietveld.viewvc-url'],), ''),
        (('ViewVC URL:',), ''),
        # DownloadHooks(True)
        ((['git', 'config', 'rietveld.bug-prefix'],), ''),
        (('Bug Prefix:',), ''),
        ((commit_msg_path, os.X_OK,), True),
    ]
    git_cl.main(['config'])
def test_update_reviewers(self):
data = [
('foo', [], 'foo'),
('foo\nR=xx', [], 'foo\nR=xx'),
('foo\nTBR=xx', [], 'foo\nTBR=xx'),
('foo', ['a@c'], 'foo\n\nR=a@c'),
('foo\nR=xx', ['a@c'], 'foo\n\nR=a@c, xx'),
('foo\nTBR=xx', ['a@c'], 'foo\n\nR=a@c\nTBR=xx'),
('foo\nTBR=xx\nR=yy', ['a@c'], 'foo\n\nR=a@c, yy\nTBR=xx'),
('foo\nBUG=', ['a@c'], 'foo\nBUG=\nR=a@c'),
('foo\nR=xx\nTBR=yy\nR=bar', ['a@c'], 'foo\n\nR=a@c, xx, bar\nTBR=yy'),
('foo', ['a@c', 'b@c'], 'foo\n\nR=a@c, b@c'),
('foo\nBar\n\nR=\nBUG=', ['c@c'], 'foo\nBar\n\nR=c@c\nBUG='),
('foo\nBar\n\nR=\nBUG=\nR=', ['c@c'], 'foo\nBar\n\nR=c@c\nBUG='),
# Same as the line before, but full of whitespaces.
(
'foo\nBar\n\n R = \n BUG = \n R = ', ['c@c'],
'foo\nBar\n\nR=c@c\n BUG =',
),
# Whitespaces aren't interpreted as new lines.
('foo BUG=allo R=joe ', ['c@c'], 'foo BUG=allo R=joe\n\nR=c@c'),
]
expected = [i[2] for i in data]
actual = []
for orig, reviewers, _expected in data:
obj = git_cl.ChangeDescription(orig)
obj.update_reviewers(reviewers)
actual.append(obj.description)
self.assertEqual(expected, actual)
  def test_trybots_from_PRESUBMIT(self):
    """`git cl upload --auto-bots` appends a CQ_TRYBOTS= line.

    DoGetTryMasters is mocked to return one master/builder pair; the test
    captures the --message argument handed to upload.RealMain and checks
    that the description gained 'CQ_TRYBOTS=testMaster:testBuilder'.
    """
    TEST_MASTER = 'testMaster'
    TEST_BUILDER = 'testBuilder'
    MASTERS = {TEST_MASTER:{TEST_BUILDER:['a']}}
    self.mock(presubmit_support, 'DoGetTryMasters',
              lambda *args: MASTERS)
    change_mock = ChangeMock()
    changelist_mock = ChangelistMock(change_mock)
    self.mock(git_cl, 'is_dirty_git_tree', lambda x: False)
    self.mock(git_cl, 'print_stats', lambda *arg: True)
    self.mock(git_cl, 'Changelist', lambda *args: changelist_mock)
    self.mock(git_cl, 'CreateDescriptionFromLog', lambda arg: 'Commit message')
    self.mock(git_cl.ChangeDescription, 'prompt', lambda self: None)
    self.calls = [
        ((['git', 'config', 'rietveld.autoupdate',],),
         ''),
        ((['git', 'config', 'gerrit.host',],),
         ''),
        ((['git', 'rev-parse', '--show-cdup',],),
         ''),
        ((['git', 'config', 'rietveld.private',],),
         ''),
        ((['git', 'config', '--local', '--get-regexp', '^svn-remote\\.'],),
         ''),
        ((['git', 'config', 'rietveld.project',],),
         ''),
        ((['git', 'rev-parse', 'HEAD',],),
         ''),
    ]
    stored_description = []
    def check_upload(args):
      # Capture the value that follows '--message' on the upload command line.
      i = 0
      for arg in args:
        if arg == '--message':
          break
        i += 1
      self.assertTrue(i < len(args))
      stored_description.append(args[i+1])
      return 1, 2
    self.mock(git_cl.upload, 'RealMain', check_upload)
    git_cl.main(['upload', '--bypass-hooks', '--auto-bots'])
    found = re.search("CQ_TRYBOTS=(.*?)$", stored_description[0])
    self.assertTrue(found)
    self.assertEqual(found.group(1), '%s:%s' % (TEST_MASTER, TEST_BUILDER))
class ChangelistMock(object):
  """Minimal stand-in for git_cl.Changelist used by the upload tests.

  Every accessor returns a fixed empty/None value except GetChange, which
  hands back the change object supplied at construction time.
  """
  # Disable "Method could be a function"
  # pylint: disable=R0201

  def __init__(self, change_mock):
    # Change object returned by GetChange().
    self.change_mock = change_mock

  def GetChange(self, *args):
    return self.change_mock

  def GetIssue(self):
    # No issue is associated with this changelist.
    pass

  def GetBranch(self):
    return []

  def GetCommonAncestorWithUpstream(self):
    return []

  def GetCCList(self):
    return []

  def GetGitBaseUrlFromConfig(self):
    return ''

  def GetRemoteUrl(self):
    return ''

  def GetRietveldServer(self):
    # No rietveld server configured.
    pass

  def SetWatchers(self, *args):
    pass

  def SetIssue(self, issue):
    pass

  def SetPatchset(self, issue):
    pass
class ChangeMock(object):
  """Minimal stand-in for a presubmit Change object.

  Records the last description written via SetDescriptionText so tests can
  inspect what the code under test produced.
  """
  # Disable "Method could be a function"
  # pylint: disable=R0201

  def __init__(self):
    # Last description set by the code under test; None until written.
    self.stored_description = None

  def SetDescriptionText(self, desc):
    self.stored_description = desc

  def FullDescriptionText(self):
    return 'HIHI TEST DESCRIPTION'

  def AffectedFiles(self):
    return []

  def RepositoryRoot(self):
    return []

  def LocalPaths(self):
    # No local paths are tracked by this mock.
    pass
if __name__ == '__main__':
  # '-v' anywhere on the command line turns on verbose git_cl logging.
  log_level = git_cl.logging.ERROR
  if '-v' in sys.argv:
    log_level = git_cl.logging.DEBUG
  git_cl.logging.basicConfig(level=log_level)
  unittest.main()
| 34.193807 | 79 | 0.552504 |
import os
import StringIO
import stat
import sys
import unittest
import re
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from testing_support.auto_stub import TestCase
import git_cl
import git_common
import subprocess2
import presubmit_support
class PresubmitMock(object):
def __init__(self, *args, **kwargs):
self.reviewers = []
@staticmethod
def should_continue():
return True
class RietveldMock(object):
def __init__(self, *args, **kwargs):
pass
@staticmethod
def get_description(issue):
return 'Issue: %d' % issue
@staticmethod
def get_issue_properties(_issue, _messages):
return {
'reviewers': ['joe@chromium.org', 'john@chromium.org'],
'messages': [
{
'approval': True,
'sender': 'john@chromium.org',
},
],
}
class WatchlistsMock(object):
def __init__(self, _):
pass
@staticmethod
def GetWatchersForPaths(_):
return ['joe@example.com']
class CodereviewSettingsFileMock(object):
def __init__(self):
pass
def read(self):
return ("CODE_REVIEW_SERVER: gerrit.chromium.org\n" +
"GERRIT_HOST: gerrit.chromium.org\n" +
"GERRIT_PORT: 29418\n")
class TestGitCl(TestCase):
def setUp(self):
super(TestGitCl, self).setUp()
self.calls = []
self._calls_done = 0
self.mock(subprocess2, 'call', self._mocked_call)
self.mock(subprocess2, 'check_call', self._mocked_call)
self.mock(subprocess2, 'check_output', self._mocked_call)
self.mock(subprocess2, 'communicate', self._mocked_call)
self.mock(subprocess2, 'Popen', self._mocked_call)
self.mock(git_common, 'get_or_create_merge_base',
lambda *a: (
self._mocked_call(['get_or_create_merge_base']+list(a))))
self.mock(git_cl, 'FindCodereviewSettingsFile', lambda: '')
self.mock(git_cl, 'ask_for_data', self._mocked_call)
self.mock(git_cl.breakpad, 'post', self._mocked_call)
self.mock(git_cl.breakpad, 'SendStack', self._mocked_call)
self.mock(git_cl.presubmit_support, 'DoPresubmitChecks', PresubmitMock)
self.mock(git_cl.rietveld, 'Rietveld', RietveldMock)
self.mock(git_cl.rietveld, 'CachingRietveld', RietveldMock)
self.mock(git_cl.upload, 'RealMain', self.fail)
self.mock(git_cl.watchlists, 'Watchlists', WatchlistsMock)
git_cl.settings = None
def tearDown(self):
if not self.has_failed():
self.assertEquals([], self.calls)
super(TestGitCl, self).tearDown()
def _mocked_call(self, *args, **_kwargs):
self.assertTrue(
self.calls,
'@%d Expected: <Missing> Actual: %r' % (self._calls_done, args))
expected_args, result = self.calls.pop(0)
# Also logs otherwise it could get caught in a try/finally and be hard to
# diagnose.
if expected_args != args:
msg = '@%d Expected: %r Actual: %r' % (
self._calls_done, expected_args, args)
git_cl.logging.error(msg)
self.fail(msg)
self._calls_done += 1
return result
@classmethod
def _upload_calls(cls, similarity, find_copies, private):
return (cls._git_base_calls(similarity, find_copies) +
cls._git_upload_calls(private))
@classmethod
def _upload_no_rev_calls(cls, similarity, find_copies):
return (cls._git_base_calls(similarity, find_copies) +
cls._git_upload_no_rev_calls())
@classmethod
def _git_base_calls(cls, similarity, find_copies):
if similarity is None:
similarity = '50'
similarity_call = ((['git', 'config', '--int', '--get',
'branch.master.git-cl-similarity'],), '')
else:
similarity_call = ((['git', 'config', '--int',
'branch.master.git-cl-similarity', similarity],), '')
if find_copies is None:
find_copies = True
find_copies_call = ((['git', 'config', '--int', '--get',
'branch.master.git-find-copies'],), '')
else:
val = str(int(find_copies))
find_copies_call = ((['git', 'config', '--int',
'branch.master.git-find-copies', val],), '')
if find_copies:
stat_call = ((['git', 'diff', '--no-ext-diff', '--stat',
'--find-copies-harder', '-l100000', '-C'+similarity,
'fake_ancestor_sha', 'HEAD'],), '+dat')
else:
stat_call = ((['git', 'diff', '--no-ext-diff', '--stat',
'-M'+similarity, 'fake_ancestor_sha', 'HEAD'],), '+dat')
return [
((['git', 'config', 'rietveld.autoupdate'],), ''),
((['git', 'config', 'rietveld.server'],),
'codereview.example.com'),
((['git', 'symbolic-ref', 'HEAD'],), 'master'),
similarity_call,
((['git', 'symbolic-ref', 'HEAD'],), 'master'),
find_copies_call,
((['git', 'update-index', '--refresh', '-q'],), ''),
((['git', 'diff-index', '--name-status', 'HEAD'],), ''),
((['git', 'symbolic-ref', 'HEAD'],), 'master'),
((['git', 'config', 'branch.master.merge'],), 'master'),
((['git', 'config', 'branch.master.remote'],), 'origin'),
((['get_or_create_merge_base', 'master', 'master'],),
'fake_ancestor_sha'),
] + cls._git_sanity_checks('fake_ancestor_sha', 'master') + [
((['git', 'rev-parse', '--show-cdup'],), ''),
((['git', 'rev-parse', 'HEAD'],), '12345'),
((['git', 'diff', '--name-status', '--no-renames', '-r',
'fake_ancestor_sha...', '.'],),
'M\t.gitignore\n'),
((['git', 'config', 'branch.master.rietveldissue'],), ''),
((['git', 'config', 'branch.master.rietveldpatchset'],),
''),
((['git', 'log', '--pretty=format:%s%n%n%b',
'fake_ancestor_sha...'],),
'foo'),
((['git', 'config', 'user.email'],), 'me@example.com'),
stat_call,
((['git', 'config', 'gerrit.host'],), ''),
((['git', 'log', '--pretty=format:%s\n\n%b',
'fake_ancestor_sha..HEAD'],),
'desc\n'),
((['git', 'config', 'rietveld.bug-prefix'],), ''),
]
@classmethod
def _git_upload_no_rev_calls(cls):
return [
((['git', 'config', 'core.editor'],), ''),
]
@classmethod
def _git_upload_calls(cls, private):
if private:
cc_call = []
private_call = []
else:
cc_call = [((['git', 'config', 'rietveld.cc'],), '')]
private_call = [
((['git', 'config', 'rietveld.private'],), '')]
return [
((['git', 'config', 'core.editor'],), ''),
] + cc_call + private_call + [
((['git', 'config', 'branch.master.base-url'],), ''),
((['git',
'config', '--local', '--get-regexp', '^svn-remote\\.'],),
(('', None), 0)),
((['git', 'rev-parse', '--show-cdup'],), ''),
((['git', 'svn', 'info'],), ''),
((['git', 'config', 'rietveld.project'],), ''),
((['git',
'config', 'branch.master.rietveldissue', '1'],), ''),
((['git', 'config', 'branch.master.rietveldserver',
'https://codereview.example.com'],), ''),
((['git',
'config', 'branch.master.rietveldpatchset', '2'],), ''),
((['git', 'rev-parse', 'HEAD'],), 'hash'),
((['git', 'symbolic-ref', 'HEAD'],), 'hash'),
((['git',
'config', 'branch.hash.last-upload-hash', 'hash'],), ''),
]
@staticmethod
def _git_sanity_checks(diff_base, working_branch):
fake_ancestor = 'fake_ancestor'
fake_cl = 'fake_cl_for_patch'
return [
# Calls to verify branch point is ancestor
((['git',
'rev-parse', '--verify', diff_base],), fake_ancestor),
((['git',
'merge-base', fake_ancestor, 'HEAD'],), fake_ancestor),
((['git',
'rev-list', '^' + fake_ancestor, 'HEAD'],), fake_cl),
# Mock a config miss (error code 1)
((['git',
'config', 'gitcl.remotebranch'],), (('', None), 1)),
# Call to GetRemoteBranch()
((['git',
'config', 'branch.%s.merge' % working_branch],),
'refs/heads/master'),
((['git',
'config', 'branch.%s.remote' % working_branch],), 'origin'),
((['git', 'rev-list', '^' + fake_ancestor,
'refs/remotes/origin/master'],), ''),
]
@classmethod
def _dcommit_calls_1(cls):
return [
((['git',
'config', '--local', '--get-regexp', '^svn-remote\\.'],),
((('svn-remote.svn.url svn://svn.chromium.org/chrome\n'
'svn-remote.svn.fetch trunk/src:refs/remotes/origin/master'),
None),
0)),
((['git', 'config', 'rietveld.autoupdate'],),
''),
((['git',
'config', 'rietveld.server'],), 'codereview.example.com'),
((['git', 'symbolic-ref', 'HEAD'],), 'refs/heads/working'),
((['git', 'config', '--int', '--get',
'branch.working.git-cl-similarity'],), ''),
((['git', 'symbolic-ref', 'HEAD'],), 'refs/heads/working'),
((['git', 'config', '--int', '--get',
'branch.working.git-find-copies'],), ''),
((['git', 'symbolic-ref', 'HEAD'],), 'refs/heads/working'),
((['git',
'config', 'branch.working.merge'],), 'refs/heads/master'),
((['git', 'config', 'branch.working.remote'],), 'origin'),
((['git', 'config', 'branch.working.merge'],),
'refs/heads/master'),
((['git', 'config', 'branch.working.remote'],), 'origin'),
((['git', 'rev-list', '--merges',
'--grep=^SVN changes up to revision [0-9]*$',
'refs/remotes/origin/master^!'],), ''),
((['git', 'update-index', '--refresh', '-q'],), ''),
((['git', 'diff-index', '--name-status', 'HEAD'],), ''),
((['git', 'rev-list', '^refs/heads/working',
'refs/remotes/origin/master'],),
''),
((['git',
'log', '--grep=^git-svn-id:', '-1', '--pretty=format:%H'],),
'3fc18b62c4966193eb435baabe2d18a3810ec82e'),
((['git',
'rev-list', '^3fc18b62c4966193eb435baabe2d18a3810ec82e',
'refs/remotes/origin/master'],), ''),
((['git',
'merge-base', 'refs/remotes/origin/master', 'HEAD'],),
'fake_ancestor_sha'),
]
@classmethod
def _dcommit_calls_normal(cls):
return [
((['git', 'rev-parse', '--show-cdup'],), ''),
((['git', 'rev-parse', 'HEAD'],),
'00ff397798ea57439712ed7e04ab96e13969ef40'),
((['git',
'diff', '--name-status', '--no-renames', '-r', 'fake_ancestor_sha...',
'.'],),
'M\tPRESUBMIT.py'),
((['git',
'config', 'branch.working.rietveldissue'],), '12345'),
((['git',
'config', 'branch.working.rietveldpatchset'],), '31137'),
((['git', 'config', 'branch.working.rietveldserver'],),
'codereview.example.com'),
((['git', 'config', 'user.email'],), 'author@example.com'),
((['git', 'config', 'rietveld.tree-status-url'],), ''),
]
@classmethod
def _dcommit_calls_bypassed(cls):
return [
((['git',
'config', 'branch.working.rietveldissue'],), '12345'),
((['git', 'config', 'branch.working.rietveldserver'],),
'codereview.example.com'),
((['git', 'config', 'rietveld.tree-status-url'],), ''),
(('GitClHooksBypassedCommit',
'Issue https://codereview.example.com/12345 bypassed hook when '
'committing (tree status was "unset")'), None),
]
@classmethod
def _dcommit_calls_3(cls):
return [
((['git',
'diff', '--no-ext-diff', '--stat', '--find-copies-harder',
'-l100000', '-C50', 'fake_ancestor_sha',
'refs/heads/working'],),
(' PRESUBMIT.py | 2 +-\n'
' 1 files changed, 1 insertions(+), 1 deletions(-)\n')),
(('About to commit; enter to confirm.',), None),
((['git', 'show-ref', '--quiet', '--verify',
'refs/heads/git-cl-commit'],),
(('', None), 0)),
((['git', 'branch', '-D', 'git-cl-commit'],), ''),
((['git', 'show-ref', '--quiet', '--verify',
'refs/heads/git-cl-cherry-pick'],), ''),
((['git', 'rev-parse', '--show-cdup'],), '\n'),
((['git', 'checkout', '-q', '-b', 'git-cl-commit'],), ''),
((['git', 'reset', '--soft', 'fake_ancestor_sha'],), ''),
((['git', 'commit', '-m',
'Issue: 12345\n\nR=john@chromium.org\n\n'
'Review URL: https://codereview.example.com/12345'],),
''),
((['git',
'svn', 'dcommit', '-C50', '--no-rebase', '--rmdir'],),
(('', None), 0)),
((['git', 'checkout', '-q', 'working'],), ''),
((['git', 'branch', '-D', 'git-cl-commit'],), ''),
]
@staticmethod
def _cmd_line(description, args, similarity, find_copies, private):
return [
'upload', '--assume_yes', '--server',
'https://codereview.example.com',
'--message', description
] + args + [
'--cc', 'joe@example.com',
] + (['--private'] if private else []) + [
'--git_similarity', similarity or '50'
] + (['--git_no_find_copies'] if find_copies == False else []) + [
'fake_ancestor_sha', 'HEAD'
]
def _run_reviewer_test(
self,
upload_args,
expected_description,
returned_description,
final_description,
reviewers,
private=False):
try:
similarity = upload_args[upload_args.index('--similarity')+1]
except ValueError:
similarity = None
if '--find-copies' in upload_args:
find_copies = True
elif '--no-find-copies' in upload_args:
find_copies = False
else:
find_copies = None
private = '--private' in upload_args
self.calls = self._upload_calls(similarity, find_copies, private)
def RunEditor(desc, _, **kwargs):
self.assertEquals(
'
'
'
'
'--------------------\n' +
expected_description,
desc)
return returned_description
self.mock(git_cl.gclient_utils, 'RunEditor', RunEditor)
def check_upload(args):
cmd_line = self._cmd_line(final_description, reviewers, similarity,
find_copies, private)
self.assertEquals(cmd_line, args)
return 1, 2
self.mock(git_cl.upload, 'RealMain', check_upload)
git_cl.main(['upload'] + upload_args)
def test_no_reviewer(self):
self._run_reviewer_test(
[],
'desc\n\nBUG=',
'
'desc\n\nBUG=',
[])
def test_keep_similarity(self):
self._run_reviewer_test(
['--similarity', '70'],
'desc\n\nBUG=',
'
'desc\n\nBUG=',
[])
def test_keep_find_copies(self):
self._run_reviewer_test(
['--no-find-copies'],
'desc\n\nBUG=',
'
'desc\n\nBUG=',
[])
def test_private(self):
self._run_reviewer_test(
['--private'],
'desc\n\nBUG=',
'
'desc\n\nBUG=',
[])
def test_reviewers_cmd_line(self):
# Reviewer is passed as-is
description = 'desc\n\nR=foo@example.com\nBUG='
self._run_reviewer_test(
['-r' 'foo@example.com'],
description,
'\n%s\n' % description,
description,
['--reviewers=foo@example.com'])
def test_reviewer_tbr_overriden(self):
# Reviewer is overriden with TBR
# Also verifies the regexp work without a trailing LF
description = 'Foo Bar\n\nTBR=reviewer@example.com'
self._run_reviewer_test(
['-r' 'foo@example.com'],
'desc\n\nR=foo@example.com\nBUG=',
description.strip('\n'),
description,
['--reviewers=reviewer@example.com'])
def test_reviewer_multiple(self):
# Handles multiple R= or TBR= lines.
description = (
'Foo Bar\nTBR=reviewer@example.com\nBUG=\nR=another@example.com')
self._run_reviewer_test(
[],
'desc\n\nBUG=',
description,
description,
['--reviewers=another@example.com,reviewer@example.com'])
def test_reviewer_send_mail(self):
# --send-mail can be used without -r if R= is used
description = 'Foo Bar\nR=reviewer@example.com'
self._run_reviewer_test(
['--send-mail'],
'desc\n\nBUG=',
description.strip('\n'),
description,
['--reviewers=reviewer@example.com', '--send_mail'])
def test_reviewer_send_mail_no_rev(self):
# Fails without a reviewer.
stdout = StringIO.StringIO()
stderr = StringIO.StringIO()
try:
self.calls = self._upload_no_rev_calls(None, None)
def RunEditor(desc, _, **kwargs):
return desc
self.mock(git_cl.gclient_utils, 'RunEditor', RunEditor)
self.mock(sys, 'stdout', stdout)
self.mock(sys, 'stderr', stderr)
git_cl.main(['upload', '--send-mail'])
self.fail()
except SystemExit:
self.assertEqual(
'Using 50% similarity for rename/copy detection. Override with '
'--similarity.\n',
stdout.getvalue())
self.assertEqual(
'Must specify reviewers to send email.\n', stderr.getvalue())
def test_dcommit(self):
self.calls = (
self._dcommit_calls_1() +
self._git_sanity_checks('fake_ancestor_sha', 'working') +
self._dcommit_calls_normal() +
self._dcommit_calls_3())
git_cl.main(['dcommit'])
def test_dcommit_bypass_hooks(self):
self.calls = (
self._dcommit_calls_1() +
self._dcommit_calls_bypassed() +
self._dcommit_calls_3())
git_cl.main(['dcommit', '--bypass-hooks'])
@classmethod
def _gerrit_base_calls(cls):
return [
((['git', 'config', 'rietveld.autoupdate'],),
''),
((['git',
'config', 'rietveld.server'],), 'codereview.example.com'),
((['git', 'symbolic-ref', 'HEAD'],), 'master'),
((['git', 'config', '--int', '--get',
'branch.master.git-cl-similarity'],), ''),
((['git', 'symbolic-ref', 'HEAD'],), 'master'),
((['git', 'config', '--int', '--get',
'branch.master.git-find-copies'],), ''),
((['git', 'update-index', '--refresh', '-q'],), ''),
((['git', 'diff-index', '--name-status', 'HEAD'],), ''),
((['git', 'symbolic-ref', 'HEAD'],), 'master'),
((['git', 'config', 'branch.master.merge'],), 'master'),
((['git', 'config', 'branch.master.remote'],), 'origin'),
((['get_or_create_merge_base', 'master', 'master'],),
'fake_ancestor_sha'),
] + cls._git_sanity_checks('fake_ancestor_sha', 'master') + [
((['git', 'rev-parse', '--show-cdup'],), ''),
((['git', 'rev-parse', 'HEAD'],), '12345'),
((['git',
'diff', '--name-status', '--no-renames', '-r',
'fake_ancestor_sha...', '.'],),
'M\t.gitignore\n'),
((['git', 'config', 'branch.master.rietveldissue'],), ''),
((['git',
'config', 'branch.master.rietveldpatchset'],), ''),
((['git',
'log', '--pretty=format:%s%n%n%b', 'fake_ancestor_sha...'],),
'foo'),
((['git', 'config', 'user.email'],), 'me@example.com'),
((['git',
'diff', '--no-ext-diff', '--stat', '--find-copies-harder',
'-l100000', '-C50', 'fake_ancestor_sha', 'HEAD'],),
'+dat'),
]
@staticmethod
def _gerrit_upload_calls(description, reviewers):
calls = [
((['git', 'config', 'gerrit.host'],),
'gerrit.example.com'),
((['git', 'log', '--pretty=format:%s\n\n%b',
'fake_ancestor_sha..HEAD'],),
description)
]
if git_cl.CHANGE_ID not in description:
calls += [
((['git', 'log', '--pretty=format:%s\n\n%b',
'fake_ancestor_sha..HEAD'],),
description),
((['git', 'commit', '--amend', '-m', description],),
''),
((['git', 'log', '--pretty=format:%s\n\n%b',
'fake_ancestor_sha..HEAD'],),
description)
]
calls += [
((['git', 'rev-list', 'origin/master..'],), ''),
((['git', 'config', 'rietveld.cc'],), '')
]
receive_pack = '--receive-pack=git receive-pack '
receive_pack += '--cc=joe@example.com' # from watch list
if reviewers:
receive_pack += ' '
receive_pack += ' '.join(
'--reviewer=' + email for email in sorted(reviewers))
receive_pack += ''
calls += [
((['git',
'push', receive_pack, 'origin', 'HEAD:refs/for/master'],),
'')
]
return calls
def _run_gerrit_upload_test(
self,
upload_args,
description,
reviewers):
self.calls = self._gerrit_base_calls()
self.calls += self._gerrit_upload_calls(description, reviewers)
git_cl.main(['upload'] + upload_args)
def test_gerrit_upload_without_change_id(self):
self._run_gerrit_upload_test(
[],
'desc\n\nBUG=\n',
[])
def test_gerrit_no_reviewer(self):
self._run_gerrit_upload_test(
[],
'desc\n\nBUG=\nChange-Id:123456789\n',
[])
def test_gerrit_reviewers_cmd_line(self):
self._run_gerrit_upload_test(
['-r', 'foo@example.com'],
'desc\n\nBUG=\nChange-Id:123456789',
['foo@example.com'])
def test_gerrit_reviewer_multiple(self):
self._run_gerrit_upload_test(
[],
'desc\nTBR=reviewer@example.com\nBUG=\nR=another@example.com\n'
'Change-Id:123456789\n',
['reviewer@example.com', 'another@example.com'])
def test_config_gerrit_download_hook(self):
self.mock(git_cl, 'FindCodereviewSettingsFile', CodereviewSettingsFileMock)
def ParseCodereviewSettingsContent(content):
keyvals = {}
keyvals['CODE_REVIEW_SERVER'] = 'gerrit.chromium.org'
keyvals['GERRIT_HOST'] = 'gerrit.chromium.org'
keyvals['GERRIT_PORT'] = '29418'
return keyvals
self.mock(git_cl.gclient_utils, 'ParseCodereviewSettingsContent',
ParseCodereviewSettingsContent)
self.mock(git_cl.os, 'access', self._mocked_call)
self.mock(git_cl.os, 'chmod', self._mocked_call)
src_dir = os.path.join(os.path.sep, 'usr', 'local', 'src')
def AbsPath(path):
if not path.startswith(os.path.sep):
return os.path.join(src_dir, path)
return path
self.mock(git_cl.os.path, 'abspath', AbsPath)
commit_msg_path = os.path.join(src_dir, '.git', 'hooks', 'commit-msg')
def Exists(path):
if path == commit_msg_path:
return False
# others paths, such as /usr/share/locale/....
return True
self.mock(git_cl.os.path, 'exists', Exists)
self.mock(git_cl, 'urlretrieve', self._mocked_call)
self.mock(git_cl, 'hasSheBang', self._mocked_call)
self.calls = [
((['git', 'config', 'rietveld.autoupdate'],),
''),
((['git', 'config', 'rietveld.server',
'gerrit.chromium.org'],), ''),
((['git', 'config', '--unset-all', 'rietveld.cc'],), ''),
((['git', 'config', '--unset-all',
'rietveld.private'],), ''),
((['git', 'config', '--unset-all',
'rietveld.tree-status-url'],), ''),
((['git', 'config', '--unset-all',
'rietveld.viewvc-url'],), ''),
((['git', 'config', '--unset-all',
'rietveld.bug-prefix'],), ''),
((['git', 'config', '--unset-all',
'rietveld.cpplint-regex'],), ''),
((['git', 'config', '--unset-all',
'rietveld.cpplint-ignore-regex'],), ''),
((['git', 'config', '--unset-all',
'rietveld.project'],), ''),
((['git', 'config', 'gerrit.host',
'gerrit.chromium.org'],), ''),
# DownloadHooks(False)
((['git', 'config', 'gerrit.host'],),
'gerrit.chromium.org'),
((['git', 'rev-parse', '--show-cdup'],), ''),
((commit_msg_path, os.X_OK,), False),
(('https://gerrit-review.googlesource.com/tools/hooks/commit-msg',
commit_msg_path,), ''),
((commit_msg_path,), True),
((commit_msg_path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR,), ''),
# GetCodereviewSettingsInteractively
((['git', 'config', 'rietveld.server'],),
'gerrit.chromium.org'),
(('Rietveld server (host[:port]) [https://gerrit.chromium.org]:',),
''),
((['git', 'config', 'rietveld.cc'],), ''),
(('CC list:',), ''),
((['git', 'config', 'rietveld.private'],), ''),
(('Private flag (rietveld only):',), ''),
((['git', 'config', 'rietveld.tree-status-url'],), ''),
(('Tree status URL:',), ''),
((['git', 'config', 'rietveld.viewvc-url'],), ''),
(('ViewVC URL:',), ''),
# DownloadHooks(True)
((['git', 'config', 'rietveld.bug-prefix'],), ''),
(('Bug Prefix:',), ''),
((commit_msg_path, os.X_OK,), True),
]
git_cl.main(['config'])
def test_update_reviewers(self):
data = [
('foo', [], 'foo'),
('foo\nR=xx', [], 'foo\nR=xx'),
('foo\nTBR=xx', [], 'foo\nTBR=xx'),
('foo', ['a@c'], 'foo\n\nR=a@c'),
('foo\nR=xx', ['a@c'], 'foo\n\nR=a@c, xx'),
('foo\nTBR=xx', ['a@c'], 'foo\n\nR=a@c\nTBR=xx'),
('foo\nTBR=xx\nR=yy', ['a@c'], 'foo\n\nR=a@c, yy\nTBR=xx'),
('foo\nBUG=', ['a@c'], 'foo\nBUG=\nR=a@c'),
('foo\nR=xx\nTBR=yy\nR=bar', ['a@c'], 'foo\n\nR=a@c, xx, bar\nTBR=yy'),
('foo', ['a@c', 'b@c'], 'foo\n\nR=a@c, b@c'),
('foo\nBar\n\nR=\nBUG=', ['c@c'], 'foo\nBar\n\nR=c@c\nBUG='),
('foo\nBar\n\nR=\nBUG=\nR=', ['c@c'], 'foo\nBar\n\nR=c@c\nBUG='),
# Same as the line before, but full of whitespaces.
(
'foo\nBar\n\n R = \n BUG = \n R = ', ['c@c'],
'foo\nBar\n\nR=c@c\n BUG =',
),
# Whitespaces aren't interpreted as new lines.
('foo BUG=allo R=joe ', ['c@c'], 'foo BUG=allo R=joe\n\nR=c@c'),
]
expected = [i[2] for i in data]
actual = []
for orig, reviewers, _expected in data:
obj = git_cl.ChangeDescription(orig)
obj.update_reviewers(reviewers)
actual.append(obj.description)
self.assertEqual(expected, actual)
def test_trybots_from_PRESUBMIT(self):
TEST_MASTER = 'testMaster'
TEST_BUILDER = 'testBuilder'
MASTERS = {TEST_MASTER:{TEST_BUILDER:['a']}}
self.mock(presubmit_support, 'DoGetTryMasters',
lambda *args: MASTERS)
change_mock = ChangeMock()
changelist_mock = ChangelistMock(change_mock)
self.mock(git_cl, 'is_dirty_git_tree', lambda x: False)
self.mock(git_cl, 'print_stats', lambda *arg: True)
self.mock(git_cl, 'Changelist', lambda *args: changelist_mock)
self.mock(git_cl, 'CreateDescriptionFromLog', lambda arg: 'Commit message')
self.mock(git_cl.ChangeDescription, 'prompt', lambda self: None)
self.calls = [
((['git', 'config', 'rietveld.autoupdate',],),
''),
((['git', 'config', 'gerrit.host',],),
''),
((['git', 'rev-parse', '--show-cdup',],),
''),
((['git', 'config', 'rietveld.private',],),
''),
((['git', 'config', '--local', '--get-regexp', '^svn-remote\\.'],),
''),
((['git', 'config', 'rietveld.project',],),
''),
((['git', 'rev-parse', 'HEAD',],),
''),
]
stored_description = []
def check_upload(args):
i = 0
for arg in args:
if arg == '--message':
break
i += 1
self.assertTrue(i < len(args))
stored_description.append(args[i+1])
return 1, 2
self.mock(git_cl.upload, 'RealMain', check_upload)
git_cl.main(['upload', '--bypass-hooks', '--auto-bots'])
found = re.search("CQ_TRYBOTS=(.*?)$", stored_description[0])
self.assertTrue(found)
self.assertEqual(found.group(1), '%s:%s' % (TEST_MASTER, TEST_BUILDER))
class ChangelistMock(object):
def __init__(self, change_mock):
self.change_mock = change_mock
def GetChange(self, *args):
return self.change_mock
def GetIssue(self):
return None
def GetBranch(self):
return []
def GetCommonAncestorWithUpstream(self):
return []
def GetCCList(self):
return []
def GetGitBaseUrlFromConfig(self):
return ''
def GetRemoteUrl(self):
return ''
def GetRietveldServer(self):
return None
def SetWatchers(self, *args):
pass
def SetIssue(self, issue):
pass
def SetPatchset(self, issue):
pass
class ChangeMock(object):
    """Stand-in for a presubmit Change object.

    Records the description text written by the code under test so the test
    can assert on it afterwards.
    """

    def __init__(self):
        # Captured by SetDescriptionText(); None until the code under test writes it.
        self.stored_description = None

    def SetDescriptionText(self, desc):
        """Remember the description the code under test produced."""
        self.stored_description = desc

    def FullDescriptionText(self):
        """Return a fixed description for the code under test to append to."""
        return 'HIHI TEST DESCRIPTION'

    def RepositoryRoot(self):
        """Pretend there is no repository root."""
        return []

    def AffectedFiles(self):
        """Pretend no files were touched."""
        return []

    def LocalPaths(self):
        """No local path information."""
        return None
if __name__ == '__main__':
    # Enable DEBUG-level logging when '-v' is on the command line, otherwise
    # keep the output quiet (ERROR only), then hand control to unittest's CLI.
    git_cl.logging.basicConfig(
        level=git_cl.logging.DEBUG if '-v' in sys.argv else git_cl.logging.ERROR)
    unittest.main()
| true | true |
f7f57b728d4c43cf0efb7380942ba9d305ce6c06 | 495 | py | Python | exercicios/exercicio011.py | callebbs/curosemvideo-python | 63c2aac1f671973e92b753ec487e82c8871a3ded | [
"MIT"
] | null | null | null | exercicios/exercicio011.py | callebbs/curosemvideo-python | 63c2aac1f671973e92b753ec487e82c8871a3ded | [
"MIT"
] | null | null | null | exercicios/exercicio011.py | callebbs/curosemvideo-python | 63c2aac1f671973e92b753ec487e82c8871a3ded | [
"MIT"
] | null | null | null | '''
Faça um programa que leia a largura e a altura de uma parede em metros, calcule a sua área e a quantidade de tinta necessária para pintá-la, sabendo que cada litro de tinta pinta uma área de 2m².
'''
# Read the wall's width and height in metres from the user.
largura = float(input("Digite a largura da parede em metros: "))
altura = float(input("Digite a altura da parede em metros: "))
area = largura * altura  # wall area in square metres
tinta = area / 2  # litres of paint needed: one litre covers 2 m^2
print('A área da parede de {}x{} é de {}. Logo é necessário {} litros de tinta.'.format(largura, altura, area, tinta)) | 41.25 | 195 | 0.713131 |
largura = float(input("Digite a largura da parede em metros: "))
altura = float(input("Digite a altura da parede em metros: "))
area = largura * altura
tinta = area / 2
print('A área da parede de {}x{} é de {}. Logo é necessário {} litros de tinta.'.format(largura, altura, area, tinta)) | true | true |
f7f57c2dd18df37cd6f56735ad2e04968110d998 | 11,220 | py | Python | models/base_model.py | herobd/GAN_aug | b240da32d4f3ae9a00a9d395ac8f29728623f6b4 | [
"BSD-3-Clause"
] | null | null | null | models/base_model.py | herobd/GAN_aug | b240da32d4f3ae9a00a9d395ac8f29728623f6b4 | [
"BSD-3-Clause"
] | null | null | null | models/base_model.py | herobd/GAN_aug | b240da32d4f3ae9a00a9d395ac8f29728623f6b4 | [
"BSD-3-Clause"
] | null | null | null | import os
import torch
from collections import OrderedDict
from abc import ABC, abstractmethod
from . import networks
class BaseModel(ABC):
"""This class is an abstract base class (ABC) for models.
To create a subclass, you need to implement the following five functions:
-- <__init__>: initialize the class; first call BaseModel.__init__(self, opt).
-- <set_input>: unpack data from dataset and apply preprocessing.
-- <forward>: produce intermediate results.
-- <optimize_parameters>: calculate losses, gradients, and update network weights.
-- <modify_commandline_options>: (optionally) add model-specific options and set default options.
"""
def __init__(self, opt):
"""Initialize the BaseModel class.
Parameters:
opt (Option class)-- stores all the experiment flags; needs to be a subclass of BaseOptions
When creating your custom class, you need to implement your own initialization.
In this fucntion, you should first call <BaseModel.__init__(self, opt)>
Then, you need to define four lists:
-- self.loss_names (str list): specify the training losses that you want to plot and save.
-- self.model_names (str list): specify the images that you want to display and save.
-- self.visual_names (str list): define networks used in our training.
-- self.optimizers (optimizer list): define and initialize optimizers. You can define one optimizer for each network. If two networks are updated at the same time, you can use itertools.chain to group them. See cycle_gan_model.py for an example.
"""
self.opt = opt
self.gpu_ids = opt.gpu_ids
self.isTrain = opt.isTrain
self.device = torch.device('cuda:{}'.format(self.gpu_ids[0])) if self.gpu_ids else torch.device('cpu') # get device name: CPU or GPU
self.save_dir = os.path.join(opt.checkpoints_dir, opt.name) # save all the checkpoints to save_dir
if opt.preprocess != 'scale_width': # with [scale_width], input images might have different sizes, which hurts the performance of cudnn.benchmark.
torch.backends.cudnn.benchmark = True
self.loss_names = []
self.model_names = []
self.visual_names = []
self.optimizers = []
self.image_paths = []
self.metric = 0 # used for learning rate policy 'plateau'
@staticmethod
def modify_commandline_options(parser, is_train):
"""Add new model-specific options, and rewrite default values for existing options.
Parameters:
parser -- original option parser
is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options.
Returns:
the modified parser.
"""
return parser
@abstractmethod
def set_input(self, input):
"""Unpack input data from the dataloader and perform necessary pre-processing steps.
Parameters:
input (dict): includes the data itself and its metadata information.
"""
pass
@abstractmethod
def forward(self):
"""Run forward pass; called by both functions <optimize_parameters> and <test>."""
pass
@abstractmethod
def optimize_parameters(self):
"""Calculate losses, gradients, and update network weights; called in every training iteration"""
pass
def setup(self, opt):
"""Load and print networks; create schedulers
Parameters:
opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
"""
if self.isTrain:
self.schedulers = [networks.get_scheduler(optimizer, opt) for optimizer in self.optimizers]
if not self.isTrain or opt.continue_train:
load_suffix = 'iter_%d' % opt.load_iter if opt.load_iter > 0 else opt.epoch
aux = self.load_networks(load_suffix)
else:
aux = None
self.print_networks(opt.verbose)
return aux
def eval(self):
"""Make models eval mode during test time"""
for name in self.model_names:
if isinstance(name, str):
net = getattr(self, 'net' + name)
net.eval()
def test(self):
"""Forward function used in test time.
This function wraps <forward> function in no_grad() so we don't save intermediate steps for backprop
It also calls <compute_visuals> to produce additional visualization results
"""
with torch.no_grad():
self.forward()
self.compute_visuals()
def compute_visuals(self):
"""Calculate additional output images for visdom and HTML visualization"""
pass
def get_image_paths(self):
""" Return image paths that are used to load current data"""
return self.image_paths
def update_learning_rate(self):
"""Update learning rates for all the networks; called at the end of every epoch"""
for scheduler in self.schedulers:
if self.opt.lr_policy == 'plateau':
scheduler.step(self.metric)
else:
scheduler.step()
lr = self.optimizers[0].param_groups[0]['lr']
print('learning rate = %.7f' % lr)
def get_current_visuals(self):
"""Return visualization images. train.py will display these images with visdom, and save the images to a HTML"""
visual_ret = OrderedDict()
for name in self.visual_names:
if isinstance(name, str):
visual_ret[name] = getattr(self, name)
return visual_ret
def get_current_losses(self):
"""Return traning losses / errors. train.py will print out these errors on console, and save them to a file"""
errors_ret = OrderedDict()
for name in self.loss_names:
if isinstance(name, str):
errors_ret[name] = float(getattr(self, 'loss_' + name)) # float(...) works for both scalar tensor and float number
return errors_ret
def save_networks(self, epoch, aux):
"""Save all the networks to the disk.
Parameters:
epoch (int) -- current epoch; used in the file name '%s_net_%s.pth' % (epoch, name)
"""
for name in self.model_names:
if isinstance(name, str):
save_filename = '%s_net_%s.pth' % (epoch, name)
save_path = os.path.join(self.save_dir, save_filename)
net = getattr(self, 'net' + name)
if len(self.gpu_ids) > 0 and torch.cuda.is_available():
torch.save(net.module.cpu().state_dict(), save_path)
net.cuda(self.gpu_ids[0])
else:
torch.save(net.cpu().state_dict(), save_path)
save_filename = '%s_aux.pth' % (epoch)
save_path = os.path.join(self.save_dir, save_filename)
torch.save(aux,save_path)
def __patch_instance_norm_state_dict(self, state_dict, module, keys, i=0):
"""Fix InstanceNorm checkpoints incompatibility (prior to 0.4)"""
key = keys[i]
if i + 1 == len(keys): # at the end, pointing to a parameter/buffer
if module.__class__.__name__.startswith('InstanceNorm') and \
(key == 'running_mean' or key == 'running_var'):
if getattr(module, key) is None:
state_dict.pop('.'.join(keys))
if module.__class__.__name__.startswith('InstanceNorm') and \
(key == 'num_batches_tracked'):
state_dict.pop('.'.join(keys))
else:
self.__patch_instance_norm_state_dict(state_dict, getattr(module, key), keys, i + 1)
def load_networks(self, epoch):
"""Load all the networks from the disk.
Parameters:
epoch (int) -- current epoch; used in the file name '%s_net_%s.pth' % (epoch, name)
"""
for name in self.model_names:
if isinstance(name, str):
load_filename = '%s_net_%s.pth' % (epoch, name)
load_path = os.path.join(self.save_dir, load_filename)
net = getattr(self, 'net' + name)
if isinstance(net, torch.nn.DataParallel):
net = net.module
print('loading the model from %s' % load_path)
# if you are using PyTorch newer than 0.4 (e.g., built from
# GitHub source), you can remove str() on self.device
state_dict = torch.load(load_path, map_location=str(self.device))
if hasattr(state_dict, '_metadata'):
del state_dict._metadata
# patch InstanceNorm checkpoints prior to 0.4
for key in list(state_dict.keys()): # need to copy keys here because we mutate in loop
self.__patch_instance_norm_state_dict(state_dict, net, key.split('.'))
net.load_state_dict(state_dict)
load_filename = '%s_aux.pth' % (epoch)
load_path = os.path.join(self.save_dir, load_filename)
if os.path.exists(load_path):
return torch.load(load_path)
else:
return None
def print_networks(self, verbose):
"""Print the total number of parameters in the network and (if verbose) network architecture
Parameters:
verbose (bool) -- if verbose: print the network architecture
"""
print('---------- Networks initialized -------------')
for name in self.model_names:
if isinstance(name, str):
net = getattr(self, 'net' + name)
num_params = 0
for param in net.parameters():
num_params += param.numel()
if verbose:
print(net)
print('[Network %s] Total number of parameters : %.3f M' % (name, num_params / 1e6))
print('-----------------------------------------------')
def set_requires_grad(self, nets, requires_grad=False):
"""Set requies_grad=Fasle for all the networks to avoid unnecessary computations
Parameters:
nets (network list) -- a list of networks
requires_grad (bool) -- whether the networks require gradients or not
"""
if not isinstance(nets, list):
nets = [nets]
for net in nets:
if net is not None:
for param in net.parameters():
param.requires_grad = requires_grad
def get_optimizer_states(self):
return [o.state_dict() for o in self.optimizers]
def set_optimizer_states(self,states):
for i,s in enumerate(states):
self.optimizers[i].load_state_dict(s)
def get_scheduler_states(self):
return [o.state_dict() for o in self.schedulers]
def set_scheduler_states(self,states):
for i,s in enumerate(states):
self.schedulers[i].load_state_dict(s)
| 44.347826 | 260 | 0.602496 | import os
import torch
from collections import OrderedDict
from abc import ABC, abstractmethod
from . import networks
class BaseModel(ABC):
def __init__(self, opt):
self.opt = opt
self.gpu_ids = opt.gpu_ids
self.isTrain = opt.isTrain
self.device = torch.device('cuda:{}'.format(self.gpu_ids[0])) if self.gpu_ids else torch.device('cpu')
self.save_dir = os.path.join(opt.checkpoints_dir, opt.name)
if opt.preprocess != 'scale_width':
torch.backends.cudnn.benchmark = True
self.loss_names = []
self.model_names = []
self.visual_names = []
self.optimizers = []
self.image_paths = []
self.metric = 0
@staticmethod
def modify_commandline_options(parser, is_train):
return parser
@abstractmethod
def set_input(self, input):
pass
@abstractmethod
def forward(self):
pass
@abstractmethod
def optimize_parameters(self):
pass
def setup(self, opt):
if self.isTrain:
self.schedulers = [networks.get_scheduler(optimizer, opt) for optimizer in self.optimizers]
if not self.isTrain or opt.continue_train:
load_suffix = 'iter_%d' % opt.load_iter if opt.load_iter > 0 else opt.epoch
aux = self.load_networks(load_suffix)
else:
aux = None
self.print_networks(opt.verbose)
return aux
def eval(self):
for name in self.model_names:
if isinstance(name, str):
net = getattr(self, 'net' + name)
net.eval()
def test(self):
with torch.no_grad():
self.forward()
self.compute_visuals()
def compute_visuals(self):
pass
def get_image_paths(self):
return self.image_paths
def update_learning_rate(self):
for scheduler in self.schedulers:
if self.opt.lr_policy == 'plateau':
scheduler.step(self.metric)
else:
scheduler.step()
lr = self.optimizers[0].param_groups[0]['lr']
print('learning rate = %.7f' % lr)
def get_current_visuals(self):
visual_ret = OrderedDict()
for name in self.visual_names:
if isinstance(name, str):
visual_ret[name] = getattr(self, name)
return visual_ret
def get_current_losses(self):
errors_ret = OrderedDict()
for name in self.loss_names:
if isinstance(name, str):
errors_ret[name] = float(getattr(self, 'loss_' + name))
return errors_ret
def save_networks(self, epoch, aux):
for name in self.model_names:
if isinstance(name, str):
save_filename = '%s_net_%s.pth' % (epoch, name)
save_path = os.path.join(self.save_dir, save_filename)
net = getattr(self, 'net' + name)
if len(self.gpu_ids) > 0 and torch.cuda.is_available():
torch.save(net.module.cpu().state_dict(), save_path)
net.cuda(self.gpu_ids[0])
else:
torch.save(net.cpu().state_dict(), save_path)
save_filename = '%s_aux.pth' % (epoch)
save_path = os.path.join(self.save_dir, save_filename)
torch.save(aux,save_path)
def __patch_instance_norm_state_dict(self, state_dict, module, keys, i=0):
key = keys[i]
if i + 1 == len(keys):
if module.__class__.__name__.startswith('InstanceNorm') and \
(key == 'running_mean' or key == 'running_var'):
if getattr(module, key) is None:
state_dict.pop('.'.join(keys))
if module.__class__.__name__.startswith('InstanceNorm') and \
(key == 'num_batches_tracked'):
state_dict.pop('.'.join(keys))
else:
self.__patch_instance_norm_state_dict(state_dict, getattr(module, key), keys, i + 1)
def load_networks(self, epoch):
for name in self.model_names:
if isinstance(name, str):
load_filename = '%s_net_%s.pth' % (epoch, name)
load_path = os.path.join(self.save_dir, load_filename)
net = getattr(self, 'net' + name)
if isinstance(net, torch.nn.DataParallel):
net = net.module
print('loading the model from %s' % load_path)
state_dict = torch.load(load_path, map_location=str(self.device))
if hasattr(state_dict, '_metadata'):
del state_dict._metadata
for key in list(state_dict.keys()):
self.__patch_instance_norm_state_dict(state_dict, net, key.split('.'))
net.load_state_dict(state_dict)
load_filename = '%s_aux.pth' % (epoch)
load_path = os.path.join(self.save_dir, load_filename)
if os.path.exists(load_path):
return torch.load(load_path)
else:
return None
def print_networks(self, verbose):
print('---------- Networks initialized -------------')
for name in self.model_names:
if isinstance(name, str):
net = getattr(self, 'net' + name)
num_params = 0
for param in net.parameters():
num_params += param.numel()
if verbose:
print(net)
print('[Network %s] Total number of parameters : %.3f M' % (name, num_params / 1e6))
print('-----------------------------------------------')
def set_requires_grad(self, nets, requires_grad=False):
if not isinstance(nets, list):
nets = [nets]
for net in nets:
if net is not None:
for param in net.parameters():
param.requires_grad = requires_grad
def get_optimizer_states(self):
return [o.state_dict() for o in self.optimizers]
def set_optimizer_states(self,states):
for i,s in enumerate(states):
self.optimizers[i].load_state_dict(s)
def get_scheduler_states(self):
return [o.state_dict() for o in self.schedulers]
def set_scheduler_states(self,states):
for i,s in enumerate(states):
self.schedulers[i].load_state_dict(s)
| true | true |
f7f57c4ab5df4e6c887af8e236c4f9d839e0bf42 | 4,532 | py | Python | src/m2_functions.py | archulca/02-ObjectsFunctionsAndMethods | c14e2558f61b1edf257460f4362311f977fe9a38 | [
"MIT"
] | null | null | null | src/m2_functions.py | archulca/02-ObjectsFunctionsAndMethods | c14e2558f61b1edf257460f4362311f977fe9a38 | [
"MIT"
] | null | null | null | src/m2_functions.py | archulca/02-ObjectsFunctionsAndMethods | c14e2558f61b1edf257460f4362311f977fe9a38 | [
"MIT"
] | null | null | null | """
Practice DEFINING and CALLING
FUNCTIONS
Authors: David Mutchler, Vibha Alangar, Matt Boutell, Dave Fisher,
Aaron Wilkin, their colleagues, and Carla Archuleta.
""" # DONE: 1. PUT YOUR NAME IN THE ABOVE LINE.
###############################################################################
# DONE: 2.
# Allow this module to use the rosegraphics.py module by marking the
# src
# folder in this project as a "Sources Root", as follows:
#
# In the Project window (to the left), right click on the src folder,
# then select Mark Directory As ~ Sources Root.
#
###############################################################################
import rosegraphics as rg
def main():
    """
    TESTS the functions that you will write below.
    You write the tests per the _TODO_s below.
    """
    # NOTE: the docstring above was originally placed AFTER these calls, where
    # it was just a dead string expression; it has been moved to the top so it
    # actually documents the function.
    # 'a' and 'b' are module-level globals defined next to hypotenuse() below.
    hypotenuse(a, b)
    turtles(10, 'blue')
    turtles(2, 'purple')
###############################################################################
# DONE: 3a. Define a function immediately below this _TODO_.
# It takes two arguments that denote, for a right triangle,
# the lengths of the two sides adjacent to its right angle,
# and it returns the length of the hypotenuse of that triangle.
# HINT: Apply the Pythagorean theorem.
import math as m

# Sample leg lengths for the quick self-check below (the classic 3-4-5 triangle).
a = 3
b = 4


def hypotenuse(a, b):
    """Return the length of the hypotenuse of a right triangle.

    Parameters:
        a, b -- the lengths of the two sides adjacent to the right angle.
    """
    # math.hypot applies the Pythagorean theorem and is more numerically
    # robust than sqrt(a**2 + b**2) for very large/small inputs.
    return m.hypot(a, b)


print(hypotenuse(a, b))
# You may name the function and its parameters whatever you wish.
#
# DONE: 3b. In main, CALL your function and print the returned value,
# to test whether you defined the function correctly.
#
###############################################################################
###############################################################################
# DONE: 4a. Define a function immediately below this _TODO_.
# It takes two arguments:
# -- a string that represents a color (e.g. 'red')
# -- a positive integer that represents the thickness of a Pen.
def turtles(thickness, color):
    """Draw two opposing lines with SimpleTurtles in a TurtleWindow.

    Parameters:
        thickness -- pen thickness for the green turtle.
        color -- pen color (string) for the second turtle, drawn at thickness 5.

    The green turtle moves forward 100 pixels; the colored one moves backward
    100 pixels. The window stays open until the mouse is clicked.
    """
    window = rg.TurtleWindow()
    green_turtle = rg.SimpleTurtle()
    colored_turtle = rg.SimpleTurtle()
    green_turtle.speed = 1
    colored_turtle.speed = 1
    green_turtle.pen = rg.Pen('green', thickness)
    colored_turtle.pen = rg.Pen(color, 5)
    green_turtle.forward(100)
    colored_turtle.backward(100)
    window.close_on_mouse_click()
# The function should do the following (in the order listed):
# a. Constructs a TurtleWindow.
# b. Constructs two SimpleTurtles, where:
# - one has a Pen whose color is "green" and has the GIVEN thickness
# - - the other has a Pen whose color is the GIVEN color
# and whose thickness is 5
#
# Note: the "GIVEN" color means the PARAMETER that represents a color.
# Likewise, the "GIVEN" thickness means the PARAMETER for thickness.
#
# c. Makes the first (green) SimpleTurtle move FORWARD 100 pixels, and
# makes the other (thickness 5) SimpleTurtle move BACKWARD 100 pixels.
#
# d. Tells the TurtleWindow to wait until the mouse is clicked.
#
# You may name the function and its parameters whatever you wish.
#
# DONE: 4b. In main, CALL your function at least TWICE (with different values
# for the arguments) to test whether you defined the function correctly.
# NOTE(review): a stray mid-file call to main() was removed here. The module
# already invokes main() once at the bottom of the file; calling it here as
# well ran the entire turtle demo twice (opening two rounds of windows).
###############################################################################
###############################################################################
# DONE: 5.
# COMMIT-and-PUSH your work (after changing this TO-DO to DONE).
#
# As a reminder, here is how you should do so:
# 1. Select VCS from the menu bar (above).
# 2. Choose Commit from the pull-down menu that appears.
# 3. In the Commit Changes window that pops up,
# press the Commit and Push button.
# Note: If you see only a Commit button:
# - HOVER over the Commit button
# (in the lower-right corner of the window)
# - CLICK on Commit and Push.
#
# COMMIT adds the changed work to the version control on your computer.
# PUSH adds the changed work into your Github repository in the "cloud".
#
# COMMIT-and-PUSH your work as often as you want, but at the least, commit
# and push after you have tested a module and believe that it is correct.
#
###############################################################################
# -----------------------------------------------------------------------------
# Calls main to start the ball rolling.
# -----------------------------------------------------------------------------
# Guarded so that importing this module (e.g. from a test) does not launch the
# turtle windows; the demo only runs when the file is executed as a script.
if __name__ == '__main__':
    main()
| 35.40625 | 79 | 0.553398 | true | true | |
f7f57d00224792a4f3f8c2d4ac015d89da307f56 | 38 | py | Python | atom_babies/set_orientation_upside_down.py | 3110/atom-babies | 4cd326308ec407df0b23b5833213bea43d598934 | [
"MIT"
] | 2 | 2022-02-09T02:02:31.000Z | 2022-03-20T09:18:06.000Z | atom_babies/set_orientation_upside_down.py | 3110/atom-babies | 4cd326308ec407df0b23b5833213bea43d598934 | [
"MIT"
] | null | null | null | atom_babies/set_orientation_upside_down.py | 3110/atom-babies | 4cd326308ec407df0b23b5833213bea43d598934 | [
"MIT"
] | null | null | null | _ab_set_orientation('ORI_UPSIDE_DOWN') | 38 | 38 | 0.894737 | _ab_set_orientation('ORI_UPSIDE_DOWN') | true | true |
f7f57dc0f8eda2451475c2281b00aa262d0eaddb | 1,658 | py | Python | wagtail/admin/tests/test_jinja2.py | stevedya/wagtail | 52e5abfe62547cdfd90ea7dfeb8bf5a52f16324c | [
"BSD-3-Clause"
] | 1 | 2022-02-09T05:25:30.000Z | 2022-02-09T05:25:30.000Z | wagtail/admin/tests/test_jinja2.py | stevedya/wagtail | 52e5abfe62547cdfd90ea7dfeb8bf5a52f16324c | [
"BSD-3-Clause"
] | null | null | null | wagtail/admin/tests/test_jinja2.py | stevedya/wagtail | 52e5abfe62547cdfd90ea7dfeb8bf5a52f16324c | [
"BSD-3-Clause"
] | null | null | null | from django.contrib.auth.models import AnonymousUser
from django.http import HttpRequest
from django.template import engines
from django.test import TestCase
from wagtail.models import PAGE_TEMPLATE_VAR, Page, Site
from wagtail.test.utils import WagtailTestUtils
class TestCoreJinja(TestCase, WagtailTestUtils):
def setUp(self):
self.engine = engines["jinja2"]
self.user = self.create_superuser(
username="test", email="test@email.com", password="password"
)
self.homepage = Page.objects.get(id=2)
def render(self, string, context=None, request_context=True):
if context is None:
context = {}
template = self.engine.from_string(string)
return template.render(context)
def dummy_request(self, user=None):
site = Site.objects.get(is_default_site=True)
request = HttpRequest()
request.META["HTTP_HOST"] = site.hostname
request.META["SERVER_PORT"] = site.port
request.user = user or AnonymousUser()
return request
def test_userbar(self):
content = self.render(
"{{ wagtailuserbar() }}",
{
PAGE_TEMPLATE_VAR: self.homepage,
"request": self.dummy_request(self.user),
},
)
self.assertIn("<!-- Wagtail user bar embed code -->", content)
def test_userbar_anonymous_user(self):
content = self.render(
"{{ wagtailuserbar() }}",
{PAGE_TEMPLATE_VAR: self.homepage, "request": self.dummy_request()},
)
# Make sure nothing was rendered
self.assertEqual(content, "")
| 31.283019 | 80 | 0.630277 | from django.contrib.auth.models import AnonymousUser
from django.http import HttpRequest
from django.template import engines
from django.test import TestCase
from wagtail.models import PAGE_TEMPLATE_VAR, Page, Site
from wagtail.test.utils import WagtailTestUtils
class TestCoreJinja(TestCase, WagtailTestUtils):
def setUp(self):
self.engine = engines["jinja2"]
self.user = self.create_superuser(
username="test", email="test@email.com", password="password"
)
self.homepage = Page.objects.get(id=2)
def render(self, string, context=None, request_context=True):
if context is None:
context = {}
template = self.engine.from_string(string)
return template.render(context)
def dummy_request(self, user=None):
site = Site.objects.get(is_default_site=True)
request = HttpRequest()
request.META["HTTP_HOST"] = site.hostname
request.META["SERVER_PORT"] = site.port
request.user = user or AnonymousUser()
return request
def test_userbar(self):
content = self.render(
"{{ wagtailuserbar() }}",
{
PAGE_TEMPLATE_VAR: self.homepage,
"request": self.dummy_request(self.user),
},
)
self.assertIn("<!-- Wagtail user bar embed code -->", content)
def test_userbar_anonymous_user(self):
content = self.render(
"{{ wagtailuserbar() }}",
{PAGE_TEMPLATE_VAR: self.homepage, "request": self.dummy_request()},
)
self.assertEqual(content, "")
| true | true |
f7f57e45f77bb0814cd3ee2ef6492a0c115ad7a6 | 3,020 | py | Python | axopy/pipeline/sources.py | jdekarske/axopy | a60abb44a12c5e833b9033170825773bb0691394 | [
"MIT"
] | 13 | 2019-01-23T13:00:00.000Z | 2022-01-17T14:45:32.000Z | axopy/pipeline/sources.py | jdekarske/axopy | a60abb44a12c5e833b9033170825773bb0691394 | [
"MIT"
] | 45 | 2017-01-16T22:44:59.000Z | 2018-12-18T18:43:08.000Z | axopy/pipeline/sources.py | jdekarske/axopy | a60abb44a12c5e833b9033170825773bb0691394 | [
"MIT"
] | 5 | 2017-01-13T00:14:08.000Z | 2018-11-08T13:16:24.000Z | """Data streams for processing with a pipeline."""
import warnings
import numpy as np
def segment(data, length, overlap=0):
    """Yield fixed-length windows of an array.

    Consecutive windows may share ``overlap`` samples; trailing samples that
    do not fill a whole window are dropped.

    Parameters
    ----------
    data : array, shape (n_channels, n_samples)
        Data to segment. 1-D input is promoted to a single-channel 2-D array.
    length : int
        Number of samples in each window.
    overlap : int, optional
        Number of samples shared by consecutive windows.

    Yields
    ------
    segment : array (n_channels, length)
        Window of the input array.

    Examples
    --------
    >>> import numpy as np
    >>> from axopy.pipeline import segment
    >>> x = np.arange(8).reshape(2, 4)
    >>> seg = segment(x, 2)
    >>> next(seg)
    array([[0, 1],
           [4, 5]])
    >>> next(seg)
    array([[2, 3],
           [6, 7]])
    """
    data = np.atleast_2d(data)
    n_samples = data.shape[1]
    yield from (data[:, start:stop]
                for start, stop in segment_indices(n_samples, length,
                                                   overlap=overlap))
def segment_indices(n, length, overlap=0):
    """Yield ``(start, stop)`` slice bounds for fixed-length segments.

    Each segment has the given ``length``, with ``overlap`` samples shared by
    consecutive segments. Only full segments are produced; if they do not fit
    evenly into ``n`` samples a UserWarning is emitted. The pairs are meant
    for slicing, e.g. ``data[:, start:stop]``.

    Parameters
    ----------
    n : int
        Total number of samples to segment.
    length : int
        Length of each segment.
    overlap : int, optional
        Number of samples shared by consecutive segments.

    Yields
    ------
    start, stop : int
        Slice bounds of one segment within the input.

    Examples
    --------
    >>> list(segment_indices(6, 2))
    [(0, 2), (2, 4), (4, 6)]
    >>> list(segment_indices(11, 5, overlap=2))
    [(0, 5), (3, 8), (6, 11)]
    """
    step = length - overlap
    if (n - length) % step != 0:
        warnings.warn("Data (length {}) cannot be chunked evenly into "
                      "segments of length {} with overlap {}".format(
                          n, length, overlap),
                      UserWarning)
    # Equivalent to iterating range(0, n, step) and skipping windows that run
    # past the end: the last admissible start index is n - length.
    for start in range(0, n - length + 1, step):
        yield start, start + length
| 27.207207 | 78 | 0.57649 |
import warnings
import numpy as np
def segment(data, length, overlap=0):
data = np.atleast_2d(data)
n = data.shape[1]
for f, t in segment_indices(n, length, overlap=overlap):
yield data[:, f:t]
def segment_indices(n, length, overlap=0):
skip = length - overlap
if (n - length) % skip != 0:
warnings.warn("Data (length {}) cannot be chunked evenly into "
"segments of length {} with overlap {}".format(
n, length, overlap),
UserWarning)
for i in range(0, n, skip):
if i + length <= n:
yield i, i + length
| true | true |
f7f57f8ae16fcbcdde095c7223d6c9f1f054ceb4 | 4,456 | py | Python | donkeycar/parts/transform.py | swapdha/donkey | 6f9db7bbd50db48222ff1ecdf61fb00456ef5e79 | [
"MIT"
] | 1 | 2019-02-05T08:38:06.000Z | 2019-02-05T08:38:06.000Z | donkeycar/parts/transform.py | swapdha/donkey | 6f9db7bbd50db48222ff1ecdf61fb00456ef5e79 | [
"MIT"
] | null | null | null | donkeycar/parts/transform.py | swapdha/donkey | 6f9db7bbd50db48222ff1ecdf61fb00456ef5e79 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import time
class Lambda:
    """Adapts an arbitrary callable to the donkey "part" interface."""

    def __init__(self, f):
        """Remember the callable that ``run`` will delegate to."""
        self.f = f

    def run(self, *args, **kwargs):
        """Invoke the wrapped callable with the given arguments and return its result."""
        return self.f(*args, **kwargs)

    def shutdown(self):
        """Nothing to release; present only to satisfy the part interface."""
        return
class PIDController:
    """Computes a PID control value from a target and a feedback measurement.

    Each call to ``run`` uses the wall-clock time elapsed since the previous
    call (dt) together with the current process-variable reading.
    Based on:
    https://github.com/chrisspen/pid_controller/blob/master/pid_controller/pid.py
    """

    def __init__(self, p=0, i=0, d=0, debug=False):
        # Controller gains.
        self.Kp = p
        self.Ki = i
        self.Kd = d
        # Setpoint the controller is driving the system towards.
        self.target = 0
        # State carried between calls for dt and the derivative term.
        self.prev_tm = time.time()
        self.prev_feedback = 0
        self.error = None
        # Most recent control output.
        self.alpha = 0
        # When True, each run() prints its inputs and output.
        self.debug = debug

    def run(self, target_value, feedback):
        """Return the control output for the given setpoint and measurement."""
        now = time.time()
        self.target = target_value
        self.error = self.target - feedback
        error = self.error

        # Wall-clock time since the previous call.
        dt = now - self.prev_tm

        # Proportional and integral contributions.
        output = self.Kp * error + self.Ki * (error * dt)

        # Derivative contribution on the measurement (guard against dt == 0).
        if dt > 0:
            output += self.Kd * ((feedback - self.prev_feedback) / float(dt))

        # Carry state forward for the next call.
        self.prev_tm = now
        self.prev_feedback = feedback
        self.alpha = output

        if (self.debug):
            print('PID target value:', round(target_value, 4))
            print('PID feedback value:', round(feedback, 4))
            print('PID output:', round(output, 4))

        return output
def twiddle(evaluator, tol=0.001, params=3, error_cmp=None, initial_guess=None):
    """Tune parameters by coordinate descent ("twiddle").

    https://github.com/chrisspen/pid_controller/blob/master/pid_controller/pid.py
    https://en.wikipedia.org/wiki/Coordinate_descent

    Params:
        evaluator := callable that will be passed a series of number parameters,
            which will return an error measure
        tol := tolerance threshold; the smaller the value, the finer the tuning
        params := the number of parameters to tune
        error_cmp := callable taking two error measures (current, best) and
            returning True if the first is better than the second
        initial_guess := parameters to begin tuning with

    Returns the tuned parameter list.
    """
    if error_cmp is None:
        def error_cmp(a, b):
            # Default comparison: the error closer to zero wins.
            return abs(a) < abs(b)

    p = [0] * params if initial_guess is None else list(initial_guess)
    dp = [1] * params

    best_err = evaluator(*p)
    steps = 0
    while sum(dp) > tol:
        steps += 1
        print('steps:', steps, 'tol:', tol, 'best error:', best_err)
        for i in range(len(p)):
            # Probe upwards first.
            p[i] += dp[i]
            err = evaluator(*p)
            if error_cmp(err, best_err):
                # Improvement: keep it and widen the search step.
                best_err = err
                dp[i] *= 1.1
                continue
            # No luck upwards; probe the same distance downwards.
            p[i] -= 2. * dp[i]
            err = evaluator(*p)
            if error_cmp(err, best_err):
                best_err = err
                dp[i] *= 1.1
            else:
                # Neither direction helped: restore and shrink the step.
                p[i] += dp[i]
                dp[i] *= 0.9
    return p
| 29.124183 | 98 | 0.549596 |
import time
class Lambda:
def __init__(self, f):
self.f = f
def run(self, *args, **kwargs):
return self.f(*args, **kwargs)
def shutdown(self):
return
class PIDController:
    """A wall-clock-based PID controller.

    Gains are supplied at construction; call run() once per control step
    with the current target and feedback values to obtain the output.
    NOTE(review): no running integral accumulator is kept — the I term
    only sees the current error over the last time step; confirm this is
    intentional.
    """
    def __init__(self, p=0, i=0, d=0, debug=False):
        # Proportional, integral and derivative gains.
        self.Kp = p
        self.Ki = i
        self.Kd = d
        self.target = 0
        # Timestamp of the previous run() call, used to compute dt.
        self.prev_tm = time.time()
        self.prev_feedback = 0
        self.error = None
        # Most recent controller output.
        self.alpha = 0
        self.debug = debug
    def run(self, target_value, feedback):
        """Compute one PID step and return the control output.

        Args:
            target_value: the desired setpoint.
            feedback: the measured process value.
        """
        curr_tm = time.time()
        self.target = target_value
        error = self.error = self.target - feedback
        # Elapsed wall-clock time since the previous step.
        dt = curr_tm - self.prev_tm
        curr_alpha = 0
        curr_alpha += self.Kp * error
        curr_alpha += self.Ki * (error * dt)
        if dt > 0:
            # Derivative acts on the measurement, not the error.
            # NOTE(review): the term is added with a positive sign
            # (+Kd * d(feedback)/dt); conventional derivative-on-measurement
            # is negated — confirm the expected sign of Kd.
            curr_alpha += self.Kd * ((feedback - self.prev_feedback) / float(dt))
        self.prev_tm = curr_tm
        self.prev_feedback = feedback
        self.alpha = curr_alpha
        if (self.debug):
            print('PID target value:', round(target_value, 4))
            print('PID feedback value:', round(feedback, 4))
            print('PID output:', round(curr_alpha, 4))
        return curr_alpha
def twiddle(evaluator, tol=0.001, params=3, error_cmp=None, initial_guess=None):
    """A coordinate descent parameter tuning algorithm.

    Repeatedly perturbs each parameter in turn, keeping changes that
    improve the error returned by `evaluator` (and widening the probe
    step), otherwise narrowing the step, until the total step size
    falls below `tol`.

    Params:
        evaluator := callable passed `params` numbers, returning an error
        tol := tolerance threshold; smaller means finer tuning
        params := the number of parameters to tune
        error_cmp := callable taking (candidate_error, best_error) and
            returning True if the first is better; defaults to comparing
            absolute values
        initial_guess := parameters to begin tuning with (default zeros)
    """
    def _error_cmp(a, b):
        # Returns true if a is closer to zero than b.
        return abs(a) < abs(b)
    if error_cmp is None:
        error_cmp = _error_cmp
    if initial_guess is None:
        p = [0]*params
    else:
        p = list(initial_guess)
    # dp holds the current probe step for each parameter.
    dp = [1]*params
    best_err = evaluator(*p)
    steps = 0
    while sum(dp) > tol:
        steps += 1
        print('steps:', steps, 'tol:', tol, 'best error:', best_err)
        for i, _ in enumerate(p):
            # First try increasing the parameter.
            p[i] += dp[i]
            err = evaluator(*p)
            if error_cmp(err, best_err):
                # Improvement: keep it and widen the probe step.
                best_err = err
                dp[i] *= 1.1
            else:
                # Otherwise undo and try decreasing instead.
                p[i] -= 2.*dp[i]
                err = evaluator(*p)
                if error_cmp(err, best_err):
                    best_err = err
                    dp[i] *= 1.1
                else:
                    # Neither direction helped: restore and narrow the step.
                    p[i] += dp[i]
                    dp[i] *= 0.9
    return p
| true | true |
f7f57fd9406fda4ec772a4a95816b704f1fd60ba | 5,968 | py | Python | renderer/renderer.py | h3nnn4n/colosseum_renderer | e8fb2e94ca333b4465b2cb4283822338f07d69ff | [
"MIT"
] | null | null | null | renderer/renderer.py | h3nnn4n/colosseum_renderer | e8fb2e94ca333b4465b2cb4283822338f07d69ff | [
"MIT"
] | null | null | null | renderer/renderer.py | h3nnn4n/colosseum_renderer | e8fb2e94ca333b4465b2cb4283822338f07d69ff | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import itertools
import sys
from time import sleep, time
import numpy as np
import pygame
from pygame.colordict import THECOLORS as colors
def load_image(name):
    """Load the image file `name` and return it as a pygame surface
    converted for fast per-pixel-alpha blitting."""
    return pygame.image.load(name).convert_alpha()
def get_food_sprite():
    """Load and return the food sprite, scaled to 20x30 pixels."""
    sprite = load_image("./renderer/sprites/food.png")
    return pygame.transform.scale(sprite, (20, 30))
def get_base_sprite():
    """Load and return the base sprite, scaled to 20x20 pixels."""
    sprite = load_image("./renderer/sprites/base.png")
    return pygame.transform.scale(sprite, (20, 20))
def get_actor_sprite():
    """Load and return the actor sprite, scaled to 20x20 pixels."""
    sprite = load_image("./renderer/sprites/actor.png")
    return pygame.transform.scale(sprite, (20, 20))
class Renderer:
    """Pygame front-end that replays a recorded game tick by tick.

    Call set_data() with the parsed replay (a list of per-tick dicts),
    then call update() once per frame from the main loop.
    """
    def __init__(self):
        pygame.init()
        # Window size in pixels (width, height).
        self.size = 600 * 1.5, 600 * 1.5
        self.screen = pygame.display.set_mode(self.size)
        # Replay data and index of the tick currently shown.
        self._data = None
        self._current_tick = None
        self.clock = pygame.time.Clock()
        self.font = pygame.font.Font(pygame.font.get_default_font(), 16)
        self.food_sprite = get_food_sprite()
        self.actor_sprite = get_actor_sprite()
        self.base_sprite = get_base_sprite()
        # FIXME: This should be read from the replay file
        self._scale = np.array(self.size) / np.array([40, 40])
        # Update the game state 30 times per second
        self.tick_duration = 1.0 / 30.0
        # Cap drawing at roughly 60 frames per second.
        self._target_frame_duration = 1.0 / 60.0
        self._frame_timer = time()
        self._tick_timer = time()
        # owner_id -> display color, populated by set_data().
        self.color_map = {}
        self.agent_colors = [
            colors["cadetblue"],
            colors["mediumorchid3"],
            colors["yellow3"],
            colors["darkolivegreen3"],
        ]
    def set_data(self, data):
        """Install replay `data` and assign a color to each agent found
        in the first tick's bases."""
        self._data = data
        self._current_tick = 0
        first_data = data[0]
        bases = first_data["world_state"]["bases"]
        agent_ids = [base["owner_id"] for base in bases]
        for index, agent_id in enumerate(agent_ids):
            self.color_map[agent_id] = self.agent_colors[index]
    def _advance_tick(self):
        """Step to the next tick at most every `tick_duration` seconds,
        wrapping back to the start at the end of the replay."""
        now = time()
        if now - self._tick_timer < self.tick_duration:
            return
        self._tick_timer = now
        self._current_tick += 1
        if self._current_tick >= len(self._data):
            self._current_tick = 0
    @property
    def data(self):
        # The replay record for the tick currently being displayed.
        return self._data[self._current_tick]
    def update(self):
        """Handle window events, advance the replay and redraw a frame."""
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                sys.exit()
        self.clock.tick()
        self._advance_tick()
        self.screen.fill(colors["white"])
        world_state = self.data["world_state"]
        # Flatten the per-agent action lists into one sequence.
        actions = list(
            itertools.chain.from_iterable(
                [x["actions"] for x in self.data["agent_actions"]]
            )
        )
        foods = world_state["foods"]
        actors = world_state["actors"]
        bases = world_state["bases"]
        # FIXME: Each agent should have its own color
        for base in bases:
            self._draw_base(base)
        for actor in actors:
            position = np.array(actor["position"]) * self._scale
            self._draw_actor(position, actor["owner_id"])
            # Overlay this actor's pending actions as lines.
            for action in actions:
                if action.get("actor_id") == actor["id"]:
                    if action["action"] == "move":
                        pygame.draw.line(
                            self.screen,
                            colors["gray"],
                            np.array(actor["position"]) * self._scale,
                            np.array(action["target"]) * self._scale,
                            2,
                        )
                    if action["action"] == "attack":
                        pygame.draw.line(
                            self.screen,
                            colors["red"],
                            np.array(actor["position"]) * self._scale,
                            np.array(self._get_object_position(action["target"]))
                            * self._scale,
                            4,
                        )
        for food in foods:
            position = np.array(food["position"]) * self._scale
            self._draw_food(position)
        # Sleep away whatever remains of the frame budget.
        now = time()
        diff = self._target_frame_duration - (now - self._frame_timer)
        if diff < self._target_frame_duration and diff > 0:
            sleep(diff)
        self._text(f"fps: {self.clock.get_fps():6.2f}", (10, 10))
        self._text(f" {diff:.4f} (ms)", (10, 30))
        self._frame_timer = time()
        pygame.display.flip()
    def _get_object_position(self, object_id):
        """Look up the position of any world object (food, actor or base)
        by id; returns None when the id is unknown."""
        objects = [
            self.data["world_state"]["foods"],
            self.data["world_state"]["actors"],
            self.data["world_state"]["bases"],
        ]
        for obj in itertools.chain.from_iterable(objects):
            if obj["id"] == object_id:
                return obj["position"]
    def _text(self, text, position, antialias=True, color=(220, 230, 225)):
        """Render `text` onto the screen at pixel `position`."""
        text_surface = self.font.render(text, antialias, color)
        self.screen.blit(text_surface, dest=position)
    def _draw_actor(self, position, owner_id):
        """Draw an actor sprite over an owner-colored disc; `position`
        is already in screen coordinates."""
        color = self.color_map[owner_id]
        pygame.draw.circle(self.screen, color, position, 14, 0)
        self.screen.blit(self.actor_sprite, position + np.array([-10, -10]))
    def _draw_base(self, base):
        """Draw a base sprite over an owner-colored disc plus its food
        stock as a text label."""
        position = np.array(base["position"]) * self._scale
        color = self.color_map[base["owner_id"]]
        pygame.draw.circle(self.screen, color, position, 14, 0)
        self.screen.blit(self.base_sprite, position + np.array([-10, -10]))
        self._text(
            f"food: {base['food']:.1f}",
            position + np.array([-7, -22]),
            color=colors["brown3"],
        )
    def _draw_food(self, position):
        """Draw the food sprite; `position` is in screen coordinates."""
        self.screen.blit(self.food_sprite, position + np.array([-10, -25]))
| 31.246073 | 81 | 0.553284 |
import itertools
import sys
from time import sleep, time
import numpy as np
import pygame
from pygame.colordict import THECOLORS as colors
def load_image(name):
    """Load the image file `name` as a pygame surface with per-pixel alpha."""
    image = pygame.image.load(name).convert_alpha()
    return image
def get_food_sprite():
    """Load and return the food sprite, scaled to 20x30 pixels."""
    image = load_image("./renderer/sprites/food.png")
    image = pygame.transform.scale(image, (20, 30))
    return image
def get_base_sprite():
    """Load and return the base sprite, scaled to 20x20 pixels."""
    image = load_image("./renderer/sprites/base.png")
    image = pygame.transform.scale(image, (20, 20))
    return image
def get_actor_sprite():
    """Load and return the actor sprite, scaled to 20x20 pixels."""
    image = load_image("./renderer/sprites/actor.png")
    image = pygame.transform.scale(image, (20, 20))
    return image
class Renderer:
    """Pygame front-end that replays a recorded game tick by tick.

    Call set_data() with the parsed replay (a list of per-tick dicts),
    then call update() once per frame from the main loop.
    """
    def __init__(self):
        pygame.init()
        # Window size in pixels (width, height).
        self.size = 600 * 1.5, 600 * 1.5
        self.screen = pygame.display.set_mode(self.size)
        # Replay data and index of the tick currently shown.
        self._data = None
        self._current_tick = None
        self.clock = pygame.time.Clock()
        self.font = pygame.font.Font(pygame.font.get_default_font(), 16)
        self.food_sprite = get_food_sprite()
        self.actor_sprite = get_actor_sprite()
        self.base_sprite = get_base_sprite()
        # World is 40x40 units; scale converts world to screen coords.
        self._scale = np.array(self.size) / np.array([40, 40])
        # Advance the replayed game state 30 times per second.
        self.tick_duration = 1.0 / 30.0
        # Cap drawing at roughly 60 frames per second.
        self._target_frame_duration = 1.0 / 60.0
        self._frame_timer = time()
        self._tick_timer = time()
        # owner_id -> display color, populated by set_data().
        self.color_map = {}
        self.agent_colors = [
            colors["cadetblue"],
            colors["mediumorchid3"],
            colors["yellow3"],
            colors["darkolivegreen3"],
        ]
    def set_data(self, data):
        """Install replay `data` and assign a color to each agent found
        in the first tick's bases."""
        self._data = data
        self._current_tick = 0
        first_data = data[0]
        bases = first_data["world_state"]["bases"]
        agent_ids = [base["owner_id"] for base in bases]
        for index, agent_id in enumerate(agent_ids):
            self.color_map[agent_id] = self.agent_colors[index]
    def _advance_tick(self):
        """Step to the next tick at most every `tick_duration` seconds,
        wrapping back to the start at the end of the replay."""
        now = time()
        if now - self._tick_timer < self.tick_duration:
            return
        self._tick_timer = now
        self._current_tick += 1
        if self._current_tick >= len(self._data):
            self._current_tick = 0
    @property
    def data(self):
        # The replay record for the tick currently being displayed.
        return self._data[self._current_tick]
    def update(self):
        """Handle window events, advance the replay and redraw a frame."""
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                sys.exit()
        self.clock.tick()
        self._advance_tick()
        self.screen.fill(colors["white"])
        world_state = self.data["world_state"]
        # Flatten the per-agent action lists into one sequence.
        actions = list(
            itertools.chain.from_iterable(
                [x["actions"] for x in self.data["agent_actions"]]
            )
        )
        foods = world_state["foods"]
        actors = world_state["actors"]
        bases = world_state["bases"]
        for base in bases:
            self._draw_base(base)
        for actor in actors:
            position = np.array(actor["position"]) * self._scale
            self._draw_actor(position, actor["owner_id"])
            # Overlay this actor's pending actions as lines.
            for action in actions:
                if action.get("actor_id") == actor["id"]:
                    if action["action"] == "move":
                        pygame.draw.line(
                            self.screen,
                            colors["gray"],
                            np.array(actor["position"]) * self._scale,
                            np.array(action["target"]) * self._scale,
                            2,
                        )
                    if action["action"] == "attack":
                        pygame.draw.line(
                            self.screen,
                            colors["red"],
                            np.array(actor["position"]) * self._scale,
                            np.array(self._get_object_position(action["target"]))
                            * self._scale,
                            4,
                        )
        for food in foods:
            position = np.array(food["position"]) * self._scale
            self._draw_food(position)
        # Sleep away whatever remains of the frame budget.
        now = time()
        diff = self._target_frame_duration - (now - self._frame_timer)
        if diff < self._target_frame_duration and diff > 0:
            sleep(diff)
        self._text(f"fps: {self.clock.get_fps():6.2f}", (10, 10))
        self._text(f" {diff:.4f} (ms)", (10, 30))
        self._frame_timer = time()
        pygame.display.flip()
    def _get_object_position(self, object_id):
        """Look up the position of any world object (food, actor or base)
        by id; returns None when the id is unknown."""
        objects = [
            self.data["world_state"]["foods"],
            self.data["world_state"]["actors"],
            self.data["world_state"]["bases"],
        ]
        for obj in itertools.chain.from_iterable(objects):
            if obj["id"] == object_id:
                return obj["position"]
    def _text(self, text, position, antialias=True, color=(220, 230, 225)):
        """Render `text` onto the screen at pixel `position`."""
        text_surface = self.font.render(text, antialias, color)
        self.screen.blit(text_surface, dest=position)
    def _draw_actor(self, position, owner_id):
        """Draw an actor sprite over an owner-colored disc; `position`
        is already in screen coordinates."""
        color = self.color_map[owner_id]
        pygame.draw.circle(self.screen, color, position, 14, 0)
        self.screen.blit(self.actor_sprite, position + np.array([-10, -10]))
    def _draw_base(self, base):
        """Draw a base sprite over an owner-colored disc plus its food
        stock as a text label."""
        position = np.array(base["position"]) * self._scale
        color = self.color_map[base["owner_id"]]
        pygame.draw.circle(self.screen, color, position, 14, 0)
        self.screen.blit(self.base_sprite, position + np.array([-10, -10]))
        self._text(
            f"food: {base['food']:.1f}",
            position + np.array([-7, -22]),
            color=colors["brown3"],
        )
    def _draw_food(self, position):
        """Draw the food sprite; `position` is in screen coordinates."""
        self.screen.blit(self.food_sprite, position + np.array([-10, -25]))
| true | true |
f7f580a6f147126fac2e6cb06c27e2e83ea87609 | 2,949 | py | Python | numtotext/convert.py | poikNplop/pynumtotext | 871a6049f150353bd301f027ac5d0d78f1a4fbe6 | [
"MIT"
] | null | null | null | numtotext/convert.py | poikNplop/pynumtotext | 871a6049f150353bd301f027ac5d0d78f1a4fbe6 | [
"MIT"
] | null | null | null | numtotext/convert.py | poikNplop/pynumtotext | 871a6049f150353bd301f027ac5d0d78f1a4fbe6 | [
"MIT"
] | null | null | null | from math import log10
from typing import Type
from .languages.base import NumbersBase
from .languages.en import Numbers_EN
def int_to_words(n: int, numbers: Type[NumbersBase] = Numbers_EN) -> str:
    """Spell out the integer `n` using the word tables on `numbers`.

    Args:
        n: the integer to convert (non-integers are truncated via int()).
        numbers: language definition class supplying the word tables.

    Returns:
        The spelled-out number as a string.
    """
    n = int(n)
    if n == 0:
        # Zero is a special case: it never combines with other parts.
        return numbers.ZERO
    pieces = []
    if n < 0:
        # Prefix with 'minus' and spell the remainder as positive.
        pieces.extend([numbers.NEGATIVE, " "])
        n = -n
    previous_size = None
    # Walk the number table from the largest unit downwards
    # (thousands before hundreds, 19 before 10, down to 1).
    for size, word in sorted(numbers.NUMBERS.items(), reverse=True):
        if n <= 0:
            # Nothing left to spell (consumed by earlier iterations).
            break
        # Split the value into this unit's multiplier and the remainder,
        # e.g. n=12345, size=1000 -> count=12, n=345.
        count, n = divmod(n, size)
        if not count:
            # This unit is absent; move on to the next smaller one.
            continue
        if previous_size is not None:
            # Pick the join word between the previous part and this one,
            # preferring the largest magnitude with a defined join.
            join_key = int(log10(previous_size))
            # 10_000 -> 10 considers magnitudes 4, 3, 2 (not 1).
            for magnitude in range(int(log10(previous_size)), int(log10(size)), -1):
                if magnitude in numbers.JOINS:
                    join_key = magnitude
                    break
            pieces.append(numbers.JOINS.get(join_key, " "))
        if count != 1 or size >= numbers.ONE_THRESHOLD:
            # Spell the multiplier recursively, e.g.
            # 123000 -> "[one hundred and twenty-three] thousand".
            pieces.append(int_to_words(count, numbers=numbers))
            pieces.append(numbers.COUNT_JOIN)
        pieces.append(word)
        previous_size = size
    return "".join(pieces).strip()
def float_to_words(n: float, precision: int = 10, numbers: Type[NumbersBase] = Numbers_EN) -> str:
    """Spell out a floating point number, digit by digit after the point.

    Args:
        n: the number to convert.
        precision: decimal places the remainder is rounded to on each
            step; None disables rounding (may spell float noise).
        numbers: language definition class supplying the word tables.

    Returns:
        The spelled-out number as a string.
    """
    n = float(n)
    if n == 0:
        # Zero is its own word.
        return numbers.ZERO
    parts = []
    if n < 0:
        # Prefix with 'minus' and spell the remainder as positive.
        parts.append(numbers.NEGATIVE + " ")
        n = -n
    # Delegate the integer portion to the integer converter.
    parts.append(int_to_words(int(n), numbers=numbers))
    # Shift the first fractional digit into the integer position,
    # e.g. 123.456 -> 4.56.
    n = (n % 1) * 10
    if n == 0:
        # No fractional part: the integer spelling is the whole answer.
        return "".join(parts).strip()
    # Decimal separator word ("point" unless the language overrides it).
    parts.append(numbers.JOINS.get(0, " point "))
    while n:
        # Name the current digit, then shift the remainder left one place.
        parts.append(numbers.NUMBERS[int(n)] + " ")
        if precision is not None:
            n = round(n % 1, precision) * 10
        else:
            n = (n % 1) * 10
    return "".join(parts).strip()
| 31.042105 | 98 | 0.557816 | from math import log10
from typing import Type
from .languages.base import NumbersBase
from .languages.en import Numbers_EN
def int_to_words(n: int, numbers: Type[NumbersBase] = Numbers_EN) -> str:
    """Spell out the integer `n` using the word tables on `numbers`."""
    n = int(n)
    if n == 0:
        # Zero is a special case; it never combines with other parts.
        return numbers.ZERO
    elif n < 0:
        # Prefix with 'minus' and treat the rest of the number as positive.
        text = [numbers.NEGATIVE, " "]
        n = abs(n)
    else:
        text = []
    lastsize = None
    # Walk the number table from the largest unit downwards
    # (thousands before hundreds, 19 before 10, down to 1).
    for size, word in sorted(numbers.NUMBERS.items(), reverse=True):
        if n <= 0:
            # Nothing left to spell.
            break
        # Split into this unit's multiplier (part) and the remainder,
        # e.g. n=12345, size=1000 -> part=12, n=345.
        part = n // size
        n %= size
        if part < 1:
            # This unit is absent; try the next smaller one.
            continue
        if lastsize is not None:
            # Pick the join word between the previous part and this one,
            # preferring the largest magnitude that has a defined join.
            k = int(log10(lastsize))
            for i in range(int(log10(lastsize)), int(log10(size)), -1):
                if i in numbers.JOINS:
                    k = i
                    break
            text.append(numbers.JOINS.get(k, " "))
        if part != 1 or size >= numbers.ONE_THRESHOLD:
            # Spell the multiplier recursively (e.g. "[three] hundred").
            text.append(int_to_words(part, numbers=numbers))
            text.append(numbers.COUNT_JOIN)
        text.append(word)
        lastsize = size
    return "".join(text).strip()
def float_to_words(n: float, precision: int = 10, numbers: Type[NumbersBase] = Numbers_EN) -> str:
    """Spell out a floating point number, digit by digit after the point.

    `precision` controls rounding of the remainder on each step; None
    disables rounding (which may spell float noise).
    """
    n = float(n)
    if n == 0:
        # Zero is its own word.
        return numbers.ZERO
    elif n < 0:
        # Prefix with 'minus' and treat the rest as positive.
        text = numbers.NEGATIVE + " "
        n = abs(n)
    else:
        text = ""
    # Delegate the integer portion to the integer converter.
    text += int_to_words(int(n), numbers=numbers)
    # Shift the first fractional digit into the integer position,
    # e.g. 123.456 -> 4.56.
    n = (n % 1) * 10
    if n == 0:
        # No fractional part; return the integer spelling.
        return text.strip()
    # Decimal separator word ("point" unless the language overrides it).
    text += numbers.JOINS.get(0, " point ")
    # Name each digit in turn, shifting the remainder left one place.
    while n:
        text += numbers.NUMBERS[int(n)] + " "
        if precision is not None:
            n = round(n % 1, precision) * 10
        else:
            n = (n % 1) * 10
    return text.strip()
| true | true |
f7f58157a4ea7cc06f543dcf34904436c17df17f | 824 | py | Python | virtual/lib/python3.8/site-packages/werkzeug/wrappers/auth.py | Esther-Anyona/mylearner | d49d1c4c8dbeb93cc384f2037c48236be5dc89e1 | [
"MIT"
] | 20 | 2022-01-20T15:26:57.000Z | 2022-02-10T21:55:03.000Z | virtual/lib/python3.8/site-packages/werkzeug/wrappers/auth.py | Esther-Anyona/mylearner | d49d1c4c8dbeb93cc384f2037c48236be5dc89e1 | [
"MIT"
] | 3 | 2022-03-18T06:16:45.000Z | 2022-03-23T14:26:58.000Z | virtual/lib/python3.8/site-packages/werkzeug/wrappers/auth.py | Esther-Anyona/mylearner | d49d1c4c8dbeb93cc384f2037c48236be5dc89e1 | [
"MIT"
] | 11 | 2022-01-20T20:33:55.000Z | 2022-02-16T02:00:51.000Z | import typing as t
import warnings
class AuthorizationMixin:
    """Deprecated compatibility shim.

    Instantiating it emits a DeprecationWarning; the functionality now
    lives on 'Request' directly.
    """

    def __init__(self, *args: t.Any, **kwargs: t.Any) -> None:
        message = (
            "'AuthorizationMixin' is deprecated and will be removed in"
            " Werkzeug 2.1. 'Request' now includes the functionality"
            " directly."
        )
        # stacklevel=2 points the warning at the instantiating caller.
        warnings.warn(message, DeprecationWarning, stacklevel=2)
        super().__init__(*args, **kwargs)
class WWWAuthenticateMixin:
    """Deprecated compatibility shim.

    Instantiating it emits a DeprecationWarning; the functionality now
    lives on 'Response' directly.
    """

    def __init__(self, *args: t.Any, **kwargs: t.Any) -> None:
        message = (
            "'WWWAuthenticateMixin' is deprecated and will be removed"
            " in Werkzeug 2.1. 'Response' now includes the"
            " functionality directly."
        )
        # stacklevel=2 points the warning at the instantiating caller.
        warnings.warn(message, DeprecationWarning, stacklevel=2)
        super().__init__(*args, **kwargs)
| 30.518519 | 71 | 0.584951 | import typing as t
import warnings
class AuthorizationMixin:
    """Deprecated compatibility shim; 'Request' provides this directly."""
    def __init__(self, *args: t.Any, **kwargs: t.Any) -> None:
        # stacklevel=2 points the warning at the instantiating caller.
        warnings.warn(
            "'AuthorizationMixin' is deprecated and will be removed in"
            " Werkzeug 2.1. 'Request' now includes the functionality"
            " directly.",
            DeprecationWarning,
            stacklevel=2,
        )
        super().__init__(*args, **kwargs)
class WWWAuthenticateMixin:
    """Deprecated compatibility shim; 'Response' provides this directly."""
    def __init__(self, *args: t.Any, **kwargs: t.Any) -> None:
        # stacklevel=2 points the warning at the instantiating caller.
        warnings.warn(
            "'WWWAuthenticateMixin' is deprecated and will be removed"
            " in Werkzeug 2.1. 'Response' now includes the"
            " functionality directly.",
            DeprecationWarning,
            stacklevel=2,
        )
        super().__init__(*args, **kwargs)
| true | true |
f7f582747464a2619533996ccfc6f93e75c468a4 | 407 | py | Python | client/lib/jtalk.py | jphacks/TK_1621 | 1de15ba79744087b13bfa400dc3d0eb3f90030a8 | [
"MIT"
] | 5 | 2016-10-28T14:11:06.000Z | 2021-01-22T01:07:47.000Z | client/lib/jtalk.py | jphacks/TK_1621 | 1de15ba79744087b13bfa400dc3d0eb3f90030a8 | [
"MIT"
] | null | null | null | client/lib/jtalk.py | jphacks/TK_1621 | 1de15ba79744087b13bfa400dc3d0eb3f90030a8 | [
"MIT"
] | 3 | 2016-10-30T01:55:24.000Z | 2017-10-28T19:51:07.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# created by Keisuke Okumura
# == Kei18 PC ==
# sudo open_jtalk -x /usr/local/Cellar/open-jtalk/1.09/dic
# -m /usr/local/Cellar/open-jtalk/1.09/voice/mei/mei_normal.htsvoice
# -ow out.wav sample.txt
#
# afplay out.wav
import subprocess
def speak(t):
    """Speak the text `t` aloud by delegating to the jtalk.sh wrapper
    script (Open JTalk text-to-speech)."""
    # Hard-coded path — presumably the Raspberry Pi client install location.
    script = '/home/pi/Desktop/client/lib/jtalk.sh'
    subprocess.call(['sh', script, t])
| 20.35 | 68 | 0.663391 |
import subprocess
def speak(t):
    """Speak the text `t` aloud via the jtalk.sh wrapper script
    (Open JTalk text-to-speech)."""
    # Hard-coded path — presumably the Raspberry Pi client install location.
    script = '/home/pi/Desktop/client/lib/jtalk.sh'
    cmd = ['sh', script, t]
    subprocess.call(cmd)
| true | true |
f7f582a430dcae315464eccc27574a1e8df88cc6 | 6,111 | py | Python | refnx/analysis/test/test_globalfitting.py | dcortie/refnx | 037434fa0a64755f72c540d75063986bd517ab10 | [
"BSD-3-Clause"
] | 32 | 2016-04-18T15:29:59.000Z | 2022-03-27T08:35:29.000Z | refnx/analysis/test/test_globalfitting.py | dcortie/refnx | 037434fa0a64755f72c540d75063986bd517ab10 | [
"BSD-3-Clause"
] | 116 | 2015-10-27T04:33:09.000Z | 2022-02-22T02:02:47.000Z | refnx/analysis/test/test_globalfitting.py | dcortie/refnx | 037434fa0a64755f72c540d75063986bd517ab10 | [
"BSD-3-Clause"
] | 22 | 2015-09-29T23:21:15.000Z | 2022-02-27T18:12:18.000Z | """
Test co-refinement of datasets by fitting 3 neutron reflectivity datasets. The
overall construction of the models can be done in a few different ways.
"""
import os.path
import numpy as np
from numpy.testing import (
assert_,
assert_equal,
assert_almost_equal,
assert_allclose,
)
from refnx.analysis import CurveFitter, Objective, GlobalObjective, Transform
from refnx.dataset import ReflectDataset
from refnx.reflect import Slab, SLD, ReflectModel
SEED = 1
class TestGlobalFitting:
    """Tests for co-refinement of reflectometry datasets via GlobalObjective."""
    def setup_method(self):
        """Build a single-dataset Objective wrapped in a GlobalObjective."""
        self.pth = os.path.dirname(os.path.abspath(__file__))
        # Scattering length densities for the materials in the model.
        self.si = SLD(2.07, name="Si")
        self.sio2 = SLD(3.47, name="SiO2")
        self.d2o = SLD(6.36, name="d2o")
        self.h2o = SLD(-0.56, name="h2o")
        self.cm3 = SLD(3.5, name="cm3")
        self.polymer = SLD(2, name="polymer")
        # Slabs constructed as material(thickness, roughness).
        self.sio2_l = self.sio2(40, 3)
        self.polymer_l = self.polymer(200, 3)
        self.structure = (
            self.si | self.sio2_l | self.polymer_l | self.d2o(0, 3)
        )
        fname = os.path.join(self.pth, "c_PLP0011859_q.txt")
        self.dataset = ReflectDataset(fname)
        self.model = ReflectModel(self.structure, bkg=2e-7)
        self.objective = Objective(
            self.model,
            self.dataset,
            use_weights=False,
            transform=Transform("logY"),
        )
        self.global_objective = GlobalObjective([self.objective])
    def test_residuals_length(self):
        """Global residuals have one entry per data point."""
        # the residuals should be the same length as the data
        residuals = self.global_objective.residuals()
        assert_equal(residuals.size, len(self.dataset))
    def test_globalfitting(self):
        """Global fit of one dataset matches an individual fit."""
        # smoke test for can the global fitting run?
        # also tests that global fitting gives same output as
        # normal fitting (for a single dataset)
        self.model.scale.setp(vary=True, bounds=(0.1, 2))
        self.model.bkg.setp(vary=True, bounds=(1e-10, 8e-6))
        self.structure[-1].rough.setp(vary=True, bounds=(0.2, 6))
        self.sio2_l.thick.setp(vary=True, bounds=(0.2, 80))
        self.polymer_l.thick.setp(bounds=(0.01, 400), vary=True)
        self.polymer_l.sld.real.setp(vary=True, bounds=(0.01, 4))
        self.objective.transform = Transform("logY")
        starting = np.array(self.objective.parameters)
        with np.errstate(invalid="raise"):
            g = CurveFitter(self.global_objective)
            res_g = g.fit()
        # need the same starting point
        self.objective.setp(starting)
        f = CurveFitter(self.objective)
        res_f = f.fit()
        # individual and global should give the same fit.
        assert_almost_equal(res_g.x, res_f.x)
    def test_multipledataset_corefinement(self):
        """Co-refine three contrasts sharing structural parameters."""
        # test corefinement of three datasets
        data361 = ReflectDataset(os.path.join(self.pth, "e361r.txt"))
        data365 = ReflectDataset(os.path.join(self.pth, "e365r.txt"))
        data366 = ReflectDataset(os.path.join(self.pth, "e366r.txt"))
        si = SLD(2.07, name="Si")
        sio2 = SLD(3.47, name="SiO2")
        d2o = SLD(6.36, name="d2o")
        h2o = SLD(-0.56, name="h2o")
        cm3 = SLD(3.47, name="cm3")
        polymer = SLD(1, name="polymer")
        # The three structures share the SiO2 and polymer slabs.
        structure361 = si | sio2(10, 4) | polymer(200, 3) | d2o(0, 3)
        structure365 = si | structure361[1] | structure361[2] | cm3(0, 3)
        structure366 = si | structure361[1] | structure361[2] | h2o(0, 3)
        structure365[-1].rough = structure361[-1].rough
        structure366[-1].rough = structure361[-1].rough
        structure361[1].thick.setp(vary=True, bounds=(0, 20))
        structure361[2].thick.setp(
            value=200.0, bounds=(200.0, 250.0), vary=True
        )
        structure361[2].sld.real.setp(vary=True, bounds=(0, 2))
        structure361[2].vfsolv.setp(value=5.0, bounds=(0.0, 100.0), vary=True)
        model361 = ReflectModel(structure361, bkg=2e-5)
        model365 = ReflectModel(structure365, bkg=2e-5)
        model366 = ReflectModel(structure366, bkg=2e-5)
        model361.bkg.setp(vary=True, bounds=(1e-6, 5e-5))
        model365.bkg.setp(vary=True, bounds=(1e-6, 5e-5))
        model366.bkg.setp(vary=True, bounds=(1e-6, 5e-5))
        objective361 = Objective(model361, data361)
        objective365 = Objective(model365, data365)
        objective366 = Objective(model366, data366)
        global_objective = GlobalObjective(
            [objective361, objective365, objective366]
        )
        # are the right numbers of parameters varying?
        assert_equal(len(global_objective.varying_parameters()), 7)
        # can we set the parameters?
        global_objective.setp(np.array([1e-5, 10, 212, 1, 10, 1e-5, 1e-5]))
        f = CurveFitter(global_objective)
        f.fit()
        indiv_chisqr = np.sum(
            [objective.chisqr() for objective in global_objective.objectives]
        )
        # the overall chi2 should be sum of individual chi2
        global_chisqr = global_objective.chisqr()
        assert_almost_equal(global_chisqr, indiv_chisqr)
        # now check that the parameters were held in common correctly.
        slabs361 = structure361.slabs()
        slabs365 = structure365.slabs()
        slabs366 = structure366.slabs()
        assert_equal(slabs365[0:2, 0:5], slabs361[0:2, 0:5])
        assert_equal(slabs366[0:2, 0:5], slabs361[0:2, 0:5])
        assert_equal(slabs365[-1, 3], slabs361[-1, 3])
        assert_equal(slabs366[-1, 3], slabs361[-1, 3])
        # check that the residuals are the correct lengths
        res361 = objective361.residuals()
        res365 = objective365.residuals()
        res366 = objective366.residuals()
        res_global = global_objective.residuals()
        assert_allclose(res_global[0 : len(res361)], res361, rtol=1e-5)
        assert_allclose(
            res_global[len(res361) : len(res361) + len(res365)],
            res365,
            rtol=1e-5,
        )
        assert_allclose(
            res_global[len(res361) + len(res365) :], res366, rtol=1e-5
        )
        repr(global_objective)
| 36.375 | 78 | 0.620684 | import os.path
import numpy as np
from numpy.testing import (
assert_,
assert_equal,
assert_almost_equal,
assert_allclose,
)
from refnx.analysis import CurveFitter, Objective, GlobalObjective, Transform
from refnx.dataset import ReflectDataset
from refnx.reflect import Slab, SLD, ReflectModel
SEED = 1
class TestGlobalFitting:
    """Tests for co-refinement of reflectometry datasets via GlobalObjective."""
    def setup_method(self):
        """Build a single-dataset Objective wrapped in a GlobalObjective."""
        self.pth = os.path.dirname(os.path.abspath(__file__))
        # Scattering length densities for the materials in the model.
        self.si = SLD(2.07, name="Si")
        self.sio2 = SLD(3.47, name="SiO2")
        self.d2o = SLD(6.36, name="d2o")
        self.h2o = SLD(-0.56, name="h2o")
        self.cm3 = SLD(3.5, name="cm3")
        self.polymer = SLD(2, name="polymer")
        # Slabs constructed as material(thickness, roughness).
        self.sio2_l = self.sio2(40, 3)
        self.polymer_l = self.polymer(200, 3)
        self.structure = (
            self.si | self.sio2_l | self.polymer_l | self.d2o(0, 3)
        )
        fname = os.path.join(self.pth, "c_PLP0011859_q.txt")
        self.dataset = ReflectDataset(fname)
        self.model = ReflectModel(self.structure, bkg=2e-7)
        self.objective = Objective(
            self.model,
            self.dataset,
            use_weights=False,
            transform=Transform("logY"),
        )
        self.global_objective = GlobalObjective([self.objective])
    def test_residuals_length(self):
        """Global residuals have one entry per data point."""
        residuals = self.global_objective.residuals()
        assert_equal(residuals.size, len(self.dataset))
    def test_globalfitting(self):
        """Smoke test: global fit of one dataset matches an individual fit."""
        self.model.scale.setp(vary=True, bounds=(0.1, 2))
        self.model.bkg.setp(vary=True, bounds=(1e-10, 8e-6))
        self.structure[-1].rough.setp(vary=True, bounds=(0.2, 6))
        self.sio2_l.thick.setp(vary=True, bounds=(0.2, 80))
        self.polymer_l.thick.setp(bounds=(0.01, 400), vary=True)
        self.polymer_l.sld.real.setp(vary=True, bounds=(0.01, 4))
        self.objective.transform = Transform("logY")
        starting = np.array(self.objective.parameters)
        with np.errstate(invalid="raise"):
            g = CurveFitter(self.global_objective)
            res_g = g.fit()
        # Re-run the individual fit from the same starting point.
        self.objective.setp(starting)
        f = CurveFitter(self.objective)
        res_f = f.fit()
        # Individual and global fits should agree.
        assert_almost_equal(res_g.x, res_f.x)
    def test_multipledataset_corefinement(self):
        """Co-refine three contrasts sharing structural parameters."""
        data361 = ReflectDataset(os.path.join(self.pth, "e361r.txt"))
        data365 = ReflectDataset(os.path.join(self.pth, "e365r.txt"))
        data366 = ReflectDataset(os.path.join(self.pth, "e366r.txt"))
        si = SLD(2.07, name="Si")
        sio2 = SLD(3.47, name="SiO2")
        d2o = SLD(6.36, name="d2o")
        h2o = SLD(-0.56, name="h2o")
        cm3 = SLD(3.47, name="cm3")
        polymer = SLD(1, name="polymer")
        # The three structures share the SiO2 and polymer slabs.
        structure361 = si | sio2(10, 4) | polymer(200, 3) | d2o(0, 3)
        structure365 = si | structure361[1] | structure361[2] | cm3(0, 3)
        structure366 = si | structure361[1] | structure361[2] | h2o(0, 3)
        structure365[-1].rough = structure361[-1].rough
        structure366[-1].rough = structure361[-1].rough
        structure361[1].thick.setp(vary=True, bounds=(0, 20))
        structure361[2].thick.setp(
            value=200.0, bounds=(200.0, 250.0), vary=True
        )
        structure361[2].sld.real.setp(vary=True, bounds=(0, 2))
        structure361[2].vfsolv.setp(value=5.0, bounds=(0.0, 100.0), vary=True)
        model361 = ReflectModel(structure361, bkg=2e-5)
        model365 = ReflectModel(structure365, bkg=2e-5)
        model366 = ReflectModel(structure366, bkg=2e-5)
        model361.bkg.setp(vary=True, bounds=(1e-6, 5e-5))
        model365.bkg.setp(vary=True, bounds=(1e-6, 5e-5))
        model366.bkg.setp(vary=True, bounds=(1e-6, 5e-5))
        objective361 = Objective(model361, data361)
        objective365 = Objective(model365, data365)
        objective366 = Objective(model366, data366)
        global_objective = GlobalObjective(
            [objective361, objective365, objective366]
        )
        # Expect exactly seven varying parameters across the objectives.
        assert_equal(len(global_objective.varying_parameters()), 7)
        # Parameters can be set as one flat vector.
        global_objective.setp(np.array([1e-5, 10, 212, 1, 10, 1e-5, 1e-5]))
        f = CurveFitter(global_objective)
        f.fit()
        indiv_chisqr = np.sum(
            [objective.chisqr() for objective in global_objective.objectives]
        )
        # Overall chi2 should equal the sum of the individual chi2 values.
        global_chisqr = global_objective.chisqr()
        assert_almost_equal(global_chisqr, indiv_chisqr)
        # Shared parameters must stay in common across structures.
        slabs361 = structure361.slabs()
        slabs365 = structure365.slabs()
        slabs366 = structure366.slabs()
        assert_equal(slabs365[0:2, 0:5], slabs361[0:2, 0:5])
        assert_equal(slabs366[0:2, 0:5], slabs361[0:2, 0:5])
        assert_equal(slabs365[-1, 3], slabs361[-1, 3])
        assert_equal(slabs366[-1, 3], slabs361[-1, 3])
        # Global residuals are the concatenation of the individual ones.
        res361 = objective361.residuals()
        res365 = objective365.residuals()
        res366 = objective366.residuals()
        res_global = global_objective.residuals()
        assert_allclose(res_global[0 : len(res361)], res361, rtol=1e-5)
        assert_allclose(
            res_global[len(res361) : len(res361) + len(res365)],
            res365,
            rtol=1e-5,
        )
        assert_allclose(
            res_global[len(res361) + len(res365) :], res366, rtol=1e-5
        )
        repr(global_objective)
| true | true |
f7f582bfe75a807f37c6dc30c126ec9b5ae596ba | 122,034 | py | Python | tensorflow/python/keras/_impl/keras/backend.py | destenson/tensorflow--tensorflow | 7f84d88d39f236e5c0cea492a2248782e696c972 | [
"Apache-2.0"
] | null | null | null | tensorflow/python/keras/_impl/keras/backend.py | destenson/tensorflow--tensorflow | 7f84d88d39f236e5c0cea492a2248782e696c972 | [
"Apache-2.0"
] | null | null | null | tensorflow/python/keras/_impl/keras/backend.py | destenson/tensorflow--tensorflow | 7f84d88d39f236e5c0cea492a2248782e696c972 | [
"Apache-2.0"
] | null | null | null | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=protected-access
# pylint: disable=redefined-outer-name
# pylint: disable=redefined-builtin
"""Keras backend API.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import json
import os
import numpy as np
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session as session_module
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes as dtypes_module
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.layers import base as tf_base_layers
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import ctc_ops as ctc
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import gradients as gradients_module
from tensorflow.python.ops import image_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import tensor_array_grad # pylint: disable=unused-import
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variables as variables_module
from tensorflow.python.training import moving_averages
from tensorflow.python.util import tf_inspect
# Keep references to the Python builtins `all` and `sum` — presumably
# this module shadows them later with backend ops of the same name;
# confirm against the full file.
py_all = all
py_sum = sum
# INTERNAL UTILS
# This is the default internal TF session used by Keras.
# It can be set manually via `set_session(sess)`.
_SESSION = None
# This dictionary holds a mapping {graph: learning_phase}.
# A learning phase is a bool tensor used to run Keras models in
# either train mode (learning_phase == 1) or test mode (learning_phase == 0).
_GRAPH_LEARNING_PHASES = {}
# This dictionary holds a mapping {graph: UID_DICT}.
# each UID_DICT is a dictionary mapping name prefixes to a current index,
# used for generating graph-specific string UIDs
# for various names (e.g. layer names).
_GRAPH_UID_DICTS = {}
# This boolean flag can be set to True to leave variable initialization
# up to the user.
# Change its value via `manual_variable_initialization(value)`.
_MANUAL_VAR_INIT = False
# The type of float to use throughout a session.
_FLOATX = 'float32'
# Epsilon fuzz factor used throughout the codebase.
# NOTE(review): 10e-8 equals 1e-07 (not 1e-08); docstrings in this file
# quote 1e-08 — confirm which value is intended.
_EPSILON = 10e-8
# Default image data format, one of "channels_last", "channels_first".
_IMAGE_DATA_FORMAT = 'channels_last'
def backend():
  """Return the name of the backend currently in use.

  Exists only for API compatibility with multi-backend Keras.

  Returns:
      The string "tensorflow".
  """
  return 'tensorflow'
def epsilon():
  """Return the fuzz factor used in numeric expressions.

  Returns:
      A float, the current epsilon value.

  Example:
  ```python
      >>> keras.backend.epsilon()
      1e-08
  ```
  """
  return _EPSILON
def set_epsilon(value):
  """Set the fuzz factor used in numeric expressions.

  Arguments:
      value: float, the new value of epsilon.

  Example:
  ```python
      >>> from keras import backend as K
      >>> K.set_epsilon(1e-05)
      >>> K.epsilon()
      1e-05
  ```
  """
  global _EPSILON
  _EPSILON = value
def floatx():
  """Return the default float type, as a string.

  E.g. 'float16', 'float32', 'float64'.

  Returns:
      String, the current default float type.

  Example:
  ```python
      >>> keras.backend.floatx()
      'float32'
  ```
  """
  return _FLOATX
def set_floatx(value):
  """Set the default float type.

  Arguments:
      value: String; one of 'float16', 'float32', or 'float64'.

  Example:
  ```python
      >>> from keras import backend as K
      >>> K.set_floatx('float16')
      >>> K.floatx()
      'float16'
  ```

  Raises:
      ValueError: In case of invalid value.
  """
  global _FLOATX
  accepted_dtypes = {'float16', 'float32', 'float64'}
  if value not in accepted_dtypes:
    raise ValueError('Unknown floatx type: ' + str(value))
  _FLOATX = str(value)
def cast_to_floatx(x):
  """Cast a Numpy array to the default Keras float type.

  Arguments:
      x: Numpy array.

  Returns:
      The same Numpy array, cast to the default float type.

  Example:
  ```python
      >>> from keras import backend as K
      >>> arr = numpy.array([1.0, 2.0], dtype='float64')
      >>> K.cast_to_floatx(arr).dtype
      dtype('float32')
  ```
  """
  return np.asarray(x, dtype=_FLOATX)
def image_data_format():
  """Return the default image data format convention.

  Returns:
      A string, either `'channels_first'` or `'channels_last'`.

  Example:
  ```python
      >>> keras.backend.image_data_format()
      'channels_first'
  ```
  """
  return _IMAGE_DATA_FORMAT
def set_image_data_format(data_format):
  """Set the value of the image data format convention.

  Arguments:
      data_format: string, `'channels_first'` or `'channels_last'`.

  Example:
  ```python
      >>> from keras import backend as K
      >>> K.set_image_data_format('channels_last')
      >>> K.image_data_format()
      'channels_last'
  ```

  Raises:
      ValueError: In case of invalid `data_format` value.
  """
  global _IMAGE_DATA_FORMAT
  accepted_formats = {'channels_last', 'channels_first'}
  if data_format not in accepted_formats:
    raise ValueError('Unknown data_format:', data_format)
  _IMAGE_DATA_FORMAT = str(data_format)
def get_uid(prefix=''):
  """Associate a string prefix with an integer counter in a TensorFlow graph.

  Arguments:
      prefix: String prefix to index.

  Returns:
      Unique integer ID for this prefix within the current default graph.

  Example:
  ```
    >>> get_uid('dense')
    1
    >>> get_uid('dense')
    2
  ```
  """
  graph = ops.get_default_graph()
  per_graph_uids = tf_base_layers.PER_GRAPH_LAYER_NAME_UIDS
  if graph not in per_graph_uids:
    # Lazily create the per-graph counter table; missing prefixes start at 0.
    per_graph_uids[graph] = collections.defaultdict(int)
  name_uids = per_graph_uids[graph]
  name_uids[prefix] += 1
  return name_uids[prefix]
def reset_uids():
  """Forget every per-graph layer-name UID counter."""
  # Emptying the shared dict in place is equivalent to deleting each key.
  tf_base_layers.PER_GRAPH_LAYER_NAME_UIDS.clear()
def clear_session():
  """Destroy the current TF graph and create a new one.

  Useful to avoid clutter from old models / layers.
  """
  global _SESSION
  global _GRAPH_LEARNING_PHASES  # pylint: disable=global-variable-not-assigned
  ops.reset_default_graph()
  reset_uids()
  _SESSION = None
  # Re-create the learning-phase placeholder in the fresh default graph.
  phase = array_ops.placeholder(dtype='bool', name='keras_learning_phase')
  _GRAPH_LEARNING_PHASES = {ops.get_default_graph(): phase}
def manual_variable_initialization(value):
  """Set the manual variable initialization flag.

  This boolean flag determines whether variables should be initialized as
  they are instantiated (default), or if the user should handle the
  initialization (e.g. via `tf.initialize_all_variables()`).

  Arguments:
      value: Python boolean.
  """
  global _MANUAL_VAR_INIT
  _MANUAL_VAR_INIT = value
def learning_phase():
  """Return the learning phase flag.

  The learning phase flag is a bool tensor (0 = test, 1 = train) to be
  passed as input to any Keras function that uses a different behavior at
  train time and test time.

  Returns:
      Learning phase (scalar integer tensor or Python integer).
  """
  graph = ops.get_default_graph()
  if graph not in _GRAPH_LEARNING_PHASES:
    # Lazily create one placeholder per graph.
    _GRAPH_LEARNING_PHASES[graph] = array_ops.placeholder(
        dtype='bool', name='keras_learning_phase')
  return _GRAPH_LEARNING_PHASES[graph]
def set_learning_phase(value):
  """Set the learning phase to a fixed value.

  Arguments:
      value: Learning phase value, either 0 or 1 (integers).

  Raises:
      ValueError: if `value` is neither `0` nor `1`.
  """
  global _GRAPH_LEARNING_PHASES  # pylint: disable=global-variable-not-assigned
  if value not in {0, 1}:
    raise ValueError('Expected learning phase to be 0 or 1.')
  _GRAPH_LEARNING_PHASES[ops.get_default_graph()] = value
def get_session():
  """Returns the TF session to be used by the backend.

  If a default TensorFlow session is available, we will return it.
  Else, we will return the global Keras session.
  If no global Keras session exists at this point:
  we will create a new global session.
  Note that you can manually set the global session
  via `K.set_session(sess)`.

  Returns:
      A TensorFlow session.
  """
  global _SESSION
  # A session installed via `with sess.as_default():` always wins.
  if ops.get_default_session() is not None:
    session = ops.get_default_session()
  else:
    if _SESSION is None:
      # Honor OMP_NUM_THREADS for intra-op parallelism when it is set;
      # otherwise let TF pick its own thread count.
      if not os.environ.get('OMP_NUM_THREADS'):
        config = config_pb2.ConfigProto(allow_soft_placement=True)
      else:
        num_thread = int(os.environ.get('OMP_NUM_THREADS'))
        config = config_pb2.ConfigProto(
            intra_op_parallelism_threads=num_thread, allow_soft_placement=True)
      _SESSION = session_module.Session(config=config)
    session = _SESSION
  # Unless the user opted into manual initialization (see
  # `manual_variable_initialization`), initialize any still-uninitialized
  # variables in the session's graph before handing the session out.
  if not _MANUAL_VAR_INIT:
    with session.graph.as_default():
      _initialize_variables(session)
  return session
def set_session(session):
  """Set the global TensorFlow session used by the backend.

  Arguments:
      session: A TF Session.
  """
  global _SESSION
  _SESSION = session
# DEVICE MANIPULATION
class _TfDeviceCaptureOp(object):
"""Class for capturing the TF device scope."""
def __init__(self):
self.device = None
def _set_device(self, device):
"""This method captures TF's explicit device scope setting."""
self.device = device
def _get_current_tf_device():
  """Return the explicit device of the current context, otherwise `None`.

  Returns:
      If the current device scope is explicitly set, a string with the
      device (`CPU` or `GPU`); otherwise `None`.
  """
  capture = _TfDeviceCaptureOp()
  # Run the graph's device functions against a dummy op so they write
  # their device choice into `capture`.
  ops.get_default_graph()._apply_device_functions(capture)
  return capture.device
def _is_current_explicit_device(device_type):
"""Check if the current device is explicitly set on the device type specified.
Arguments:
device_type: A string containing `GPU` or `CPU` (case-insensitive).
Returns:
A boolean indicating if the current device scope is explicitly set on the
device type.
Raises:
ValueError: If the `device_type` string indicates an unsupported device.
"""
device_type = device_type.upper()
if device_type not in ['CPU', 'GPU']:
raise ValueError('device_type should be either "CPU" or "GPU".')
device = _get_current_tf_device()
return device is not None and device.device_type == device_type.upper()
def _get_available_gpus():
  """Get a list of available GPU devices (formatted as strings).

  Returns:
      A list of available GPU device names.
  """
  return [
      device.name
      for device in get_session().list_devices()
      if device.device_type == 'GPU'
  ]
def _has_nchw_support():
  """Check whether the current scope supports NCHW ops.

  TensorFlow does not support NCHW on CPU, so we require that the scope is
  not explicitly placed on CPU and that GPUs are available; soft placement
  will then put the ops on the GPU device.

  Returns:
      bool: whether the current scope's device placement supports NCHW.
  """
  on_cpu = _is_current_explicit_device('CPU')
  has_gpus = bool(_get_available_gpus())
  return has_gpus and not on_cpu
# VARIABLE MANIPULATION
def _to_tensor(x, dtype):
  """Convert the input `x` to a tensor of type `dtype`.

  Arguments:
      x: An object to be converted (numpy array, list, tensors).
      dtype: The destination type.

  Returns:
      A tensor.
  """
  return ops.convert_to_tensor(x, dtype=dtype)
def is_sparse(tensor):
  """Return whether a tensor is a sparse tensor.

  Arguments:
      tensor: A tensor instance.

  Returns:
      A boolean.

  Example:
  ```python
      >>> from keras import backend as K
      >>> b = K.placeholder((2, 2), sparse=True)
      >>> print(K.is_sparse(b))
      True
  ```
  """
  return isinstance(tensor, sparse_tensor.SparseTensor)
def to_dense(tensor):
  """Convert a sparse tensor into a dense tensor and return it.

  A dense input is returned unchanged.

  Arguments:
      tensor: A tensor instance (potentially sparse).

  Returns:
      A dense tensor.

  Examples:
  ```python
      >>> from keras import backend as K
      >>> b = K.placeholder((2, 2), sparse=True)
      >>> c = K.to_dense(b)
      >>> print(K.is_sparse(c))
      False
  ```
  """
  if not is_sparse(tensor):
    return tensor
  return sparse_ops.sparse_tensor_to_dense(tensor)
# Alias TF's name_scope so Keras code can use `K.name_scope` directly.
name_scope = ops.name_scope
def variable(value, dtype=None, name=None, constraint=None):
  """Instantiates a variable and returns it.

  Arguments:
      value: Numpy array, initial value of the tensor.
      dtype: Tensor type.
      name: Optional name string for the tensor.
      constraint: Optional projection function to be
          applied to the variable after an optimizer update.

  Returns:
      A variable instance (with Keras metadata included).

  Examples:
  ```python
      >>> from keras import backend as K
      >>> val = np.array([[1, 2], [3, 4]])
      >>> kvar = K.variable(value=val, dtype='float64', name='example_var')
      >>> K.dtype(kvar)
      'float64'
      >>> print(kvar)
      example_var
      >>> kvar.eval()
      array([[ 1.,  2.],
             [ 3.,  4.]])
  ```
  """
  if dtype is None:
    dtype = floatx()
  # A scipy sparse matrix (anything with `.tocoo()`) becomes a constant
  # SparseTensor rather than a mutable TF Variable.
  if hasattr(value, 'tocoo'):
    sparse_coo = value.tocoo()
    # Stack the COO row/col index vectors into an (nnz, 2) indices array.
    indices = np.concatenate((np.expand_dims(sparse_coo.row, 1), np.expand_dims(
        sparse_coo.col, 1)), 1)
    v = sparse_tensor.SparseTensor(
        indices=indices, values=sparse_coo.data, dense_shape=sparse_coo.shape)
    # Private Keras bookkeeping attributes read elsewhere in the framework.
    v._keras_shape = sparse_coo.shape
    v._uses_learning_phase = False
    return v
  v = variables_module.Variable(
      value,
      dtype=dtypes_module.as_dtype(dtype),
      name=name,
      constraint=constraint)
  # Record the static shape when it is known from the initial value.
  if isinstance(value, np.ndarray):
    v._keras_shape = value.shape
  elif hasattr(value, 'get_shape'):
    v._keras_shape = int_shape(value)
  v._uses_learning_phase = False
  return v
def _initialize_variables(session):
  """Utility to initialize uninitialized variables on the fly."""
  variables = variables_module.global_variables()
  # Only consider variables we have not already confirmed as initialized.
  candidate_vars = []
  for v in variables:
    if not getattr(v, '_keras_initialized', False):
      candidate_vars.append(v)
  if candidate_vars:
    # This step is expensive, so we only run it on variables not already
    # marked as initialized.
    is_initialized = session.run(
        [variables_module.is_variable_initialized(v) for v in candidate_vars])
    uninitialized_vars = []
    for flag, v in zip(is_initialized, candidate_vars):
      if not flag:
        uninitialized_vars.append(v)
      # Mark every candidate so the session.run above is not repeated for it
      # on subsequent calls.
      v._keras_initialized = True
    if uninitialized_vars:
      session.run(variables_module.variables_initializer(uninitialized_vars))
def constant(value, dtype=None, shape=None, name=None):
  """Create a constant tensor.

  Arguments:
      value: A constant value (or list).
      dtype: The type of the elements of the resulting tensor.
          Defaults to the Keras float type.
      shape: Optional dimensions of resulting tensor.
      name: Optional name for the tensor.

  Returns:
      A Constant Tensor.
  """
  dtype = floatx() if dtype is None else dtype
  return constant_op.constant(value, dtype=dtype, shape=shape, name=name)
def is_keras_tensor(x):
  """Return whether `x` is a Keras tensor.

  A "Keras tensor" is a tensor that was returned by a Keras layer
  (`Layer` class) or by `Input`.

  Arguments:
      x: A candidate tensor.

  Returns:
      A boolean: whether the argument is a Keras tensor.

  Raises:
      ValueError: In case `x` is not a symbolic tensor.

  Examples:
  ```python
      >>> from keras import backend as K
      >>> from keras.layers import Input, Dense
      >>> np_var = numpy.array([1, 2])
      >>> K.is_keras_tensor(np_var)  # A numpy array raises a ValueError.
      ValueError
      >>> keras_var = K.variable(np_var)
      >>> K.is_keras_tensor(keras_var)  # A backend variable is not one.
      False
      >>> keras_input = Input([10])
      >>> K.is_keras_tensor(keras_input)  # An Input is a Keras tensor.
      True
      >>> K.is_keras_tensor(Dense(10)(keras_input))  # Layer outputs are too.
      True
  ```
  """
  symbolic_types = (ops.Tensor, variables_module.Variable,
                    sparse_tensor.SparseTensor)
  if not isinstance(x, symbolic_types):
    raise ValueError('Unexpectedly found an instance of type `' + str(type(x)) +
                     '`. Expected a symbolic tensor instance.')
  # Only tensors produced by Keras layers carry `_keras_history`.
  return hasattr(x, '_keras_history')
def placeholder(shape=None, ndim=None, dtype=None, sparse=False, name=None):
  """Instantiate a placeholder tensor and return it.

  Arguments:
      shape: Shape of the placeholder
          (integer tuple, may include `None` entries).
      ndim: Number of axes of the tensor.
          At least one of {`shape`, `ndim`} must be specified.
          If both are specified, `shape` is used.
      dtype: Placeholder type. Defaults to the Keras float type.
      sparse: Boolean, whether the placeholder should have a sparse type.
      name: Optional name string for the placeholder.

  Returns:
      Tensor instance (with Keras metadata included).

  Examples:
  ```python
      >>> from keras import backend as K
      >>> input_ph = K.placeholder(shape=(2, 4, 5))
      >>> input_ph
      <tf.Tensor 'Placeholder_4:0' shape=(2, 4, 5) dtype=float32>
  ```
  """
  if dtype is None:
    dtype = floatx()
  if not shape and ndim:
    # Derive a fully-unknown shape of the requested rank.
    shape = (None,) * ndim
  if sparse:
    x = array_ops.sparse_placeholder(dtype, shape=shape, name=name)
  else:
    x = array_ops.placeholder(dtype, shape=shape, name=name)
  x._uses_learning_phase = False
  return x
def is_placeholder(x):
  """Return whether `x` is a placeholder.

  Arguments:
      x: A candidate placeholder.

  Returns:
      Boolean.
  """
  try:
    op = x.op
    return op.type == 'Placeholder'
  except AttributeError:
    # Objects without an `.op` (or `.op.type`) cannot be placeholders.
    return False
def shape(x):
  """Return the symbolic shape of a tensor or variable.

  Arguments:
      x: A tensor or variable.

  Returns:
      A symbolic shape (which is itself a tensor).

  Examples:
  ```python
      >>> from keras import backend as K
      >>> kvar = K.variable(value=np.array([[1, 2], [3, 4]]))
      >>> K.shape(kvar)
      <tf.Tensor 'Shape_8:0' shape=(2,) dtype=int32>
      # For an integer tuple instead, use K.int_shape(x).
      >>> K.shape(kvar).eval(session=K.get_session())
      array([2, 2], dtype=int32)
  ```
  """
  return array_ops.shape(x)
def int_shape(x):
  """Return the shape of a tensor or variable as a tuple of int or None.

  Arguments:
      x: Tensor or variable.

  Returns:
      A tuple of integers (or None entries), or `None` when the static
      shape is unknown.

  Examples:
  ```python
      >>> from keras import backend as K
      >>> K.int_shape(K.placeholder(shape=(2, 4, 5)))
      (2, 4, 5)
  ```
  """
  try:
    static_shape = x.get_shape()
    return tuple(static_shape.as_list())
  except ValueError:
    # Raised when the tensor has no statically-known shape.
    return None
def ndim(x):
  """Return the number of axes in a tensor, as an integer.

  Arguments:
      x: Tensor or variable.

  Returns:
      Integer (scalar), number of axes, or `None` if the rank is unknown.

  Examples:
  ```python
      >>> from keras import backend as K
      >>> K.ndim(K.placeholder(shape=(2, 4, 5)))
      3
  ```
  """
  dims = x.get_shape()._dims
  return None if dims is None else len(dims)
def dtype(x):
  """Return the dtype of a Keras tensor or variable, as a string.

  Arguments:
      x: Tensor or variable.

  Returns:
      String, dtype of `x` (e.g. `'float32'`, or `'float32_ref'` for a
      variable).

  Examples:
  ```python
      >>> from keras import backend as K
      >>> K.dtype(K.placeholder(shape=(2, 4, 5)))
      'float32'
  ```
  """
  base = x.dtype.base_dtype
  return base.name
def eval(x):
  """Evaluate the value of a variable.

  Arguments:
      x: A variable.

  Returns:
      A Numpy array.

  Examples:
  ```python
      >>> from keras import backend as K
      >>> kvar = K.variable(np.array([[1, 2], [3, 4]]), dtype='float32')
      >>> K.eval(kvar)
      array([[ 1.,  2.],
             [ 3.,  4.]], dtype=float32)
  ```
  """
  dense = to_dense(x)
  return dense.eval(session=get_session())
def zeros(shape, dtype=None, name=None):
  """Instantiate an all-zeros variable and return it.

  Arguments:
      shape: Tuple of integers, shape of returned Keras variable.
      dtype: String, data type of returned Keras variable.
      name: String, name of returned Keras variable.

  Returns:
      A variable (including Keras metadata), filled with `0.0`.

  Example:
  ```python
      >>> from keras import backend as K
      >>> K.eval(K.zeros((3, 4)))
      array([[ 0.,  0.,  0.,  0.],
             [ 0.,  0.,  0.,  0.],
             [ 0.,  0.,  0.,  0.]], dtype=float32)
  ```
  """
  if dtype is None:
    dtype = floatx()
  tf_dtype = dtypes_module.as_dtype(dtype)
  initializer = init_ops.constant_initializer(0., dtype=tf_dtype)
  return variable(initializer(shape), dtype, name)
def ones(shape, dtype=None, name=None):
  """Instantiate an all-ones tensor variable and return it.

  Arguments:
      shape: Tuple of integers, shape of returned Keras variable.
      dtype: String, data type of returned Keras variable.
      name: String, name of returned Keras variable.

  Returns:
      A Keras variable, filled with `1.0`.

  Example:
  ```python
      >>> from keras import backend as K
      >>> K.eval(K.ones((3, 4)))
      array([[ 1.,  1.,  1.,  1.],
             [ 1.,  1.,  1.,  1.],
             [ 1.,  1.,  1.,  1.]], dtype=float32)
  ```
  """
  if dtype is None:
    dtype = floatx()
  tf_dtype = dtypes_module.as_dtype(dtype)
  initializer = init_ops.constant_initializer(1., dtype=tf_dtype)
  return variable(initializer(shape), dtype, name)
def eye(size, dtype=None, name=None):
  """Instantiate an identity matrix and return it.

  Arguments:
      size: Integer, number of rows/columns.
      dtype: String, data type of returned Keras variable.
      name: String, name of returned Keras variable.

  Returns:
      A Keras variable, an identity matrix.

  Example:
  ```python
      >>> from keras import backend as K
      >>> K.eval(K.eye(3))
      array([[ 1.,  0.,  0.],
             [ 0.,  1.,  0.],
             [ 0.,  0.,  1.]], dtype=float32)
  ```
  """
  if dtype is None:
    dtype = floatx()
  tf_dtype = dtypes_module.as_dtype(dtype)
  identity_matrix = linalg_ops.eye(size, dtype=tf_dtype)
  return variable(identity_matrix, dtype, name)
def zeros_like(x, dtype=None, name=None):
  """Instantiate an all-zeros variable of the same shape as another tensor.

  Arguments:
      x: Keras variable or Keras tensor.
      dtype: String, dtype of returned Keras variable.
          None uses the dtype of x.
      name: String, name for the variable to create.

  Returns:
      A Keras variable with the shape of x filled with zeros.

  Example:
  ```python
      >>> from keras import backend as K
      >>> kvar = K.variable(np.random.random((2, 3)))
      >>> K.eval(K.zeros_like(kvar))
      array([[ 0.,  0.,  0.],
             [ 0.,  0.,  0.]], dtype=float32)
  ```
  """
  return array_ops.zeros_like(x, dtype=dtype, name=name)
def ones_like(x, dtype=None, name=None):
  """Instantiate an all-ones variable of the same shape as another tensor.

  Arguments:
      x: Keras variable or tensor.
      dtype: String, dtype of returned Keras variable.
          None uses the dtype of x.
      name: String, name for the variable to create.

  Returns:
      A Keras variable with the shape of x filled with ones.

  Example:
  ```python
      >>> from keras import backend as K
      >>> kvar = K.variable(np.random.random((2, 3)))
      >>> K.eval(K.ones_like(kvar))
      array([[ 1.,  1.,  1.],
             [ 1.,  1.,  1.]], dtype=float32)
  ```
  """
  return array_ops.ones_like(x, dtype=dtype, name=name)
def identity(x, name=None):
  """Return a tensor with the same content as the input tensor.

  Arguments:
      x: The input tensor.
      name: String, name for the variable to create.

  Returns:
      A tensor of the same shape, type and content.
  """
  return array_ops.identity(x, name=name)
def random_uniform_variable(shape, low, high, dtype=None, name=None, seed=None):
  """Instantiate a variable with values drawn from a uniform distribution.

  Arguments:
      shape: Tuple of integers, shape of returned Keras variable.
      low: Float, lower boundary of the output interval.
      high: Float, upper boundary of the output interval.
      dtype: String, dtype of returned Keras variable.
      name: String, name of returned Keras variable.
      seed: Integer, random seed.

  Returns:
      A Keras variable, filled with drawn samples.

  Example:
  ```python
      >>> kvar = K.random_uniform_variable((2, 3), 0, 1)
      >>> K.eval(kvar)
      array([[ 0.10940075,  0.10047495,  0.476143  ],
             [ 0.66137183,  0.00869417,  0.89220798]], dtype=float32)
  ```
  """
  if dtype is None:
    dtype = floatx()
  tf_dtype = dtypes_module.as_dtype(dtype)
  if seed is None:
    # Derive the seed from the Numpy RNG so seeding Numpy also makes
    # these draws reproducible.
    seed = np.random.randint(10e8)
  initializer = init_ops.random_uniform_initializer(
      low, high, dtype=tf_dtype, seed=seed)
  return variable(initializer(shape), dtype=dtype, name=name)
def random_normal_variable(shape, mean, scale, dtype=None, name=None,
                           seed=None):
  """Instantiate a variable with values drawn from a normal distribution.

  Arguments:
      shape: Tuple of integers, shape of returned Keras variable.
      mean: Float, mean of the normal distribution.
      scale: Float, standard deviation of the normal distribution.
      dtype: String, dtype of returned Keras variable.
      name: String, name of returned Keras variable.
      seed: Integer, random seed.

  Returns:
      A Keras variable, filled with drawn samples.

  Example:
  ```python
      >>> kvar = K.random_normal_variable((2, 3), 0, 1)
      >>> K.eval(kvar)
      array([[ 1.19591331,  0.68685907, -0.63814116],
             [ 0.92629528,  0.28055015,  1.70484698]], dtype=float32)
  ```
  """
  if dtype is None:
    dtype = floatx()
  tf_dtype = dtypes_module.as_dtype(dtype)
  if seed is None:
    # Derive the seed from the Numpy RNG so seeding Numpy also makes
    # these draws reproducible.
    seed = np.random.randint(10e8)
  initializer = init_ops.random_normal_initializer(
      mean, scale, dtype=tf_dtype, seed=seed)
  return variable(initializer(shape), dtype=dtype, name=name)
def count_params(x):
  """Return the static number of elements in a variable or tensor.

  Arguments:
      x: Variable or tensor.

  Returns:
      Integer, the number of scalars in `x`.

  Example:
  ```python
      >>> kvar = K.zeros((2, 3))
      >>> K.count_params(kvar)
      6
  ```
  """
  static_shape = x.get_shape().as_list()
  return np.prod(static_shape)
def cast(x, dtype):
  """Cast a tensor to a different dtype and return it.

  You can cast a Keras variable but it still returns a Keras tensor.
  The cast is not in-place; assign the result.

  Arguments:
      x: Keras tensor (or variable).
      dtype: String, either (`'float16'`, `'float32'`, or `'float64'`).

  Returns:
      Keras tensor with dtype `dtype`.

  Example:
  ```python
      >>> from keras import backend as K
      >>> input = K.placeholder((2, 3), dtype='float32')
      >>> input = K.cast(input, dtype='float16')
      >>> input
      <tf.Tensor 'Cast_2:0' shape=(2, 3) dtype=float16>
  ```
  """
  return math_ops.cast(x, dtype)
# UPDATES OPS
def update(x, new_x):
  """Update the value of `x` to `new_x`.

  Arguments:
      x: A Variable.
      new_x: A tensor of same shape as `x`.

  Returns:
      The variable `x` updated.
  """
  return state_ops.assign(x, new_x)
def update_add(x, increment):
  """Update the value of `x` by adding `increment`.

  Arguments:
      x: A Variable.
      increment: A tensor of same shape as `x`.

  Returns:
      The variable `x` updated.
  """
  return state_ops.assign_add(x, increment)
def update_sub(x, decrement):
  """Update the value of `x` by subtracting `decrement`.

  Arguments:
      x: A Variable.
      decrement: A tensor of same shape as `x`.

  Returns:
      The variable `x` updated.
  """
  return state_ops.assign_sub(x, decrement)
def moving_average_update(x, value, momentum):
  """Compute the moving average of a variable.

  Arguments:
      x: A Variable.
      value: A tensor with the same shape as `x`.
      momentum: The moving average momentum.

  Returns:
      An Operation to update the variable.
  """
  return moving_averages.assign_moving_average(
      x, value, momentum, zero_debias=False)
# LINEAR ALGEBRA
def dot(x, y):
  """Multiplies 2 tensors (and/or variables) and returns a *tensor*.

  When attempting to multiply a nD tensor
  with a nD tensor, it reproduces the Theano behavior.
  (e.g. `(2, 3) * (4, 3, 5) -> (2, 4, 5)`)

  Arguments:
      x: Tensor or variable.
      y: Tensor or variable.

  Returns:
      A tensor, dot product of `x` and `y`.

  Examples:
  ```python
      # dot product between tensors
      >>> x = K.placeholder(shape=(2, 3))
      >>> y = K.placeholder(shape=(3, 4))
      >>> K.dot(x, y)
      <tf.Tensor 'MatMul_9:0' shape=(2, 4) dtype=float32>
  ```

  ```python
      # Theano-like behavior example
      >>> x = K.random_uniform_variable(shape=(2, 3), low=0, high=1)
      >>> y = K.ones((4, 3, 5))
      >>> K.int_shape(K.dot(x, y))
      (2, 4, 5)
  ```
  """
  # Theano-style path for inputs of rank > 2: flatten both operands to 2-D,
  # matmul, then restore the combined output shape.
  if ndim(x) is not None and (ndim(x) > 2 or ndim(y) > 2):
    # Build a shape that mixes static ints (where known) with dynamic
    # dimension tensors (where not), for each operand.
    x_shape = []
    for i, s in zip(int_shape(x), array_ops.unstack(array_ops.shape(x))):
      if i is not None:
        x_shape.append(i)
      else:
        x_shape.append(s)
    x_shape = tuple(x_shape)
    y_shape = []
    for i, s in zip(int_shape(y), array_ops.unstack(array_ops.shape(y))):
      if i is not None:
        y_shape.append(i)
      else:
        y_shape.append(s)
    y_shape = tuple(y_shape)
    # Move y's second-to-last axis (the contraction axis) to the front so
    # the flattened matmul contracts over it.
    y_permute_dim = list(range(ndim(y)))
    y_permute_dim = [y_permute_dim.pop(-2)] + y_permute_dim
    xt = array_ops.reshape(x, [-1, x_shape[-1]])
    yt = array_ops.reshape(
        array_ops.transpose(y, perm=y_permute_dim), [y_shape[-2], -1])
    # Output shape: x's leading dims + y's dims minus the contracted axis.
    return array_ops.reshape(
        math_ops.matmul(xt, yt), x_shape[:-1] + y_shape[:-2] + y_shape[-1:])
  # Rank <= 2: plain (possibly sparse) matrix multiplication.
  if is_sparse(x):
    out = sparse_ops.sparse_tensor_dense_matmul(x, y)
  else:
    out = math_ops.matmul(x, y)
  return out
def batch_dot(x, y, axes=None):
  """Batchwise dot product.

  `batch_dot` is used to compute dot product of `x` and `y` when
  `x` and `y` are data in batch, i.e. in a shape of
  `(batch_size, :)`.
  `batch_dot` results in a tensor or variable with less dimensions
  than the input. If the number of dimensions is reduced to 1,
  we use `expand_dims` to make sure that ndim is at least 2.

  Arguments:
      x: Keras tensor or variable with `ndim >= 2`.
      y: Keras tensor or variable with `ndim >= 2`.
      axes: list of (or single) int with target dimensions.
          The lengths of `axes[0]` and `axes[1]` should be the same.

  Returns:
      A tensor with shape equal to the concatenation of `x`'s shape
      (less the dimension that was summed over) and `y`'s shape
      (less the batch dimension and the dimension that was summed over).
      If the final rank is 1, we reshape it to `(batch_size, 1)`.

  Examples:
      Assume `x = [[1, 2], [3, 4]]` and `y = [[5, 6], [7, 8]]`
      `batch_dot(x, y, axes=1) = [[17, 53]]` which is the main diagonal
      of `x.dot(y.T)`, although we never have to calculate the off-diagonal
      elements.

      Shape inference:
      Let `x`'s shape be `(100, 20)` and `y`'s shape be `(100, 30, 20)`.
      If `axes` is (1, 2), to find the output shape of resultant tensor,
      loop through each dimension in `x`'s shape and `y`'s shape:

      * `x.shape[0]` : 100 : append to output shape
      * `x.shape[1]` : 20 : do not append to output shape,
          dimension 1 of `x` has been summed over. (`dot_axes[0]` = 1)
      * `y.shape[0]` : 100 : do not append to output shape,
          always ignore first dimension of `y`
      * `y.shape[1]` : 30 : append to output shape
      * `y.shape[2]` : 20 : do not append to output shape,
          dimension 2 of `y` has been summed over. (`dot_axes[1]` = 2)
      `output_shape` = `(100, 30)`

  ```python
      >>> x_batch = K.ones(shape=(32, 20, 1))
      >>> y_batch = K.ones(shape=(32, 30, 20))
      >>> xy_batch_dot = K.batch_dot(x_batch, y_batch, axes=[1, 2])
      >>> K.int_shape(xy_batch_dot)
      (32, 1, 30)
  ```
  """
  # A single int is used as the contraction axis for both operands.
  if isinstance(axes, int):
    axes = (axes, axes)
  x_ndim = ndim(x)
  y_ndim = ndim(y)
  # Pad the lower-rank operand with trailing singleton dims so both
  # operands have equal rank for batch matmul.
  if x_ndim > y_ndim:
    diff = x_ndim - y_ndim
    y = array_ops.reshape(y,
                          array_ops.concat(
                              [array_ops.shape(y), [1] * (diff)], axis=0))
  elif y_ndim > x_ndim:
    diff = y_ndim - x_ndim
    x = array_ops.reshape(x,
                          array_ops.concat(
                              [array_ops.shape(x), [1] * (diff)], axis=0))
  else:
    diff = 0
  if ndim(x) == 2 and ndim(y) == 2:
    # NOTE(review): this branch indexes `axes` unconditionally, so calling
    # batch_dot on two 2-D inputs with axes=None would raise a TypeError.
    # Callers appear expected to pass explicit axes here — confirm upstream.
    if axes[0] == axes[1]:
      out = math_ops.reduce_sum(math_ops.multiply(x, y), axes[0])
    else:
      out = math_ops.reduce_sum(
          math_ops.multiply(array_ops.transpose(x, [1, 0]), y), axes[1])
  else:
    # Use batched matmul; `adjoint_a`/`adjoint_b` transpose the operand
    # whose contraction axis is not already the innermost one.
    if axes is not None:
      adj_x = None if axes[0] == ndim(x) - 1 else True
      adj_y = True if axes[1] == ndim(y) - 1 else None
    else:
      adj_x = None
      adj_y = None
    out = math_ops.matmul(x, y, adjoint_a=adj_x, adjoint_b=adj_y)
  # Drop the singleton dims that were padded on above.
  if diff:
    if x_ndim > y_ndim:
      idx = x_ndim + y_ndim - 3
    else:
      idx = x_ndim - 1
    out = array_ops.squeeze(out, list(range(idx, idx + diff)))
  # Guarantee at least rank 2 so the batch dimension is preserved.
  if ndim(out) == 1:
    out = expand_dims(out, 1)
  return out
def transpose(x):
  """Transpose a tensor and return it.

  Arguments:
      x: Tensor or variable.

  Returns:
      A tensor.

  Examples:
  ```python
      >>> var = K.variable([[1, 2, 3], [4, 5, 6]])
      >>> K.eval(K.transpose(var))
      array([[ 1.,  4.],
             [ 2.,  5.],
             [ 3.,  6.]], dtype=float32)
  ```
  """
  return array_ops.transpose(x)
def gather(reference, indices):
  """Retrieve the elements of indices `indices` in the tensor `reference`.

  Arguments:
      reference: A tensor.
      indices: An integer tensor of indices.

  Returns:
      A tensor of same type as `reference`.
  """
  return array_ops.gather(reference, indices)
# ELEMENT-WISE OPERATIONS
def max(x, axis=None, keepdims=False):
  """Maximum value in a tensor.

  Arguments:
      x: A tensor or variable.
      axis: An integer, the axis along which to find maximum values.
      keepdims: A boolean. If `False`, the rank of the tensor is reduced
          by 1; if `True`, the reduced dimension is retained with length 1.

  Returns:
      A tensor with maximum values of `x`.
  """
  return math_ops.reduce_max(x, axis=axis, keep_dims=keepdims)
def min(x, axis=None, keepdims=False):
  """Minimum value in a tensor.

  Arguments:
      x: A tensor or variable.
      axis: An integer, the axis along which to find minimum values.
      keepdims: A boolean. If `False`, the rank of the tensor is reduced
          by 1; if `True`, the reduced dimension is retained with length 1.

  Returns:
      A tensor with minimum values of `x`.
  """
  return math_ops.reduce_min(x, axis=axis, keep_dims=keepdims)
def sum(x, axis=None, keepdims=False):
  """Sum of the values in a tensor, alongside the specified axis.

  Arguments:
      x: A tensor or variable.
      axis: An integer, the axis to sum over.
      keepdims: A boolean. If `False`, the rank of the tensor is reduced
          by 1; if `True`, the reduced dimension is retained with length 1.

  Returns:
      A tensor with sum of `x`.
  """
  return math_ops.reduce_sum(x, axis=axis, keep_dims=keepdims)
def prod(x, axis=None, keepdims=False):
  """Multiplies the values of a tensor along the specified axis.

  Arguments:
      x: Tensor or variable.
      axis: Integer axis to reduce over; `None` reduces all axes.
      keepdims: If `True`, the reduced axis is kept with length 1;
          otherwise the tensor's rank drops by 1.

  Returns:
      A tensor with the product of the elements of `x`.
  """
  return math_ops.reduce_prod(x, axis=axis, keep_dims=keepdims)
def cumsum(x, axis=0):
  """Computes the cumulative sum of a tensor along an axis.

  Arguments:
      x: Tensor or variable.
      axis: Integer axis along which the running sum is taken.

  Returns:
      A tensor of the cumulative sums of `x` along `axis`.
  """
  return math_ops.cumsum(x, axis=axis)
def cumprod(x, axis=0):
  """Computes the cumulative product of a tensor along an axis.

  Arguments:
      x: Tensor or variable.
      axis: Integer axis along which the running product is taken.

  Returns:
      A tensor of the cumulative products of `x` along `axis`.
  """
  return math_ops.cumprod(x, axis=axis)
def var(x, axis=None, keepdims=False):
  """Computes the variance of a tensor along the given axis.

  Arguments:
      x: Tensor or variable.
      axis: Integer axis to reduce over; `None` reduces all axes.
      keepdims: If `True`, the reduced axis is kept with length 1;
          otherwise the tensor's rank drops by 1.

  Returns:
      A tensor with the variance of the elements of `x`.
  """
  # Boolean tensors are averaged as floats.
  if x.dtype.base_dtype == dtypes_module.bool:
    x = math_ops.cast(x, floatx())
  # Mean is kept with full rank so it broadcasts against `x`.
  mean_val = math_ops.reduce_mean(x, axis=axis, keep_dims=True)
  squared_deviations = math_ops.square(x - mean_val)
  return math_ops.reduce_mean(
      squared_deviations, axis=axis, keep_dims=keepdims)
def std(x, axis=None, keepdims=False):
  """Computes the standard deviation of a tensor along the given axis.

  Defined as the square root of the variance computed by `var`.

  Arguments:
      x: Tensor or variable.
      axis: Integer axis to reduce over; `None` reduces all axes.
      keepdims: If `True`, the reduced axis is kept with length 1;
          otherwise the tensor's rank drops by 1.

  Returns:
      A tensor with the standard deviation of the elements of `x`.
  """
  variance = var(x, axis=axis, keepdims=keepdims)
  return math_ops.sqrt(variance)
def mean(x, axis=None, keepdims=False):
  """Computes the mean of a tensor along the specified axes.

  Arguments:
      x: Tensor or variable.
      axis: Integer or list of integers, the axes to average over.
      keepdims: If `True`, each reduced axis is kept with length 1;
          otherwise the rank drops by 1 per entry in `axis`.

  Returns:
      A tensor with the mean of the elements of `x`.
  """
  # Boolean tensors are averaged as floats.
  if x.dtype.base_dtype == dtypes_module.bool:
    x = math_ops.cast(x, floatx())
  return math_ops.reduce_mean(x, axis=axis, keep_dims=keepdims)
def any(x, axis=None, keepdims=False):
  """Bitwise reduction (logical OR).

  Arguments:
      x: Tensor or variable.
      axis: axis along which to perform the reduction.
      keepdims: whether the drop or broadcast the reduction axes.

  Returns:
      A bool tensor.
  """
  # Cast to bool first so the OR-reduction is well-defined for numeric
  # inputs; the result dtype is bool (the previous docstring's "uint8"
  # claim was wrong — reduce_any returns bool).
  x = math_ops.cast(x, dtypes_module.bool)
  return math_ops.reduce_any(x, axis=axis, keep_dims=keepdims)
def all(x, axis=None, keepdims=False):
  """Bitwise reduction (logical AND).

  Arguments:
      x: Tensor or variable.
      axis: axis along which to perform the reduction.
      keepdims: whether the drop or broadcast the reduction axes.

  Returns:
      A bool tensor.
  """
  # Cast to bool first so the AND-reduction is well-defined for numeric
  # inputs; the result dtype is bool (the previous docstring's "uint8"
  # claim was wrong — reduce_all returns bool).
  x = math_ops.cast(x, dtypes_module.bool)
  return math_ops.reduce_all(x, axis=axis, keep_dims=keepdims)
def argmax(x, axis=-1):
  """Finds the index of the maximum value along an axis.

  Arguments:
      x: Tensor or variable.
      axis: Axis along which to perform the reduction.

  Returns:
      An integer tensor of indices.
  """
  return math_ops.argmax(x, axis)
def argmin(x, axis=-1):
  """Finds the index of the minimum value along an axis.

  Arguments:
      x: Tensor or variable.
      axis: Axis along which to perform the reduction.

  Returns:
      An integer tensor of indices.
  """
  return math_ops.argmin(x, axis)
def square(x):
  """Squares a tensor element-wise.

  Arguments:
      x: Tensor or variable.

  Returns:
      A tensor of the same shape with each element squared.
  """
  return math_ops.square(x)
def abs(x):
  """Takes the absolute value of a tensor element-wise.

  Arguments:
      x: Tensor or variable.

  Returns:
      A tensor of the same shape with non-negative elements.
  """
  return math_ops.abs(x)
def sqrt(x):
  """Element-wise square root, with negative inputs clipped to zero.

  Arguments:
      x: Tensor or variable.

  Returns:
      A tensor.
  """
  # Clamp to [0, inf) first so that sqrt never receives a negative
  # value (which would yield NaN).
  lower = _to_tensor(0., x.dtype.base_dtype)
  upper = _to_tensor(np.inf, x.dtype.base_dtype)
  clipped = clip_ops.clip_by_value(x, lower, upper)
  return math_ops.sqrt(clipped)
def exp(x):
  """Exponentiates a tensor element-wise (e**x).

  Arguments:
      x: Tensor or variable.

  Returns:
      A tensor.
  """
  return math_ops.exp(x)
def log(x):
  """Takes the natural logarithm of a tensor element-wise.

  Arguments:
      x: Tensor or variable.

  Returns:
      A tensor.
  """
  return math_ops.log(x)
def logsumexp(x, axis=None, keepdims=False):
  """Computes log(sum(exp(elements across dimensions of a tensor))).

  Numerically safer than the naive `log(sum(exp(x)))`: it avoids
  overflow from exponentiating large values and underflow from taking
  the log of tiny ones.

  Arguments:
      x: Tensor or variable.
      axis: Integer axis to reduce over; `None` reduces all axes.
      keepdims: If `True`, the reduced axis is kept with length 1;
          otherwise the tensor's rank drops by 1.

  Returns:
      The reduced tensor.
  """
  return math_ops.reduce_logsumexp(x, axis=axis, keep_dims=keepdims)
def round(x):
  """Rounds a tensor to the closest integer, element-wise.

  Ties are resolved with the "half to even" (banker's rounding) rule.

  Arguments:
      x: Tensor or variable.

  Returns:
      A tensor.
  """
  return math_ops.round(x)
def sign(x):
  """Takes the sign of a tensor element-wise.

  Arguments:
      x: Tensor or variable.

  Returns:
      A tensor.
  """
  return math_ops.sign(x)
def pow(x, a):
  """Raises a tensor to a power, element-wise.

  Arguments:
      x: Tensor or variable.
      a: Python integer, the exponent.

  Returns:
      A tensor.
  """
  return math_ops.pow(x, a)
def clip(x, min_value, max_value):
  """Element-wise value clipping.

  Clips tensor values to the closed interval `[min_value, max_value]`.

  Arguments:
      x: Tensor or variable.
      min_value: Python float or integer, or `None` for no lower bound.
      max_value: Python float or integer, or `None` for no upper bound.

  Returns:
      A tensor.
  """
  # Treat a missing bound as infinite on that side. Previously only
  # `max_value=None` was handled; `min_value=None` crashed (a
  # `None < number` TypeError under Python 3, or `_to_tensor(None)`).
  if min_value is None:
    min_value = -np.inf
  if max_value is None:
    max_value = np.inf
  # An inverted interval collapses to the lower bound, as before.
  if max_value < min_value:
    max_value = min_value
  min_value = _to_tensor(min_value, x.dtype.base_dtype)
  max_value = _to_tensor(max_value, x.dtype.base_dtype)
  return clip_ops.clip_by_value(x, min_value, max_value)
def equal(x, y):
  """Tests element-wise equality of two tensors.

  Arguments:
      x: Tensor or variable.
      y: Tensor or variable.

  Returns:
      A bool tensor.
  """
  return math_ops.equal(x, y)
def not_equal(x, y):
  """Tests element-wise inequality of two tensors.

  Arguments:
      x: Tensor or variable.
      y: Tensor or variable.

  Returns:
      A bool tensor.
  """
  return math_ops.not_equal(x, y)
def greater(x, y):
  """Computes the element-wise truth value of (x > y).

  Arguments:
      x: Tensor or variable.
      y: Tensor or variable.

  Returns:
      A bool tensor.
  """
  return math_ops.greater(x, y)
def greater_equal(x, y):
  """Computes the element-wise truth value of (x >= y).

  Arguments:
      x: Tensor or variable.
      y: Tensor or variable.

  Returns:
      A bool tensor.
  """
  return math_ops.greater_equal(x, y)
def less(x, y):
  """Computes the element-wise truth value of (x < y).

  Arguments:
      x: Tensor or variable.
      y: Tensor or variable.

  Returns:
      A bool tensor.
  """
  return math_ops.less(x, y)
def less_equal(x, y):
  """Computes the element-wise truth value of (x <= y).

  Arguments:
      x: Tensor or variable.
      y: Tensor or variable.

  Returns:
      A bool tensor.
  """
  return math_ops.less_equal(x, y)
def maximum(x, y):
  """Takes the element-wise maximum of two tensors.

  Arguments:
      x: Tensor or variable.
      y: Tensor or variable.

  Returns:
      A tensor.
  """
  return math_ops.maximum(x, y)
def minimum(x, y):
  """Takes the element-wise minimum of two tensors.

  Arguments:
      x: Tensor or variable.
      y: Tensor or variable.

  Returns:
      A tensor.
  """
  return math_ops.minimum(x, y)
def sin(x):
  """Takes the sine of a tensor element-wise.

  Arguments:
      x: Tensor or variable.

  Returns:
      A tensor.
  """
  return math_ops.sin(x)
def cos(x):
  """Takes the cosine of a tensor element-wise.

  Arguments:
      x: Tensor or variable.

  Returns:
      A tensor.
  """
  return math_ops.cos(x)
def normalize_batch_in_training(x, gamma, beta, reduction_axes, epsilon=1e-3):
  """Computes mean and std for batch then apply batch_normalization on batch.

  Arguments:
      x: Input tensor or variable.
      gamma: Tensor by which to scale the input.
      beta: Tensor with which to center the input.
      reduction_axes: iterable of integers,
          axes over which to normalize.
      epsilon: Fuzz factor.

  Returns:
      A tuple length of 3, `(normalized_tensor, mean, variance)`.
  """
  # Batch statistics are computed over `reduction_axes` (not kept), so
  # mean/var have the rank of the remaining axes.
  mean, var = nn.moments(
      x, reduction_axes, shift=None, name=None, keep_dims=False)
  if sorted(reduction_axes) == list(range(ndim(x)))[:-1]:
    # All axes except the last are reduced: mean/var already line up
    # with the last axis of `x`, so no explicit broadcasting is needed.
    normed = nn.batch_normalization(x, mean, var, beta, gamma, epsilon)
  else:
    # need broadcasting
    # Build a shape with size 1 on each reduced axis and the dynamic
    # size of `x` elsewhere, then reshape the statistics (and gamma/
    # beta when provided) so they broadcast against `x`.
    target_shape = []
    for axis in range(ndim(x)):
      if axis in reduction_axes:
        target_shape.append(1)
      else:
        target_shape.append(array_ops.shape(x)[axis])
    target_shape = array_ops.stack(target_shape)

    broadcast_mean = array_ops.reshape(mean, target_shape)
    broadcast_var = array_ops.reshape(var, target_shape)
    if gamma is None:
      broadcast_gamma = None
    else:
      broadcast_gamma = array_ops.reshape(gamma, target_shape)
    if beta is None:
      broadcast_beta = None
    else:
      broadcast_beta = array_ops.reshape(beta, target_shape)
    normed = nn.batch_normalization(x, broadcast_mean, broadcast_var,
                                    broadcast_beta, broadcast_gamma, epsilon)
  return normed, mean, var
def batch_normalization(x, mean, var, beta, gamma, epsilon=1e-3):
  """Applies batch normalization on x given mean, var, beta and gamma.

  Computes:
  `output = (x - mean) / (sqrt(var) + epsilon) * gamma + beta`

  Arguments:
      x: Input tensor or variable.
      mean: Mean of batch.
      var: Variance of batch.
      beta: Tensor with which to center the input.
      gamma: Tensor by which to scale the input.
      epsilon: Fuzz factor.

  Returns:
      A tensor.
  """
  return nn.batch_normalization(x, mean, var, beta, gamma, epsilon)
# SHAPE OPERATIONS
def concatenate(tensors, axis=-1):
  """Concatenates a list of tensors alongside the specified axis.

  Arguments:
      tensors: list of tensors to concatenate.
      axis: concatenation axis.

  Returns:
      A tensor.
  """
  # Normalize a negative axis against the rank of the first tensor
  # (scalars fall back to axis 0).
  if axis < 0:
    rank = ndim(tensors[0])
    axis = axis % rank if rank else 0

  # Concatenate sparsely only when every input is sparse; otherwise
  # densify all inputs first.
  if py_all(is_sparse(t) for t in tensors):
    return sparse_ops.sparse_concat(axis, tensors)
  dense_tensors = [to_dense(t) for t in tensors]
  return array_ops.concat(dense_tensors, axis)
def reshape(x, shape):
  """Reshapes a tensor to the specified shape.

  Arguments:
      x: Tensor or variable.
      shape: Target shape tuple.

  Returns:
      A tensor.
  """
  return array_ops.reshape(x, shape)
def permute_dimensions(x, pattern):
  """Reorders the axes of a tensor.

  Arguments:
      x: Tensor or variable.
      pattern: A tuple of dimension indices, e.g. `(0, 2, 1)`.

  Returns:
      A tensor.
  """
  return array_ops.transpose(x, perm=pattern)
def resize_images(x, height_factor, width_factor, data_format):
  """Resizes the images contained in a 4D tensor.

  Arguments:
      x: Tensor or variable to resize.
      height_factor: Positive integer.
      width_factor: Positive integer.
      data_format: One of `"channels_first"`, `"channels_last"`.

  Returns:
      A tensor.

  Raises:
      ValueError: if `data_format` is neither
          `channels_last` or `channels_first`.
  """
  if data_format == 'channels_first':
    original_shape = int_shape(x)
    # Dynamic (height, width) scaled by the integer factors.
    new_shape = array_ops.shape(x)[2:]
    new_shape *= constant_op.constant(
        np.array([height_factor, width_factor]).astype('int32'))
    # resize_nearest_neighbor expects channels_last, so transpose to
    # NHWC, resize, then transpose back to NCHW.
    x = permute_dimensions(x, [0, 2, 3, 1])
    x = image_ops.resize_nearest_neighbor(x, new_shape)
    x = permute_dimensions(x, [0, 3, 1, 2])
    # Restore static shape info where known; unknown dims stay None.
    x.set_shape((None, None, original_shape[2] * height_factor
                 if original_shape[2] is not None else None,
                 original_shape[3] * width_factor
                 if original_shape[3] is not None else None))
    return x
  elif data_format == 'channels_last':
    original_shape = int_shape(x)
    # Dynamic (height, width) scaled by the integer factors.
    new_shape = array_ops.shape(x)[1:3]
    new_shape *= constant_op.constant(
        np.array([height_factor, width_factor]).astype('int32'))
    x = image_ops.resize_nearest_neighbor(x, new_shape)
    # Restore static shape info where known; unknown dims stay None.
    x.set_shape((None, original_shape[1] * height_factor
                 if original_shape[1] is not None else None,
                 original_shape[2] * width_factor
                 if original_shape[2] is not None else None, None))
    return x
  else:
    raise ValueError('Invalid data_format:', data_format)
def resize_volumes(x, depth_factor, height_factor, width_factor, data_format):
  """Resizes the volume contained in a 5D tensor.

  Each spatial axis is upsampled by repeating its elements the
  corresponding integer number of times.

  Arguments:
      x: Tensor or variable to resize.
      depth_factor: Positive integer.
      height_factor: Positive integer.
      width_factor: Positive integer.
      data_format: One of `"channels_first"`, `"channels_last"`.

  Returns:
      A tensor.

  Raises:
      ValueError: if `data_format` is neither
          `channels_last` or `channels_first`.
  """
  # The three spatial axes are consecutive; only their starting index
  # depends on the data format.
  if data_format == 'channels_first':
    depth_axis = 2
  elif data_format == 'channels_last':
    depth_axis = 1
  else:
    raise ValueError('Invalid data_format:', data_format)
  resized = repeat_elements(x, depth_factor, axis=depth_axis)
  resized = repeat_elements(resized, height_factor, axis=depth_axis + 1)
  return repeat_elements(resized, width_factor, axis=depth_axis + 2)
def repeat_elements(x, rep, axis):
  """Repeats the elements of a tensor along an axis, like `np.repeat`.

  If `x` has shape `(s1, s2, s3)` and `axis` is `1`, the output
  will have shape `(s1, s2 * rep, s3)`.

  Arguments:
      x: Tensor or variable.
      rep: Python integer, number of times to repeat.
      axis: Axis along which to repeat.

  Returns:
      A tensor.
  """
  x_shape = x.get_shape().as_list()
  # For static axis
  if x_shape[axis] is not None:
    # slices along the repeat axis
    splits = array_ops.split(value=x,
                             num_or_size_splits=x_shape[axis],
                             axis=axis)
    # repeat each slice the given number of reps
    x_rep = [s for s in splits for _ in range(rep)]
    return concatenate(x_rep, axis)

  # Here we use tf.tile to mimic behavior of np.repeat so that
  # we can handle dynamic shapes (that include None).
  # To do that, we need an auxiliary axis to repeat elements along
  # it and then merge them along the desired axis.

  # Repeating
  auxiliary_axis = axis + 1
  x_shape = array_ops.shape(x)
  x_rep = array_ops.expand_dims(x, axis=auxiliary_axis)
  # Tile `rep` times along the new auxiliary axis only.
  reps = np.ones(len(x.get_shape()) + 1)
  reps[auxiliary_axis] = rep
  x_rep = array_ops.tile(x_rep, reps)

  # Merging
  # Collapse the auxiliary axis back into `axis`, whose dynamic size
  # grows by a factor of `rep`.
  reps = np.delete(reps, auxiliary_axis)
  reps[axis] = rep
  reps = array_ops.constant(reps, dtype='int32')
  x_shape *= reps
  x_rep = array_ops.reshape(x_rep, x_shape)

  # Fix shape representation
  # Re-attach the (partially unknown) static shape lost by reshape.
  x_shape = x.get_shape().as_list()
  x_rep.set_shape(x_shape)
  x_rep._keras_shape = tuple(x_shape)
  return x_rep
def repeat(x, n):
  """Repeats a 2D tensor along a new middle axis.

  If `x` has shape `(samples, dim)` and `n` is `2`, the output has
  shape `(samples, 2, dim)`.

  Arguments:
      x: Tensor or variable.
      n: Python integer, number of times to repeat.

  Returns:
      A tensor.
  """
  assert ndim(x) == 2
  expanded = array_ops.expand_dims(x, 1)
  multiples = array_ops.stack([1, n, 1])
  return array_ops.tile(expanded, multiples)
def arange(start, stop=None, step=1, dtype='int32'):
  """Creates a 1D tensor containing a sequence of integers.

  Follows Theano's `arange` convention: with a single argument, that
  argument is the "stop" value. The result defaults to `'int32'` to
  match TensorFlow.

  Arguments:
      start: Start value.
      stop: Stop value.
      step: Difference between two successive values.
      dtype: Integer dtype to use.

  Returns:
      An integer tensor.
  """
  # Match numpy/Theano: a single negative argument yields an empty
  # sequence.
  if stop is None and start < 0:
    start = 0
  seq = math_ops.range(start, limit=stop, delta=step, name='arange')
  return seq if dtype == 'int32' else cast(seq, dtype)
def tile(x, n):
  """Creates a tensor by tiling `x` by `n`.

  Arguments:
      x: A tensor or variable.
      n: A list of integers (or a single integer). The length must
          match the number of dimensions in `x`.

  Returns:
      A tiled tensor.
  """
  multiples = [n] if isinstance(n, int) else n
  return array_ops.tile(x, multiples)
def flatten(x):
  """Collapses a tensor into a single dimension.

  Arguments:
      x: A tensor or variable.

  Returns:
      A 1-D tensor containing all elements of `x`.
  """
  return array_ops.reshape(x, [-1])
def batch_flatten(x):
  """Turns an nD tensor into a 2D tensor with the same 0th dimension.

  In other words, each sample in the batch is flattened.

  Arguments:
      x: A tensor or variable.

  Returns:
      A 2D tensor.
  """
  # The -1 lets TensorFlow infer the batch size dynamically.
  flat_shape = array_ops.stack([-1, prod(shape(x)[1:])])
  return array_ops.reshape(x, flat_shape)
def expand_dims(x, axis=-1):
  """Inserts a size-1 dimension at index `axis`.

  Arguments:
      x: A tensor or variable.
      axis: Position where the new axis is added.

  Returns:
      A tensor with one extra dimension.
  """
  return array_ops.expand_dims(x, axis)
def squeeze(x, axis):
  """Drops the size-1 dimension at index `axis` from a tensor.

  Arguments:
      x: A tensor or variable.
      axis: Axis to drop.

  Returns:
      A tensor with the same data as `x` but one fewer dimension.
  """
  return array_ops.squeeze(x, [axis])
def temporal_padding(x, padding=(1, 1)):
  """Pads the middle (time) dimension of a 3D tensor with zeros.

  Arguments:
      x: Tensor or variable.
      padding: Tuple of 2 integers, how many zeros to add at the
          start and end of dim 1.

  Returns:
      A padded 3D tensor.
  """
  assert len(padding) == 2
  before, after = padding
  pattern = [[0, 0], [before, after], [0, 0]]
  return array_ops.pad(x, pattern)
def spatial_2d_padding(x, padding=((1, 1), (1, 1)), data_format=None):
  """Pads the 2nd and 3rd dimensions of a 4D tensor with zeros.

  Arguments:
      x: Tensor or variable.
      padding: Tuple of 2 tuples, padding pattern.
      data_format: One of `channels_last` or `channels_first`.

  Returns:
      A padded 4D tensor.

  Raises:
      ValueError: if `data_format` is neither
          `channels_last` or `channels_first`.
  """
  assert len(padding) == 2
  assert len(padding[0]) == 2
  assert len(padding[1]) == 2
  if data_format is None:
    data_format = image_data_format()
  if data_format not in {'channels_first', 'channels_last'}:
    raise ValueError('Unknown data_format ' + str(data_format))

  rows_pad = list(padding[0])
  cols_pad = list(padding[1])
  if data_format == 'channels_first':
    # Batch and channel axes receive no padding.
    pattern = [[0, 0], [0, 0], rows_pad, cols_pad]
  else:
    pattern = [[0, 0], rows_pad, cols_pad, [0, 0]]
  return array_ops.pad(x, pattern)
def spatial_3d_padding(x, padding=((1, 1), (1, 1), (1, 1)), data_format=None):
  """Pads a 5D tensor with zeros along its depth, height and width.

  Pads these dimensions with respectively `padding[0]`, `padding[1]`
  and `padding[2]` zeros left and right. For `'channels_last'`
  data_format the 2nd, 3rd and 4th dimensions are padded; for
  `'channels_first'` the 3rd, 4th and 5th.

  Arguments:
      x: Tensor or variable.
      padding: Tuple of 3 tuples, padding pattern.
      data_format: One of `channels_last` or `channels_first`.

  Returns:
      A padded 5D tensor.

  Raises:
      ValueError: if `data_format` is neither
          `channels_last` or `channels_first`.
  """
  assert len(padding) == 3
  assert len(padding[0]) == 2
  assert len(padding[1]) == 2
  assert len(padding[2]) == 2
  if data_format is None:
    data_format = image_data_format()
  if data_format not in {'channels_first', 'channels_last'}:
    raise ValueError('Unknown data_format ' + str(data_format))

  depth_pad = [padding[0][0], padding[0][1]]
  height_pad = [padding[1][0], padding[1][1]]
  width_pad = [padding[2][0], padding[2][1]]
  if data_format == 'channels_first':
    # Batch and channel axes receive no padding.
    pattern = [[0, 0], [0, 0], depth_pad, height_pad, width_pad]
  else:
    pattern = [[0, 0], depth_pad, height_pad, width_pad, [0, 0]]
  return array_ops.pad(x, pattern)
def stack(x, axis=0):
  """Stacks a list of rank `R` tensors into a rank `R+1` tensor.

  Arguments:
      x: List of tensors.
      axis: Axis along which to perform stacking.

  Returns:
      A tensor.
  """
  return array_ops.stack(x, axis=axis)
def one_hot(indices, num_classes):
  """Computes the one-hot representation of an integer tensor.

  Arguments:
      indices: nD integer tensor of shape
          `(batch_size, dim1, dim2, ... dim(n-1))`
      num_classes: Integer, number of classes to consider.

  Returns:
      The (n + 1)D one-hot representation of the input, with shape
      `(batch_size, dim1, dim2, ... dim(n-1), num_classes)`.
  """
  return array_ops.one_hot(indices, depth=num_classes, axis=-1)
def reverse(x, axes):
  """Reverses a tensor along the specified axes.

  Arguments:
      x: Tensor to reverse.
      axes: Integer or iterable of integers. Axes to reverse.

  Returns:
      A tensor.
  """
  axis_list = [axes] if isinstance(axes, int) else axes
  return array_ops.reverse(x, axis_list)
# VALUE MANIPULATION
def get_value(x):
  """Evaluates a variable and returns its value.

  Arguments:
      x: Input variable.

  Returns:
      A Numpy array.
  """
  session = get_session()
  return x.eval(session=session)
def batch_get_value(tensors):
  """Evaluates several tensor variables in one session run.

  Arguments:
      tensors: List of ops to run.

  Returns:
      A list of Numpy arrays.
  """
  if not tensors:
    return []
  return get_session().run(tensors)
def set_value(x, value):
  """Sets the value of a variable from a Numpy array.

  Arguments:
      x: Tensor to set to a new value.
      value: Value to set the tensor to, as a Numpy array
          (of the same shape).
  """
  value = np.asarray(value, dtype=dtype(x))
  tf_dtype = dtypes_module.as_dtype(x.dtype.name.split('_')[0])
  # Cache the placeholder/assign op on the variable so repeated calls
  # do not keep growing the graph.
  if hasattr(x, '_assign_placeholder'):
    placeholder = x._assign_placeholder
    op = x._assign_op
  else:
    placeholder = array_ops.placeholder(tf_dtype, shape=value.shape)
    op = x.assign(placeholder)
    x._assign_placeholder = placeholder
    x._assign_op = op
  get_session().run(op, feed_dict={placeholder: value})
def batch_set_value(tuples):
  """Sets the values of many tensor variables in one session run.

  Arguments:
      tuples: A list of tuples `(tensor, value)`.
          `value` should be a Numpy array.
  """
  if not tuples:
    return
  assign_ops = []
  feed_dict = {}
  for x, value in tuples:
    value = np.asarray(value, dtype=dtype(x))
    tf_dtype = dtypes_module.as_dtype(x.dtype.name.split('_')[0])
    # Cache the placeholder/assign op on each variable so repeated
    # calls do not keep growing the graph.
    if hasattr(x, '_assign_placeholder'):
      placeholder = x._assign_placeholder
      op = x._assign_op
    else:
      placeholder = array_ops.placeholder(tf_dtype, shape=value.shape)
      op = x.assign(placeholder)
      x._assign_placeholder = placeholder
      x._assign_op = op
    assign_ops.append(op)
    feed_dict[placeholder] = value
  get_session().run(assign_ops, feed_dict=feed_dict)
def print_tensor(x, message=''):
  """Prints `message` and the tensor value when evaluated.

  Note that `print_tensor` returns a new tensor identical to `x`
  which should be used in the following code; otherwise the print
  operation is not taken into account during evaluation.

  Example:

  ```python
  >>> x = K.print_tensor(x, message="x is: ")
  ```

  Arguments:
      x: Tensor to print.
      message: Message to print jointly with the tensor.

  Returns:
      The same tensor `x`, unchanged.
  """
  return logging_ops.Print(x, [x], message)
# GRAPH MANIPULATION
class Function(object):
  """Runs a computation graph.

  Arguments:
      inputs: Feed placeholders to the computation graph.
      outputs: Output tensors to fetch.
      updates: Additional update ops to be run at function call.
      name: a name to help users identify what this function does.
  """

  def __init__(self, inputs, outputs, updates=None, name=None,
               **session_kwargs):
    updates = updates or []
    if not isinstance(inputs, (list, tuple)):
      raise TypeError('`inputs` to a TensorFlow backend function '
                      'should be a list or tuple.')
    if not isinstance(outputs, (list, tuple)):
      raise TypeError('`outputs` of a TensorFlow backend function '
                      'should be a list or tuple.')
    if not isinstance(updates, (list, tuple)):
      raise TypeError('`updates` in a TensorFlow backend function '
                      'should be a list or tuple.')
    self.inputs = list(inputs)
    self.outputs = list(outputs)
    # Updates are grouped under a control dependency on the outputs so
    # they only run after all outputs have been computed.
    with ops.control_dependencies(self.outputs):
      updates_ops = []
      for update in updates:
        if isinstance(update, tuple):
          # A (variable, new_value) pair becomes an assign op.
          p, new_p = update
          updates_ops.append(state_ops.assign(p, new_p))
        else:
          # assumed already an op
          updates_ops.append(update)
      self.updates_op = control_flow_ops.group(*updates_ops)
    self.name = name
    # Extra kwargs are forwarded verbatim to `Session.run`.
    self.session_kwargs = session_kwargs

  def __call__(self, inputs):
    if not isinstance(inputs, (list, tuple)):
      raise TypeError('`inputs` should be a list or tuple.')
    feed_dict = {}
    for tensor, value in zip(self.inputs, inputs):
      if is_sparse(tensor):
        # Convert a scipy sparse matrix to the (indices, values, shape)
        # triple that a SparseTensor placeholder expects.
        sparse_coo = value.tocoo()
        indices = np.concatenate((np.expand_dims(sparse_coo.row, 1),
                                  np.expand_dims(sparse_coo.col, 1)), 1)
        value = (indices, sparse_coo.data, sparse_coo.shape)
      feed_dict[tensor] = value
    session = get_session()
    # The updates op is fetched alongside the outputs but its result
    # is discarded below.
    updated = session.run(
        self.outputs + [self.updates_op],
        feed_dict=feed_dict,
        **self.session_kwargs)
    return updated[:len(self.outputs)]
def function(inputs, outputs, updates=None, **kwargs):
  """Instantiates a Keras function.

  Arguments:
      inputs: List of placeholder tensors.
      outputs: List of output tensors.
      updates: List of update ops.
      **kwargs: Passed to `tf.Session.run`.

  Returns:
      Output values as Numpy arrays.

  Raises:
      ValueError: if invalid kwargs are passed in.
  """
  if kwargs:
    # A kwarg is valid if either `Session.run` or `Function.__init__`
    # accepts it.
    valid_keys = set(tf_inspect.getargspec(session_module.Session.run)[0])
    valid_keys.update(tf_inspect.getargspec(Function.__init__)[0])
    for key in kwargs:
      if key not in valid_keys:
        raise ValueError('Invalid argument "%s" passed to K.function '
                         'with Tensorflow backend' % key)
  return Function(inputs, outputs, updates=updates, **kwargs)
def gradients(loss, variables):
  """Computes the gradients of `variables` w.r.t. `loss`.

  Arguments:
      loss: Scalar tensor to minimize.
      variables: List of variables.

  Returns:
      A gradients tensor.
  """
  # Gradient ops are colocated with their corresponding forward ops.
  return gradients_module.gradients(
      loss, variables, colocate_gradients_with_ops=True)
def stop_gradient(variables):
  """Returns `variables` but with zero gradient w.r.t. every other variable.

  Arguments:
      variables: Tensor or list of tensors to consider constant with respect
          to any other variable.

  Returns:
      A single tensor or a list of tensors (depending on the passed argument)
      that has no gradient with respect to any other variable.
  """
  if isinstance(variables, (list, tuple)):
    # Use a list comprehension rather than `map`: under Python 3 `map`
    # returns a lazy iterator, not the list documented above, which
    # breaks callers that index the result or iterate it twice.
    return [array_ops.stop_gradient(v) for v in variables]
  return array_ops.stop_gradient(variables)
# CONTROL FLOW
def rnn(step_function,
        inputs,
        initial_states,
        go_backwards=False,
        mask=None,
        constants=None,
        unroll=False):
  """Iterates over the time dimension of a tensor.

  Arguments:
      step_function: RNN step function.
          Parameters;
              input; tensor with shape `(samples, ...)` (no time dimension),
                  representing input for the batch of samples at a certain
                  time step.
              states; list of tensors.
          Returns;
              output; tensor with shape `(samples, output_dim)`
                  (no time dimension).
              new_states; list of tensors, same length and shapes
                  as 'states'. The first state in the list must be the
                  output tensor at the previous timestep.
      inputs: tensor of temporal data of shape `(samples, time, ...)`
          (at least 3D).
      initial_states: tensor with shape (samples, output_dim)
          (no time dimension),
          containing the initial values for the states used in
          the step function.
      go_backwards: boolean. If True, do the iteration over the time
          dimension in reverse order and return the reversed sequence.
      mask: binary tensor with shape `(samples, time, 1)`,
          with a zero for every element that is masked.
      constants: a list of constant values passed at each step.
      unroll: whether to unroll the RNN or to use a symbolic loop
          (`while_loop` or `scan` depending on backend).

  Returns:
      A tuple, `(last_output, outputs, new_states)`.
          last_output: the latest output of the rnn, of shape `(samples, ...)`
          outputs: tensor with shape `(samples, time, ...)` where each
              entry `outputs[s, t]` is the output of the step function
              at time `t` for sample `s`.
          new_states: list of tensors, latest states returned by
              the step function, of shape `(samples, ...)`.

  Raises:
      ValueError: if input dimension is less than 3.
      ValueError: if `unroll` is `True` but input timestep is not a fixed
          number.
      ValueError: if `mask` is provided (not `None`) but states is not provided
          (`len(states)` == 0).
  """
  ndim = len(inputs.get_shape())
  if ndim < 3:
    raise ValueError('Input should be at least 3D.')

  # Swap the batch and time axes so that iteration runs over time:
  # (samples, time, ...) -> (time, samples, ...).
  axes = [1, 0] + list(range(2, ndim))
  inputs = array_ops.transpose(inputs, (axes))

  if mask is not None:
    if mask.dtype != dtypes_module.bool:
      mask = math_ops.cast(mask, dtypes_module.bool)
    if len(mask.get_shape()) == ndim - 1:
      mask = expand_dims(mask)
    # Time-major, like `inputs`.
    mask = array_ops.transpose(mask, axes)

  if constants is None:
    constants = []

  global uses_learning_phase  # pylint: disable=global-variable-undefined
  uses_learning_phase = False

  if unroll:
    # Static unrolling: build one graph step per timestep, so the
    # number of timesteps must be statically known.
    if not inputs.get_shape()[0]:
      raise ValueError('Unrolling requires a ' 'fixed number of timesteps.')
    states = initial_states
    successive_states = []
    successive_outputs = []

    input_list = array_ops.unstack(inputs)
    if go_backwards:
      input_list.reverse()

    if mask is not None:
      mask_list = array_ops.unstack(mask)
      if go_backwards:
        mask_list.reverse()

      for inp, mask_t in zip(input_list, mask_list):
        output, new_states = step_function(inp, states + constants)
        if getattr(output, '_uses_learning_phase', False):
          uses_learning_phase = True

        # tf.where needs its condition tensor
        # to be the same shape as its two
        # result tensors, but in our case
        # the condition (mask) tensor is
        # (nsamples, 1), and A and B are (nsamples, ndimensions).
        # So we need to
        # broadcast the mask to match the shape of A and B.
        # That's what the tile call does,
        # it just repeats the mask along its second dimension
        # n times.
        tiled_mask_t = array_ops.tile(mask_t,
                                      array_ops.stack(
                                          [1, array_ops.shape(output)[1]]))

        # Masked samples keep the previous timestep's output (zeros at
        # the first timestep).
        if not successive_outputs:
          prev_output = zeros_like(output)
        else:
          prev_output = successive_outputs[-1]

        output = array_ops.where(tiled_mask_t, output, prev_output)

        return_states = []
        for state, new_state in zip(states, new_states):
          # (see earlier comment for tile explanation)
          tiled_mask_t = array_ops.tile(mask_t,
                                        array_ops.stack(
                                            [1,
                                             array_ops.shape(new_state)[1]]))
          return_states.append(array_ops.where(tiled_mask_t, new_state, state))
        states = return_states
        successive_outputs.append(output)
        successive_states.append(states)
      last_output = successive_outputs[-1]
      new_states = successive_states[-1]
      outputs = array_ops.stack(successive_outputs)

    else:
      for inp in input_list:
        output, states = step_function(inp, states + constants)
        if getattr(output, '_uses_learning_phase', False):
          uses_learning_phase = True
        successive_outputs.append(output)
        successive_states.append(states)
      last_output = successive_outputs[-1]
      new_states = successive_states[-1]
      outputs = array_ops.stack(successive_outputs)

  else:
    # Symbolic loop via tf.while_loop: works with a dynamic number of
    # timesteps. Inputs/outputs are shuttled through TensorArrays.
    if go_backwards:
      inputs = reverse(inputs, 0)

    states = tuple(initial_states)

    time_steps = array_ops.shape(inputs)[0]
    # Run one step eagerly only to discover the output dtype for the
    # TensorArray below; its result is discarded.
    outputs, _ = step_function(inputs[0], initial_states + constants)
    output_ta = tensor_array_ops.TensorArray(
        dtype=outputs.dtype, size=time_steps, tensor_array_name='output_ta')
    input_ta = tensor_array_ops.TensorArray(
        dtype=inputs.dtype, size=time_steps, tensor_array_name='input_ta')
    input_ta = input_ta.unstack(inputs)
    time = constant_op.constant(0, dtype='int32', name='time')

    if mask is not None:
      if not states:
        raise ValueError('No initial states provided! '
                         'When using masking in an RNN, you should '
                         'provide initial states '
                         '(and your step function should return '
                         'as its first state at time `t` '
                         'the output at time `t-1`).')
      if go_backwards:
        mask = reverse(mask, 0)

      mask_ta = tensor_array_ops.TensorArray(
          dtype=dtypes_module.bool,
          size=time_steps,
          tensor_array_name='mask_ta')
      mask_ta = mask_ta.unstack(mask)

      def _step(time, output_ta_t, *states):
        """RNN step function (masked variant).

        Arguments:
            time: Current timestep value.
            output_ta_t: TensorArray.
            *states: List of states.

        Returns:
            Tuple: `(time + 1,output_ta_t) + tuple(new_states)`
        """
        current_input = input_ta.read(time)
        mask_t = mask_ta.read(time)
        output, new_states = step_function(current_input,
                                           tuple(states) + tuple(constants))
        if getattr(output, '_uses_learning_phase', False):
          global uses_learning_phase  # pylint: disable=global-variable-undefined
          uses_learning_phase = True
        for state, new_state in zip(states, new_states):
          new_state.set_shape(state.get_shape())
        # Broadcast the (nsamples, 1) mask across the feature axis so
        # tf.where sees matching shapes.
        tiled_mask_t = array_ops.tile(mask_t,
                                      array_ops.stack(
                                          [1, array_ops.shape(output)[1]]))
        # Masked samples carry the previous output/state forward;
        # states[0] is the previous output by contract.
        output = array_ops.where(tiled_mask_t, output, states[0])
        new_states = [
            array_ops.where(tiled_mask_t, new_states[i], states[i])
            for i in range(len(states))
        ]
        output_ta_t = output_ta_t.write(time, output)
        return (time + 1, output_ta_t) + tuple(new_states)
    else:

      def _step(time, output_ta_t, *states):
        """RNN step function (unmasked variant).

        Arguments:
            time: Current timestep value.
            output_ta_t: TensorArray.
            *states: List of states.

        Returns:
            Tuple: `(time + 1,output_ta_t) + tuple(new_states)`
        """
        current_input = input_ta.read(time)
        output, new_states = step_function(current_input,
                                           tuple(states) + tuple(constants))
        if getattr(output, '_uses_learning_phase', False):
          global uses_learning_phase  # pylint: disable=global-variable-undefined
          uses_learning_phase = True
        for state, new_state in zip(states, new_states):
          new_state.set_shape(state.get_shape())
        output_ta_t = output_ta_t.write(time, output)
        return (time + 1, output_ta_t) + tuple(new_states)

    final_outputs = control_flow_ops.while_loop(
        cond=lambda time, *_: time < time_steps,
        body=_step,
        loop_vars=(time, output_ta) + states,
        parallel_iterations=32,
        swap_memory=True)
    last_time = final_outputs[0]
    output_ta = final_outputs[1]
    new_states = final_outputs[2:]

    outputs = output_ta.stack()
    last_output = output_ta.read(last_time - 1)

  # Swap back to batch-major: (time, samples, ...) -> (samples, time, ...).
  axes = [1, 0] + list(range(2, len(outputs.get_shape())))
  outputs = array_ops.transpose(outputs, axes)
  last_output._uses_learning_phase = uses_learning_phase
  return last_output, outputs, new_states
def switch(condition, then_expression, else_expression):
  """Switches between two operations depending on a scalar value.
  Note that both `then_expression` and `else_expression`
  should be symbolic tensors of the *same shape*.
  Arguments:
      condition: tensor (`int` or `bool`).
      then_expression: either a tensor, or a callable that returns a tensor.
      else_expression: either a tensor, or a callable that returns a tensor.
  Returns:
      The selected tensor.
  Raises:
      ValueError: If rank of `condition` is greater than rank of expressions.
  """
  if condition.dtype != dtypes_module.bool:
    condition = math_ops.cast(condition, 'bool')
  cond_ndim = ndim(condition)
  if not cond_ndim:
    # Scalar condition: tf.cond expects callables for both branches, so wrap
    # raw tensors in zero-argument closures.
    if not callable(then_expression):
      def then_expression_fn():
        return then_expression
    else:
      then_expression_fn = then_expression
    if not callable(else_expression):
      def else_expression_fn():
        return else_expression
    else:
      else_expression_fn = else_expression
    x = control_flow_ops.cond(condition, then_expression_fn, else_expression_fn)
  else:
    # tf.where needs its condition tensor
    # to be the same shape as its two
    # result tensors, so the branches must be materialized tensors here.
    if callable(then_expression):
      then_expression = then_expression()
    if callable(else_expression):
      else_expression = else_expression()
    expr_ndim = ndim(then_expression)
    if cond_ndim > expr_ndim:
      raise ValueError('Rank of `condition` should be less than or'
                       ' equal to rank of `then_expression` and '
                       '`else_expression`. ndim(condition)=' + str(cond_ndim) +
                       ', ndim(then_expression)'
                       '=' + str(expr_ndim))
    if cond_ndim > 1:
      # Broadcast `condition` up to the expressions' rank: append size-1
      # trailing dims, then tile each broadcast dim out to the expressions'
      # (dynamic) shape.
      ndim_diff = expr_ndim - cond_ndim
      cond_shape = array_ops.concat(
          [array_ops.shape(condition), [1] * ndim_diff], axis=0)
      condition = array_ops.reshape(condition, cond_shape)
      expr_shape = array_ops.shape(then_expression)
      shape_diff = expr_shape - cond_shape
      tile_shape = array_ops.where(shape_diff > 0, expr_shape,
                                   array_ops.ones_like(expr_shape))
      condition = array_ops.tile(condition, tile_shape)
    x = array_ops.where(condition, then_expression, else_expression)
  return x
def in_train_phase(x, alt, training=None):
  """Selects `x` in train phase, and `alt` otherwise.
  Note that `alt` should have the *same shape* as `x`.
  Arguments:
      x: What to return in train phase
          (tensor or callable that returns a tensor).
      alt: What to return otherwise
          (tensor or callable that returns a tensor).
      training: Optional scalar tensor
          (or Python boolean, or Python integer)
          specifying the learning phase.
  Returns:
      Either `x` or `alt` based on the `training` flag.
      the `training` flag defaults to `K.learning_phase()`.
  """
  if training is None:
    training = learning_phase()
    uses_learning_phase = True
  else:
    uses_learning_phase = False
  # `training` may be a Python bool/int or a symbolic tensor. Equality (`==`)
  # on a tensor would build a graph op rather than yield a Python bool, so
  # only compare after checking for a plain Python bool/int. The original
  # code used `training is 1`, which relied on CPython small-int caching and
  # raises a SyntaxWarning on Python >= 3.8; this form is equivalent for
  # bool/int inputs and still routes tensors to `switch` below.
  if training is True or (isinstance(training, int) and training == 1):
    return x() if callable(x) else x
  elif training is False or (isinstance(training, int) and training == 0):
    return alt() if callable(alt) else alt
  # else: assume learning phase is a placeholder tensor.
  x = switch(training, x, alt)
  if uses_learning_phase:
    x._uses_learning_phase = True
  return x
def in_test_phase(x, alt, training=None):
  """Selects `x` in test phase, and `alt` otherwise.
  Note that `alt` should have the *same shape* as `x`.
  Arguments:
      x: What to return in test phase
          (tensor or callable that returns a tensor).
      alt: What to return otherwise
          (tensor or callable that returns a tensor).
      training: Optional scalar tensor
          (or Python boolean, or Python integer)
          specifying the learning phase.
  Returns:
      Either `x` or `alt` based on `K.learning_phase`.
  """
  # Test phase is just the train phase with the two branches swapped.
  return in_train_phase(alt, x, training=training)
# NN OPERATIONS
def relu(x, alpha=0., max_value=None):
  """Rectified linear unit.
  With default values, it returns element-wise `max(x, 0)`.
  Arguments:
      x: A tensor or variable.
      alpha: A scalar, slope of negative section (default=`0.`).
      max_value: Saturation threshold.
  Returns:
      A tensor.
  """
  has_leak = alpha != 0.
  if has_leak:
    # Capture the magnitude of the negative inputs before they are zeroed,
    # so the leaky slope can be folded back in after clipping.
    negative_part = nn.relu(-x)
  x = nn.relu(x)
  if max_value is not None:
    # Saturate the positive side at `max_value`.
    max_value = _to_tensor(max_value, x.dtype.base_dtype)
    zero = _to_tensor(0., x.dtype.base_dtype)
    x = clip_ops.clip_by_value(x, zero, max_value)
  if has_leak:
    alpha = _to_tensor(alpha, x.dtype.base_dtype)
    x -= alpha * negative_part
  return x
def elu(x, alpha=1.):
  """Exponential linear unit.
  Arguments:
      x: A tensor or variable to compute the activation function for.
      alpha: A scalar, slope of positive section.
  Returns:
      A tensor.
  """
  res = nn.elu(x)
  if alpha != 1:
    # Scale only the negative-side outputs by `alpha`; the positive side of
    # elu is the identity and is left untouched.
    res = array_ops.where(x > 0, res, alpha * res)
  return res
def softmax(x):
  """Softmax of a tensor.
  Arguments:
      x: A tensor or variable.
  Returns:
      A tensor.
  """
  # Thin wrapper delegating to the TensorFlow op.
  output = nn.softmax(x)
  return output
def softplus(x):
  """Softplus of a tensor.
  Arguments:
      x: A tensor or variable.
  Returns:
      A tensor.
  """
  # Thin wrapper delegating to the TensorFlow op.
  output = nn.softplus(x)
  return output
def softsign(x):
  """Softsign of a tensor.
  Arguments:
      x: A tensor or variable.
  Returns:
      A tensor.
  """
  # Thin wrapper delegating to the TensorFlow op.
  output = nn.softsign(x)
  return output
def categorical_crossentropy(target, output, from_logits=False):
  """Categorical crossentropy between an output tensor and a target tensor.
  Arguments:
      target: A tensor of the same shape as `output`.
      output: A tensor resulting from a softmax
          (unless `from_logits` is True, in which
          case `output` is expected to be the logits).
      from_logits: Boolean, whether `output` is the
          result of a softmax, or is a tensor of logits.
  Returns:
      Output tensor.
  """
  # Note: nn.softmax_cross_entropy_with_logits
  # expects logits, Keras expects probabilities.
  if not from_logits:
    # scale preds so that the class probas of each sample sum to 1
    # (classes are assumed to live on the last axis)
    output /= math_ops.reduce_sum(
        output, axis=len(output.get_shape()) - 1, keep_dims=True)
    # manual computation of crossentropy:
    # clip away 0/1 so log() below stays finite, then
    # -sum(target * log(output)) over the class axis.
    epsilon_ = _to_tensor(epsilon(), output.dtype.base_dtype)
    output = clip_ops.clip_by_value(output, epsilon_, 1. - epsilon_)
    return -math_ops.reduce_sum(
        target * math_ops.log(output),
        axis=len(output.get_shape()) - 1)
  else:
    return nn.softmax_cross_entropy_with_logits(labels=target, logits=output)
def sparse_categorical_crossentropy(target, output, from_logits=False):
  """Categorical crossentropy with integer targets.
  Arguments:
      target: An integer tensor.
      output: A tensor resulting from a softmax
          (unless `from_logits` is True, in which
          case `output` is expected to be the logits).
      from_logits: Boolean, whether `output` is the
          result of a softmax, or is a tensor of logits.
  Returns:
      Output tensor.
  """
  # Note: nn.sparse_softmax_cross_entropy_with_logits
  # expects logits, Keras expects probabilities.
  if not from_logits:
    # Convert probabilities back to logits: clip away 0/1 so the log stays
    # finite, then take the log.
    epsilon_ = _to_tensor(epsilon(), output.dtype.base_dtype)
    output = clip_ops.clip_by_value(output, epsilon_, 1 - epsilon_)
    output = math_ops.log(output)
  output_shape = output.get_shape()
  # Flatten to 2D `(num_samples, num_classes)` / 1D targets for the TF op.
  targets = cast(flatten(target), 'int64')
  logits = array_ops.reshape(output, [-1, int(output_shape[-1])])
  res = nn.sparse_softmax_cross_entropy_with_logits(
      labels=targets, logits=logits)
  if len(output_shape) == 3:
    # if our output includes timesteps we need to reshape
    # back to `(batch, timesteps)`.
    return array_ops.reshape(res, array_ops.shape(output)[:-1])
  else:
    return res
def binary_crossentropy(target, output, from_logits=False):
  """Binary crossentropy between an output tensor and a target tensor.
  Arguments:
      target: A tensor with the same shape as `output`.
      output: A tensor.
      from_logits: Whether `output` is expected to be a logits tensor.
          By default, we consider that `output`
          encodes a probability distribution.
  Returns:
      A tensor.
  """
  # nn.sigmoid_cross_entropy_with_logits expects logits; Keras works with
  # probabilities by default.
  if from_logits:
    return nn.sigmoid_cross_entropy_with_logits(labels=target, logits=output)
  # Recover logits from probabilities via the inverse sigmoid (logit
  # function), clipping away 0/1 so the log stays finite.
  epsilon_ = _to_tensor(epsilon(), output.dtype.base_dtype)
  output = clip_ops.clip_by_value(output, epsilon_, 1 - epsilon_)
  output = math_ops.log(output / (1 - output))
  return nn.sigmoid_cross_entropy_with_logits(labels=target, logits=output)
def sigmoid(x):
  """Element-wise sigmoid.
  Arguments:
      x: A tensor or variable.
  Returns:
      A tensor.
  """
  # Thin wrapper delegating to the TensorFlow op.
  output = nn.sigmoid(x)
  return output
def hard_sigmoid(x):
  """Segment-wise linear approximation of sigmoid.
  Faster than sigmoid.
  Returns `0.` if `x < -2.5`, `1.` if `x > 2.5`.
  In `-2.5 <= x <= 2.5`, returns `0.2 * x + 0.5`.
  Arguments:
      x: A tensor or variable.
  Returns:
      A tensor.
  """
  # Linear ramp through (0, 0.5), then clamp into [0, 1].
  linear = (0.2 * x) + 0.5
  lower = _to_tensor(0., linear.dtype.base_dtype)
  upper = _to_tensor(1., linear.dtype.base_dtype)
  return clip_ops.clip_by_value(linear, lower, upper)
def tanh(x):
  """Element-wise tanh.
  Arguments:
      x: A tensor or variable.
  Returns:
      A tensor.
  """
  # Thin wrapper delegating to the TensorFlow op.
  output = nn.tanh(x)
  return output
def dropout(x, level, noise_shape=None, seed=None):
  """Sets entries in `x` to zero at random, while scaling the entire tensor.
  Arguments:
      x: tensor
      level: fraction of the entries in the tensor
          that will be set to 0.
      noise_shape: shape for randomly generated keep/drop flags,
          must be broadcastable to the shape of `x`
      seed: random seed to ensure determinism.
  Returns:
      A tensor.
  """
  if seed is None:
    seed = np.random.randint(10e6)
  # nn.dropout takes the probability of *keeping* an entry.
  keep_prob = 1. - level
  # the dummy 1. works around a TF bug
  # (float32_ref vs. float32 incompatibility)
  return nn.dropout(x * 1., keep_prob, noise_shape, seed=seed)
def l2_normalize(x, axis=None):
  """Normalizes a tensor wrt the L2 norm alongside the specified axis.
  Arguments:
      x: Tensor or variable.
      axis: axis along which to perform normalization.
  Returns:
      A tensor.
  """
  # TF's `dim` keyword corresponds to Keras' `axis`.
  normalized = nn.l2_normalize(x, dim=axis)
  return normalized
def in_top_k(predictions, targets, k):
  """Returns whether the `targets` are in the top `k` `predictions`.
  Arguments:
      predictions: A tensor of shape `(batch_size, classes)` and type `float32`.
      targets: A 1D tensor of length `batch_size` and type `int32` or `int64`.
      k: An `int`, number of top elements to consider.
  Returns:
      A 1D tensor of length `batch_size` and type `bool`.
      `output[i]` is `True` if `predictions[i, targets[i]]` is within top-`k`
      values of `predictions[i]`.
  """
  # Thin wrapper delegating to the TensorFlow op.
  result = nn.in_top_k(predictions, targets, k)
  return result
# CONVOLUTIONS
def _preprocess_conv2d_input(x, data_format):
  """Transpose and cast the input before the conv2d.
  Arguments:
      x: input tensor.
      data_format: string, `"channels_last"` or `"channels_first"`.
  Returns:
      A tensor.
  """
  if data_format != 'channels_first':
    return x, 'NHWC'
  if _has_nchw_support():
    # The runtime has native NCHW kernels; no transpose needed.
    return x, 'NCHW'
  # No NCHW support: fall back by transposing to NHWC.
  return array_ops.transpose(x, (0, 2, 3, 1)), 'NHWC'  # NCHW -> NHWC
def _preprocess_conv3d_input(x, data_format):
  """Transpose and cast the input before the conv3d.
  Arguments:
      x: input tensor.
      data_format: string, `"channels_last"` or `"channels_first"`.
  Returns:
      A tensor.
  """
  if data_format != 'channels_first':
    return x, 'NDHWC'
  if _has_nchw_support():
    # The runtime has native channels-first kernels; no transpose needed.
    return x, 'NCDHW'
  # No channels-first support: fall back by transposing to NDHWC.
  return array_ops.transpose(x, (0, 2, 3, 4, 1)), 'NDHWC'
def _preprocess_padding(padding):
"""Convert keras' padding to tensorflow's padding.
Arguments:
padding: string, one of 'same' , 'valid'
Returns:
a string, one of 'SAME', 'VALID'.
Raises:
ValueError: if invalid `padding'`
"""
if padding == 'same':
padding = 'SAME'
elif padding == 'valid':
padding = 'VALID'
else:
raise ValueError('Invalid padding:', padding)
return padding
def conv1d(x,
           kernel,
           strides=1,
           padding='valid',
           data_format=None,
           dilation_rate=1):
  """1D convolution.
  Arguments:
      x: Tensor or variable.
      kernel: kernel tensor.
      strides: stride integer.
      padding: string, `"same"`, `"causal"` or `"valid"`.
      data_format: string, one of "channels_last", "channels_first".
      dilation_rate: integer dilate rate.
  Returns:
      A tensor, result of 1D convolution.
  Raises:
      ValueError: if `data_format` is neither `channels_last` or
          `channels_first`.
  """
  if data_format is None:
    data_format = image_data_format()
  if data_format not in {'channels_first', 'channels_last'}:
    raise ValueError('Unknown data_format ' + str(data_format))
  kernel_shape = kernel.get_shape().as_list()
  if padding == 'causal':
    # causal (dilated) convolution:
    # left-pad by (kernel_size - 1) * dilation so each output step only sees
    # current and past inputs, then run an ordinary 'valid' convolution.
    left_pad = dilation_rate * (kernel_shape[0] - 1)
    x = temporal_padding(x, (left_pad, 0))
    padding = 'valid'
  padding = _preprocess_padding(padding)
  if data_format == 'channels_last':
    tf_data_format = 'NWC'
  else:
    tf_data_format = 'NCW'
  # nn.convolution takes tuples for strides/dilation even in the 1D case.
  x = nn.convolution(
      input=x,
      filter=kernel,
      dilation_rate=(dilation_rate,),
      strides=(strides,),
      padding=padding,
      data_format=tf_data_format)
  return x
def conv2d(x,
           kernel,
           strides=(1, 1),
           padding='valid',
           data_format=None,
           dilation_rate=(1, 1)):
  """2D convolution.
  Arguments:
      x: Tensor or variable.
      kernel: kernel tensor.
      strides: strides tuple.
      padding: string, `"same"` or `"valid"`.
      data_format: `"channels_last"` or `"channels_first"`.
          Whether to use Theano or TensorFlow data format
          for inputs/kernels/outputs.
      dilation_rate: tuple of 2 integers.
  Returns:
      A tensor, result of 2D convolution.
  Raises:
      ValueError: if `data_format` is neither `channels_last` or
          `channels_first`.
  """
  if data_format is None:
    data_format = image_data_format()
  if data_format not in {'channels_first', 'channels_last'}:
    raise ValueError('Unknown data_format ' + str(data_format))
  x, tf_data_format = _preprocess_conv2d_input(x, data_format)
  out = nn.convolution(
      input=x,
      filter=kernel,
      dilation_rate=dilation_rate,
      strides=strides,
      padding=_preprocess_padding(padding),
      data_format=tf_data_format)
  if data_format == 'channels_first' and tf_data_format == 'NHWC':
    # Convolution ran in the NHWC fallback; restore channels_first layout.
    out = array_ops.transpose(out, (0, 3, 1, 2))  # NHWC -> NCHW
  return out
def conv2d_transpose(x,
                     kernel,
                     output_shape,
                     strides=(1, 1),
                     padding='valid',
                     data_format=None):
  """2D deconvolution (i.e.
  transposed convolution).
  Arguments:
      x: Tensor or variable.
      kernel: kernel tensor.
      output_shape: 1D int tensor for the output shape.
      strides: strides tuple.
      padding: string, `"same"` or `"valid"`.
      data_format: string, `"channels_last"` or `"channels_first"`.
          Whether to use Theano or TensorFlow/CNTK data format
          for inputs/kernels/outputs.
  Returns:
      A tensor, result of transposed 2D convolution.
  Raises:
      ValueError: if `data_format` is neither `channels_last` or
          `channels_first`.
  """
  if data_format is None:
    data_format = image_data_format()
  if data_format not in {'channels_first', 'channels_last'}:
    raise ValueError('Unknown data_format ' + str(data_format))
  if isinstance(output_shape, (tuple, list)):
    output_shape = array_ops.stack(output_shape)
  x, tf_data_format = _preprocess_conv2d_input(x, data_format)
  if data_format == 'channels_first' and tf_data_format == 'NHWC':
    # Input was transposed to the NHWC fallback, so the requested output
    # shape must be rearranged from NCHW to NHWC too.
    output_shape = (output_shape[0], output_shape[2], output_shape[3],
                    output_shape[1])
  if output_shape[0] is None:
    # Unknown batch size: take it dynamically from the input tensor.
    output_shape = (array_ops.shape(x)[0],) + tuple(output_shape[1:])
    output_shape = array_ops.stack(list(output_shape))
  padding = _preprocess_padding(padding)
  if tf_data_format == 'NHWC':
    strides = (1,) + strides + (1,)
  else:
    strides = (1, 1) + strides
  x = nn.conv2d_transpose(
      x,
      kernel,
      output_shape,
      strides,
      padding=padding,
      data_format=tf_data_format)
  if data_format == 'channels_first' and tf_data_format == 'NHWC':
    x = array_ops.transpose(x, (0, 3, 1, 2))  # NHWC -> NCHW
  return x
def separable_conv2d(x,
                     depthwise_kernel,
                     pointwise_kernel,
                     strides=(1, 1),
                     padding='valid',
                     data_format=None,
                     dilation_rate=(1, 1)):
  """2D convolution with separable filters.
  Arguments:
      x: input tensor
      depthwise_kernel: convolution kernel for the depthwise convolution.
      pointwise_kernel: kernel for the 1x1 convolution.
      strides: strides tuple (length 2).
      padding: string, `"same"` or `"valid"`.
      data_format: string, `"channels_last"` or `"channels_first"`.
      dilation_rate: tuple of integers,
          dilation rates for the separable convolution.
  Returns:
      Output tensor.
  Raises:
      ValueError: if `data_format` is neither `channels_last` or
          `channels_first`.
  """
  if data_format is None:
    data_format = image_data_format()
  if data_format not in {'channels_first', 'channels_last'}:
    raise ValueError('Unknown data_format ' + str(data_format))
  x, tf_data_format = _preprocess_conv2d_input(x, data_format)
  padding = _preprocess_padding(padding)
  # Expand 2-tuple strides to the 4-element form TF expects, with the
  # batch/channel positions depending on the data format.
  if tf_data_format == 'NHWC':
    strides = (1,) + strides + (1,)
  else:
    strides = (1, 1) + strides
  x = nn.separable_conv2d(
      x,
      depthwise_kernel,
      pointwise_kernel,
      strides=strides,
      padding=padding,
      rate=dilation_rate,
      data_format=tf_data_format)
  if data_format == 'channels_first' and tf_data_format == 'NHWC':
    # Convolution ran in the NHWC fallback; restore channels_first layout.
    x = array_ops.transpose(x, (0, 3, 1, 2))  # NHWC -> NCHW
  return x
def depthwise_conv2d(x,
                     depthwise_kernel,
                     strides=(1, 1),
                     padding='valid',
                     data_format=None,
                     dilation_rate=(1, 1)):
  """2D convolution with separable filters.
  Arguments:
      x: input tensor
      depthwise_kernel: convolution kernel for the depthwise convolution.
      strides: strides tuple (length 2).
      padding: string, `"same"` or `"valid"`.
      data_format: string, `"channels_last"` or `"channels_first"`.
      dilation_rate: tuple of integers,
          dilation rates for the separable convolution.
  Returns:
      Output tensor.
  Raises:
      ValueError: if `data_format` is neither `channels_last` or
          `channels_first`.
  """
  if data_format is None:
    data_format = image_data_format()
  if data_format not in {'channels_first', 'channels_last'}:
    raise ValueError('Unknown data_format ' + str(data_format))
  x, tf_data_format = _preprocess_conv2d_input(x, data_format)
  padding = _preprocess_padding(padding)
  # Expand 2-tuple strides to the 4-element form TF expects.
  if tf_data_format == 'NHWC':
    full_strides = (1,) + strides + (1,)
  else:
    full_strides = (1, 1) + strides
  out = nn.depthwise_conv2d(
      x,
      depthwise_kernel,
      strides=full_strides,
      padding=padding,
      rate=dilation_rate,
      data_format=tf_data_format)
  if data_format == 'channels_first' and tf_data_format == 'NHWC':
    # Convolution ran in the NHWC fallback; restore channels_first layout.
    out = array_ops.transpose(out, (0, 3, 1, 2))  # NHWC -> NCHW
  return out
def conv3d(x,
           kernel,
           strides=(1, 1, 1),
           padding='valid',
           data_format=None,
           dilation_rate=(1, 1, 1)):
  """3D convolution.
  Arguments:
      x: Tensor or variable.
      kernel: kernel tensor.
      strides: strides tuple.
      padding: string, `"same"` or `"valid"`.
      data_format: string, `"channels_last"` or `"channels_first"`.
          Whether to use Theano or TensorFlow/CNTK data format
          for inputs/kernels/outputs.
      dilation_rate: tuple of 3 integers.
  Returns:
      A tensor, result of 3D convolution.
  Raises:
      ValueError: if `data_format` is neither `channels_last` or
          `channels_first`.
  """
  if data_format is None:
    data_format = image_data_format()
  if data_format not in {'channels_first', 'channels_last'}:
    raise ValueError('Unknown data_format ' + str(data_format))
  x, tf_data_format = _preprocess_conv3d_input(x, data_format)
  out = nn.convolution(
      input=x,
      filter=kernel,
      dilation_rate=dilation_rate,
      strides=strides,
      padding=_preprocess_padding(padding),
      data_format=tf_data_format)
  if data_format == 'channels_first' and tf_data_format == 'NDHWC':
    # Convolution ran in the NDHWC fallback; restore channels_first layout.
    out = array_ops.transpose(out, (0, 4, 1, 2, 3))
  return out
def conv3d_transpose(x,
                     kernel,
                     output_shape,
                     strides=(1, 1, 1),
                     padding='valid',
                     data_format=None):
  """3D deconvolution (i.e.
  transposed convolution).
  Arguments:
      x: input tensor.
      kernel: kernel tensor.
      output_shape: 1D int tensor for the output shape.
      strides: strides tuple.
      padding: string, "same" or "valid".
      data_format: string, `"channels_last"` or `"channels_first"`.
          Whether to use Theano or TensorFlow/CNTK data format
          for inputs/kernels/outputs.
  Returns:
      A tensor, result of transposed 3D convolution.
  Raises:
      ValueError: if `data_format` is neither `channels_last` or
          `channels_first`.
  """
  if data_format is None:
    data_format = image_data_format()
  if data_format not in {'channels_first', 'channels_last'}:
    raise ValueError('Unknown data_format ' + str(data_format))
  if isinstance(output_shape, (tuple, list)):
    output_shape = array_ops.stack(output_shape)
  x, tf_data_format = _preprocess_conv3d_input(x, data_format)
  if data_format == 'channels_first' and tf_data_format == 'NDHWC':
    # Input was transposed to the NDHWC fallback, so the requested output
    # shape must be rearranged from NCDHW to NDHWC too.
    output_shape = (output_shape[0], output_shape[2], output_shape[3],
                    output_shape[4], output_shape[1])
  if output_shape[0] is None:
    # Unknown batch size: take it dynamically from the input tensor.
    output_shape = (array_ops.shape(x)[0],) + tuple(output_shape[1:])
    output_shape = array_ops.stack(list(output_shape))
  padding = _preprocess_padding(padding)
  if tf_data_format == 'NDHWC':
    strides = (1,) + strides + (1,)
  else:
    strides = (1, 1) + strides
  x = nn.conv3d_transpose(
      x,
      kernel,
      output_shape,
      strides,
      padding=padding,
      data_format=tf_data_format)
  if data_format == 'channels_first' and tf_data_format == 'NDHWC':
    x = array_ops.transpose(x, (0, 4, 1, 2, 3))
  return x
def pool2d(x,
           pool_size,
           strides=(1, 1),
           padding='valid',
           data_format=None,
           pool_mode='max'):
  """2D Pooling.
  Arguments:
      x: Tensor or variable.
      pool_size: tuple of 2 integers.
      strides: tuple of 2 integers.
      padding: string, `"same"` or `"valid"`.
      data_format: string, `"channels_last"` or `"channels_first"`.
      pool_mode: string, `"max"` or `"avg"`.
  Returns:
      A tensor, result of 2D pooling.
  Raises:
      ValueError: if `data_format` is neither `"channels_last"` or
          `"channels_first"`.
      ValueError: if `pool_mode` is neither `"max"` or `"avg"`.
  """
  if data_format is None:
    data_format = image_data_format()
  if data_format not in {'channels_first', 'channels_last'}:
    raise ValueError('Unknown data_format ' + str(data_format))
  x, tf_data_format = _preprocess_conv2d_input(x, data_format)
  padding = _preprocess_padding(padding)
  # Expand 2-tuples to the 4-element form TF expects.
  if tf_data_format == 'NHWC':
    strides = (1,) + strides + (1,)
    pool_size = (1,) + pool_size + (1,)
  else:
    strides = (1, 1) + strides
    pool_size = (1, 1) + pool_size
  # Dispatch on the pooling mode.
  if pool_mode == 'max':
    pool_fn = nn.max_pool
  elif pool_mode == 'avg':
    pool_fn = nn.avg_pool
  else:
    raise ValueError('Invalid pooling mode:', pool_mode)
  x = pool_fn(x, pool_size, strides, padding=padding,
              data_format=tf_data_format)
  if data_format == 'channels_first' and tf_data_format == 'NHWC':
    # Pooling ran in the NHWC fallback; restore channels_first layout.
    x = array_ops.transpose(x, (0, 3, 1, 2))  # NHWC -> NCHW
  return x
def pool3d(x,
           pool_size,
           strides=(1, 1, 1),
           padding='valid',
           data_format=None,
           pool_mode='max'):
  """3D Pooling.
  Arguments:
      x: Tensor or variable.
      pool_size: tuple of 3 integers.
      strides: tuple of 3 integers.
      padding: string, `"same"` or `"valid"`.
      data_format: string, `"channels_last"` or `"channels_first"`.
      pool_mode: string, `"max"` or `"avg"`.
  Returns:
      A tensor, result of 3D pooling.
  Raises:
      ValueError: if `data_format` is neither `"channels_last"` or
          `"channels_first"`.
      ValueError: if `pool_mode` is neither `"max"` or `"avg"`.
  """
  if data_format is None:
    data_format = image_data_format()
  if data_format not in {'channels_first', 'channels_last'}:
    raise ValueError('Unknown data_format ' + str(data_format))
  x, tf_data_format = _preprocess_conv3d_input(x, data_format)
  padding = _preprocess_padding(padding)
  # Expand 3-tuples to the 5-element form TF expects.
  if tf_data_format == 'NDHWC':
    strides = (1,) + strides + (1,)
    pool_size = (1,) + pool_size + (1,)
  else:
    strides = (1, 1) + strides
    pool_size = (1, 1) + pool_size
  if pool_mode == 'max':
    x = nn.max_pool3d(
        x, pool_size, strides, padding=padding, data_format=tf_data_format)
  elif pool_mode == 'avg':
    x = nn.avg_pool3d(
        x, pool_size, strides, padding=padding, data_format=tf_data_format)
  else:
    raise ValueError('Invalid pooling mode:', pool_mode)
  if data_format == 'channels_first' and tf_data_format == 'NDHWC':
    # Pooling ran in the NDHWC fallback; restore channels_first layout.
    x = array_ops.transpose(x, (0, 4, 1, 2, 3))
  return x
def local_conv1d(inputs, kernel, kernel_size, strides, data_format=None):
  """Apply 1D conv with un-shared weights.
  Arguments:
      inputs: 3D tensor with shape: (batch_size, steps, input_dim)
      kernel: the unshared weight for convolution,
          with shape (output_length, feature_dim, filters)
      kernel_size: a tuple of a single integer,
          specifying the length of the 1D convolution window
      strides: a tuple of a single integer,
          specifying the stride length of the convolution
      data_format: the data format, channels_first or channels_last
  Returns:
      the tensor after 1d conv with un-shared weights, with shape (batch_size,
      output_length, filters)
  Raises:
      ValueError: if `data_format` is neither `channels_last` or
          `channels_first`.
  """
  if data_format is None:
    data_format = image_data_format()
  if data_format not in {'channels_first', 'channels_last'}:
    raise ValueError('Unknown data_format ' + str(data_format))
  stride = strides[0]
  kernel_shape = int_shape(kernel)
  output_length, feature_dim = kernel_shape[0], kernel_shape[1]
  # Extract one flattened input patch per output step.
  patches = []
  for step in range(output_length):
    window = slice(step * stride, step * stride + kernel_size[0])
    patches.append(reshape(inputs[:, window, :], (1, -1, feature_dim)))
  stacked = concatenate(patches, axis=0)
  # Shape: `(output_length, batch_size, filters)`.
  out = batch_dot(stacked, kernel)
  # Move batch back to the front.
  return permute_dimensions(out, (1, 0, 2))
def local_conv2d(inputs,
                 kernel,
                 kernel_size,
                 strides,
                 output_shape,
                 data_format=None):
  """Apply 2D conv with un-shared weights.
  Arguments:
      inputs: 4D tensor with shape:
          (batch_size, filters, new_rows, new_cols)
          if data_format='channels_first'
          or 4D tensor with shape:
          (batch_size, new_rows, new_cols, filters)
          if data_format='channels_last'.
      kernel: the unshared weight for convolution,
          with shape (output_items, feature_dim, filters)
      kernel_size: a tuple of 2 integers, specifying the
          width and height of the 2D convolution window.
      strides: a tuple of 2 integers, specifying the strides
          of the convolution along the width and height.
      output_shape: a tuple with (output_row, output_col)
      data_format: the data format, channels_first or channels_last
  Returns:
      A 4d tensor with shape:
      (batch_size, filters, new_rows, new_cols)
      if data_format='channels_first'
      or 4D tensor with shape:
      (batch_size, new_rows, new_cols, filters)
      if data_format='channels_last'.
  Raises:
      ValueError: if `data_format` is neither
          `channels_last` or `channels_first`.
  """
  if data_format is None:
    data_format = image_data_format()
  if data_format not in {'channels_first', 'channels_last'}:
    raise ValueError('Unknown data_format ' + str(data_format))
  stride_row, stride_col = strides
  output_row, output_col = output_shape
  kernel_shape = int_shape(kernel)
  feature_dim = kernel_shape[1]
  filters = kernel_shape[2]
  # Extract one flattened input patch per output position (row-major order,
  # matching the first axis of `kernel`).
  xs = []
  for i in range(output_row):
    for j in range(output_col):
      slice_row = slice(i * stride_row, i * stride_row + kernel_size[0])
      slice_col = slice(j * stride_col, j * stride_col + kernel_size[1])
      if data_format == 'channels_first':
        xs.append(
            reshape(inputs[:, :, slice_row, slice_col], (1, -1, feature_dim)))
      else:
        xs.append(
            reshape(inputs[:, slice_row, slice_col, :], (1, -1, feature_dim)))
  x_aggregate = concatenate(xs, axis=0)
  # Per-position matmul: each output position uses its own kernel slice.
  output = batch_dot(x_aggregate, kernel)
  output = reshape(output, (output_row, output_col, -1, filters))
  # Move the batch axis back to the front in the requested data format.
  if data_format == 'channels_first':
    output = permute_dimensions(output, (2, 3, 0, 1))
  else:
    output = permute_dimensions(output, (2, 0, 1, 3))
  return output
def bias_add(x, bias, data_format=None):
  """Adds a bias vector to a tensor.
  Arguments:
      x: Tensor or variable.
      bias: Bias tensor to add.
      data_format: string, `"channels_last"` or `"channels_first"`.
  Returns:
      Output tensor.
  Raises:
      ValueError: In one of the two cases below:
          1. invalid `data_format` argument.
          2. invalid bias shape.
              the bias should be either a vector or
              a tensor with ndim(x) - 1 dimension
  """
  if data_format is None:
    data_format = image_data_format()
  if data_format not in {'channels_first', 'channels_last'}:
    raise ValueError('Unknown data_format ' + str(data_format))
  bias_shape = int_shape(bias)
  if len(bias_shape) != 1 and len(bias_shape) != ndim(x) - 1:
    raise ValueError(
        'Unexpected bias dimensions %d, expect to be 1 or %d dimensions' %
        (len(bias_shape), ndim(x)))
  # Each branch reshapes the bias so it broadcasts along the batch axis
  # (and, for 1D biases, along the spatial axes) of `x`.
  if ndim(x) == 5:
    # 5D: (batch, channels, d1, d2, d3) or (batch, d1, d2, d3, channels).
    if data_format == 'channels_first':
      if len(bias_shape) == 1:
        x += reshape(bias, (1, bias_shape[0], 1, 1, 1))
      else:
        # Channels-last-shaped bias: move channels to the front.
        x += reshape(bias, (1, bias_shape[3]) + bias_shape[:3])
    elif data_format == 'channels_last':
      if len(bias_shape) == 1:
        x += reshape(bias, (1, 1, 1, bias_shape[0]))
      else:
        x += reshape(bias, (1,) + bias_shape)
  elif ndim(x) == 4:
    # 4D: typical image tensors.
    if data_format == 'channels_first':
      if len(bias_shape) == 1:
        x += reshape(bias, (1, bias_shape[0], 1, 1))
      else:
        # Channels-last-shaped bias: move channels to the front.
        x += reshape(bias, (1, bias_shape[2]) + bias_shape[:2])
    elif data_format == 'channels_last':
      if len(bias_shape) == 1:
        # Fast path: TF has a fused op for the NHWC vector-bias case.
        x = nn.bias_add(x, bias, data_format='NHWC')
      else:
        x += reshape(bias, (1,) + bias_shape)
  elif ndim(x) == 3:
    # 3D: typical temporal tensors.
    if data_format == 'channels_first':
      if len(bias_shape) == 1:
        x += reshape(bias, (1, bias_shape[0], 1))
      else:
        x += reshape(bias, (1, bias_shape[1], bias_shape[0]))
    elif data_format == 'channels_last':
      if len(bias_shape) == 1:
        x += reshape(bias, (1, 1, bias_shape[0]))
      else:
        x += reshape(bias, (1,) + bias_shape)
  else:
    # Any other rank: defer to TF's default (last-axis) bias add.
    x = nn.bias_add(x, bias)
  return x
# RANDOMNESS
def random_normal(shape, mean=0.0, stddev=1.0, dtype=None, seed=None):
  """Returns a tensor with normal distribution of values.
  Arguments:
      shape: A tuple of integers, the shape of tensor to create.
      mean: A float, mean of the normal distribution to draw samples.
      stddev: A float, standard deviation of the normal distribution
          to draw samples.
      dtype: String, dtype of returned tensor.
      seed: Integer, random seed.
  Returns:
      A tensor.
  """
  if dtype is None:
    dtype = floatx()
  # Pick a concrete seed so the op is reproducible within a session.
  seed = np.random.randint(10e6) if seed is None else seed
  return random_ops.random_normal(
      shape, mean=mean, stddev=stddev, dtype=dtype, seed=seed)
def random_uniform(shape, minval=0.0, maxval=1.0, dtype=None, seed=None):
  """Returns a tensor with uniform distribution of values.
  Arguments:
      shape: A tuple of integers, the shape of tensor to create.
      minval: A float, lower boundary of the uniform distribution
          to draw samples.
      maxval: A float, upper boundary of the uniform distribution
          to draw samples.
      dtype: String, dtype of returned tensor.
      seed: Integer, random seed.
  Returns:
      A tensor.
  """
  if dtype is None:
    dtype = floatx()
  # Pick a concrete seed so the op is reproducible within a session.
  seed = np.random.randint(10e6) if seed is None else seed
  return random_ops.random_uniform(
      shape, minval=minval, maxval=maxval, dtype=dtype, seed=seed)
def random_binomial(shape, p=0.0, dtype=None, seed=None):
  """Returns a tensor with random binomial distribution of values.
  Arguments:
      shape: A tuple of integers, the shape of tensor to create.
      p: A float, `0. <= p <= 1`, probability of binomial distribution.
      dtype: String, dtype of returned tensor.
      seed: Integer, random seed.
  Returns:
      A tensor.
  """
  if dtype is None:
    dtype = floatx()
  if seed is None:
    seed = np.random.randint(10e6)
  # Draw uniform samples; an entry becomes 1 with probability `p`
  # (draws in [0, p] map to 1, the rest to 0).
  draws = random_ops.random_uniform(shape, dtype=dtype, seed=seed)
  ones = array_ops.ones(shape, dtype=dtype)
  zeros = array_ops.zeros(shape, dtype=dtype)
  return array_ops.where(draws <= p, ones, zeros)
def truncated_normal(shape, mean=0.0, stddev=1.0, dtype=None, seed=None):
  """Returns a tensor with truncated random normal distribution of values.
  The generated values follow a normal distribution
  with specified mean and standard deviation,
  except that values whose magnitude is more than
  two standard deviations from the mean are dropped and re-picked.
  Arguments:
      shape: A tuple of integers, the shape of tensor to create.
      mean: Mean of the values.
      stddev: Standard deviation of the values.
      dtype: String, dtype of returned tensor.
      seed: Integer, random seed.
  Returns:
      A tensor.
  """
  if dtype is None:
    dtype = floatx()
  # Pick a concrete seed so the op is reproducible within a session.
  seed = np.random.randint(10e6) if seed is None else seed
  return random_ops.truncated_normal(
      shape, mean, stddev, dtype=dtype, seed=seed)
# CTC
# TensorFlow has a native implementation, but it uses sparse tensors
# and therefore requires a wrapper for Keras. The functions below convert
# dense to sparse tensors and also wraps up the beam search code that is
# in TensorFlow's CTC implementation
def ctc_label_dense_to_sparse(labels, label_lengths):
  """Converts CTC labels from dense to sparse.

  Arguments:
      labels: dense CTC labels, shaped `(batch, max_label_len)` —
          assumed 2-D; the code indexes `label_shape[0]` and
          `label_shape[1]` only.
      label_lengths: length of the labels (one entry per batch item).

  Returns:
      A sparse tensor representation of the labels.
  """
  label_shape = array_ops.shape(labels)
  num_batches_tns = array_ops.stack([label_shape[0]])
  max_num_labels_tns = array_ops.stack([label_shape[1]])
  # Row mask: True for positions < the current sequence's length.
  def range_less_than(_, current_input):
    return array_ops.expand_dims(
        math_ops.range(label_shape[1]), 0) < array_ops.fill(
            max_num_labels_tns, current_input)
  init = math_ops.cast(
      array_ops.fill([1, label_shape[1]], 0), dtypes_module.bool)
  # Build one boolean mask row per batch item from its label length.
  dense_mask = functional_ops.scan(
      range_less_than, label_lengths, initializer=init, parallel_iterations=1)
  dense_mask = dense_mask[:, 0, :]
  # Column indices of every label slot, masked down to the valid ones.
  label_array = array_ops.reshape(
      array_ops.tile(math_ops.range(0, label_shape[1]), num_batches_tns),
      label_shape)
  label_ind = array_ops.boolean_mask(label_array, dense_mask)
  # Matching batch (row) indices for the same valid slots.
  batch_array = array_ops.transpose(
      array_ops.reshape(
          array_ops.tile(math_ops.range(0, label_shape[0]), max_num_labels_tns),
          reverse(label_shape, 0)))
  batch_ind = array_ops.boolean_mask(batch_array, dense_mask)
  # Pair them into (batch, label) coordinates and gather the label values.
  indices = array_ops.transpose(
      array_ops.reshape(concatenate([batch_ind, label_ind], axis=0), [2, -1]))
  vals_sparse = array_ops.gather_nd(labels, indices)
  return sparse_tensor.SparseTensor(
      math_ops.to_int64(indices), vals_sparse, math_ops.to_int64(label_shape))
def ctc_batch_cost(y_true, y_pred, input_length, label_length):
  """Runs CTC loss algorithm on each batch element.

  Arguments:
      y_true: tensor `(samples, max_string_length)`
          containing the truth labels.
      y_pred: tensor `(samples, time_steps, num_categories)`
          containing the prediction, or output of the softmax.
      input_length: tensor `(samples, 1)` containing the sequence length for
          each batch item in `y_pred`.
      label_length: tensor `(samples, 1)` containing the sequence length for
          each batch item in `y_true`.

  Returns:
      Tensor with shape (samples,1) containing the
          CTC loss of each element.
  """
  label_length = math_ops.to_int32(array_ops.squeeze(label_length))
  input_length = math_ops.to_int32(array_ops.squeeze(input_length))
  sparse_labels = math_ops.to_int32(
      ctc_label_dense_to_sparse(y_true, label_length))
  # ctc_loss expects time-major log-probabilities; 1e-8 guards log(0).
  y_pred = math_ops.log(array_ops.transpose(y_pred, perm=[1, 0, 2]) + 1e-8)
  per_sample_loss = ctc.ctc_loss(
      inputs=y_pred, labels=sparse_labels, sequence_length=input_length)
  return array_ops.expand_dims(per_sample_loss, 1)
def ctc_decode(y_pred, input_length, greedy=True, beam_width=100, top_paths=1):
  """Decodes the output of a softmax.

  Can use either greedy search (also known as best path)
  or a constrained dictionary search.

  Arguments:
      y_pred: tensor `(samples, time_steps, num_categories)`
          containing the prediction, or output of the softmax.
      input_length: tensor `(samples, )` containing the sequence length for
          each batch item in `y_pred`.
      greedy: perform much faster best-path search if `true`.
          This does not use a dictionary.
      beam_width: if `greedy` is `false`: a beam search decoder will be used
          with a beam of this width.
      top_paths: if `greedy` is `false`,
          how many of the most probable paths will be returned.

  Returns:
      Tuple:
          List: if `greedy` is `true`, returns a list of one element that
              contains the decoded sequence.
              If `false`, returns the `top_paths` most probable
              decoded sequences.
              Important: blank labels are returned as `-1`.
          Tensor `(top_paths, )` that contains
              the log probability of each decoded sequence.
  """
  # Decoders expect time-major log-probabilities; 1e-8 guards log(0).
  y_pred = math_ops.log(array_ops.transpose(y_pred, perm=[1, 0, 2]) + 1e-8)
  input_length = math_ops.to_int32(input_length)
  if greedy:
    decoded, log_prob = ctc.ctc_greedy_decoder(
        inputs=y_pred, sequence_length=input_length)
  else:
    decoded, log_prob = ctc.ctc_beam_search_decoder(
        inputs=y_pred,
        sequence_length=input_length,
        beam_width=beam_width,
        top_paths=top_paths)
  # Densify each decoded path, padding unused slots with the blank marker -1.
  decoded_dense = []
  for sparse_path in decoded:
    decoded_dense.append(
        sparse_ops.sparse_to_dense(
            sparse_path.indices,
            sparse_path.dense_shape,
            sparse_path.values,
            default_value=-1))
  return (decoded_dense, log_prob)
# HIGH ORDER FUNCTIONS
def map_fn(fn, elems, name=None, dtype=None):
  """Map the function fn over the elements elems and return the outputs.

  Arguments:
      fn: Callable that will be called upon each element in elems
      elems: tensor
      name: A string name for the map node in the graph
      dtype: Output data type.

  Returns:
      Tensor with dtype `dtype`.
  """
  # Thin delegation to TensorFlow's own map_fn.
  mapped = functional_ops.map_fn(fn, elems, name=name, dtype=dtype)
  return mapped
def foldl(fn, elems, initializer=None, name=None):
  """Reduce elems using fn to combine them from left to right.

  Arguments:
      fn: Callable that will be called upon each element in elems and an
          accumulator, for instance `lambda acc, x: acc + x`
      elems: tensor
      initializer: The first value used (`elems[0]` in case of None)
      name: A string name for the foldl node in the graph

  Returns:
      Tensor with same type and shape as `initializer`.
  """
  # Thin delegation to TensorFlow's own foldl.
  folded = functional_ops.foldl(fn, elems, initializer=initializer, name=name)
  return folded
def foldr(fn, elems, initializer=None, name=None):
  """Reduce elems using fn to combine them from right to left.

  Arguments:
      fn: Callable that will be called upon each element in elems and an
          accumulator, for instance `lambda acc, x: acc + x`
      elems: tensor
      initializer: The first value used (`elems[-1]` in case of None)
      name: A string name for the foldr node in the graph

  Returns:
      Same type and shape as initializer
  """
  # Thin delegation to TensorFlow's own foldr.
  folded = functional_ops.foldr(fn, elems, initializer=initializer, name=name)
  return folded
# Load Keras default configuration from config file if present
# (~/.keras/keras.json), then write one back if it does not exist yet.
_keras_base_dir = os.path.expanduser('~')
_keras_dir = os.path.join(_keras_base_dir, '.keras')
_config_path = os.path.expanduser(os.path.join(_keras_dir, 'keras.json'))
if os.path.exists(_config_path):
  try:
    # Use a context manager so the file handle is always closed
    # (the previous `json.load(open(...))` leaked it).
    with open(_config_path) as _config_file:
      _config = json.load(_config_file)
  except ValueError:
    # Malformed JSON: fall back to the built-in defaults.
    _config = {}
  _floatx = _config.get('floatx', floatx())
  assert _floatx in {'float16', 'float32', 'float64'}
  _epsilon = _config.get('epsilon', epsilon())
  assert isinstance(_epsilon, float)
  _image_data_format = _config.get('image_data_format', image_data_format())
  assert _image_data_format in {'channels_last', 'channels_first'}
  set_floatx(_floatx)
  set_epsilon(_epsilon)
  set_image_data_format(_image_data_format)
# Save config file.
if not os.path.exists(_keras_dir):
  try:
    os.makedirs(_keras_dir)
  except OSError:
    # Except permission denied and potential race conditions
    # in multi-threaded environments.
    pass
if not os.path.exists(_config_path):
  _config = {
      'floatx': floatx(),
      'epsilon': epsilon(),
      'backend': 'tensorflow',
      'image_data_format': image_data_format()
  }
  try:
    with open(_config_path, 'w') as f:
      f.write(json.dumps(_config, indent=4))
  except IOError:
    # Except permission denied.
    pass
| 28.781604 | 84 | 0.646623 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import json
import os
import numpy as np
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session as session_module
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes as dtypes_module
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.layers import base as tf_base_layers
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import ctc_ops as ctc
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import gradients as gradients_module
from tensorflow.python.ops import image_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import tensor_array_grad
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variables as variables_module
from tensorflow.python.training import moving_averages
from tensorflow.python.util import tf_inspect
# Aliases for the builtins shadowed below by the backend's `all` and `sum`.
py_all = all
py_sum = sum
# Default TF session used by Keras; set via `set_session`, read lazily
# by `get_session`.
_SESSION = None
# Per-graph mapping {graph: learning-phase placeholder or 0/1 value}.
_GRAPH_LEARNING_PHASES = {}
# Per-graph layer-name UID dicts (kept for naming uniqueness bookkeeping).
_GRAPH_UID_DICTS = {}
# When True, `get_session` skips automatic variable initialization.
_MANUAL_VAR_INIT = False
# Backend-wide numeric defaults, adjustable via the setters below.
_FLOATX = 'float32'
_EPSILON = 10e-8
_IMAGE_DATA_FORMAT = 'channels_last'
def backend():
  """Returns the name of the current backend ('tensorflow')."""
  return 'tensorflow'
def epsilon():
  """Returns the fuzz factor used in numeric expressions."""
  return _EPSILON
def set_epsilon(value):
  """Sets the fuzz factor used in numeric expressions."""
  global _EPSILON
  _EPSILON = value
def floatx():
  """Returns the default float type, as a string."""
  return _FLOATX
def set_floatx(value):
  """Sets the default float type; must be one of float16/32/64."""
  global _FLOATX
  if value not in {'float16', 'float32', 'float64'}:
    raise ValueError('Unknown floatx type: ' + str(value))
  _FLOATX = str(value)
def cast_to_floatx(x):
  """Casts a Numpy array to the default Keras float type."""
  return np.asarray(x, dtype=_FLOATX)
def image_data_format():
  """Returns the default image data format convention."""
  return _IMAGE_DATA_FORMAT
def set_image_data_format(data_format):
  """Sets the image data format ('channels_first' or 'channels_last')."""
  global _IMAGE_DATA_FORMAT
  if data_format not in {'channels_last', 'channels_first'}:
    raise ValueError('Unknown data_format:', data_format)
  _IMAGE_DATA_FORMAT = str(data_format)
def get_uid(prefix=''):
  """Returns a unique, increasing UID for `prefix` in the default graph."""
  graph = ops.get_default_graph()
  if graph not in tf_base_layers.PER_GRAPH_LAYER_NAME_UIDS:
    tf_base_layers.PER_GRAPH_LAYER_NAME_UIDS[graph] = collections.defaultdict(
        int)
  layer_name_uids = tf_base_layers.PER_GRAPH_LAYER_NAME_UIDS[graph]
  layer_name_uids[prefix] += 1
  return layer_name_uids[prefix]
def reset_uids():
  """Clears the per-graph UID registries shared with tf.layers."""
  per_graph_layer_name_uids = tf_base_layers.PER_GRAPH_LAYER_NAME_UIDS
  # Snapshot keys first: deleting while iterating the dict would fail.
  keys = list(per_graph_layer_name_uids.keys())
  for key in keys:
    del per_graph_layer_name_uids[key]
def clear_session():
  """Destroys the current graph/session and creates a fresh learning phase."""
  global _SESSION
  global _GRAPH_LEARNING_PHASES
  ops.reset_default_graph()
  reset_uids()
  _SESSION = None
  phase = array_ops.placeholder(dtype='bool', name='keras_learning_phase')
  _GRAPH_LEARNING_PHASES = {}
  _GRAPH_LEARNING_PHASES[ops.get_default_graph()] = phase
def manual_variable_initialization(value):
  """Sets whether variable initialization is left to the user (bool)."""
  global _MANUAL_VAR_INIT
  _MANUAL_VAR_INIT = value
def learning_phase():
  """Returns the learning-phase tensor/value for the default graph."""
  graph = ops.get_default_graph()
  if graph not in _GRAPH_LEARNING_PHASES:
    # Lazily create a bool placeholder the first time a graph asks.
    phase = array_ops.placeholder(dtype='bool', name='keras_learning_phase')
    _GRAPH_LEARNING_PHASES[graph] = phase
  return _GRAPH_LEARNING_PHASES[graph]
def set_learning_phase(value):
  """Sets the learning phase to a fixed value (0 = test, 1 = train)."""
  global _GRAPH_LEARNING_PHASES
  if value not in {0, 1}:
    raise ValueError('Expected learning phase to be ' '0 or 1.')
  _GRAPH_LEARNING_PHASES[ops.get_default_graph()] = value
def get_session():
  """Returns the TF session to be used by the backend.

  Prefers an externally-registered default session; otherwise lazily
  creates (and caches) a module-level one. Unless manual initialization
  was requested, uninitialized Keras variables are initialized on the way
  out.
  """
  global _SESSION
  if ops.get_default_session() is not None:
    session = ops.get_default_session()
  else:
    if _SESSION is None:
      # OMP_NUM_THREADS, when set, bounds intra-op parallelism.
      if not os.environ.get('OMP_NUM_THREADS'):
        config = config_pb2.ConfigProto(allow_soft_placement=True)
      else:
        num_thread = int(os.environ.get('OMP_NUM_THREADS'))
        config = config_pb2.ConfigProto(
            intra_op_parallelism_threads=num_thread, allow_soft_placement=True)
      _SESSION = session_module.Session(config=config)
    session = _SESSION
  if not _MANUAL_VAR_INIT:
    with session.graph.as_default():
      _initialize_variables(session)
  return session
def set_session(session):
  """Registers `session` as the backend's global TF session."""
  global _SESSION
  _SESSION = session
class _TfDeviceCaptureOp(object):
  """Stand-in op that records the device scope applied to it."""
  def __init__(self):
    self.device = None  # populated by the graph's device functions
  def _set_device(self, device):
    # Called by Graph._apply_device_functions in lieu of a real op.
    self.device = device
def _get_current_tf_device():
  """Returns the device in effect in the current graph scope (or None)."""
  g = ops.get_default_graph()
  op = _TfDeviceCaptureOp()
  g._apply_device_functions(op)
  return op.device
def _is_current_explicit_device(device_type):
  """True if an explicit device of type 'CPU'/'GPU' is currently scoped."""
  device_type = device_type.upper()
  if device_type not in ['CPU', 'GPU']:
    raise ValueError('device_type should be either "CPU" or "GPU".')
  device = _get_current_tf_device()
  return device is not None and device.device_type == device_type.upper()
def _get_available_gpus():
  """Returns the names of the GPU devices visible to the session."""
  devices = get_session().list_devices()
  return [x.name for x in devices if x.device_type == 'GPU']
def _has_nchw_support():
  """True when channels_first (NCHW) execution is viable: a GPU is
  available and we are not explicitly pinned to CPU."""
  explicitly_on_cpu = _is_current_explicit_device('CPU')
  gpus_available = bool(_get_available_gpus())
  return not explicitly_on_cpu and gpus_available
def _to_tensor(x, dtype):
  """Converts `x` to a tensor of the given dtype."""
  return ops.convert_to_tensor(x, dtype=dtype)
def is_sparse(tensor):
  """True if `tensor` is a tf SparseTensor."""
  return isinstance(tensor, sparse_tensor.SparseTensor)
def to_dense(tensor):
  """Densifies a SparseTensor; passes dense tensors through unchanged."""
  if is_sparse(tensor):
    return sparse_ops.sparse_tensor_to_dense(tensor)
  else:
    return tensor
# Re-export TF's name_scope as part of the backend API.
name_scope = ops.name_scope
def variable(value, dtype=None, name=None, constraint=None):
  """Instantiates a TF variable from `value` and returns it.

  Scipy sparse matrices (anything with `.tocoo`) become SparseTensors
  instead of Variables. `_keras_shape` / `_uses_learning_phase` metadata
  is attached for the Keras layer machinery.
  """
  if dtype is None:
    dtype = floatx()
  if hasattr(value, 'tocoo'):
    # Sparse input: build a SparseTensor from COO row/col/data.
    sparse_coo = value.tocoo()
    indices = np.concatenate((np.expand_dims(sparse_coo.row, 1), np.expand_dims(
        sparse_coo.col, 1)), 1)
    v = sparse_tensor.SparseTensor(
        indices=indices, values=sparse_coo.data, dense_shape=sparse_coo.shape)
    v._keras_shape = sparse_coo.shape
    v._uses_learning_phase = False
    return v
  v = variables_module.Variable(
      value,
      dtype=dtypes_module.as_dtype(dtype),
      name=name,
      constraint=constraint)
  # Record the static shape when it is knowable from the input.
  if isinstance(value, np.ndarray):
    v._keras_shape = value.shape
  elif hasattr(value, 'get_shape'):
    v._keras_shape = int_shape(value)
  v._uses_learning_phase = False
  return v
def _initialize_variables(session):
  """Initializes, once, every global variable not yet marked as handled.

  Variables are flagged with `_keras_initialized` so repeated
  `get_session()` calls do not re-run (or re-check) them.
  """
  variables = variables_module.global_variables()
  candidate_vars = []
  for v in variables:
    if not getattr(v, '_keras_initialized', False):
      candidate_vars.append(v)
  if candidate_vars:
    # One session.run for all the is-initialized checks, then one more
    # for the actual initialization of the stragglers.
    is_initialized = session.run(
        [variables_module.is_variable_initialized(v) for v in candidate_vars])
    uninitialized_vars = []
    for flag, v in zip(is_initialized, candidate_vars):
      if not flag:
        uninitialized_vars.append(v)
      v._keras_initialized = True
    if uninitialized_vars:
      session.run(variables_module.variables_initializer(uninitialized_vars))
def constant(value, dtype=None, shape=None, name=None):
  """Creates a constant tensor (dtype defaults to floatx())."""
  if dtype is None:
    dtype = floatx()
  return constant_op.constant(value, dtype=dtype, shape=shape, name=name)
def is_keras_tensor(x):
  """True if `x` was produced by a Keras layer (has `_keras_history`).

  Raises ValueError for non-symbolic inputs.
  """
  if not isinstance(x, (ops.Tensor,
                        variables_module.Variable,
                        sparse_tensor.SparseTensor)):
    raise ValueError('Unexpectedly found an instance of type `' + str(type(x)) +
                     '`. Expected a symbolic tensor instance.')
  return hasattr(x, '_keras_history')
def placeholder(shape=None, ndim=None, dtype=None, sparse=False, name=None):
  """Instantiates a (possibly sparse) placeholder tensor.

  Either `shape` or `ndim` may be given; with only `ndim`, the shape is
  all-None of that rank.
  """
  if dtype is None:
    dtype = floatx()
  if not shape:
    if ndim:
      shape = tuple([None for _ in range(ndim)])
  if sparse:
    x = array_ops.sparse_placeholder(dtype, shape=shape, name=name)
  else:
    x = array_ops.placeholder(dtype, shape=shape, name=name)
  x._uses_learning_phase = False
  return x
def is_placeholder(x):
  """True if `x` is a TF Placeholder op output."""
  try:
    return x.op.type == 'Placeholder'
  except AttributeError:
    return False
def shape(x):
  """Returns the symbolic (dynamic) shape of `x`."""
  return array_ops.shape(x)
def int_shape(x):
  """Returns the static shape as a tuple of ints/None, or None."""
  try:
    return tuple(x.get_shape().as_list())
  except ValueError:
    return None
def ndim(x):
  """Returns the static rank of `x`, or None if unknown."""
  dims = x.get_shape()._dims
  if dims is not None:
    return len(dims)
  return None
def dtype(x):
  """Returns the dtype of `x` as a string (e.g. 'float32')."""
  return x.dtype.base_dtype.name
def eval(x):
  """Evaluates `x` in the backend session and returns its value."""
  return to_dense(x).eval(session=get_session())
def zeros(shape, dtype=None, name=None):
  """Instantiates an all-zeros variable of the given shape."""
  if dtype is None:
    dtype = floatx()
  tf_dtype = dtypes_module.as_dtype(dtype)
  return variable(
      init_ops.constant_initializer(0., dtype=tf_dtype)(shape), dtype, name)
def ones(shape, dtype=None, name=None):
  """Instantiates an all-ones variable of the given shape."""
  if dtype is None:
    dtype = floatx()
  tf_dtype = dtypes_module.as_dtype(dtype)
  return variable(
      init_ops.constant_initializer(1., dtype=tf_dtype)(shape), dtype, name)
def eye(size, dtype=None, name=None):
  """Instantiates an identity-matrix variable of the given size."""
  if dtype is None:
    dtype = floatx()
  tf_dtype = dtypes_module.as_dtype(dtype)
  return variable(linalg_ops.eye(size, dtype=tf_dtype), dtype, name)
def zeros_like(x, dtype=None, name=None):
  """Zeros tensor with the same shape (and optionally dtype) as `x`."""
  return array_ops.zeros_like(x, dtype=dtype, name=name)
def ones_like(x, dtype=None, name=None):
  """Ones tensor with the same shape (and optionally dtype) as `x`."""
  return array_ops.ones_like(x, dtype=dtype, name=name)
def identity(x, name=None):
  """Returns a tensor with the same content as `x`."""
  return array_ops.identity(x, name=name)
def random_uniform_variable(shape, low, high, dtype=None, name=None, seed=None):
  """Instantiates a variable with values drawn from Uniform(low, high)."""
  if dtype is None:
    dtype = floatx()
  tf_dtype = dtypes_module.as_dtype(dtype)
  if seed is None:
    # Arbitrary random seed drawn once at construction time.
    seed = np.random.randint(10e8)
  value = init_ops.random_uniform_initializer(
      low, high, dtype=tf_dtype, seed=seed)(shape)
  return variable(value, dtype=dtype, name=name)
def random_normal_variable(shape, mean, scale, dtype=None, name=None,
                           seed=None):
  """Instantiates a variable with values drawn from Normal(mean, scale)."""
  if dtype is None:
    dtype = floatx()
  tf_dtype = dtypes_module.as_dtype(dtype)
  if seed is None:
    seed = np.random.randint(10e8)
  value = init_ops.random_normal_initializer(
      mean, scale, dtype=tf_dtype, seed=seed)(shape)
  return variable(value, dtype=dtype, name=name)
def count_params(x):
  """Returns the number of scalar elements in `x` (static shape)."""
  return np.prod(x.get_shape().as_list())
def cast(x, dtype):
  """Casts `x` to the given dtype."""
  return math_ops.cast(x, dtype)
def update(x, new_x):
  """Op assigning `new_x` to variable `x`."""
  return state_ops.assign(x, new_x)
def update_add(x, increment):
  """Op adding `increment` to variable `x`."""
  return state_ops.assign_add(x, increment)
def update_sub(x, decrement):
  """Op subtracting `decrement` from variable `x`."""
  return state_ops.assign_sub(x, decrement)
def moving_average_update(x, value, momentum):
  """Op updating `x` as a moving average of `value` with `momentum`."""
  return moving_averages.assign_moving_average(
      x, value, momentum, zero_debias=False)
def dot(x, y):
  """Multiplies two tensors (Theano-style dot, supporting nD inputs).

  For rank > 2 operands, contracts the last axis of `x` with the
  second-to-last axis of `y` by flattening to a 2-D matmul and reshaping
  back. Sparse `x` uses a sparse-dense matmul.
  """
  if ndim(x) is not None and (ndim(x) > 2 or ndim(y) > 2):
    # Build shape tuples mixing static dims (ints) with dynamic dims
    # (tensors) so the final reshape works for partially-known shapes.
    x_shape = []
    for i, s in zip(int_shape(x), array_ops.unstack(array_ops.shape(x))):
      if i is not None:
        x_shape.append(i)
      else:
        x_shape.append(s)
    x_shape = tuple(x_shape)
    y_shape = []
    for i, s in zip(int_shape(y), array_ops.unstack(array_ops.shape(y))):
      if i is not None:
        y_shape.append(i)
      else:
        y_shape.append(s)
    y_shape = tuple(y_shape)
    # Bring y's contraction axis (second-to-last) to the front.
    y_permute_dim = list(range(ndim(y)))
    y_permute_dim = [y_permute_dim.pop(-2)] + y_permute_dim
    xt = array_ops.reshape(x, [-1, x_shape[-1]])
    yt = array_ops.reshape(
        array_ops.transpose(y, perm=y_permute_dim), [y_shape[-2], -1])
    return array_ops.reshape(
        math_ops.matmul(xt, yt), x_shape[:-1] + y_shape[:-2] + y_shape[-1:])
  if is_sparse(x):
    out = sparse_ops.sparse_tensor_dense_matmul(x, y)
  else:
    out = math_ops.matmul(x, y)
  return out
def batch_dot(x, y, axes=None):
  """Batchwise dot product of `x` and `y` along the given axes.

  Rank mismatches are padded with trailing singleton dims, the product
  is computed (elementwise-sum for 2-D, adjoint matmul otherwise), the
  padding dims are squeezed away, and a rank-1 result is expanded to
  `(batch, 1)`.
  """
  if isinstance(axes, int):
    axes = (axes, axes)
  x_ndim = ndim(x)
  y_ndim = ndim(y)
  # Pad the lower-rank operand with trailing 1-dims so ranks match.
  if x_ndim > y_ndim:
    diff = x_ndim - y_ndim
    y = array_ops.reshape(y,
                          array_ops.concat(
                              [array_ops.shape(y), [1] * (diff)], axis=0))
  elif y_ndim > x_ndim:
    diff = y_ndim - x_ndim
    x = array_ops.reshape(x,
                          array_ops.concat(
                              [array_ops.shape(x), [1] * (diff)], axis=0))
  else:
    diff = 0
  if ndim(x) == 2 and ndim(y) == 2:
    if axes[0] == axes[1]:
      out = math_ops.reduce_sum(math_ops.multiply(x, y), axes[0])
    else:
      out = math_ops.reduce_sum(
          math_ops.multiply(array_ops.transpose(x, [1, 0]), y), axes[1])
  else:
    if axes is not None:
      # Contract via matmul adjoints instead of explicit transposes.
      adj_x = None if axes[0] == ndim(x) - 1 else True
      adj_y = True if axes[1] == ndim(y) - 1 else None
    else:
      adj_x = None
      adj_y = None
    out = math_ops.matmul(x, y, adjoint_a=adj_x, adjoint_b=adj_y)
  if diff:
    # Remove the singleton dims introduced by the rank padding above.
    if x_ndim > y_ndim:
      idx = x_ndim + y_ndim - 3
    else:
      idx = x_ndim - 1
    out = array_ops.squeeze(out, list(range(idx, idx + diff)))
  if ndim(out) == 1:
    out = expand_dims(out, 1)
  return out
# Elementwise / reduction wrappers. `keep_dims` mirrors numpy's
# `keepdims`; an `axis` of None reduces over all dimensions.
def transpose(x):
  """Transposes `x` (reverses all axes)."""
  return array_ops.transpose(x)
def gather(reference, indices):
  """Gathers slices of `reference` at `indices` along axis 0."""
  return array_ops.gather(reference, indices)
def max(x, axis=None, keepdims=False):
  return math_ops.reduce_max(x, axis=axis, keep_dims=keepdims)
def min(x, axis=None, keepdims=False):
  return math_ops.reduce_min(x, axis=axis, keep_dims=keepdims)
def sum(x, axis=None, keepdims=False):
  return math_ops.reduce_sum(x, axis=axis, keep_dims=keepdims)
def prod(x, axis=None, keepdims=False):
  return math_ops.reduce_prod(x, axis=axis, keep_dims=keepdims)
def cumsum(x, axis=0):
  return math_ops.cumsum(x, axis=axis)
def cumprod(x, axis=0):
  return math_ops.cumprod(x, axis=axis)
def var(x, axis=None, keepdims=False):
  """Variance of `x` along `axis` (bools are cast to floatx first)."""
  if x.dtype.base_dtype == dtypes_module.bool:
    x = math_ops.cast(x, floatx())
  m = math_ops.reduce_mean(x, axis=axis, keep_dims=True)
  devs_squared = math_ops.square(x - m)
  return math_ops.reduce_mean(
      devs_squared, axis=axis, keep_dims=keepdims)
def std(x, axis=None, keepdims=False):
  """Standard deviation: sqrt of `var`."""
  return math_ops.sqrt(var(x, axis=axis, keepdims=keepdims))
def mean(x, axis=None, keepdims=False):
  """Mean of `x` along `axis` (bools are cast to floatx first)."""
  if x.dtype.base_dtype == dtypes_module.bool:
    x = math_ops.cast(x, floatx())
  return math_ops.reduce_mean(x, axis=axis, keep_dims=keepdims)
def any(x, axis=None, keepdims=False):
  """Logical OR reduction (shadows the builtin; see `py_all`/`py_sum`)."""
  x = math_ops.cast(x, dtypes_module.bool)
  return math_ops.reduce_any(x, axis=axis, keep_dims=keepdims)
def all(x, axis=None, keepdims=False):
  """Logical AND reduction (shadows the builtin)."""
  x = math_ops.cast(x, dtypes_module.bool)
  return math_ops.reduce_all(x, axis=axis, keep_dims=keepdims)
def argmax(x, axis=-1):
  return math_ops.argmax(x, axis)
def argmin(x, axis=-1):
  return math_ops.argmin(x, axis)
def square(x):
  return math_ops.square(x)
def abs(x):
  return math_ops.abs(x)
def sqrt(x):
  """Elementwise sqrt; negative inputs are clipped to 0 first."""
  zero = _to_tensor(0., x.dtype.base_dtype)
  inf = _to_tensor(np.inf, x.dtype.base_dtype)
  x = clip_ops.clip_by_value(x, zero, inf)
  return math_ops.sqrt(x)
def exp(x):
  return math_ops.exp(x)
def log(x):
  return math_ops.log(x)
def logsumexp(x, axis=None, keepdims=False):
  """Numerically stable log(sum(exp(x)))."""
  return math_ops.reduce_logsumexp(x, axis=axis, keep_dims=keepdims)
def round(x):
  return math_ops.round(x)
def sign(x):
  return math_ops.sign(x)
def pow(x, a):
  return math_ops.pow(x, a)
def clip(x, min_value, max_value):
  """Elementwise clip of `x` to [min_value, max_value].

  A `max_value` below `min_value` is raised to `min_value`; a None
  `max_value` means unbounded above.
  """
  if max_value is not None and max_value < min_value:
    max_value = min_value
  if max_value is None:
    max_value = np.inf
  min_value = _to_tensor(min_value, x.dtype.base_dtype)
  max_value = _to_tensor(max_value, x.dtype.base_dtype)
  return clip_ops.clip_by_value(x, min_value, max_value)
# Elementwise comparison / trig wrappers around tf math_ops.
def equal(x, y):
  return math_ops.equal(x, y)
def not_equal(x, y):
  return math_ops.not_equal(x, y)
def greater(x, y):
  return math_ops.greater(x, y)
def greater_equal(x, y):
  return math_ops.greater_equal(x, y)
def less(x, y):
  return math_ops.less(x, y)
def less_equal(x, y):
  return math_ops.less_equal(x, y)
def maximum(x, y):
  return math_ops.maximum(x, y)
def minimum(x, y):
  return math_ops.minimum(x, y)
def sin(x):
  return math_ops.sin(x)
def cos(x):
  return math_ops.cos(x)
def normalize_batch_in_training(x, gamma, beta, reduction_axes, epsilon=1e-3):
  """Computes batch mean/variance of `x` and applies batch normalization.

  Returns `(normed, mean, var)`. When the reduction axes are not the
  leading axes (the common case handled by the fast path), the moments
  and the gamma/beta parameters are reshaped so they broadcast against
  `x`.
  """
  mean, var = nn.moments(
      x, reduction_axes, shift=None, name=None, keep_dims=False)
  if sorted(reduction_axes) == list(range(ndim(x)))[:-1]:
    # Fast path: reducing over all but the last axis, shapes already align.
    normed = nn.batch_normalization(x, mean, var, beta, gamma, epsilon)
  else:
    # Broadcast path: rebuild a shape with 1s on the reduced axes.
    target_shape = []
    for axis in range(ndim(x)):
      if axis in reduction_axes:
        target_shape.append(1)
      else:
        target_shape.append(array_ops.shape(x)[axis])
    target_shape = array_ops.stack(target_shape)
    broadcast_mean = array_ops.reshape(mean, target_shape)
    broadcast_var = array_ops.reshape(var, target_shape)
    if gamma is None:
      broadcast_gamma = None
    else:
      broadcast_gamma = array_ops.reshape(gamma, target_shape)
    if beta is None:
      broadcast_beta = None
    else:
      broadcast_beta = array_ops.reshape(beta, target_shape)
    normed = nn.batch_normalization(x, broadcast_mean, broadcast_var,
                                    broadcast_beta, broadcast_gamma, epsilon)
  return normed, mean, var
def batch_normalization(x, mean, var, beta, gamma, epsilon=1e-3):
  """Applies batch normalization given precomputed mean/var."""
  return nn.batch_normalization(x, mean, var, beta, gamma, epsilon)
def concatenate(tensors, axis=-1):
  """Concatenates a list of tensors along `axis` (negative axes allowed).

  If every input is sparse the result stays sparse; otherwise all inputs
  are densified first.
  """
  if axis < 0:
    rank = ndim(tensors[0])
    if rank:
      axis %= rank
    else:
      axis = 0
  if py_all([is_sparse(x) for x in tensors]):
    return sparse_ops.sparse_concat(axis, tensors)
  else:
    return array_ops.concat([to_dense(x) for x in tensors], axis)
def reshape(x, shape):
  """Reshapes `x` to the given shape."""
  return array_ops.reshape(x, shape)
def permute_dimensions(x, pattern):
  """Permutes the axes of `x` according to `pattern`."""
  return array_ops.transpose(x, perm=pattern)
def resize_images(x, height_factor, width_factor, data_format):
  """Resizes a 4-D image batch by integer factors via nearest neighbor.

  `data_format` selects whether the spatial axes are (2, 3)
  ('channels_first') or (1, 2) ('channels_last').
  """
  if data_format == 'channels_first':
    original_shape = int_shape(x)
    new_shape = array_ops.shape(x)[2:]
    new_shape *= constant_op.constant(
        np.array([height_factor, width_factor]).astype('int32'))
    # resize_nearest_neighbor wants NHWC, so hop through channels_last.
    x = permute_dimensions(x, [0, 2, 3, 1])
    x = image_ops.resize_nearest_neighbor(x, new_shape)
    x = permute_dimensions(x, [0, 3, 1, 2])
    x.set_shape((None, None, original_shape[2] * height_factor
                 if original_shape[2] is not None else None,
                 original_shape[3] * width_factor
                 if original_shape[3] is not None else None))
    return x
  elif data_format == 'channels_last':
    original_shape = int_shape(x)
    new_shape = array_ops.shape(x)[1:3]
    new_shape *= constant_op.constant(
        np.array([height_factor, width_factor]).astype('int32'))
    x = image_ops.resize_nearest_neighbor(x, new_shape)
    x.set_shape((None, original_shape[1] * height_factor
                 if original_shape[1] is not None else None,
                 original_shape[2] * width_factor
                 if original_shape[2] is not None else None, None))
    return x
  else:
    raise ValueError('Invalid data_format:', data_format)
def resize_volumes(x, depth_factor, height_factor, width_factor, data_format):
  """Resizes a 5-D volume batch by repeating elements along each
  spatial axis."""
  if data_format == 'channels_first':
    output = repeat_elements(x, depth_factor, axis=2)
    output = repeat_elements(output, height_factor, axis=3)
    output = repeat_elements(output, width_factor, axis=4)
    return output
  elif data_format == 'channels_last':
    output = repeat_elements(x, depth_factor, axis=1)
    output = repeat_elements(output, height_factor, axis=2)
    output = repeat_elements(output, width_factor, axis=3)
    return output
  else:
    raise ValueError('Invalid data_format:', data_format)
def repeat_elements(x, rep, axis):
  """Repeats the elements of `x` `rep` times along `axis`
  (like np.repeat).

  A statically-known axis length uses split+concat; an unknown length
  falls back to expand/tile/reshape on the dynamic shape.
  """
  x_shape = x.get_shape().as_list()
  # For static axis we can use split.
  if x_shape[axis] is not None:
    splits = array_ops.split(value=x,
                             num_or_size_splits=x_shape[axis],
                             axis=axis)
    x_rep = [s for s in splits for _ in range(rep)]
    return concatenate(x_rep, axis)
  # Dynamic-axis fallback: insert a new axis after `axis`, tile it `rep`
  # times, then collapse it back into `axis`.
  auxiliary_axis = axis + 1
  x_shape = array_ops.shape(x)
  x_rep = array_ops.expand_dims(x, axis=auxiliary_axis)
  reps = np.ones(len(x.get_shape()) + 1)
  reps[auxiliary_axis] = rep
  x_rep = array_ops.tile(x_rep, reps)
  reps = np.delete(reps, auxiliary_axis)
  reps[axis] = rep
  reps = array_ops.constant(reps, dtype='int32')
  x_shape *= reps
  x_rep = array_ops.reshape(x_rep, x_shape)
  # Propagate the (partially-known) static shape plus Keras metadata.
  x_shape = x.get_shape().as_list()
  x_rep.set_shape(x_shape)
  x_rep._keras_shape = tuple(x_shape)
  return x_rep
def repeat(x, n):
  """Repeats a 2-D tensor: `(s, d)` becomes `(s, n, d)`."""
  assert ndim(x) == 2
  x = array_ops.expand_dims(x, 1)
  pattern = array_ops.stack([1, n, 1])
  return array_ops.tile(x, pattern)
def arange(start, stop=None, step=1, dtype='int32'):
  """Creates a 1-D tensor of evenly spaced values (like np.arange).

  With only a negative `start` and no `stop`, numpy-style, the range is
  empty: `start` is reset to 0 and used as the limit.
  """
  if stop is None and start < 0:
    start = 0
  result = math_ops.range(start, limit=stop, delta=step, name='arange')
  if dtype != 'int32':
    result = cast(result, dtype)
  return result
def tile(x, n):
  """Tiles `x` by `n` (an int is treated as a 1-element multiples list)."""
  if isinstance(n, int):
    n = [n]
  return array_ops.tile(x, n)
def flatten(x):
  """Flattens `x` into a 1-D tensor."""
  return array_ops.reshape(x, [-1])
def batch_flatten(x):
  """Flattens each sample: output is 2-D `(batch, -1)`."""
  x = array_ops.reshape(x, array_ops.stack([-1, prod(shape(x)[1:])]))
  return x
def expand_dims(x, axis=-1):
  """Adds a size-1 dimension at `axis`."""
  return array_ops.expand_dims(x, axis)
def squeeze(x, axis):
  """Removes the size-1 dimension at `axis`."""
  return array_ops.squeeze(x, [axis])
def temporal_padding(x, padding=(1, 1)):
  """Pads the middle (time) axis of a 3-D tensor."""
  assert len(padding) == 2
  pattern = [[0, 0], [padding[0], padding[1]], [0, 0]]
  return array_ops.pad(x, pattern)
def spatial_2d_padding(x, padding=((1, 1), (1, 1)), data_format=None):
  """Pads the two spatial axes of a 4-D tensor with zeros.

  `padding` is a pair of (before, after) pairs; `data_format` decides
  which axes are spatial.
  """
  assert len(padding) == 2
  assert len(padding[0]) == 2
  assert len(padding[1]) == 2
  if data_format is None:
    data_format = image_data_format()
  if data_format not in {'channels_first', 'channels_last'}:
    raise ValueError('Unknown data_format ' + str(data_format))
  if data_format == 'channels_first':
    pattern = [[0, 0], [0, 0], list(padding[0]), list(padding[1])]
  else:
    pattern = [[0, 0], list(padding[0]), list(padding[1]), [0, 0]]
  return array_ops.pad(x, pattern)
def spatial_3d_padding(x, padding=((1, 1), (1, 1), (1, 1)), data_format=None):
  """Pads the three spatial axes of a 5-D tensor with zeros."""
  assert len(padding) == 3
  assert len(padding[0]) == 2
  assert len(padding[1]) == 2
  assert len(padding[2]) == 2
  if data_format is None:
    data_format = image_data_format()
  if data_format not in {'channels_first', 'channels_last'}:
    raise ValueError('Unknown data_format ' + str(data_format))
  if data_format == 'channels_first':
    pattern = [[0, 0], [0, 0], [padding[0][0], padding[0][1]],
               [padding[1][0], padding[1][1]], [padding[2][0], padding[2][1]]]
  else:
    pattern = [[0, 0], [padding[0][0], padding[0][1]],
               [padding[1][0], padding[1][1]], [padding[2][0],
                                               padding[2][1]], [0, 0]]
  return array_ops.pad(x, pattern)
def stack(x, axis=0):
  """Stacks a list of rank-R tensors into one rank-(R+1) tensor."""
  return array_ops.stack(x, axis=axis)
def one_hot(indices, num_classes):
  """One-hot encodes `indices` into a trailing axis of `num_classes`."""
  return array_ops.one_hot(indices, depth=num_classes, axis=-1)
def reverse(x, axes):
  """Reverses `x` along the given axis or axes."""
  if isinstance(axes, int):
    axes = [axes]
  return array_ops.reverse(x, axes)
def get_value(x):
  """Evaluates a variable/tensor in the backend session."""
  return x.eval(session=get_session())
def batch_get_value(tensors):
  """Evaluates several tensors in one session.run call."""
  if tensors:
    return get_session().run(tensors)
  else:
    return []
def set_value(x, value):
  """Sets variable `x` to a numpy `value` via a cached assign op.

  The placeholder/assign pair is created once and memoized on the
  variable so repeated calls do not grow the graph.
  """
  value = np.asarray(value, dtype=dtype(x))
  # Strip dtype suffixes (e.g. 'float32_ref') down to the base name.
  tf_dtype = dtypes_module.as_dtype(x.dtype.name.split('_')[0])
  if hasattr(x, '_assign_placeholder'):
    assign_placeholder = x._assign_placeholder
    assign_op = x._assign_op
  else:
    assign_placeholder = array_ops.placeholder(tf_dtype, shape=value.shape)
    assign_op = x.assign(assign_placeholder)
    x._assign_placeholder = assign_placeholder
    x._assign_op = assign_op
  get_session().run(assign_op, feed_dict={assign_placeholder: value})
def batch_set_value(tuples):
  """Sets many variables at once; `tuples` is a list of (var, value).

  All assigns run in a single session.run call.
  """
  if tuples:
    assign_ops = []
    feed_dict = {}
    for x, value in tuples:
      value = np.asarray(value, dtype=dtype(x))
      tf_dtype = dtypes_module.as_dtype(x.dtype.name.split('_')[0])
      if hasattr(x, '_assign_placeholder'):
        assign_placeholder = x._assign_placeholder
        assign_op = x._assign_op
      else:
        assign_placeholder = array_ops.placeholder(tf_dtype, shape=value.shape)
        assign_op = x.assign(assign_placeholder)
        x._assign_placeholder = assign_placeholder
        x._assign_op = assign_op
      assign_ops.append(assign_op)
      feed_dict[assign_placeholder] = value
    get_session().run(assign_ops, feed_dict=feed_dict)
def print_tensor(x, message=''):
  """Identity op that also prints `message` and the value of `x`."""
  return logging_ops.Print(x, [x], message)
class Function(object):
  """Runs a computation graph: callable mapping input values to outputs.

  Wraps `session.run`: `outputs` are fetched, `updates` (ops or
  (variable, new_value) tuples) are grouped and executed alongside them.
  """
  def __init__(self, inputs, outputs, updates=None, name=None,
               **session_kwargs):
    updates = updates or []
    if not isinstance(inputs, (list, tuple)):
      raise TypeError('`inputs` to a TensorFlow backend function '
                      'should be a list or tuple.')
    if not isinstance(outputs, (list, tuple)):
      raise TypeError('`outputs` of a TensorFlow backend function '
                      'should be a list or tuple.')
    if not isinstance(updates, (list, tuple)):
      raise TypeError('`updates` in a TensorFlow backend function '
                      'should be a list or tuple.')
    self.inputs = list(inputs)
    self.outputs = list(outputs)
    # Force updates to run only after the outputs are computed.
    with ops.control_dependencies(self.outputs):
      updates_ops = []
      for update in updates:
        if isinstance(update, tuple):
          p, new_p = update
          updates_ops.append(state_ops.assign(p, new_p))
        else:
          updates_ops.append(update)
      self.updates_op = control_flow_ops.group(*updates_ops)
    self.name = name
    self.session_kwargs = session_kwargs
  def __call__(self, inputs):
    if not isinstance(inputs, (list, tuple)):
      raise TypeError('`inputs` should be a list or tuple.')
    feed_dict = {}
    for tensor, value in zip(self.inputs, inputs):
      if is_sparse(tensor):
        # Feed sparse placeholders as (indices, values, shape) triples.
        sparse_coo = value.tocoo()
        indices = np.concatenate((np.expand_dims(sparse_coo.row, 1),
                                  np.expand_dims(sparse_coo.col, 1)), 1)
        value = (indices, sparse_coo.data, sparse_coo.shape)
      feed_dict[tensor] = value
    session = get_session()
    updated = session.run(
        self.outputs + [self.updates_op],
        feed_dict=feed_dict,
        **self.session_kwargs)
    # Drop the trailing updates-op result; return only the real outputs.
    return updated[:len(self.outputs)]
def function(inputs, outputs, updates=None, **kwargs):
  """Instantiates a backend `Function`.

  Extra kwargs must be accepted either by `Session.run` or by
  `Function.__init__`; anything else is rejected up front.
  """
  if kwargs:
    for key in kwargs:
      if (key not in tf_inspect.getargspec(session_module.Session.run)[0] and
          key not in tf_inspect.getargspec(Function.__init__)[0]):
        msg = ('Invalid argument "%s" passed to K.function with Tensorflow '
               'backend') % key
        raise ValueError(msg)
  return Function(inputs, outputs, updates=updates, **kwargs)
def gradients(loss, variables):
  """Returns the gradients of `loss` w.r.t. `variables`."""
  return gradients_module.gradients(
      loss, variables, colocate_gradients_with_ops=True)
def stop_gradient(variables):
  """Returns tensor(s) with zero gradient w.r.t. every other variable.

  NOTE(review): for list input this returns `map(...)` — a lazy map
  object on Python 3, not a list; callers presumably iterate it once.
  Verify before relying on list semantics.
  """
  if isinstance(variables, (list, tuple)):
    return map(array_ops.stop_gradient, variables)
  return array_ops.stop_gradient(variables)
def rnn(step_function,
        inputs,
        initial_states,
        go_backwards=False,
        mask=None,
        constants=None,
        unroll=False):
  """Iterates `step_function` over the time dimension of `inputs`.

  Arguments:
      step_function: callable `(input_t, states) -> (output_t, new_states)`.
      inputs: tensor of shape `(samples, time, ...)` (at least 3D).
      initial_states: list of tensors holding the initial state.
      go_backwards: if True, iterate over the time dimension in reverse.
      mask: optional tensor of shape `(samples, time)` (or with a trailing
          singleton added here); where the mask is False the previous
          output/state is carried forward instead of the step result.
      constants: optional list of tensors passed unchanged to
          `step_function` at every step.
      unroll: if True, unroll the loop in the graph (requires a statically
          known number of timesteps); otherwise build a symbolic
          `while_loop`.

  Returns:
      Tuple `(last_output, outputs, new_states)` where `outputs` has shape
      `(samples, time, ...)`.

  Raises:
      ValueError: if `inputs` has rank < 3, if unrolling is requested
          without a static time dimension, or if a mask is used without
          initial states.
  """
  ndim = len(inputs.get_shape())
  if ndim < 3:
    raise ValueError('Input should be at least 3D.')
  axes = [1, 0] + list(range(2, ndim))
  # Move time to the leading axis: (time, samples, ...).
  inputs = array_ops.transpose(inputs, (axes))
  if mask is not None:
    if mask.dtype != dtypes_module.bool:
      mask = math_ops.cast(mask, dtypes_module.bool)
    if len(mask.get_shape()) == ndim - 1:
      mask = expand_dims(mask)
    mask = array_ops.transpose(mask, axes)
  if constants is None:
    constants = []
  # Module-level flag set as a side effect; read back at the end of rnn().
  global uses_learning_phase
  uses_learning_phase = False
  if unroll:
    if not inputs.get_shape()[0]:
      raise ValueError('Unrolling requires a ' 'fixed number of timesteps.')
    states = initial_states
    successive_states = []
    successive_outputs = []
    input_list = array_ops.unstack(inputs)
    if go_backwards:
      input_list.reverse()
    if mask is not None:
      mask_list = array_ops.unstack(mask)
      if go_backwards:
        mask_list.reverse()
      for inp, mask_t in zip(input_list, mask_list):
        output, new_states = step_function(inp, states + constants)
        if getattr(output, '_uses_learning_phase', False):
          uses_learning_phase = True
        # it just repeats the mask along its second dimension
        # n times.
        tiled_mask_t = array_ops.tile(mask_t,
                                      array_ops.stack(
                                          [1, array_ops.shape(output)[1]]))
        if not successive_outputs:
          prev_output = zeros_like(output)
        else:
          prev_output = successive_outputs[-1]
        # Masked timesteps keep the previous output.
        output = array_ops.where(tiled_mask_t, output, prev_output)
        return_states = []
        for state, new_state in zip(states, new_states):
          # (see earlier comment for tile explanation)
          tiled_mask_t = array_ops.tile(mask_t,
                                        array_ops.stack(
                                            [1,
                                             array_ops.shape(new_state)[1]]))
          return_states.append(array_ops.where(tiled_mask_t, new_state, state))
        states = return_states
        successive_outputs.append(output)
        successive_states.append(states)
      last_output = successive_outputs[-1]
      new_states = successive_states[-1]
      outputs = array_ops.stack(successive_outputs)
    else:
      for inp in input_list:
        output, states = step_function(inp, states + constants)
        if getattr(output, '_uses_learning_phase', False):
          uses_learning_phase = True
        successive_outputs.append(output)
        successive_states.append(states)
      last_output = successive_outputs[-1]
      new_states = successive_states[-1]
      outputs = array_ops.stack(successive_outputs)
  else:
    # Symbolic loop: stream timesteps through TensorArrays.
    if go_backwards:
      inputs = reverse(inputs, 0)
    states = tuple(initial_states)
    time_steps = array_ops.shape(inputs)[0]
    # One dry-run step only to discover the output dtype.
    outputs, _ = step_function(inputs[0], initial_states + constants)
    output_ta = tensor_array_ops.TensorArray(
        dtype=outputs.dtype, size=time_steps, tensor_array_name='output_ta')
    input_ta = tensor_array_ops.TensorArray(
        dtype=inputs.dtype, size=time_steps, tensor_array_name='input_ta')
    input_ta = input_ta.unstack(inputs)
    time = constant_op.constant(0, dtype='int32', name='time')
    if mask is not None:
      if not states:
        raise ValueError('No initial states provided! '
                         'When using masking in an RNN, you should '
                         'provide initial states '
                         '(and your step function should return '
                         'as its first state at time `t` '
                         'the output at time `t-1`).')
      if go_backwards:
        mask = reverse(mask, 0)
      mask_ta = tensor_array_ops.TensorArray(
          dtype=dtypes_module.bool,
          size=time_steps,
          tensor_array_name='mask_ta')
      mask_ta = mask_ta.unstack(mask)

      def _step(time, output_ta_t, *states):
        """RNN step function (masked variant).

        Arguments:
            time: Current timestep value.
            output_ta_t: TensorArray.
            *states: List of states.

        Returns:
            Tuple: `(time + 1,output_ta_t) + tuple(new_states)`
        """
        current_input = input_ta.read(time)
        mask_t = mask_ta.read(time)
        output, new_states = step_function(current_input,
                                           tuple(states) + tuple(constants))
        if getattr(output, '_uses_learning_phase', False):
          global uses_learning_phase  # pylint: disable=global-variable-undefined
          uses_learning_phase = True
        for state, new_state in zip(states, new_states):
          new_state.set_shape(state.get_shape())
        tiled_mask_t = array_ops.tile(mask_t,
                                      array_ops.stack(
                                          [1, array_ops.shape(output)[1]]))
        # Masked timestep: keep previous output (carried in states[0]) and
        # previous states.
        output = array_ops.where(tiled_mask_t, output, states[0])
        new_states = [
            array_ops.where(tiled_mask_t, new_states[i], states[i])
            for i in range(len(states))
        ]
        output_ta_t = output_ta_t.write(time, output)
        return (time + 1, output_ta_t) + tuple(new_states)
    else:

      def _step(time, output_ta_t, *states):
        """RNN step function (unmasked variant).

        Arguments:
            time: Current timestep value.
            output_ta_t: TensorArray.
            *states: List of states.

        Returns:
            Tuple: `(time + 1,output_ta_t) + tuple(new_states)`
        """
        current_input = input_ta.read(time)
        output, new_states = step_function(current_input,
                                           tuple(states) + tuple(constants))
        if getattr(output, '_uses_learning_phase', False):
          global uses_learning_phase  # pylint: disable=global-variable-undefined
          uses_learning_phase = True
        for state, new_state in zip(states, new_states):
          new_state.set_shape(state.get_shape())
        output_ta_t = output_ta_t.write(time, output)
        return (time + 1, output_ta_t) + tuple(new_states)

    final_outputs = control_flow_ops.while_loop(
        cond=lambda time, *_: time < time_steps,
        body=_step,
        loop_vars=(time, output_ta) + states,
        parallel_iterations=32,
        swap_memory=True)
    last_time = final_outputs[0]
    output_ta = final_outputs[1]
    new_states = final_outputs[2:]
    outputs = output_ta.stack()
    last_output = output_ta.read(last_time - 1)
  # Move time back to axis 1: (samples, time, ...).
  axes = [1, 0] + list(range(2, len(outputs.get_shape())))
  outputs = array_ops.transpose(outputs, axes)
  last_output._uses_learning_phase = uses_learning_phase
  return last_output, outputs, new_states
def switch(condition, then_expression, else_expression):
  """Switches between two operations depending on a scalar or tensor value.

  Arguments:
      condition: tensor (`int` or `bool`); scalar or broadcastable to the
          expressions' shape.
      then_expression: tensor, or callable returning a tensor.
      else_expression: tensor, or callable returning a tensor.

  Returns:
      The selected tensor.

  Raises:
      ValueError: if the rank of `condition` exceeds the rank of the
          expressions.
  """
  if condition.dtype != dtypes_module.bool:
    condition = math_ops.cast(condition, 'bool')
  cond_ndim = ndim(condition)
  if not cond_ndim:
    # Scalar condition: use tf.cond, which wants callables for each branch.
    if not callable(then_expression):

      def then_expression_fn():
        return then_expression
    else:
      then_expression_fn = then_expression
    if not callable(else_expression):

      def else_expression_fn():
        return else_expression
    else:
      else_expression_fn = else_expression
    x = control_flow_ops.cond(condition, then_expression_fn, else_expression_fn)
  else:
    # tf.where needs its condition tensor
    # to be the same shape as its two
    # result tensors
    if callable(then_expression):
      then_expression = then_expression()
    if callable(else_expression):
      else_expression = else_expression()
    expr_ndim = ndim(then_expression)
    if cond_ndim > expr_ndim:
      raise ValueError('Rank of `condition` should be less than or'
                       ' equal to rank of `then_expression` and '
                       '`else_expression`. ndim(condition)=' + str(cond_ndim) +
                       ', ndim(then_expression)'
                       '=' + str(expr_ndim))
    if cond_ndim > 1:
      # Pad the condition with trailing singleton dims, then tile it up to
      # the expressions' full shape so tf.where can broadcast element-wise.
      ndim_diff = expr_ndim - cond_ndim
      cond_shape = array_ops.concat(
          [array_ops.shape(condition), [1] * ndim_diff], axis=0)
      condition = array_ops.reshape(condition, cond_shape)
      expr_shape = array_ops.shape(then_expression)
      shape_diff = expr_shape - cond_shape
      tile_shape = array_ops.where(shape_diff > 0, expr_shape,
                                   array_ops.ones_like(expr_shape))
      condition = array_ops.tile(condition, tile_shape)
    x = array_ops.where(condition, then_expression, else_expression)
  return x
def in_train_phase(x, alt, training=None):
  """Selects `x` in train phase, and `alt` otherwise.

  Arguments:
      x: value (or callable returning a value) to use in training phase.
      alt: value (or callable returning a value) to use otherwise.
      training: optional learning-phase selector: a Python bool/int, or a
          scalar tensor. Defaults to the global `learning_phase()`.

  Returns:
      Either `x` or `alt` (called if callable), or a `switch` between them
      when `training` is symbolic.
  """
  if training is None:
    training = learning_phase()
    uses_learning_phase = True
  else:
    uses_learning_phase = False

  # `training` may be a plain Python value or a tensor. Compare with `==`
  # only for plain ints: `tensor == 1` would build a symbolic op rather than
  # a boolean. (The previous `training is 1` relied on CPython's small-int
  # caching and raises a SyntaxWarning on modern Python.)
  if training is True or (isinstance(training, int) and training == 1):
    if callable(x):
      return x()
    else:
      return x
  elif training is False or (isinstance(training, int) and training == 0):
    if callable(alt):
      return alt()
    else:
      return alt

  # else: assume learning phase is a placeholder tensor.
  x = switch(training, x, alt)
  if uses_learning_phase:
    x._uses_learning_phase = True
  return x
def in_test_phase(x, alt, training=None):
  """Selects `x` in test phase, and `alt` otherwise (mirror of
  `in_train_phase` with the arguments swapped)."""
  return in_train_phase(alt, x, training=training)
# NN OPERATIONS
def relu(x, alpha=0., max_value=None):
  """Rectified linear unit, optionally leaky and/or clipped.

  Arguments:
      x: input tensor.
      alpha: slope for negative inputs (0. gives a plain ReLU).
      max_value: optional saturation threshold.

  Returns:
      The activated tensor.
  """
  leaky = alpha != 0.
  if leaky:
    # Remember the negative part before it is zeroed out below.
    negative_part = nn.relu(-x)
  x = nn.relu(x)
  if max_value is not None:
    x = clip_ops.clip_by_value(x,
                               _to_tensor(0., x.dtype.base_dtype),
                               _to_tensor(max_value, x.dtype.base_dtype))
  if leaky:
    x -= _to_tensor(alpha, x.dtype.base_dtype) * negative_part
  return x
def elu(x, alpha=1.):
  """Exponential linear unit; the negative branch is scaled by `alpha`."""
  res = nn.elu(x)
  if alpha != 1:
    # Scale only the negative (exponential) part of the activation.
    res = array_ops.where(x > 0, res, alpha * res)
  return res
def softmax(x):
  """Softmax of a tensor (delegates to `nn.softmax`)."""
  return nn.softmax(x)
def softplus(x):
  """Softplus of a tensor (delegates to `nn.softplus`)."""
  return nn.softplus(x)
def softsign(x):
  """Softsign of a tensor (delegates to `nn.softsign`)."""
  return nn.softsign(x)
def categorical_crossentropy(target, output, from_logits=False):
  """Categorical crossentropy between an output tensor and a target tensor.

  Arguments:
      target: tensor of the same shape as `output`.
      output: tensor of predictions — probabilities, unless
          `from_logits=True`, in which case unnormalized logits.
      from_logits: whether `output` holds logits.

  Returns:
      Crossentropy tensor (per-sample, last axis reduced).
  """
  # Note: nn.softmax_cross_entropy_with_logits
  # expects logits, Keras expects probabilities.
  if not from_logits:
    # scale preds so that the class probas of each sample sum to 1
    output /= math_ops.reduce_sum(
        output, axis=len(output.get_shape()) - 1, keep_dims=True)
    # manual computation of crossentropy
    # Clip to avoid log(0).
    epsilon_ = _to_tensor(epsilon(), output.dtype.base_dtype)
    output = clip_ops.clip_by_value(output, epsilon_, 1. - epsilon_)
    return -math_ops.reduce_sum(
        target * math_ops.log(output),
        axis=len(output.get_shape()) - 1)
  else:
    return nn.softmax_cross_entropy_with_logits(labels=target, logits=output)
def sparse_categorical_crossentropy(target, output, from_logits=False):
  """Categorical crossentropy with integer (sparse) targets.

  Arguments:
      target: integer tensor of class indices.
      output: tensor of predictions — probabilities, unless
          `from_logits=True`, in which case unnormalized logits.
      from_logits: whether `output` holds logits.

  Returns:
      Crossentropy tensor.
  """
  # Note: nn.sparse_softmax_cross_entropy_with_logits
  # expects logits, Keras expects probabilities.
  if not from_logits:
    # Clip to avoid log(0), then convert probabilities back to log-space.
    epsilon_ = _to_tensor(epsilon(), output.dtype.base_dtype)
    output = clip_ops.clip_by_value(output, epsilon_, 1 - epsilon_)
    output = math_ops.log(output)
  output_shape = output.get_shape()
  targets = cast(flatten(target), 'int64')
  logits = array_ops.reshape(output, [-1, int(output_shape[-1])])
  res = nn.sparse_softmax_cross_entropy_with_logits(
      labels=targets, logits=logits)
  if len(output_shape) == 3:
    # if our output includes timesteps we need to reshape
    return array_ops.reshape(res, array_ops.shape(output)[:-1])
  else:
    return res
def binary_crossentropy(target, output, from_logits=False):
  """Binary crossentropy between an output tensor and a target tensor.

  Arguments:
      target: tensor of the same shape as `output`.
      output: sigmoid probabilities, unless `from_logits=True`, in which
          case raw logits.
      from_logits: whether `output` holds logits.

  Returns:
      Element-wise crossentropy tensor.
  """
  # Note: nn.softmax_cross_entropy_with_logits
  # expects logits, Keras expects probabilities.
  if not from_logits:
    # transform back to logits
    epsilon_ = _to_tensor(epsilon(), output.dtype.base_dtype)
    output = clip_ops.clip_by_value(output, epsilon_, 1 - epsilon_)
    output = math_ops.log(output / (1 - output))
  return nn.sigmoid_cross_entropy_with_logits(labels=target, logits=output)
def sigmoid(x):
  """Element-wise sigmoid (delegates to `nn.sigmoid`)."""
  return nn.sigmoid(x)
def hard_sigmoid(x):
  """Segment-wise linear approximation of sigmoid.

  Computes `clip(0.2 * x + 0.5, 0., 1.)`: faster than a true sigmoid.
  """
  shifted = (0.2 * x) + 0.5
  lower = _to_tensor(0., shifted.dtype.base_dtype)
  upper = _to_tensor(1., shifted.dtype.base_dtype)
  return clip_ops.clip_by_value(shifted, lower, upper)
def tanh(x):
  """Element-wise tanh (delegates to `nn.tanh`)."""
  return nn.tanh(x)
def dropout(x, level, noise_shape=None, seed=None):
  """Sets entries in `x` to zero at random (inverted dropout).

  Arguments:
      x: tensor.
      level: fraction of the entries to drop.
      noise_shape: shape for the randomly generated keep/drop flags.
      seed: random seed for determinism; drawn randomly when None.
  """
  retain_prob = 1. - level
  if seed is None:
    seed = np.random.randint(10e6)
  # the dummy 1. works around a TF bug
  # (float32_ref vs. float32 incompatibility)
  return nn.dropout(x * 1., retain_prob, noise_shape, seed=seed)
def l2_normalize(x, axis=None):
  """L2-normalizes `x` along the given axis (delegates to
  `nn.l2_normalize`, which uses the legacy `dim` keyword)."""
  return nn.l2_normalize(x, dim=axis)
def in_top_k(predictions, targets, k):
  """Returns whether each target is in the top `k` predictions
  (delegates to `nn.in_top_k`)."""
  return nn.in_top_k(predictions, targets, k)
# CONVOLUTIONS
def _preprocess_conv2d_input(x, data_format):
  """Adapts a 4D input to what the TF conv ops support.

  Returns `(x, tf_data_format)`: for `'channels_first'` inputs the tensor
  is either kept as-is with `'NCHW'` (when native NCHW kernels are
  available) or transposed to `'NHWC'`.
  """
  if data_format == 'channels_first':
    if _has_nchw_support():
      return x, 'NCHW'
    # No native NCHW kernels: fall back to the NHWC layout.
    return array_ops.transpose(x, (0, 2, 3, 1)), 'NHWC'  # NCHW -> NHWC
  return x, 'NHWC'
def _preprocess_conv3d_input(x, data_format):
  """Adapts a 5D input to what the TF conv ops support.

  Returns `(x, tf_data_format)`: for `'channels_first'` inputs the tensor
  is either kept as-is with `'NCDHW'` (when native NCHW-style kernels are
  available) or transposed to `'NDHWC'`.
  """
  if data_format == 'channels_first':
    if _has_nchw_support():
      return x, 'NCDHW'
    # No native channels-first kernels: fall back to channels-last layout.
    return array_ops.transpose(x, (0, 2, 3, 4, 1)), 'NDHWC'
  return x, 'NDHWC'
def _preprocess_padding(padding):
if padding == 'same':
padding = 'SAME'
elif padding == 'valid':
padding = 'VALID'
else:
raise ValueError('Invalid padding:', padding)
return padding
def conv1d(x,
           kernel,
           strides=1,
           padding='valid',
           data_format=None,
           dilation_rate=1):
  """1D convolution.

  Arguments:
      x: input tensor.
      kernel: kernel tensor.
      strides: integer stride.
      padding: `'same'`, `'causal'` or `'valid'`.
      data_format: `'channels_first'` or `'channels_last'`; defaults to the
          global image data format.
      dilation_rate: integer dilation rate.

  Returns:
      The convolved tensor.

  Raises:
      ValueError: on an unknown `data_format`.
  """
  if data_format is None:
    data_format = image_data_format()
  if data_format not in {'channels_first', 'channels_last'}:
    raise ValueError('Unknown data_format ' + str(data_format))
  kernel_shape = kernel.get_shape().as_list()
  if padding == 'causal':
    # causal (dilated) convolution:
    # left-pad so each output only depends on current and past timesteps,
    # then run an ordinary 'valid' convolution.
    left_pad = dilation_rate * (kernel_shape[0] - 1)
    x = temporal_padding(x, (left_pad, 0))
    padding = 'valid'
  padding = _preprocess_padding(padding)
  if data_format == 'channels_last':
    tf_data_format = 'NWC'
  else:
    tf_data_format = 'NCW'
  x = nn.convolution(
      input=x,
      filter=kernel,
      dilation_rate=(dilation_rate,),
      strides=(strides,),
      padding=padding,
      data_format=tf_data_format)
  return x
def conv2d(x,
           kernel,
           strides=(1, 1),
           padding='valid',
           data_format=None,
           dilation_rate=(1, 1)):
  """2D convolution.

  Arguments:
      x: input tensor.
      kernel: kernel tensor.
      strides: strides tuple.
      padding: `'same'` or `'valid'`.
      data_format: `'channels_first'` or `'channels_last'`; defaults to the
          global image data format.
      dilation_rate: dilation tuple.

  Returns:
      The convolved tensor, in the caller's `data_format`.

  Raises:
      ValueError: on an unknown `data_format`.
  """
  if data_format is None:
    data_format = image_data_format()
  if data_format not in {'channels_first', 'channels_last'}:
    raise ValueError('Unknown data_format ' + str(data_format))

  x, tf_data_format = _preprocess_conv2d_input(x, data_format)
  padding = _preprocess_padding(padding)
  x = nn.convolution(
      input=x,
      filter=kernel,
      dilation_rate=dilation_rate,
      strides=strides,
      padding=padding,
      data_format=tf_data_format)
  # Undo any layout fallback performed by _preprocess_conv2d_input.
  if data_format == 'channels_first' and tf_data_format == 'NHWC':
    x = array_ops.transpose(x, (0, 3, 1, 2))  # NHWC -> NCHW
  return x
def conv2d_transpose(x,
                     kernel,
                     output_shape,
                     strides=(1, 1),
                     padding='valid',
                     data_format=None):
  """2D transposed (deconvolution) convolution.

  Arguments:
      x: input tensor.
      kernel: kernel tensor.
      output_shape: 1D int tensor, or tuple/list, with the output shape.
      strides: strides tuple.
      padding: `'same'` or `'valid'`.
      data_format: `'channels_first'` or `'channels_last'`; defaults to the
          global image data format.

  Returns:
      The transposed-convolution output, in the caller's `data_format`.

  Raises:
      ValueError: on an unknown `data_format`.
  """
  if data_format is None:
    data_format = image_data_format()
  if data_format not in {'channels_first', 'channels_last'}:
    raise ValueError('Unknown data_format ' + str(data_format))
  if isinstance(output_shape, (tuple, list)):
    output_shape = array_ops.stack(output_shape)

  x, tf_data_format = _preprocess_conv2d_input(x, data_format)

  if data_format == 'channels_first' and tf_data_format == 'NHWC':
    # Reorder the requested output shape to match the NHWC fallback layout.
    output_shape = (output_shape[0], output_shape[2], output_shape[3],
                    output_shape[1])
  # NOTE(review): tuple/list inputs were stacked into a tensor above, so for
  # them `output_shape[0]` is a tensor and never `None` here — this branch
  # appears reachable only for inputs not covered by the stack above; verify.
  if output_shape[0] is None:
    output_shape = (array_ops.shape(x)[0],) + tuple(output_shape[1:])
    output_shape = array_ops.stack(list(output_shape))

  padding = _preprocess_padding(padding)
  if tf_data_format == 'NHWC':
    strides = (1,) + strides + (1,)
  else:
    strides = (1, 1) + strides

  x = nn.conv2d_transpose(
      x,
      kernel,
      output_shape,
      strides,
      padding=padding,
      data_format=tf_data_format)
  if data_format == 'channels_first' and tf_data_format == 'NHWC':
    x = array_ops.transpose(x, (0, 3, 1, 2))  # NHWC -> NCHW
  return x
def separable_conv2d(x,
                     depthwise_kernel,
                     pointwise_kernel,
                     strides=(1, 1),
                     padding='valid',
                     data_format=None,
                     dilation_rate=(1, 1)):
  """2D convolution with separable (depthwise then pointwise) filters.

  Arguments:
      x: input tensor.
      depthwise_kernel: per-channel convolution kernel.
      pointwise_kernel: 1x1 mixing kernel.
      strides: strides tuple.
      padding: `'same'` or `'valid'`.
      data_format: `'channels_first'` or `'channels_last'`; defaults to the
          global image data format.
      dilation_rate: dilation tuple.

  Returns:
      The output tensor, in the caller's `data_format`.

  Raises:
      ValueError: on an unknown `data_format`.
  """
  if data_format is None:
    data_format = image_data_format()
  if data_format not in {'channels_first', 'channels_last'}:
    raise ValueError('Unknown data_format ' + str(data_format))

  x, tf_data_format = _preprocess_conv2d_input(x, data_format)
  padding = _preprocess_padding(padding)
  if tf_data_format == 'NHWC':
    strides = (1,) + strides + (1,)
  else:
    strides = (1, 1) + strides

  x = nn.separable_conv2d(
      x,
      depthwise_kernel,
      pointwise_kernel,
      strides=strides,
      padding=padding,
      rate=dilation_rate,
      data_format=tf_data_format)
  if data_format == 'channels_first' and tf_data_format == 'NHWC':
    x = array_ops.transpose(x, (0, 3, 1, 2))  # NHWC -> NCHW
  return x
def depthwise_conv2d(x,
                     depthwise_kernel,
                     strides=(1, 1),
                     padding='valid',
                     data_format=None,
                     dilation_rate=(1, 1)):
  """2D depthwise convolution (each input channel convolved separately).

  Arguments:
      x: input tensor.
      depthwise_kernel: per-channel convolution kernel.
      strides: strides tuple.
      padding: `'same'` or `'valid'`.
      data_format: `'channels_first'` or `'channels_last'`; defaults to the
          global image data format.
      dilation_rate: dilation tuple.

  Returns:
      The output tensor, in the caller's `data_format`.

  Raises:
      ValueError: on an unknown `data_format`.
  """
  if data_format is None:
    data_format = image_data_format()
  if data_format not in {'channels_first', 'channels_last'}:
    raise ValueError('Unknown data_format ' + str(data_format))

  x, tf_data_format = _preprocess_conv2d_input(x, data_format)
  padding = _preprocess_padding(padding)
  if tf_data_format == 'NHWC':
    strides = (1,) + strides + (1,)
  else:
    strides = (1, 1) + strides

  x = nn.depthwise_conv2d(
      x,
      depthwise_kernel,
      strides=strides,
      padding=padding,
      rate=dilation_rate,
      data_format=tf_data_format)
  if data_format == 'channels_first' and tf_data_format == 'NHWC':
    x = array_ops.transpose(x, (0, 3, 1, 2))  # NHWC -> NCHW
  return x
def conv3d(x,
           kernel,
           strides=(1, 1, 1),
           padding='valid',
           data_format=None,
           dilation_rate=(1, 1, 1)):
  """3D convolution.

  Arguments:
      x: input tensor.
      kernel: kernel tensor.
      strides: strides tuple.
      padding: `'same'` or `'valid'`.
      data_format: `'channels_first'` or `'channels_last'`; defaults to the
          global image data format.
      dilation_rate: dilation tuple.

  Returns:
      The convolved tensor, in the caller's `data_format`.

  Raises:
      ValueError: on an unknown `data_format`.
  """
  if data_format is None:
    data_format = image_data_format()
  if data_format not in {'channels_first', 'channels_last'}:
    raise ValueError('Unknown data_format ' + str(data_format))

  x, tf_data_format = _preprocess_conv3d_input(x, data_format)
  padding = _preprocess_padding(padding)
  x = nn.convolution(
      input=x,
      filter=kernel,
      dilation_rate=dilation_rate,
      strides=strides,
      padding=padding,
      data_format=tf_data_format)
  # Undo any layout fallback performed by _preprocess_conv3d_input.
  if data_format == 'channels_first' and tf_data_format == 'NDHWC':
    x = array_ops.transpose(x, (0, 4, 1, 2, 3))
  return x
def conv3d_transpose(x,
                     kernel,
                     output_shape,
                     strides=(1, 1, 1),
                     padding='valid',
                     data_format=None):
  """3D transposed (deconvolution) convolution.

  Arguments:
      x: input tensor.
      kernel: kernel tensor.
      output_shape: 1D int tensor, or tuple/list, with the output shape.
      strides: strides tuple.
      padding: `'same'` or `'valid'`.
      data_format: `'channels_first'` or `'channels_last'`; defaults to the
          global image data format.

  Returns:
      The transposed-convolution output, in the caller's `data_format`.

  Raises:
      ValueError: on an unknown `data_format`.
  """
  if data_format is None:
    data_format = image_data_format()
  if data_format not in {'channels_first', 'channels_last'}:
    raise ValueError('Unknown data_format ' + str(data_format))
  if isinstance(output_shape, (tuple, list)):
    output_shape = array_ops.stack(output_shape)

  x, tf_data_format = _preprocess_conv3d_input(x, data_format)

  if data_format == 'channels_first' and tf_data_format == 'NDHWC':
    # Reorder the requested output shape to match the NDHWC fallback layout.
    output_shape = (output_shape[0], output_shape[2], output_shape[3],
                    output_shape[4], output_shape[1])
  # NOTE(review): tuple/list inputs were stacked into a tensor above, so for
  # them `output_shape[0]` is never `None` here; verify when this branch is
  # actually reachable.
  if output_shape[0] is None:
    output_shape = (array_ops.shape(x)[0],) + tuple(output_shape[1:])
    output_shape = array_ops.stack(list(output_shape))

  padding = _preprocess_padding(padding)
  if tf_data_format == 'NDHWC':
    strides = (1,) + strides + (1,)
  else:
    strides = (1, 1) + strides

  x = nn.conv3d_transpose(
      x,
      kernel,
      output_shape,
      strides,
      padding=padding,
      data_format=tf_data_format)
  if data_format == 'channels_first' and tf_data_format == 'NDHWC':
    x = array_ops.transpose(x, (0, 4, 1, 2, 3))
  return x
def pool2d(x,
           pool_size,
           strides=(1, 1),
           padding='valid',
           data_format=None,
           pool_mode='max'):
  """2D pooling.

  Arguments:
      x: input tensor.
      pool_size: tuple of 2 integers.
      strides: tuple of 2 integers.
      padding: `'same'` or `'valid'`.
      data_format: `'channels_first'` or `'channels_last'`; defaults to the
          global image data format.
      pool_mode: `'max'` or `'avg'`.

  Returns:
      The pooled tensor, in the caller's `data_format`.

  Raises:
      ValueError: on an unknown `data_format` or `pool_mode`.
  """
  if data_format is None:
    data_format = image_data_format()
  if data_format not in {'channels_first', 'channels_last'}:
    raise ValueError('Unknown data_format ' + str(data_format))

  x, tf_data_format = _preprocess_conv2d_input(x, data_format)
  padding = _preprocess_padding(padding)
  # Pad strides/pool_size with batch and channel dims for the TF op.
  if tf_data_format == 'NHWC':
    strides = (1,) + strides + (1,)
    pool_size = (1,) + pool_size + (1,)
  else:
    strides = (1, 1) + strides
    pool_size = (1, 1) + pool_size

  if pool_mode == 'max':
    x = nn.max_pool(
        x, pool_size, strides, padding=padding, data_format=tf_data_format)
  elif pool_mode == 'avg':
    x = nn.avg_pool(
        x, pool_size, strides, padding=padding, data_format=tf_data_format)
  else:
    raise ValueError('Invalid pooling mode:', pool_mode)

  if data_format == 'channels_first' and tf_data_format == 'NHWC':
    x = array_ops.transpose(x, (0, 3, 1, 2))  # NHWC -> NCHW
  return x
def pool3d(x,
           pool_size,
           strides=(1, 1, 1),
           padding='valid',
           data_format=None,
           pool_mode='max'):
  """3D pooling.

  Arguments:
      x: input tensor.
      pool_size: tuple of 3 integers.
      strides: tuple of 3 integers.
      padding: `'same'` or `'valid'`.
      data_format: `'channels_first'` or `'channels_last'`; defaults to the
          global image data format.
      pool_mode: `'max'` or `'avg'`.

  Returns:
      The pooled tensor, in the caller's `data_format`.

  Raises:
      ValueError: on an unknown `data_format` or `pool_mode`.
  """
  if data_format is None:
    data_format = image_data_format()
  if data_format not in {'channels_first', 'channels_last'}:
    raise ValueError('Unknown data_format ' + str(data_format))

  x, tf_data_format = _preprocess_conv3d_input(x, data_format)
  padding = _preprocess_padding(padding)
  # Pad strides/pool_size with batch and channel dims for the TF op.
  if tf_data_format == 'NDHWC':
    strides = (1,) + strides + (1,)
    pool_size = (1,) + pool_size + (1,)
  else:
    strides = (1, 1) + strides
    pool_size = (1, 1) + pool_size

  if pool_mode == 'max':
    x = nn.max_pool3d(
        x, pool_size, strides, padding=padding, data_format=tf_data_format)
  elif pool_mode == 'avg':
    x = nn.avg_pool3d(
        x, pool_size, strides, padding=padding, data_format=tf_data_format)
  else:
    raise ValueError('Invalid pooling mode:', pool_mode)

  if data_format == 'channels_first' and tf_data_format == 'NDHWC':
    x = array_ops.transpose(x, (0, 4, 1, 2, 3))
  return x
def local_conv1d(inputs, kernel, kernel_size, strides, data_format=None):
  """1D convolution with unshared weights (one kernel per output position).

  Arguments:
      inputs: 3D tensor `(batch_size, steps, input_dim)`.
      kernel: unshared weights, shape
          `(output_length, feature_dim, filters)`.
      kernel_size: tuple of a single integer (1D window length).
      strides: tuple of a single integer.
      data_format: `'channels_first'` or `'channels_last'`; defaults to the
          global image data format.

  Returns:
      3D tensor `(batch_size, output_length, filters)`.

  Raises:
      ValueError: on an unknown `data_format`.
  """
  if data_format is None:
    data_format = image_data_format()
  if data_format not in {'channels_first', 'channels_last'}:
    raise ValueError('Unknown data_format ' + str(data_format))

  stride = strides[0]
  kernel_shape = int_shape(kernel)
  output_length = kernel_shape[0]
  feature_dim = kernel_shape[1]

  xs = []
  # Extract one flattened input patch per output position.
  for i in range(output_length):
    slice_length = slice(i * stride, i * stride + kernel_size[0])
    xs.append(reshape(inputs[:, slice_length, :], (1, -1, feature_dim)))
  x_aggregate = concatenate(xs, axis=0)
  # Shape: `(output_length, batch_size, filters)`.
  output = batch_dot(x_aggregate, kernel)
  return permute_dimensions(output, (1, 0, 2))
def local_conv2d(inputs,
                 kernel,
                 kernel_size,
                 strides,
                 output_shape,
                 data_format=None):
  """2D convolution with unshared weights (one kernel per output position).

  Arguments:
      inputs: 4D tensor, channels-first or channels-last per `data_format`.
      kernel: unshared weights, shape
          `(output_items, feature_dim, filters)`.
      kernel_size: tuple of 2 integers (height/width of the window).
      strides: tuple of 2 integers.
      output_shape: tuple `(output_row, output_col)`.
      data_format: `'channels_first'` or `'channels_last'`; defaults to the
          global image data format.

  Returns:
      4D tensor in the caller's `data_format`.

  Raises:
      ValueError: on an unknown `data_format`.
  """
  if data_format is None:
    data_format = image_data_format()
  if data_format not in {'channels_first', 'channels_last'}:
    raise ValueError('Unknown data_format ' + str(data_format))

  stride_row, stride_col = strides
  output_row, output_col = output_shape
  kernel_shape = int_shape(kernel)
  feature_dim = kernel_shape[1]
  filters = kernel_shape[2]

  xs = []
  # Extract one flattened input patch per spatial output position.
  for i in range(output_row):
    for j in range(output_col):
      slice_row = slice(i * stride_row, i * stride_row + kernel_size[0])
      slice_col = slice(j * stride_col, j * stride_col + kernel_size[1])
      if data_format == 'channels_first':
        xs.append(
            reshape(inputs[:, :, slice_row, slice_col], (1, -1, feature_dim)))
      else:
        xs.append(
            reshape(inputs[:, slice_row, slice_col, :], (1, -1, feature_dim)))

  x_aggregate = concatenate(xs, axis=0)
  output = batch_dot(x_aggregate, kernel)
  output = reshape(output, (output_row, output_col, -1, filters))

  # Move batch back to the front and restore the requested layout.
  if data_format == 'channels_first':
    output = permute_dimensions(output, (2, 3, 0, 1))
  else:
    output = permute_dimensions(output, (2, 0, 1, 3))
  return output
def bias_add(x, bias, data_format=None):
  """Adds a bias vector (or full bias tensor) to a tensor.

  Arguments:
      x: tensor of rank 3, 4, 5 (or anything `nn.bias_add` handles).
      bias: bias to add; either 1D (per-channel) or of rank `ndim(x) - 1`.
      data_format: `'channels_first'` or `'channels_last'`; defaults to the
          global image data format.

  Returns:
      The biased tensor.

  Raises:
      ValueError: on an unknown `data_format` or unexpected bias rank.
  """
  if data_format is None:
    data_format = image_data_format()
  if data_format not in {'channels_first', 'channels_last'}:
    raise ValueError('Unknown data_format ' + str(data_format))
  bias_shape = int_shape(bias)
  if len(bias_shape) != 1 and len(bias_shape) != ndim(x) - 1:
    raise ValueError(
        'Unexpected bias dimensions %d, expect to be 1 or %d dimensions' %
        (len(bias_shape), ndim(x)))
  # For each rank, reshape the bias so broadcasting lines the channel axis
  # up with the requested data format.
  if ndim(x) == 5:
    if data_format == 'channels_first':
      if len(bias_shape) == 1:
        x += reshape(bias, (1, bias_shape[0], 1, 1, 1))
      else:
        x += reshape(bias, (1, bias_shape[3]) + bias_shape[:3])
    elif data_format == 'channels_last':
      if len(bias_shape) == 1:
        x += reshape(bias, (1, 1, 1, bias_shape[0]))
      else:
        x += reshape(bias, (1,) + bias_shape)
  elif ndim(x) == 4:
    if data_format == 'channels_first':
      if len(bias_shape) == 1:
        x += reshape(bias, (1, bias_shape[0], 1, 1))
      else:
        x += reshape(bias, (1, bias_shape[2]) + bias_shape[:2])
    elif data_format == 'channels_last':
      if len(bias_shape) == 1:
        # Fast path: fused TF op for the common NHWC 1D-bias case.
        x = nn.bias_add(x, bias, data_format='NHWC')
      else:
        x += reshape(bias, (1,) + bias_shape)
  elif ndim(x) == 3:
    if data_format == 'channels_first':
      if len(bias_shape) == 1:
        x += reshape(bias, (1, bias_shape[0], 1))
      else:
        x += reshape(bias, (1, bias_shape[1], bias_shape[0]))
    elif data_format == 'channels_last':
      if len(bias_shape) == 1:
        x += reshape(bias, (1, 1, bias_shape[0]))
      else:
        x += reshape(bias, (1,) + bias_shape)
  else:
    x = nn.bias_add(x, bias)
  return x
# RANDOMNESS
def random_normal(shape, mean=0.0, stddev=1.0, dtype=None, seed=None):
  """Returns a tensor of normally distributed values.

  `dtype` defaults to `floatx()`; when `seed` is None a seed is drawn
  randomly so repeated calls are not identical.
  """
  if dtype is None:
    dtype = floatx()
  if seed is None:
    seed = np.random.randint(10e6)
  return random_ops.random_normal(
      shape, mean=mean, stddev=stddev, dtype=dtype, seed=seed)
def random_uniform(shape, minval=0.0, maxval=1.0, dtype=None, seed=None):
  """Returns a tensor of uniformly distributed values in [minval, maxval).

  `dtype` defaults to `floatx()`; when `seed` is None a seed is drawn
  randomly so repeated calls are not identical.
  """
  if dtype is None:
    dtype = floatx()
  if seed is None:
    seed = np.random.randint(10e6)
  return random_ops.random_uniform(
      shape, minval=minval, maxval=maxval, dtype=dtype, seed=seed)
def random_binomial(shape, p=0.0, dtype=None, seed=None):
  """Returns a tensor of 0/1 values, with probability `p` of drawing 1.

  `dtype` defaults to `floatx()`; when `seed` is None a seed is drawn
  randomly so repeated calls are not identical.
  """
  if dtype is None:
    dtype = floatx()
  if seed is None:
    seed = np.random.randint(10e6)
  # Threshold a uniform sample at p: <= p -> 1, otherwise 0.
  return array_ops.where(
      random_ops.random_uniform(shape, dtype=dtype, seed=seed) <= p,
      array_ops.ones(shape, dtype=dtype), array_ops.zeros(shape, dtype=dtype))
def truncated_normal(shape, mean=0.0, stddev=1.0, dtype=None, seed=None):
  """Returns a tensor of truncated-normal values (via
  `random_ops.truncated_normal`).

  `dtype` defaults to `floatx()`; when `seed` is None a seed is drawn
  randomly so repeated calls are not identical.
  """
  if dtype is None:
    dtype = floatx()
  if seed is None:
    seed = np.random.randint(10e6)
  return random_ops.truncated_normal(
      shape, mean, stddev, dtype=dtype, seed=seed)
# CTC
# TensorFlow has a native implementation, but it uses sparse tensors
# and therefore requires a wrapper for Keras. The functions below convert
# dense to sparse tensors and also wraps up the beam search code that is
# in TensorFlow's CTC implementation
def ctc_label_dense_to_sparse(labels, label_lengths):
  """Converts dense CTC labels to a `SparseTensor`.

  Arguments:
      labels: dense tensor of shape `(batch, max_label_length)`.
      label_lengths: per-sample number of valid entries in `labels`.

  Returns:
      A `SparseTensor` containing, for each row, only its first
      `label_lengths[i]` entries.
  """
  label_shape = array_ops.shape(labels)
  num_batches_tns = array_ops.stack([label_shape[0]])
  max_num_labels_tns = array_ops.stack([label_shape[1]])

  def range_less_than(_, current_input):
    # One row of the validity mask: positions < current_input are valid.
    return array_ops.expand_dims(
        math_ops.range(label_shape[1]), 0) < array_ops.fill(
            max_num_labels_tns, current_input)

  init = math_ops.cast(
      array_ops.fill([1, label_shape[1]], 0), dtypes_module.bool)
  # Boolean mask of shape (batch, max_label_length): True where valid.
  dense_mask = functional_ops.scan(
      range_less_than, label_lengths, initializer=init, parallel_iterations=1)
  dense_mask = dense_mask[:, 0, :]

  label_array = array_ops.reshape(
      array_ops.tile(math_ops.range(0, label_shape[1]), num_batches_tns),
      label_shape)
  label_ind = array_ops.boolean_mask(label_array, dense_mask)

  batch_array = array_ops.transpose(
      array_ops.reshape(
          array_ops.tile(math_ops.range(0, label_shape[0]), max_num_labels_tns),
          reverse(label_shape, 0)))
  batch_ind = array_ops.boolean_mask(batch_array, dense_mask)
  # (batch_index, label_index) pairs for every kept entry.
  indices = array_ops.transpose(
      array_ops.reshape(concatenate([batch_ind, label_ind], axis=0), [2, -1]))

  vals_sparse = array_ops.gather_nd(labels, indices)

  return sparse_tensor.SparseTensor(
      math_ops.to_int64(indices), vals_sparse, math_ops.to_int64(label_shape))
def ctc_batch_cost(y_true, y_pred, input_length, label_length):
  """Runs CTC loss on each batch element.

  Arguments:
      y_true: tensor `(samples, max_string_length)` of dense labels.
      y_pred: tensor `(samples, time_steps, num_categories)` of softmax
          outputs.
      input_length: tensor `(samples, 1)` of prediction lengths.
      label_length: tensor `(samples, 1)` of label lengths.

  Returns:
      Tensor `(samples, 1)` of per-sample CTC loss.
  """
  label_length = math_ops.to_int32(array_ops.squeeze(label_length))
  input_length = math_ops.to_int32(array_ops.squeeze(input_length))
  sparse_labels = math_ops.to_int32(
      ctc_label_dense_to_sparse(y_true, label_length))

  # ctc_loss is time-major; also move to log-space (1e-8 avoids log(0)).
  y_pred = math_ops.log(array_ops.transpose(y_pred, perm=[1, 0, 2]) + 1e-8)

  return array_ops.expand_dims(
      ctc.ctc_loss(
          inputs=y_pred, labels=sparse_labels, sequence_length=input_length), 1)
def ctc_decode(y_pred, input_length, greedy=True, beam_width=100, top_paths=1):
  """Decodes the output of a softmax using CTC.

  Arguments:
      y_pred: tensor `(samples, time_steps, num_categories)` of softmax
          outputs.
      input_length: tensor `(samples,)` of sequence lengths.
      greedy: if True use greedy (best-path) decoding, else beam search.
      beam_width: beam width when `greedy` is False.
      top_paths: number of paths to return when `greedy` is False.

  Returns:
      Tuple `(decoded_dense, log_prob)` where `decoded_dense` is a list of
      dense decoded-sequence tensors padded with -1.
  """
  # ctc decoders are time-major; also move to log-space (1e-8 avoids log(0)).
  y_pred = math_ops.log(array_ops.transpose(y_pred, perm=[1, 0, 2]) + 1e-8)
  input_length = math_ops.to_int32(input_length)

  if greedy:
    (decoded, log_prob) = ctc.ctc_greedy_decoder(
        inputs=y_pred, sequence_length=input_length)
  else:
    (decoded, log_prob) = ctc.ctc_beam_search_decoder(
        inputs=y_pred,
        sequence_length=input_length,
        beam_width=beam_width,
        top_paths=top_paths)
  decoded_dense = [
      sparse_ops.sparse_to_dense(
          st.indices, st.dense_shape, st.values, default_value=-1)
      for st in decoded
  ]
  return (decoded_dense, log_prob)
def map_fn(fn, elems, name=None, dtype=None):
  """Maps `fn` over `elems` (delegates to `functional_ops.map_fn`)."""
  return functional_ops.map_fn(fn, elems, name=name, dtype=dtype)
def foldl(fn, elems, initializer=None, name=None):
  """Reduces `elems` left-to-right with `fn` (delegates to
  `functional_ops.foldl`)."""
  return functional_ops.foldl(fn, elems, initializer=initializer, name=name)
def foldr(fn, elems, initializer=None, name=None):
  """Reduces `elems` right-to-left with `fn` (delegates to
  `functional_ops.foldr`)."""
  return functional_ops.foldr(fn, elems, initializer=initializer, name=name)
# Bootstrap Keras-style configuration from ~/.keras/keras.json if present,
# and write a default config file on first run (best effort).
_keras_base_dir = os.path.expanduser('~')
_keras_dir = os.path.join(_keras_base_dir, '.keras')
_config_path = os.path.expanduser(os.path.join(_keras_dir, 'keras.json'))
if os.path.exists(_config_path):
  try:
    # Use a context manager so the file handle is closed deterministically
    # (the previous code leaked the handle returned by open()).
    with open(_config_path) as _config_file:
      _config = json.load(_config_file)
  except ValueError:
    # Corrupt or empty JSON: fall back to built-in defaults.
    _config = {}
  _floatx = _config.get('floatx', floatx())
  assert _floatx in {'float16', 'float32', 'float64'}
  _epsilon = _config.get('epsilon', epsilon())
  assert isinstance(_epsilon, float)
  _image_data_format = _config.get('image_data_format', image_data_format())
  assert _image_data_format in {'channels_last', 'channels_first'}
  set_floatx(_floatx)
  set_epsilon(_epsilon)
  set_image_data_format(_image_data_format)

if not os.path.exists(_keras_dir):
  try:
    os.makedirs(_keras_dir)
  except OSError:
    # Directory may have appeared concurrently, or the home dir may be
    # read-only; either way, importing must not fail.
    pass

if not os.path.exists(_config_path):
  _config = {
      'floatx': floatx(),
      'epsilon': epsilon(),
      'backend': 'tensorflow',
      'image_data_format': image_data_format()
  }
  try:
    with open(_config_path, 'w') as f:
      f.write(json.dumps(_config, indent=4))
  except IOError:
    # Best effort only: an unwritable config path must not break imports.
    pass
| true | true |
f7f5832af9f1ee27321f865cb69e9f0c465c1b73 | 1,352 | py | Python | Python3/config.py | RealKevinApetrei/GUI-Hangman | d2fef126be60f012a455af3f69ab1a1f1f7004c6 | [
"MIT"
] | 1 | 2020-12-07T21:28:01.000Z | 2020-12-07T21:28:01.000Z | Python3/config.py | RealKevinApetrei/GUI-Hangman | d2fef126be60f012a455af3f69ab1a1f1f7004c6 | [
"MIT"
] | null | null | null | Python3/config.py | RealKevinApetrei/GUI-Hangman | d2fef126be60f012a455af3f69ab1a1f1f7004c6 | [
"MIT"
] | null | null | null | # DEVELOPER CONFIG (EDIT NOT RECOMMENDED)
BUILD_VERSION = "v1.13.23-public"
AUTHOR = "Kevin Apetrei"
PROGRAM_NAME = "GUI Hangman"
TOTAL_GUESSES_ALLOWED = 5
BLANK = "_____"
"""
CREDITS:
'back.png' Icon made by 'Kiranshastry' from www.flaticon.com (24px)
GUI HANGMAN
"""
"""
MIT License
Copyright (c) 2020 Kevin Apetrei
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
""" | 33.8 | 78 | 0.784024 |
BUILD_VERSION = "v1.13.23-public"
AUTHOR = "Kevin Apetrei"
PROGRAM_NAME = "GUI Hangman"
TOTAL_GUESSES_ALLOWED = 5
BLANK = "_____"
| true | true |
f7f583415c6f1c00760c8f767031302a8aeb35de | 500 | py | Python | data/scripts/templates/object/tangible/loot/collectible/collectible_parts/shared_blue_rug_thread_07.py | obi-two/GameServer | 7d37024e2291a97d49522610cd8f1dbe5666afc2 | [
"MIT"
] | 20 | 2015-02-23T15:11:56.000Z | 2022-03-18T20:56:48.000Z | data/scripts/templates/object/tangible/loot/collectible/collectible_parts/shared_blue_rug_thread_07.py | apathyboy/swganh | 665128efe9154611dec4cb5efc61d246dd095984 | [
"MIT"
] | null | null | null | data/scripts/templates/object/tangible/loot/collectible/collectible_parts/shared_blue_rug_thread_07.py | apathyboy/swganh | 665128efe9154611dec4cb5efc61d246dd095984 | [
"MIT"
] | 20 | 2015-04-04T16:35:59.000Z | 2022-03-24T14:54:37.000Z | #### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/loot/collectible/collectible_parts/shared_blue_rug_thread_07.iff"
result.attribute_template_id = -1
result.stfName("collectible_loot_items_n","blue_rug_thread_07")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result | 29.411765 | 101 | 0.752 | true | true | |
f7f5838ae0f5ee4578be1ef08bf5040b2c666c8e | 5,656 | py | Python | tensormonk/architectures/trees.py | Tensor46/TensorMONK | 67617d3fdf8fde072ba9cab42de7d67c79b17494 | [
"MIT"
] | 29 | 2018-07-06T23:57:23.000Z | 2022-03-08T20:38:57.000Z | tensormonk/architectures/trees.py | Johnson-yue/TensorMONK | 1785132b82c685c3b3fc05b00dec46b1fccfc948 | [
"MIT"
] | 3 | 2018-12-14T22:21:26.000Z | 2020-06-19T02:13:34.000Z | tensormonk/architectures/trees.py | Johnson-yue/TensorMONK | 1785132b82c685c3b3fc05b00dec46b1fccfc948 | [
"MIT"
] | 8 | 2018-07-06T23:58:03.000Z | 2021-04-12T01:35:54.000Z | """ TensorMONK :: architectures """
import torch
import torch.nn as nn
import numpy as np
import torch.nn.functional as F
from ..layers import Linear
class NeuralTree(nn.Module):
    r""" A neural tree for Neural Decision Forest!

    Args:
        tensor_size: shape of 2D/4D tensor
            2D - (None/any integer, features)
            4D - (None/any integer, channels, height, width)
        n_labels: number of labels or classes
        depth: depth of trees
            Ex: linear layer indices for a tree of depth = 2
                     0
                 1       2
               3   4   5   6
            a linear requries 7 output neurons (2**(depth+1) - 1)
        dropout: 0. - 1., default = 0.2
        network = any custom torch module can be used to produce leaf outputs
            (must have output neurons of length 2**(depth+1)-1)
            when None, linear + relu + dropout + linear + sigm

    Return:
        decision (a torch.Tensor), predictions (a torch.Tensor)
    """
    def __init__(self,
                 tensor_size: tuple,
                 n_labels: int,
                 depth: int,
                 dropout: float = 0.2,
                 network: torch.nn.Module = None):
        super(NeuralTree, self).__init__()

        assert depth > 0, \
            "NeuralTree :: depth must be > 0, given {}".format(depth)

        self.tensor_size = tensor_size
        self.n_labels = n_labels
        self.depth = depth
        # Number of leaves; the network emits n_leafs - 1 internal decisions.
        self.n_leafs = 2**(depth+1)
        # dividing the linear output to decisions at different levels
        self.decision_per_depth = [2**x for x in range(depth+1)]
        # their indices
        self.indices_per_depth = [list(range(y-x, max(1, y))) for x, y in
                                  zip(self.decision_per_depth,
                                      np.cumsum(self.decision_per_depth))]
        # an example - can be any number of layers
        hidden = (self.n_leafs+1)*4
        # Custom `network` (if given) replaces the default 2-layer MLP; it
        # must output n_leafs - 1 sigmoid-like decision values.
        self.tree = nn.Sequential(
            Linear(tensor_size, hidden, "relu", dropout),
            Linear((None, hidden), self.n_leafs-1, "sigm", dropout)) \
            if network is None else network
        # Per-leaf class logits; softmax-ed over labels in forward().
        self.weight = nn.Parameter(torch.randn(self.n_leafs, n_labels))
        self.weight.data.normal_(0, 0.02)
        self.tensor_size = (None, n_labels)

    def forward(self, tensor):
        """Routes `tensor` down the tree.

        Returns `(decision, predictions)`: per-leaf routing probabilities of
        shape (batch, n_leafs) and class predictions of shape
        (batch, n_labels).
        """
        if tensor.dim() > 2:
            tensor = tensor.view(tensor.size(0), -1)
        BSZ = tensor.size(0)

        # get all leaf responses -- a simple linear layer
        leaf_responses = self.tree(tensor)
        # compute decisions from the final depth
        decision = leaf_responses[:, self.indices_per_depth[0]]
        for x in self.indices_per_depth[1:]:
            decision = decision.unsqueeze(2)
            # true and false of last depth
            decision = torch.cat((decision, 1 - decision), 2).view(BSZ, -1)
            # current depth decisions
            decision = decision.mul(leaf_responses[:, x])
        # Expand the deepest decisions into per-leaf probabilities.
        decision = decision.unsqueeze(2)
        decision = torch.cat((decision, 1 - decision), 2).view(BSZ, -1)

        # predictions
        # Weight each leaf's class distribution by its routing probability.
        predictions = decision.unsqueeze(2)
        predictions = predictions.mul(F.softmax(self.weight, 1).unsqueeze(0))
        return decision, predictions.sum(1)
# from tensormonk.layers import Linear
# test = NeuralTree((1, 64), 12, 4)
# decision, predictions = test(torch.randn(1, 64))
# decision.shape
# predictions.shape
class NeuralDecisionForest(nn.Module):
    r"""Neural Decision Forest!

    An ensemble of :class:`NeuralTree` modules, a version of
    https://ieeexplore.ieee.org/document/7410529

    Args:
        tensor_size: shape of 2D/4D tensor
            2D - (None/any integer, features)
            4D - (None/any integer, channels, height, width)
        n_labels: number of labels or classes
        n_trees: number of trees
        depth: depth of each tree (see NeuralTree)
        dropout: 0. - 1., default = 0.2
        network = any custom torch module can be used to produce leaf outputs
            (must have output neurons of length 2**(depth+1)-1)
            when None, linear + relu + dropout + linear + sigm

    Return:
        decision (a torch.Tensor), predictions (a torch.Tensor)
    """
    def __init__(self,
                 tensor_size: tuple,
                 n_labels: int,
                 n_trees: int,
                 depth: int,
                 dropout: float = 0.2,
                 network: torch.nn.Module = None):
        super(NeuralDecisionForest, self).__init__()
        forest = [NeuralTree(tensor_size, n_labels, depth, dropout, network)
                  for _ in range(n_trees)]
        self.trees = nn.ModuleList(forest)
        self.tensor_size = self.trees[0].tensor_size

    def forward(self, tensor):
        # flatten 4D inputs to (batch, features)
        if tensor.dim() > 2:
            tensor = tensor.view(tensor.size(0), -1)

        # run every tree and collect its (decision, prediction) pair
        outputs = [tree(tensor) for tree in self.trees]
        decisions = torch.cat([dec for dec, _ in outputs], 1)
        predictions = torch.cat([pred.unsqueeze(2) for _, pred in outputs], 2)
        # average tree predictions in probability space, return log-probs
        return decisions[:, 0::2], predictions.mean(2).log()
# test = NeuralDecisionForest((1, 256), 6, 8, 5)
# decisions, predictions = test(torch.rand(1, 256))
# %timeit decisions, predictions = test(torch.rand(1, 256))
# np.sum([p.numel() for p in test.parameters()])
# decisions.shape
| 37.456954 | 77 | 0.571075 |
import torch
import torch.nn as nn
import numpy as np
import torch.nn.functional as F
from ..layers import Linear
class NeuralTree(nn.Module):
def __init__(self,
tensor_size: tuple,
n_labels: int,
depth: int,
dropout: float = 0.2,
network: torch.nn.Module = None):
super(NeuralTree, self).__init__()
assert depth > 0, \
"NeuralTree :: depth must be > 0, given {}".format(depth)
self.tensor_size = tensor_size
self.n_labels = n_labels
self.depth = depth
self.n_leafs = 2**(depth+1)
self.decision_per_depth = [2**x for x in range(depth+1)]
self.indices_per_depth = [list(range(y-x, max(1, y))) for x, y in
zip(self.decision_per_depth,
np.cumsum(self.decision_per_depth))]
hidden = (self.n_leafs+1)*4
self.tree = nn.Sequential(
Linear(tensor_size, hidden, "relu", dropout),
Linear((None, hidden), self.n_leafs-1, "sigm", dropout)) \
if network is None else network
self.weight = nn.Parameter(torch.randn(self.n_leafs, n_labels))
self.weight.data.normal_(0, 0.02)
self.tensor_size = (None, n_labels)
def forward(self, tensor):
if tensor.dim() > 2:
tensor = tensor.view(tensor.size(0), -1)
BSZ = tensor.size(0)
leaf_responses = self.tree(tensor)
decision = leaf_responses[:, self.indices_per_depth[0]]
for x in self.indices_per_depth[1:]:
decision = decision.unsqueeze(2)
decision = torch.cat((decision, 1 - decision), 2).view(BSZ, -1)
decision = decision.mul(leaf_responses[:, x])
decision = decision.unsqueeze(2)
decision = torch.cat((decision, 1 - decision), 2).view(BSZ, -1)
predictions = decision.unsqueeze(2)
predictions = predictions.mul(F.softmax(self.weight, 1).unsqueeze(0))
return decision, predictions.sum(1)
class NeuralDecisionForest(nn.Module):
def __init__(self,
tensor_size: tuple,
n_labels: int,
n_trees: int,
depth: int,
dropout: float = 0.2,
network: torch.nn.Module = None):
super(NeuralDecisionForest, self).__init__()
self.trees = nn.ModuleList([NeuralTree(tensor_size, n_labels, depth,
dropout, network)
for i in range(n_trees)])
self.tensor_size = self.trees[0].tensor_size
def forward(self, tensor):
if tensor.dim() > 2:
tensor = tensor.view(tensor.size(0), -1)
decisions, predictions = [], []
for tree in self.trees:
decision, prediction = tree(tensor)
decisions.append(decision)
predictions.append(prediction.unsqueeze(2))
decisions = torch.cat(decisions, 1)
predictions = torch.cat(predictions, 2)
return decisions[:, 0::2], predictions.mean(2).log()
| true | true |
f7f5842133e855664724eb818f7cc747b9af23ec | 169 | py | Python | 03/src/prob/abc137_a.py | fumiyanll23/PythonLearning | 5d72af4c75ce5032eeef2212a8d1c74acbe2c711 | [
"MIT"
] | 1 | 2021-03-20T06:25:16.000Z | 2021-03-20T06:25:16.000Z | 03/src/prob/abc137_a.py | fumiyanll23/PythonLearning | 5d72af4c75ce5032eeef2212a8d1c74acbe2c711 | [
"MIT"
] | null | null | null | 03/src/prob/abc137_a.py | fumiyanll23/PythonLearning | 5d72af4c75ce5032eeef2212a8d1c74acbe2c711 | [
"MIT"
] | 1 | 2021-04-03T04:24:55.000Z | 2021-04-03T04:24:55.000Z | # ABC137A - +-x
def main():
# input
A, B = map(int, input().split())
# compute
"""WRITE BELOW"""
# output
if __name__ == '__main__':
main()
| 11.266667 | 36 | 0.491124 |
def main():
A, B = map(int, input().split())
if __name__ == '__main__':
main()
| true | true |
f7f58498b30fce896c5595a55802dcf62d967be5 | 10,979 | py | Python | test/functional/tests/cache_ops/test_multistream_seq_cutoff.py | andreatomassetti/open-cas-linux | 6a6a0267d76dca86de8695a959991ecefdc0ddf8 | [
"BSD-3-Clause"
] | 1 | 2022-01-23T23:50:23.000Z | 2022-01-23T23:50:23.000Z | test/functional/tests/cache_ops/test_multistream_seq_cutoff.py | andreatomassetti/open-cas-linux | 6a6a0267d76dca86de8695a959991ecefdc0ddf8 | [
"BSD-3-Clause"
] | 1 | 2022-03-21T22:05:26.000Z | 2022-03-21T22:05:26.000Z | test/functional/tests/cache_ops/test_multistream_seq_cutoff.py | andreatomassetti/open-cas-linux | 6a6a0267d76dca86de8695a959991ecefdc0ddf8 | [
"BSD-3-Clause"
] | null | null | null | #
# Copyright(c) 2020-2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
import os
import random
from time import sleep
import pytest
from api.cas import casadm
from api.cas.cache_config import CacheMode, SeqCutOffPolicy, CacheModeTrait
from core.test_run_utils import TestRun
from storage_devices.disk import DiskTypeSet, DiskTypeLowerThan, DiskType
from test_tools.dd import Dd
from test_tools.disk_utils import Filesystem
from test_tools.fio.fio import Fio
from test_tools.fio.fio_param import IoEngine, ReadWrite
from test_utils.os_utils import Udev
from test_utils.size import Size, Unit
# Randomized parameter pools, sampled once at import/collection time:
# three seq-cutoff thresholds (KiB values, 4-KiB aligned) and three stream
# counts, used below to parametrize the functional test.
random_thresholds = random.sample(range(1028, 1024 ** 2, 4), 3)
random_stream_numbers = random.sample(range(2, 128), 3)
@pytest.mark.parametrizex("streams_number", [1, 128] + random_stream_numbers)
@pytest.mark.parametrizex("threshold",
                          [Size(1, Unit.MebiByte), Size(1, Unit.GibiByte)]
                          + [Size(x, Unit.KibiByte) for x in random_thresholds])
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
def test_multistream_seq_cutoff_functional(threshold, streams_number):
    """
    title: Functional test for multistream sequential cutoff
    description: |
        Testing if amount of data written to cache and core is correct after running sequential
        writes from multiple streams with different sequential cut-off thresholds.
    pass_criteria:
      - Amount of data written to cache is equal to amount set with sequential cutoff threshold
      - Amount of data written in pass-through is equal to io size run after reaching the
        sequential cutoff threshold
    """
    with TestRun.step("Start cache and add core device."):
        cache_disk = TestRun.disks['cache']
        core_disk = TestRun.disks['core']
        cache = casadm.start_cache(cache_disk, CacheMode.WB, force=True)
        Udev.disable()
        core = cache.add_core(core_disk)

    with TestRun.step(f"Set seq-cutoff policy to always, threshold to {threshold} "
                      f"and reset statistics counters."):
        core.set_seq_cutoff_policy(SeqCutOffPolicy.always)
        core.set_seq_cutoff_threshold(threshold)
        core.set_seq_cutoff_promotion_count(1)
        core.reset_counters()

    with TestRun.step(f"Run {streams_number} I/O streams with amount of sequential writes equal to "
                      f"seq-cutoff threshold value minus one 4k block."):
        # Leave a 100 KiB gap between stream ranges so each stream's writes
        # form a separate sequential sequence.
        kib_between_streams = 100
        range_step = int(threshold.get_value(Unit.KibiByte)) + kib_between_streams
        max_range_offset = streams_number * range_step
        offsets = list(range(0, max_range_offset, range_step))
        core_statistics_before = core.get_statistics()

        for i in TestRun.iteration(range(0, len(offsets))):
            TestRun.LOGGER.info(f"Statistics before I/O:\n{core_statistics_before}")
            offset = Size(offsets[i], Unit.KibiByte)
            run_dd(core.path, count=int(threshold.get_value(Unit.Blocks4096) - 1),
                   seek=int(offset.get_value(Unit.Blocks4096)))

            core_statistics_after = core.get_statistics()
            # Below threshold: nothing in pass-through, everything to cache.
            check_statistics(core_statistics_before,
                             core_statistics_after,
                             expected_pt=0,
                             expected_writes_to_cache=threshold - Size(1, Unit.Blocks4096))
            core_statistics_before = core_statistics_after

    with TestRun.step("Write random number of 4k block requests to each stream and check if all "
                      "writes were sent in pass-through mode."):
        core_statistics_before = core.get_statistics()
        random.shuffle(offsets)

        for i in TestRun.iteration(range(0, len(offsets))):
            TestRun.LOGGER.info(f"Statistics before second I/O:\n{core_statistics_before}")
            # BUG FIX: random.randint requires integer bounds. The original
            # passed `kib_between_streams / 4` (a float), which raises
            # ValueError on Python 3. Use floor division instead; the bound
            # stays within the inter-stream gap so streams do not merge.
            additional_4k_blocks_writes = random.randint(1, kib_between_streams // 4)
            offset = Size(offsets[i], Unit.KibiByte)
            run_dd(
                core.path, count=additional_4k_blocks_writes,
                seek=int(offset.get_value(Unit.Blocks4096)
                         + threshold.get_value(Unit.Blocks4096) - 1))

            core_statistics_after = core.get_statistics()
            # Past threshold: all writes go pass-through, none to cache.
            check_statistics(core_statistics_before,
                             core_statistics_after,
                             expected_pt=additional_4k_blocks_writes,
                             expected_writes_to_cache=Size.zero())
            core_statistics_before = core_statistics_after
def check_statistics(stats_before, stats_after, expected_pt, expected_writes_to_cache):
    """Compare counter deltas between two statistics snapshots with expectations.

    Logs an error (does not raise) when the pass-through write count or the
    cache write amount differs from the expected values.
    """
    TestRun.LOGGER.info(f"Statistics after I/O:\n{stats_after}")

    actual_writes_to_cache = (stats_after.block_stats.cache.writes
                              - stats_before.block_stats.cache.writes)
    actual_pt = (stats_after.request_stats.pass_through_writes
                 - stats_before.request_stats.pass_through_writes)

    if actual_pt != expected_pt:
        TestRun.LOGGER.error(f"Expected pass-through writes: {expected_pt}\n"
                             f"Actual pass-through writes: {actual_pt}")
    if actual_writes_to_cache != expected_writes_to_cache:
        TestRun.LOGGER.error(
            f"Expected writes to cache: {expected_writes_to_cache}\n"
            f"Actual writes to cache: {actual_writes_to_cache}")
def run_dd(target_path, count, seek):
    """Write `count` direct 4 KiB zero blocks to `target_path`, starting at block `seek`."""
    dd = (Dd()
          .input("/dev/zero")
          .output(target_path)
          .block_size(Size(1, Unit.Blocks4096))
          .count(count)
          .oflag("direct")
          .seek(seek))
    dd.run()
    TestRun.LOGGER.info(f"dd command:\n{dd}")
@pytest.mark.parametrizex("streams_seq_rand", [(64, 64), (64, 192)])
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
def test_multistream_seq_cutoff_stress_raw(streams_seq_rand):
    """
    title: Stress test for multistream sequential cutoff on raw device
    description: |
        Testing the stability of a system when there are multiple sequential and random I/O streams
        running against the raw exported object with the sequential cutoff policy set to always and
        the sequential cutoff threshold set to a value which is able to be reached by
        sequential I/O streams.
    pass_criteria:
      - No system crash

    `streams_seq_rand` is a (sequential_streams, random_streams) count pair.
    """
    with TestRun.step("Start cache and add core device."):
        cache_disk = TestRun.disks['cache']
        core_disk = TestRun.disks['core']
        cache_disk.create_partitions([Size(1.5, Unit.GibiByte)])
        cache_dev = cache_disk.partitions[0]
        cache = casadm.start_cache(cache_dev, CacheMode.WB, force=True)
        Udev.disable()
        core = cache.add_core(core_disk)

    with TestRun.step(f"Set seq-cutoff policy to always and threshold to 512KiB."):
        core.set_seq_cutoff_policy(SeqCutOffPolicy.always)
        core.set_seq_cutoff_threshold(Size(512, Unit.KibiByte))

    with TestRun.step("Reset core statistics counters."):
        core.reset_counters()

    with TestRun.step("Run I/O"):
        # Cap per-stream size so all streams fit on the core device.
        stream_size = min(core_disk.size / 256, Size(256, Unit.MebiByte))
        sequential_streams = streams_seq_rand[0]
        random_streams = streams_seq_rand[1]
        fio = (Fio().create_command()
               .io_engine(IoEngine.libaio)
               .block_size(Size(1, Unit.Blocks4096))
               .direct()
               # each fio job is offset by stream_size into its own region
               .offset_increment(stream_size))

        # First `sequential_streams` jobs write sequentially, the rest randomly.
        for i in range(0, sequential_streams + random_streams):
            fio_job = fio.add_job(job_name=f"stream_{i}")
            fio_job.size(stream_size)
            fio_job.target(core.path)
            if i < sequential_streams:
                fio_job.read_write(ReadWrite.write)
            else:
                fio_job.read_write(ReadWrite.randwrite)

        # Run fio in the background and poll until it exits, logging stats.
        pid = fio.run_in_background()
        while TestRun.executor.check_if_process_exists(pid):
            sleep(5)
            TestRun.LOGGER.info(f"{core.get_statistics()}")
@pytest.mark.parametrizex("streams_seq_rand", [(64, 64), (64, 192)])
@pytest.mark.parametrizex("filesystem", Filesystem)
@pytest.mark.parametrizex("cache_mode", CacheMode.with_traits(CacheModeTrait.LazyWrites))
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
def test_multistream_seq_cutoff_stress_fs(streams_seq_rand, filesystem, cache_mode):
    """
    title: Stress test for multistream sequential cutoff on the device with a filesystem
    description: |
        Testing the stability of a system when there are multiple sequential and random I/O streams
        running against the exported object with a filesystem when the sequential cutoff policy is
        set to always and the sequential cutoff threshold is set to a value which is able
        to be reached by sequential I/O streams.
    pass_criteria:
      - No system crash

    `streams_seq_rand` is a (sequential_streams, random_streams) count pair.
    """
    mount_point = "/mnt"
    with TestRun.step("Prepare devices. Create filesystem on core device."):
        cache_disk = TestRun.disks['cache']
        core_disk = TestRun.disks['core']
        core_disk.create_filesystem(filesystem)

    with TestRun.step("Start cache and add core."):
        cache = casadm.start_cache(cache_disk, cache_mode, force=True)
        Udev.disable()
        core = cache.add_core(core_disk)

    with TestRun.step("Mount core."):
        core.mount(mount_point)

    with TestRun.step(f"Set seq-cutoff policy to always and threshold to 20MiB."):
        core.set_seq_cutoff_policy(SeqCutOffPolicy.always)
        core.set_seq_cutoff_threshold(Size(20, Unit.MebiByte))

    with TestRun.step("Reset core statistics counters."):
        core.reset_counters()

    with TestRun.step("Run I/O"):
        sequential_streams = streams_seq_rand[0]
        random_streams = streams_seq_rand[1]
        # Per-stream size scaled to the core device capacity.
        stream_size = core_disk.size / 256
        fio = (Fio().create_command()
               .io_engine(IoEngine.libaio)
               .block_size(Size(1, Unit.Blocks4096))
               .direct()
               # each fio job is offset by stream_size into its own region
               .offset_increment(stream_size))

        # First `sequential_streams` jobs write sequentially to their own
        # file under the mount point, the rest write randomly.
        for i in range(0, sequential_streams + random_streams):
            fio_job = fio.add_job(job_name=f"stream_{i}")
            fio_job.size(stream_size)
            fio_job.target(os.path.join(mount_point, f"file_{i}"))
            if i < sequential_streams:
                fio_job.read_write(ReadWrite.write)
            else:
                fio_job.read_write(ReadWrite.randwrite)

        # Run fio in the background and poll until it exits, logging stats.
        pid = fio.run_in_background()
        while TestRun.executor.check_if_process_exists(pid):
            sleep(5)
            TestRun.LOGGER.info(f"{core.get_statistics()}")
| 44.270161 | 100 | 0.676109 |
import os
import random
from time import sleep
import pytest
from api.cas import casadm
from api.cas.cache_config import CacheMode, SeqCutOffPolicy, CacheModeTrait
from core.test_run_utils import TestRun
from storage_devices.disk import DiskTypeSet, DiskTypeLowerThan, DiskType
from test_tools.dd import Dd
from test_tools.disk_utils import Filesystem
from test_tools.fio.fio import Fio
from test_tools.fio.fio_param import IoEngine, ReadWrite
from test_utils.os_utils import Udev
from test_utils.size import Size, Unit
random_thresholds = random.sample(range(1028, 1024 ** 2, 4), 3)
random_stream_numbers = random.sample(range(2, 128), 3)
@pytest.mark.parametrizex("streams_number", [1, 128] + random_stream_numbers)
@pytest.mark.parametrizex("threshold",
[Size(1, Unit.MebiByte), Size(1, Unit.GibiByte)]
+ [Size(x, Unit.KibiByte) for x in random_thresholds])
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
def test_multistream_seq_cutoff_functional(threshold, streams_number):
with TestRun.step("Start cache and add core device."):
cache_disk = TestRun.disks['cache']
core_disk = TestRun.disks['core']
cache = casadm.start_cache(cache_disk, CacheMode.WB, force=True)
Udev.disable()
core = cache.add_core(core_disk)
with TestRun.step(f"Set seq-cutoff policy to always, threshold to {threshold} "
f"and reset statistics counters."):
core.set_seq_cutoff_policy(SeqCutOffPolicy.always)
core.set_seq_cutoff_threshold(threshold)
core.set_seq_cutoff_promotion_count(1)
core.reset_counters()
with TestRun.step(f"Run {streams_number} I/O streams with amount of sequential writes equal to "
f"seq-cutoff threshold value minus one 4k block."):
kib_between_streams = 100
range_step = int(threshold.get_value(Unit.KibiByte)) + kib_between_streams
max_range_offset = streams_number * range_step
offsets = [o for o in range(0, max_range_offset, range_step)]
core_statistics_before = core.get_statistics()
for i in TestRun.iteration(range(0, len(offsets))):
TestRun.LOGGER.info(f"Statistics before I/O:\n{core_statistics_before}")
offset = Size(offsets[i], Unit.KibiByte)
run_dd(core.path, count=int(threshold.get_value(Unit.Blocks4096) - 1),
seek=int(offset.get_value(Unit.Blocks4096)))
core_statistics_after = core.get_statistics()
check_statistics(core_statistics_before,
core_statistics_after,
expected_pt=0,
expected_writes_to_cache=threshold - Size(1, Unit.Blocks4096))
core_statistics_before = core_statistics_after
with TestRun.step("Write random number of 4k block requests to each stream and check if all "
"writes were sent in pass-through mode."):
core_statistics_before = core.get_statistics()
random.shuffle(offsets)
for i in TestRun.iteration(range(0, len(offsets))):
TestRun.LOGGER.info(f"Statistics before second I/O:\n{core_statistics_before}")
additional_4k_blocks_writes = random.randint(1, kib_between_streams / 4)
offset = Size(offsets[i], Unit.KibiByte)
run_dd(
core.path, count=additional_4k_blocks_writes,
seek=int(offset.get_value(Unit.Blocks4096)
+ threshold.get_value(Unit.Blocks4096) - 1))
core_statistics_after = core.get_statistics()
check_statistics(core_statistics_before,
core_statistics_after,
expected_pt=additional_4k_blocks_writes,
expected_writes_to_cache=Size.zero())
core_statistics_before = core_statistics_after
def check_statistics(stats_before, stats_after, expected_pt, expected_writes_to_cache):
TestRun.LOGGER.info(f"Statistics after I/O:\n{stats_after}")
writes_to_cache_before = stats_before.block_stats.cache.writes
writes_to_cache_after = stats_after.block_stats.cache.writes
pt_writes_before = stats_before.request_stats.pass_through_writes
pt_writes_after = stats_after.request_stats.pass_through_writes
actual_pt = pt_writes_after - pt_writes_before
actual_writes_to_cache = writes_to_cache_after - writes_to_cache_before
if actual_pt != expected_pt:
TestRun.LOGGER.error(f"Expected pass-through writes: {expected_pt}\n"
f"Actual pass-through writes: {actual_pt}")
if actual_writes_to_cache != expected_writes_to_cache:
TestRun.LOGGER.error(
f"Expected writes to cache: {expected_writes_to_cache}\n"
f"Actual writes to cache: {actual_writes_to_cache}")
def run_dd(target_path, count, seek):
dd = Dd() \
.input("/dev/zero") \
.output(target_path) \
.block_size(Size(1, Unit.Blocks4096)) \
.count(count) \
.oflag("direct") \
.seek(seek)
dd.run()
TestRun.LOGGER.info(f"dd command:\n{dd}")
@pytest.mark.parametrizex("streams_seq_rand", [(64, 64), (64, 192)])
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
def test_multistream_seq_cutoff_stress_raw(streams_seq_rand):
with TestRun.step("Start cache and add core device."):
cache_disk = TestRun.disks['cache']
core_disk = TestRun.disks['core']
cache_disk.create_partitions([Size(1.5, Unit.GibiByte)])
cache_dev = cache_disk.partitions[0]
cache = casadm.start_cache(cache_dev, CacheMode.WB, force=True)
Udev.disable()
core = cache.add_core(core_disk)
with TestRun.step(f"Set seq-cutoff policy to always and threshold to 512KiB."):
core.set_seq_cutoff_policy(SeqCutOffPolicy.always)
core.set_seq_cutoff_threshold(Size(512, Unit.KibiByte))
with TestRun.step("Reset core statistics counters."):
core.reset_counters()
with TestRun.step("Run I/O"):
stream_size = min(core_disk.size / 256, Size(256, Unit.MebiByte))
sequential_streams = streams_seq_rand[0]
random_streams = streams_seq_rand[1]
fio = (Fio().create_command()
.io_engine(IoEngine.libaio)
.block_size(Size(1, Unit.Blocks4096))
.direct()
.offset_increment(stream_size))
for i in range(0, sequential_streams + random_streams):
fio_job = fio.add_job(job_name=f"stream_{i}")
fio_job.size(stream_size)
fio_job.target(core.path)
if i < sequential_streams:
fio_job.read_write(ReadWrite.write)
else:
fio_job.read_write(ReadWrite.randwrite)
pid = fio.run_in_background()
while TestRun.executor.check_if_process_exists(pid):
sleep(5)
TestRun.LOGGER.info(f"{core.get_statistics()}")
@pytest.mark.parametrizex("streams_seq_rand", [(64, 64), (64, 192)])
@pytest.mark.parametrizex("filesystem", Filesystem)
@pytest.mark.parametrizex("cache_mode", CacheMode.with_traits(CacheModeTrait.LazyWrites))
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
def test_multistream_seq_cutoff_stress_fs(streams_seq_rand, filesystem, cache_mode):
mount_point = "/mnt"
with TestRun.step("Prepare devices. Create filesystem on core device."):
cache_disk = TestRun.disks['cache']
core_disk = TestRun.disks['core']
core_disk.create_filesystem(filesystem)
with TestRun.step("Start cache and add core."):
cache = casadm.start_cache(cache_disk, cache_mode, force=True)
Udev.disable()
core = cache.add_core(core_disk)
with TestRun.step("Mount core."):
core.mount(mount_point)
with TestRun.step(f"Set seq-cutoff policy to always and threshold to 20MiB."):
core.set_seq_cutoff_policy(SeqCutOffPolicy.always)
core.set_seq_cutoff_threshold(Size(20, Unit.MebiByte))
with TestRun.step("Reset core statistics counters."):
core.reset_counters()
with TestRun.step("Run I/O"):
sequential_streams = streams_seq_rand[0]
random_streams = streams_seq_rand[1]
stream_size = core_disk.size / 256
fio = (Fio().create_command()
.io_engine(IoEngine.libaio)
.block_size(Size(1, Unit.Blocks4096))
.direct()
.offset_increment(stream_size))
for i in range(0, sequential_streams + random_streams):
fio_job = fio.add_job(job_name=f"stream_{i}")
fio_job.size(stream_size)
fio_job.target(os.path.join(mount_point, f"file_{i}"))
if i < sequential_streams:
fio_job.read_write(ReadWrite.write)
else:
fio_job.read_write(ReadWrite.randwrite)
pid = fio.run_in_background()
while TestRun.executor.check_if_process_exists(pid):
sleep(5)
TestRun.LOGGER.info(f"{core.get_statistics()}")
| true | true |
f7f584d3d3cdb8b88ab3c13af892680fdaed39f5 | 63,165 | py | Python | numpy/core/fromnumeric.py | illume/numpy3k | 42171a679b0ef24932fe08fc88cce039abf6de2b | [
"BSD-3-Clause"
] | 2 | 2020-07-03T12:00:29.000Z | 2021-04-18T06:54:30.000Z | numpy/core/fromnumeric.py | illume/numpy3k | 42171a679b0ef24932fe08fc88cce039abf6de2b | [
"BSD-3-Clause"
] | null | null | null | numpy/core/fromnumeric.py | illume/numpy3k | 42171a679b0ef24932fe08fc88cce039abf6de2b | [
"BSD-3-Clause"
] | null | null | null | # Module containing non-deprecated functions borrowed from Numeric.
__docformat__ = "restructuredtext en"
# Names exported via `from numpy.core.fromnumeric import *`; each function
# here mirrors the ndarray method of the same name.
__all__ = ['take', 'reshape', 'choose', 'repeat', 'put',
           'swapaxes', 'transpose', 'sort', 'argsort', 'argmax', 'argmin',
           'searchsorted', 'alen',
           'resize', 'diagonal', 'trace', 'ravel', 'nonzero', 'shape',
           'compress', 'clip', 'sum', 'product', 'prod', 'sometrue', 'alltrue',
           'any', 'all', 'cumsum', 'cumproduct', 'cumprod', 'ptp', 'ndim',
           'rank', 'size', 'around', 'round_', 'mean', 'std', 'var', 'squeeze',
           'amax', 'amin',
           ]
import multiarray as mu
import umath as um
import numerictypes as nt
from numeric import asarray, array, asanyarray, concatenate
_dt_ = nt.sctype2char
import types
# Generator type for isinstance() checks; on interpreters where the `types`
# module lacks GeneratorType, fall back to NoneType so the checks stay
# harmless (no object matches).
try:
    _gentype = types.GeneratorType
except AttributeError:
    _gentype = types.NoneType

# save away Python's builtin sum before the array `sum` below shadows it
_sum_ = sum
# functions that are now methods
def _wrapit(obj, method, *args, **kwds):
    # Apply the named ndarray method to asarray(obj); if obj defines
    # __array_wrap__, use it to convert the result back to obj's type.
    wrap = getattr(obj, '__array_wrap__', None)
    result = getattr(asarray(obj), method)(*args, **kwds)
    if wrap:
        if not isinstance(result, mu.ndarray):
            result = asarray(result)
        result = wrap(result)
    return result
def take(a, indices, axis=None, out=None, mode='raise'):
    """
    Take elements from an array along an axis.

    Does the same thing as "fancy" indexing (indexing arrays using
    arrays), but can be easier to use when elements are needed along a
    given axis.

    Parameters
    ----------
    a : array_like
        The source array.
    indices : array_like
        The indices of the values to extract.
    axis : int, optional
        The axis over which to select values.  By default, the flattened
        input array is used.
    out : ndarray, optional
        If provided, the result will be placed in this array.  It should
        be of the appropriate shape and dtype.
    mode : {'raise', 'wrap', 'clip'}, optional
        Specifies how out-of-bounds indices will behave:

        * 'raise' -- raise an error (default)
        * 'wrap'  -- wrap around
        * 'clip'  -- clip to the range

        'clip' mode replaces indices that are too large by the last valid
        index along that axis, and disables indexing with negative numbers.

    Returns
    -------
    subarray : ndarray
        The returned array has the same type as `a`.

    See Also
    --------
    ndarray.take : equivalent method

    Examples
    --------
    >>> a = [4, 3, 5, 7, 6, 8]
    >>> indices = [0, 1, 4]
    >>> np.take(a, indices)
    array([4, 3, 6])

    If `a` is an ndarray, "fancy" indexing can be used instead.

    >>> a = np.array(a)
    >>> a[indices]
    array([4, 3, 6])

    """
    # Defer to the object's own method; otherwise round-trip via ndarray.
    try:
        method = a.take
    except AttributeError:
        return _wrapit(a, 'take', indices, axis, out, mode)
    return method(indices, axis, out, mode)
# not deprecated --- copy if necessary, view otherwise
def reshape(a, newshape, order='C'):
    """
    Gives a new shape to an array without changing its data.

    Parameters
    ----------
    a : array_like
        Array to be reshaped.
    newshape : {tuple, int}
        The new shape, compatible with the original shape.  An integer
        gives a 1-D result of that length.  One dimension may be -1, in
        which case its value is inferred from the array size and the
        remaining dimensions.
    order : {'C', 'F'}, optional
        Whether the array data should be viewed in C (row-major) or
        FORTRAN (column-major) order.

    Returns
    -------
    reshaped_array : ndarray
        A new view object when possible; otherwise a copy.

    See Also
    --------
    ndarray.reshape : Equivalent method.

    Examples
    --------
    >>> a = np.array([[1,2,3], [4,5,6]])
    >>> np.reshape(a, 6)
    array([1, 2, 3, 4, 5, 6])
    >>> np.reshape(a, 6, order='F')
    array([1, 4, 2, 5, 3, 6])

    >>> np.reshape(a, (3,-1))       # the unspecified value is inferred to be 2
    array([[1, 2],
           [3, 4],
           [5, 6]])

    """
    # Defer to the object's own method; otherwise round-trip via ndarray.
    try:
        method = a.reshape
    except AttributeError:
        return _wrapit(a, 'reshape', newshape, order=order)
    return method(newshape, order=order)
def choose(a, choices, out=None, mode='raise'):
    """
    Use an index array to construct a new array from a set of choices.

    Given an array of integers and a set of n choice arrays, this
    function builds a new array merging the choice arrays: where a value
    in `a` is i, the new array takes the value that choices[i] contains
    at the same position.

    Parameters
    ----------
    a : int array
        This array must contain integers in [0, n-1], where n is the
        number of choices.
    choices : sequence of arrays
        Choice arrays.  The index array and all of the choices should be
        broadcastable to the same shape.
    out : array, optional
        If provided, the result will be inserted into this array.  It
        should be of the appropriate shape and dtype.
    mode : {'raise', 'wrap', 'clip'}, optional
        Specifies how out-of-bounds indices will behave:

        * 'raise' : raise an error
        * 'wrap'  : wrap around
        * 'clip'  : clip to the range

    Returns
    -------
    merged_array : array
        The merged results.

    See Also
    --------
    ndarray.choose : equivalent method

    Examples
    --------
    >>> choices = [[0, 1, 2, 3], [10, 11, 12, 13],
    ...   [20, 21, 22, 23], [30, 31, 32, 33]]
    >>> np.choose([2, 3, 1, 0], choices)
    array([20, 31, 12,  3])
    >>> np.choose([2, 4, 1, 0], choices, mode='clip')
    array([20, 31, 12,  3])
    >>> np.choose([2, 4, 1, 0], choices, mode='wrap')
    array([20,  1, 12,  3])

    """
    # Defer to the object's own method; otherwise round-trip via ndarray.
    try:
        method = a.choose
    except AttributeError:
        return _wrapit(a, 'choose', choices, out=out, mode=mode)
    return method(choices, out=out, mode=mode)
def repeat(a, repeats, axis=None):
    """
    Repeat elements of an array.

    Parameters
    ----------
    a : array_like
        Input array.
    repeats : {int, array of ints}
        The number of repetitions for each element.  `repeats` is
        broadcasted to fit the shape of the given axis.
    axis : int, optional
        The axis along which to repeat values.  By default, use the
        flattened input array, and return a flat output array.

    Returns
    -------
    repeated_array : ndarray
        Output array which has the same shape as `a`, except along the
        given axis.

    See Also
    --------
    tile : Tile an array.

    Examples
    --------
    >>> x = np.array([[1,2],[3,4]])
    >>> np.repeat(x, 2)
    array([1, 1, 2, 2, 3, 3, 4, 4])
    >>> np.repeat(x, 3, axis=1)
    array([[1, 1, 1, 2, 2, 2],
           [3, 3, 3, 4, 4, 4]])
    >>> np.repeat(x, [1, 2], axis=0)
    array([[1, 2],
           [3, 4],
           [3, 4]])

    """
    # Defer to the object's own method; otherwise round-trip via ndarray.
    try:
        method = a.repeat
    except AttributeError:
        return _wrapit(a, 'repeat', repeats, axis)
    return method(repeats, axis)
def put(a, ind, v, mode='raise'):
    """
    Replaces specified elements of an array with given values.

    The indexing works on the flattened target array; `put` is roughly
    equivalent to::

        a.flat[ind] = v

    Parameters
    ----------
    a : ndarray
        Target array.
    ind : array_like
        Target indices, interpreted as integers.
    v : array_like
        Values to place in `a` at target indices.  If `v` is shorter
        than `ind` it will be repeated as necessary.
    mode : {'raise', 'wrap', 'clip'}, optional
        Specifies how out-of-bounds indices will behave:

        * 'raise' -- raise an error (default)
        * 'wrap'  -- wrap around
        * 'clip'  -- clip to the range

        'clip' mode replaces indices that are too large by the last valid
        index along that axis, and disables indexing with negative numbers.

    See Also
    --------
    putmask, place

    Examples
    --------
    >>> a = np.arange(5)
    >>> np.put(a, [0, 2], [-44, -55])
    >>> a
    array([-44,   1, -55,   3,   4])

    >>> a = np.arange(5)
    >>> np.put(a, 22, -5, mode='clip')
    >>> a
    array([ 0,  1,  2,  3, -5])

    """
    # `a` must already be an ndarray here: modification is in place.
    return a.put(ind, v, mode)
def swapaxes(a, axis1, axis2):
    """
    Interchange two axes of an array.

    Parameters
    ----------
    a : array_like
        Input array.
    axis1, axis2 : int
        The two axes to interchange.

    Returns
    -------
    a_swapped : ndarray
        A view of `a` when `a` is an ndarray; otherwise a new array.
    """
    if hasattr(a, 'swapaxes'):
        return a.swapaxes(axis1, axis2)
    # Fall back for objects without the method (lists, tuples, ...).
    return _wrapit(a, 'swapaxes', axis1, axis2)
def transpose(a, axes=None):
    """
    Permute the dimensions of an array.

    Parameters
    ----------
    a : array_like
        Input array.
    axes : list of ints, optional
        Permutation of the axes.  By default the dimensions are
        reversed.

    Returns
    -------
    p : ndarray
        `a` with its axes permuted; a view whenever possible.

    See Also
    --------
    rollaxis
    """
    bound = getattr(a, 'transpose', None)
    if bound is None:
        # Non-ndarray input: convert, delegate, and re-wrap.
        return _wrapit(a, 'transpose', axes)
    return bound(axes)
def sort(a, axis=-1, kind='quicksort', order=None):
    """
    Return a sorted copy of an array.

    Parameters
    ----------
    a : array_like
        Array to be sorted.
    axis : int or None, optional
        Axis along which to sort.  If None, the array is flattened
        before sorting.  Default is -1 (the last axis).
    kind : {'quicksort', 'mergesort', 'heapsort'}, optional
        Sorting algorithm.  Default is 'quicksort'.
    order : list, optional
        For structured arrays, the field(s) to compare first, second,
        and so on.  Need not include all fields.

    Returns
    -------
    sorted_array : ndarray
        Array of the same type and shape as `a`.

    See Also
    --------
    ndarray.sort : Method to sort an array in-place.
    argsort : Indirect sort.
    lexsort : Indirect stable sort on multiple keys.
    searchsorted : Find elements in a sorted array.

    Notes
    -----
    Sorting along any axis other than the last makes temporary copies
    of the data, so the last axis is fastest and uses the least space.
    Complex numbers sort lexicographically (real part first, then
    imaginary).  As of NumPy 1.4.0, nan values sort to the end.
    """
    if axis is None:
        # Flattening always copies, so an in-place sort of the
        # flattened result is safe.
        result = asanyarray(a).flatten()
        result.sort(0, kind, order)
    else:
        # Copy explicitly so the caller's array is untouched.
        result = asanyarray(a).copy()
        result.sort(axis, kind, order)
    return result
def argsort(a, axis=-1, kind='quicksort', order=None):
    """
    Return the indices that would sort an array.

    Performs an indirect sort along the given axis using the algorithm
    given by `kind`, returning an array of indices of the same shape
    as `a` that index data along that axis in sorted order.

    Parameters
    ----------
    a : array_like
        Array to sort.
    axis : int or None, optional
        Axis along which to sort; default -1 (last axis).  If None,
        the flattened array is used.
    kind : {'quicksort', 'mergesort', 'heapsort'}, optional
        Sorting algorithm.
    order : list, optional
        For arrays with fields, which fields to compare first,
        second, etc.

    Returns
    -------
    index_array : ndarray, int
        Indices that sort `a` along `axis`; ``a[index_array]`` yields
        a sorted `a`.

    See Also
    --------
    sort : Describes sorting algorithms used.
    lexsort : Indirect stable sort with multiple keys.
    ndarray.sort : Inplace sort.
    """
    if hasattr(a, 'argsort'):
        return a.argsort(axis, kind, order)
    # Not an ndarray: convert, delegate, and re-wrap the result.
    return _wrapit(a, 'argsort', axis, kind, order)
def argmax(a, axis=None):
    """
    Indices of the maximum values along an axis.

    Parameters
    ----------
    a : array_like
        Input array.
    axis : int, optional
        By default, the index is into the flattened array; otherwise
        along the specified axis.

    Returns
    -------
    index_array : ndarray, int
        Indices into the array, shaped like `a` with `axis` removed.

    See Also
    --------
    argmin : Indices of the minimum values along an axis.
    amax : The maximum value along a given axis.
    unravel_index : Convert a flat index into an index tuple.
    """
    bound = getattr(a, 'argmax', None)
    if bound is None:
        return _wrapit(a, 'argmax', axis)
    return bound(axis)
def argmin(a, axis=None):
    """
    Return the indices of the minimum values along an axis.

    See Also
    --------
    argmax : Similar function; refer to `numpy.argmax` for details.
    """
    bound = getattr(a, 'argmin', None)
    if bound is None:
        return _wrapit(a, 'argmin', axis)
    return bound(axis)
def searchsorted(a, v, side='left'):
    """
    Find indices where elements should be inserted to maintain order.

    Finds the indices into the sorted array `a` such that inserting
    the corresponding elements of `v` before them keeps `a` sorted.

    Parameters
    ----------
    a : 1-D array_like
        Input array, sorted in ascending order.
    v : array_like
        Values to insert into `a`.
    side : {'left', 'right'}, optional
        Return the first suitable location ('left', default) or the
        last ('right').  If none is suitable, return 0 or N
        (N = len(a)).

    Returns
    -------
    indices : array of ints
        Insertion points with the same shape as `v`.

    See Also
    --------
    sort : Return a sorted copy of an array.
    histogram : Produce histogram from 1-D data.

    Notes
    -----
    Binary search is used to find the required insertion points.
    """
    if hasattr(a, 'searchsorted'):
        return a.searchsorted(v, side)
    # Fall back for non-ndarray input.
    return _wrapit(a, 'searchsorted', v, side)
def resize(a, new_shape):
    """
    Return a new array with the specified shape.

    If the new array is larger than the original array, the new array
    is filled with repeated copies of `a`.  Note that this behavior is
    different from ``a.resize(new_shape)``, which fills with zeros
    instead of repeated copies of `a`.

    Parameters
    ----------
    a : array_like
        Array to be resized.
    new_shape : int or tuple of ints
        Shape of the resized array.

    Returns
    -------
    reshaped_array : ndarray
        The new array, formed from the data in the old array and
        repeated if necessary to fill out the required number of
        elements.

    See Also
    --------
    ndarray.resize : resize an array in-place.

    Examples
    --------
    >>> a = np.array([[0, 1], [2, 3]])
    >>> np.resize(a, (1, 4))
    array([[0, 1, 2, 3]])
    >>> np.resize(a, (2, 4))
    array([[0, 1, 2, 3],
           [0, 1, 2, 3]])
    """
    if isinstance(new_shape, (int, nt.integer)):
        new_shape = (new_shape,)
    a = ravel(a)
    Na = len(a)
    if not Na:
        # No source data: the result is all zeros in the requested shape.
        return mu.zeros(new_shape, a.dtype.char)
    total_size = um.multiply.reduce(new_shape)
    if total_size == 0:
        # BUG FIX: an empty result must still carry the requested shape
        # (e.g. (0, 3)); the old code returned a flat length-0 array.
        return reshape(a[:0], new_shape)
    n_copies = int(total_size / Na)
    extra = total_size % Na
    if extra != 0:
        # A partial copy is needed: concatenate one extra full copy
        # and trim the surplus afterwards.
        n_copies = n_copies + 1
        extra = Na - extra
    a = concatenate((a,) * n_copies)
    if extra > 0:
        a = a[:-extra]
    return reshape(a, new_shape)
def squeeze(a):
    """
    Remove single-dimensional entries from the shape of an array.

    Parameters
    ----------
    a : array_like
        Input data.

    Returns
    -------
    squeezed : ndarray
        The input array with all length-1 dimensions removed.
        Whenever possible, a view on `a` is returned.
    """
    if hasattr(a, 'squeeze'):
        return a.squeeze()
    # Fall back for non-ndarray input.
    return _wrapit(a, 'squeeze')
def diagonal(a, offset=0, axis1=0, axis2=1):
    """
    Return specified diagonals.

    For 2-D `a`, returns the diagonal with the given offset, i.e. the
    elements ``a[i, i+offset]``.  For higher dimensions, `axis1` and
    `axis2` select the 2-D subarrays whose diagonals are returned; the
    result's shape is that of `a` with those two axes removed and an
    index appended on the right equal to the diagonal size.

    Parameters
    ----------
    a : array_like
        Array from which the diagonals are taken.
    offset : int, optional
        Offset from the main diagonal; may be positive or negative.
        Default 0 (main diagonal).
    axis1 : int, optional
        First axis of the 2-D subarrays.  Default 0.
    axis2 : int, optional
        Second axis of the 2-D subarrays.  Default 1.

    Returns
    -------
    array_of_diagonals : ndarray
        A 1-D array for 2-D input; an array of diagonals otherwise.

    Raises
    ------
    ValueError
        If the dimension of `a` is less than 2.

    See Also
    --------
    diag : Matlab workalike for 1-D and 2-D arrays.
    diagflat : Create diagonal arrays.
    trace : Sum along diagonals.
    """
    arr = asarray(a)
    return arr.diagonal(offset, axis1, axis2)
def trace(a, offset=0, axis1=0, axis2=1, dtype=None, out=None):
    """
    Return the sum along diagonals of the array.

    For 2-D `a`, returns the sum of elements ``a[i, i+offset]``.  For
    higher dimensions, `axis1` and `axis2` select the 2-D sub-arrays
    whose traces are returned; the result has the shape of `a` with
    those two axes removed.

    Parameters
    ----------
    a : array_like
        Input array, from which the diagonals are taken.
    offset : int, optional
        Offset from the main diagonal; may be positive or negative.
        Default 0.
    axis1, axis2 : int, optional
        Axes of the 2-D sub-arrays from which the diagonals are taken.
        Defaults are the first two axes of `a`.
    dtype : dtype, optional
        Data-type of the returned array and the accumulator.  With the
        default None, integer input of lower precision than the default
        integer is accumulated at default-integer precision.
    out : ndarray, optional
        Array into which the output is placed.  Its type is preserved
        and it must be of the right shape to hold the output.

    Returns
    -------
    sum_along_diagonals : ndarray
        The sum along the diagonal for 2-D input; an array of sums
        along diagonals otherwise.

    See Also
    --------
    diag, diagonal, diagflat
    """
    arr = asarray(a)
    return arr.trace(offset, axis1, axis2, dtype, out)
def ravel(a, order='C'):
    """
    Return a flattened array.

    A 1-D array containing the elements of the input is returned; a
    copy is made only if needed.

    Parameters
    ----------
    a : array_like
        Input array, read in the order given by `order` and packed
        into a 1-D array.
    order : {'C', 'F'}, optional
        Read the elements in row-major ('C', default) or column-major
        ('F') order.

    Returns
    -------
    1d_array : ndarray
        Output of the same dtype as `a`, of shape ``(a.size(),)``.

    See Also
    --------
    ndarray.flat : 1-D iterator over an array.
    ndarray.flatten : 1-D array copy of the elements of an array
                      in row-major order.

    Notes
    -----
    In row-major order the last index varies the quickest; in
    column-major (Fortran) order the first index does.
    """
    arr = asarray(a)
    return arr.ravel(order)
def nonzero(a):
    """
    Return the indices of the elements that are non-zero.

    Returns a tuple of arrays, one per dimension of `a`, containing
    the indices of the non-zero elements in that dimension.  The
    corresponding values can be obtained with ``a[nonzero(a)]``; to
    group indices by element use ``transpose(nonzero(a))``, which is
    always a 2-D array with one row per non-zero element.

    Parameters
    ----------
    a : array_like
        Input array.

    Returns
    -------
    tuple_of_arrays : tuple
        Indices of elements that are non-zero.

    See Also
    --------
    flatnonzero :
        Indices that are non-zero in the flattened version of the
        input array.
    ndarray.nonzero :
        Equivalent ndarray method.

    Notes
    -----
    A common use is finding where a condition holds: since False is
    interpreted as 0, ``np.nonzero(a > 3)`` yields the indices where
    the condition is true.
    """
    if hasattr(a, 'nonzero'):
        return a.nonzero()
    # Fall back for non-ndarray input.
    return _wrapit(a, 'nonzero')
def shape(a):
    """
    Return the shape of an array.

    Parameters
    ----------
    a : array_like
        Input array.

    Returns
    -------
    shape : tuple
        The lengths of the corresponding array dimensions.

    See Also
    --------
    alen,
    ndarray.shape : array method
    """
    try:
        # Fast path: ndarrays (and anything array-like enough)
        # expose .shape directly.
        return a.shape
    except AttributeError:
        return asarray(a).shape
def compress(condition, a, axis=None, out=None):
    """
    Return selected slices of an array along given axis.

    When working along a given axis, a slice along that axis is
    returned in the output for each index where `condition` is True.
    On a 1-D array, `compress` is equivalent to `extract`.

    Parameters
    ----------
    condition : 1-D array of bools
        Selects which entries to return.  If shorter than the size of
        `a` along the given axis, the output is truncated to the
        length of `condition`.
    a : array_like
        Array from which to extract a part.
    axis : int, optional
        Axis along which to take slices.  If None (default), work on
        the flattened array.
    out : ndarray, optional
        Output array; its type is preserved and it must be of the
        right shape to hold the output.

    Returns
    -------
    compressed_array : ndarray
        A copy of `a` without the slices along `axis` for which
        `condition` is false.

    See Also
    --------
    take, choose, diag, diagonal, select
    ndarray.compress : Equivalent method.
    """
    bound = getattr(a, 'compress', None)
    if bound is None:
        # Not an ndarray: convert, delegate, and re-wrap.
        return _wrapit(a, 'compress', condition, axis, out)
    return bound(condition, axis, out)
def clip(a, a_min, a_max, out=None):
    """
    Clip (limit) the values in an array.

    Given an interval, values outside it are clipped to the interval
    edges: with ``[0, 1]``, values below 0 become 0 and values above 1
    become 1.

    Parameters
    ----------
    a : array_like
        Array containing elements to clip.
    a_min : scalar or array_like
        Minimum value.
    a_max : scalar or array_like
        Maximum value.  Array-like bounds are broadcast to the shape
        of `a`.
    out : ndarray, optional
        Results are placed in this array; may be `a` itself for
        in-place clipping.  Must be the right shape; type preserved.

    Returns
    -------
    clipped_array : ndarray
        The elements of `a` with values < `a_min` replaced by `a_min`
        and values > `a_max` replaced by `a_max`.
    """
    if hasattr(a, 'clip'):
        return a.clip(a_min, a_max, out)
    # Fall back for non-ndarray input.
    return _wrapit(a, 'clip', a_min, a_max, out)
def sum(a, axis=None, dtype=None, out=None):
    """
    Return the sum of array elements over a given axis.

    Parameters
    ----------
    a : array_like
        Elements to sum.
    axis : integer, optional
        Axis over which the sum is taken.  By default all elements
        are summed.
    dtype : dtype, optional
        Type of the returned array and of the accumulator.  Defaults
        to the dtype of `a`, except that integer input of lower
        precision than the default platform integer is accumulated at
        platform-integer precision.
    out : ndarray, optional
        Array into which the output is placed; must have the
        appropriate shape (that of `a` with `axis` removed).  Its
        type is preserved.

    Returns
    -------
    sum_along_axis : ndarray
        An array shaped like `a` with `axis` removed; a scalar for
        0-d input or ``axis=None``.  A reference to `out` when it is
        given.

    See Also
    --------
    ndarray.sum : equivalent method

    Notes
    -----
    Arithmetic is modular when using integer types, and no error is
    raised on overflow.
    """
    if isinstance(a, _gentype):
        # Plain generators are summed with the builtin; the scalar
        # result is optionally copied into `out`.
        res = _sum_(a)
        if out is None:
            return res
        out[...] = res
        return out
    if hasattr(a, 'sum'):
        return a.sum(axis, dtype, out)
    return _wrapit(a, 'sum', axis, dtype, out)
def product(a, axis=None, dtype=None, out=None):
    """
    Return the product of array elements over a given axis.

    See Also
    --------
    prod : equivalent function; see for details.
    """
    bound = getattr(a, 'prod', None)
    if bound is None:
        return _wrapit(a, 'prod', axis, dtype, out)
    return bound(axis, dtype, out)
def sometrue(a, axis=None, out=None):
    """
    Check whether some values are true.

    Refer to `any` for full documentation.

    See Also
    --------
    any : equivalent function
    """
    bound = getattr(a, 'any', None)
    if bound is None:
        return _wrapit(a, 'any', axis, out)
    return bound(axis, out)
def alltrue(a, axis=None, out=None):
    """
    Check if all elements of input array are true.

    See Also
    --------
    numpy.all : Equivalent function; see for details.
    """
    bound = getattr(a, 'all', None)
    if bound is None:
        return _wrapit(a, 'all', axis, out)
    return bound(axis, out)
def any(a, axis=None, out=None):
    """
    Test whether any array element along a given axis evaluates to True.

    Returns a single boolean unless `axis` is not ``None``.

    Parameters
    ----------
    a : array_like
        Input array or object that can be converted to an array.
    axis : int, optional
        Axis along which a logical OR is performed.  The default
        (None) ORs over the flattened array.  May be negative,
        counting from the last axis.
    out : ndarray, optional
        Alternative output array for the result.  Must have the same
        shape as the expected output; its type is preserved.

    Returns
    -------
    any : bool, ndarray
        A new boolean or ndarray, or a reference to `out` when given.

    See Also
    --------
    ndarray.any : equivalent method

    Notes
    -----
    Not a Number (NaN), positive infinity and negative infinity
    evaluate to `True` because these are not equal to zero.
    """
    if hasattr(a, 'any'):
        return a.any(axis, out)
    # Fall back for non-ndarray input.
    return _wrapit(a, 'any', axis, out)
def all(a, axis=None, out=None):
    """
    Test whether all array elements along a given axis evaluate to True.

    Parameters
    ----------
    a : array_like
        Input array or object that can be converted to an array.
    axis : int, optional
        Axis along which a logical AND is performed.  The default
        (None) ANDs over the flattened array.  May be negative,
        counting from the last axis.
    out : ndarray, optional
        Alternative output array for the result.  Must have the same
        shape as the expected output; its type is preserved.

    Returns
    -------
    all : ndarray, bool
        A new boolean or array, or a reference to `out` when given.

    See Also
    --------
    ndarray.all : equivalent method

    Notes
    -----
    Not a Number (NaN), positive infinity and negative infinity
    evaluate to `True` because these are not equal to zero.
    """
    if hasattr(a, 'all'):
        return a.all(axis, out)
    # Fall back for non-ndarray input.
    return _wrapit(a, 'all', axis, out)
def cumsum(a, axis=None, dtype=None, out=None):
    """
    Return the cumulative sum of the elements along a given axis.

    Parameters
    ----------
    a : array_like
        Input array.
    axis : int, optional
        Axis along which the cumulative sum is computed.  The default
        (None) computes over the flattened array.
    dtype : dtype, optional
        Type of the returned array and the accumulator.  Defaults to
        the dtype of `a`, except that integer input of lower precision
        than the default platform integer is accumulated at
        platform-integer precision.
    out : ndarray, optional
        Alternative output array; must have the same shape and buffer
        length as the expected output, with type cast if necessary.

    Returns
    -------
    cumsum : ndarray.
        A new array holding the result, or a reference to `out` when
        it is given.

    Notes
    -----
    Arithmetic is modular when using integer types, and no error is
    raised on overflow.
    """
    bound = getattr(a, 'cumsum', None)
    if bound is None:
        return _wrapit(a, 'cumsum', axis, dtype, out)
    return bound(axis, dtype, out)
def cumproduct(a, axis=None, dtype=None, out=None):
    """
    Return the cumulative product over the given axis.

    See Also
    --------
    cumprod : equivalent function; see for details.
    """
    bound = getattr(a, 'cumprod', None)
    if bound is None:
        return _wrapit(a, 'cumprod', axis, dtype, out)
    return bound(axis, dtype, out)
def ptp(a, axis=None, out=None):
    """
    Range of values (maximum - minimum) along an axis.

    The name of the function comes from the acronym for
    'peak to peak'.

    Parameters
    ----------
    a : array_like
        Input values.
    axis : int, optional
        Axis along which to find the peaks.  By default, flatten the
        array.
    out : array_like
        Alternative output array; must have the same shape and buffer
        length as the expected output, with the output values cast if
        necessary.

    Returns
    -------
    ptp : ndarray
        A new array holding the result, or a reference to `out` when
        it is given.
    """
    bound = getattr(a, 'ptp', None)
    if bound is None:
        # Not an ndarray: convert, delegate, and re-wrap.
        return _wrapit(a, 'ptp', axis, out)
    return bound(axis, out)
def amax(a, axis=None, out=None):
    """
    Return the maximum along an axis.

    Parameters
    ----------
    a : array_like
        Input data.
    axis : int, optional
        Axis along which to operate; by default the flattened input
        is used.
    out : ndarray, optional
        Alternative output array; must be of the same shape and
        buffer length as the expected output.

    Returns
    -------
    amax : ndarray
        A new array or scalar with the result, or a reference to
        `out` when it is given.

    See Also
    --------
    nanmax: nan values are ignored instead of being propagated
    fmax: same behavior as the C99 fmax function

    Notes
    -----
    NaN values are propagated: if at least one item is nan, the
    corresponding max value is nan as well.  Use nanmax to ignore
    NaN values.
    """
    if hasattr(a, 'max'):
        return a.max(axis, out)
    # Fall back for non-ndarray input.
    return _wrapit(a, 'max', axis, out)
def amin(a, axis=None, out=None):
    """
    Return the minimum along an axis.

    Parameters
    ----------
    a : array_like
        Input data.
    axis : int, optional
        Axis along which to operate; by default a flattened input is
        used.
    out : ndarray, optional
        Alternative output array; must be of the same shape and
        buffer length as the expected output.

    Returns
    -------
    amin : ndarray
        A new array or scalar with the result, or a reference to
        `out` when it is given.

    See Also
    --------
    nanmin: nan values are ignored instead of being propagated
    fmin: same behavior as the C99 fmin function

    Notes
    -----
    NaN values are propagated: if at least one item is nan, the
    corresponding min value is nan as well.  Use nanmin to ignore
    NaN values.
    """
    if hasattr(a, 'min'):
        return a.min(axis, out)
    # Fall back for non-ndarray input.
    return _wrapit(a, 'min', axis, out)
def alen(a):
    """
    Return the length of the first dimension of the input array.

    Parameters
    ----------
    a : array_like
        Input array.

    Returns
    -------
    alen : int
        Length of the first dimension of `a`.

    See Also
    --------
    shape
    """
    try:
        return len(a)
    except TypeError:
        # len() failed (e.g. a scalar): promote to at least 1-D first.
        return len(array(a, ndmin=1))
def prod(a, axis=None, dtype=None, out=None):
    """
    Return the product of array elements over a given axis.

    Parameters
    ----------
    a : array_like
        Input data.
    axis : int, optional
        Axis over which the product is taken; by default the product of
        all elements is calculated.
    dtype : data-type, optional
        Type of the returned array and of the multiplication accumulator.
    out : ndarray, optional
        Alternative output array with the expected output's shape.

    Returns
    -------
    product_along_axis : ndarray or scalar
        Shaped as `a` with the specified axis removed; a reference to
        `out` when it was given.

    Notes
    -----
    Integer arithmetic is modular: no error is raised on overflow.
    """
    multiply = getattr(a, 'prod', None)
    if multiply is None:
        return _wrapit(a, 'prod', axis, dtype, out)
    return multiply(axis, dtype, out)
def cumprod(a, axis=None, dtype=None, out=None):
    """
    Return the cumulative product of elements along a given axis.

    Parameters
    ----------
    a : array_like
        Input array.
    axis : int, optional
        Axis along which the cumulative product is computed; the input is
        flattened by default.
    dtype : dtype, optional
        Type of the returned array and of the accumulator.
    out : ndarray, optional
        Alternative output array with the expected shape and buffer length.

    Returns
    -------
    cumprod : ndarray
        A new array, or a reference to `out` when it was given.

    Notes
    -----
    Integer arithmetic is modular: no error is raised on overflow.
    """
    accumulate_prod = getattr(a, 'cumprod', None)
    if accumulate_prod is None:
        return _wrapit(a, 'cumprod', axis, dtype, out)
    return accumulate_prod(axis, dtype, out)
def ndim(a):
    """
    Return the number of dimensions of an array.

    Parameters
    ----------
    a : array_like
        Input array; converted if not already an ndarray.

    Returns
    -------
    number_of_dimensions : int
        Number of dimensions in `a`; scalars are zero-dimensional.
    """
    try:
        return a.ndim
    except AttributeError:
        # Lists, scalars, etc.: convert first.
        return asarray(a).ndim
def rank(a):
    """
    Return the number of dimensions of an array (old Numeric name).

    `rank` is the historical Numeric term for what NumPy calls `ndim`;
    it is unrelated to matrix rank.  Scalars are zero-dimensional.

    Parameters
    ----------
    a : array_like
        Array whose number of dimensions is desired; converted if needed.

    Returns
    -------
    number_of_dimensions : int
    """
    nd = getattr(a, 'ndim', None)
    if nd is not None:
        return nd
    return asarray(a).ndim
def size(a, axis=None):
    """
    Return the number of elements along a given axis.

    Parameters
    ----------
    a : array_like
        Input data.
    axis : int, optional
        Axis along which the elements are counted; by default the total
        number of elements is returned.

    Returns
    -------
    element_count : int
        Number of elements along the specified axis.
    """
    try:
        return a.size if axis is None else a.shape[axis]
    except AttributeError:
        # Non-ndarray input: convert, then repeat the same lookup.
        converted = asarray(a)
        return converted.size if axis is None else converted.shape[axis]
def around(a, decimals=0, out=None):
    """
    Evenly round to the given number of decimals.

    Parameters
    ----------
    a : array_like
        Input data.
    decimals : int, optional
        Number of decimal places to round to (default 0).  Negative values
        count positions to the left of the decimal point.
    out : ndarray, optional
        Alternative output array with the expected output's shape.

    Returns
    -------
    rounded_array : ndarray
        Array of the same type as `a` with rounded values; a reference to
        `out` when it was given.  Real and imaginary parts of complex
        numbers are rounded separately.

    Notes
    -----
    Values exactly halfway between rounded decimals go to the nearest
    *even* value (1.5 and 2.5 both round to 2.0).  Results may surprise
    due to IEEE-754 representation of decimal fractions.
    """
    rounder = getattr(a, 'round', None)
    if rounder is None:
        return _wrapit(a, 'round', decimals, out)
    return rounder(decimals, out)
def round_(a, decimals=0, out=None):
    """
    Round an array to the given number of decimals.

    Alias of `around`; refer to it for full documentation.
    """
    rounder = getattr(a, 'round', None)
    if rounder is None:
        return _wrapit(a, 'round', decimals, out)
    return rounder(decimals, out)
def mean(a, axis=None, dtype=None, out=None):
    """
    Compute the arithmetic mean along the specified axis.

    Parameters
    ----------
    a : array_like
        Numbers whose mean is desired; converted if not an array.
    axis : int, optional
        Axis along which the means are computed; flattened by default.
    dtype : dtype, optional
        Accumulator/result type.  Integer inputs default to float64;
        floating inputs keep their own dtype.
    out : ndarray, optional
        Alternative output array with the expected output's shape.

    Returns
    -------
    mean : ndarray or scalar
        The mean values, or a reference to `out` when it was given.

    Notes
    -----
    The arithmetic mean is the sum of elements along the axis divided by
    the number of elements.  See `average` for weighted means.
    """
    averager = getattr(a, 'mean', None)
    if averager is None:
        return _wrapit(a, 'mean', axis, dtype, out)
    return averager(axis, dtype, out)
def std(a, axis=None, dtype=None, out=None, ddof=0):
    """
    Compute the standard deviation along the specified axis.

    Parameters
    ----------
    a : array_like
        Values whose standard deviation is computed (flattened by default).
    axis : int, optional
        Axis along which to operate.
    dtype : dtype, optional
        Accumulator/result type; integer inputs default to float64.
    out : ndarray, optional
        Alternative output array with the expected output's shape.
    ddof : int, optional
        Delta Degrees of Freedom: the divisor is ``N - ddof``.  ``ddof=0``
        (the default) is the maximum-likelihood estimate; ``ddof=1`` gives
        the unbiased variance estimator (its square root is still not an
        unbiased estimate of the standard deviation itself).

    Returns
    -------
    standard_deviation : ndarray or scalar
        The result, or a reference to `out` when it was given.

    Notes
    -----
    ``std = sqrt(mean(abs(x - x.mean())**2))``.  For complex numbers the
    absolute value is taken before squaring, so the result is real and
    nonnegative.  See also `var` and `mean`.
    """
    deviation = getattr(a, 'std', None)
    if deviation is None:
        return _wrapit(a, 'std', axis, dtype, out, ddof)
    return deviation(axis, dtype, out, ddof)
def var(a, axis=None, dtype=None, out=None, ddof=0):
    """
    Compute the variance along the specified axis.

    Returns the variance of the array elements, a measure of the spread of
    a distribution.  The variance is computed for the flattened array by
    default, otherwise over the specified axis.

    Parameters
    ----------
    a : array_like
        Array containing numbers whose variance is desired. If `a` is not an
        array, a conversion is attempted.
    axis : int, optional
        Axis along which the variance is computed. The default is to compute
        the variance of the flattened array.
    dtype : dtype, optional
        Type to use in computing the variance. For arrays of integer type
        the default is float64; for arrays of float types it is the same as
        the array type.
    out : ndarray, optional
        Alternative output array in which to place the result. It must have
        the same shape as the expected output but the type is cast if
        necessary.
    ddof : int, optional
        "Delta Degrees of Freedom": the divisor used in calculation is
        ``N - ddof``, where ``N`` represents the number of elements. By
        default `ddof` is zero.

    Returns
    -------
    variance : ndarray, see dtype parameter above
        If out=None, returns a new array containing the variance; otherwise
        a reference to the output array is returned.

    See Also
    --------
    std : Standard deviation
    mean : Average

    Notes
    -----
    The variance is the average of the squared deviations from the mean,
    i.e., ``var = mean(abs(x - x.mean())**2)``.  With `ddof` specified the
    divisor ``N - ddof`` is used instead of ``N``; ``ddof=1`` provides an
    unbiased estimator of the variance of the infinite population, while
    ``ddof=0`` is the maximum-likelihood estimate for normally distributed
    variables.  For complex numbers the absolute value is taken before
    squaring, so the result is always real and nonnegative.

    Examples
    --------
    >>> a = np.array([[1,2],[3,4]])
    >>> np.var(a)
    1.25
    >>> np.var(a,0)
    array([ 1.,  1.])
    >>> np.var(a,1)
    array([ 0.25,  0.25])
    """
    # Delegate to the ndarray method; fall back through _wrapit for
    # array_likes so __array_wrap__ on the result is honoured.
    try:
        var = a.var
    except AttributeError:
        return _wrapit(a, 'var', axis, dtype, out, ddof)
    return var(axis, dtype, out, ddof)
| 27.72827 | 79 | 0.580068 |
__docformat__ = "restructuredtext en"
__all__ = ['take', 'reshape', 'choose', 'repeat', 'put',
'swapaxes', 'transpose', 'sort', 'argsort', 'argmax', 'argmin',
'searchsorted', 'alen',
'resize', 'diagonal', 'trace', 'ravel', 'nonzero', 'shape',
'compress', 'clip', 'sum', 'product', 'prod', 'sometrue', 'alltrue',
'any', 'all', 'cumsum', 'cumproduct', 'cumprod', 'ptp', 'ndim',
'rank', 'size', 'around', 'round_', 'mean', 'std', 'var', 'squeeze',
'amax', 'amin',
]
import multiarray as mu
import umath as um
import numerictypes as nt
from numeric import asarray, array, asanyarray, concatenate
_dt_ = nt.sctype2char
import types
try:
_gentype = types.GeneratorType
except AttributeError:
_gentype = types.NoneType
_sum_ = sum
def _wrapit(obj, method, *args, **kwds):
try:
wrap = obj.__array_wrap__
except AttributeError:
wrap = None
result = getattr(asarray(obj),method)(*args, **kwds)
if wrap:
if not isinstance(result, mu.ndarray):
result = asarray(result)
result = wrap(result)
return result
def take(a, indices, axis=None, out=None, mode='raise'):
try:
take = a.take
except AttributeError:
return _wrapit(a, 'take', indices, axis, out, mode)
return take(indices, axis, out, mode)
def reshape(a, newshape, order='C'):
try:
reshape = a.reshape
except AttributeError:
return _wrapit(a, 'reshape', newshape, order=order)
return reshape(newshape, order=order)
def choose(a, choices, out=None, mode='raise'):
try:
choose = a.choose
except AttributeError:
return _wrapit(a, 'choose', choices, out=out, mode=mode)
return choose(choices, out=out, mode=mode)
def repeat(a, repeats, axis=None):
try:
repeat = a.repeat
except AttributeError:
return _wrapit(a, 'repeat', repeats, axis)
return repeat(repeats, axis)
def put(a, ind, v, mode='raise'):
return a.put(ind, v, mode)
def swapaxes(a, axis1, axis2):
try:
swapaxes = a.swapaxes
except AttributeError:
return _wrapit(a, 'swapaxes', axis1, axis2)
return swapaxes(axis1, axis2)
def transpose(a, axes=None):
try:
transpose = a.transpose
except AttributeError:
return _wrapit(a, 'transpose', axes)
return transpose(axes)
def sort(a, axis=-1, kind='quicksort', order=None):
if axis is None:
a = asanyarray(a).flatten()
axis = 0
else:
a = asanyarray(a).copy()
a.sort(axis, kind, order)
return a
def argsort(a, axis=-1, kind='quicksort', order=None):
try:
argsort = a.argsort
except AttributeError:
return _wrapit(a, 'argsort', axis, kind, order)
return argsort(axis, kind, order)
def argmax(a, axis=None):
try:
argmax = a.argmax
except AttributeError:
return _wrapit(a, 'argmax', axis)
return argmax(axis)
def argmin(a, axis=None):
try:
argmin = a.argmin
except AttributeError:
return _wrapit(a, 'argmin', axis)
return argmin(axis)
def searchsorted(a, v, side='left'):
try:
searchsorted = a.searchsorted
except AttributeError:
return _wrapit(a, 'searchsorted', v, side)
return searchsorted(v, side)
def resize(a, new_shape):
if isinstance(new_shape, (int, nt.integer)):
new_shape = (new_shape,)
a = ravel(a)
Na = len(a)
if not Na: return mu.zeros(new_shape, a.dtype.char)
total_size = um.multiply.reduce(new_shape)
n_copies = int(total_size / Na)
extra = total_size % Na
if total_size == 0:
return a[:0]
if extra != 0:
n_copies = n_copies+1
extra = Na-extra
a = concatenate( (a,)*n_copies)
if extra > 0:
a = a[:-extra]
return reshape(a, new_shape)
def squeeze(a):
try:
squeeze = a.squeeze
except AttributeError:
return _wrapit(a, 'squeeze')
return squeeze()
def diagonal(a, offset=0, axis1=0, axis2=1):
return asarray(a).diagonal(offset, axis1, axis2)
def trace(a, offset=0, axis1=0, axis2=1, dtype=None, out=None):
return asarray(a).trace(offset, axis1, axis2, dtype, out)
def ravel(a, order='C'):
return asarray(a).ravel(order)
def nonzero(a):
try:
nonzero = a.nonzero
except AttributeError:
res = _wrapit(a, 'nonzero')
else:
res = nonzero()
return res
def shape(a):
try:
result = a.shape
except AttributeError:
result = asarray(a).shape
return result
def compress(condition, a, axis=None, out=None):
try:
compress = a.compress
except AttributeError:
return _wrapit(a, 'compress', condition, axis, out)
return compress(condition, axis, out)
def clip(a, a_min, a_max, out=None):
try:
clip = a.clip
except AttributeError:
return _wrapit(a, 'clip', a_min, a_max, out)
return clip(a_min, a_max, out)
def sum(a, axis=None, dtype=None, out=None):
if isinstance(a, _gentype):
res = _sum_(a)
if out is not None:
out[...] = res
return out
return res
try:
sum = a.sum
except AttributeError:
return _wrapit(a, 'sum', axis, dtype, out)
return sum(axis, dtype, out)
def product (a, axis=None, dtype=None, out=None):
try:
prod = a.prod
except AttributeError:
return _wrapit(a, 'prod', axis, dtype, out)
return prod(axis, dtype, out)
def sometrue(a, axis=None, out=None):
try:
any = a.any
except AttributeError:
return _wrapit(a, 'any', axis, out)
return any(axis, out)
def alltrue (a, axis=None, out=None):
try:
all = a.all
except AttributeError:
return _wrapit(a, 'all', axis, out)
return all(axis, out)
def any(a,axis=None, out=None):
try:
any = a.any
except AttributeError:
return _wrapit(a, 'any', axis, out)
return any(axis, out)
def all(a,axis=None, out=None):
try:
all = a.all
except AttributeError:
return _wrapit(a, 'all', axis, out)
return all(axis, out)
def cumsum (a, axis=None, dtype=None, out=None):
try:
cumsum = a.cumsum
except AttributeError:
return _wrapit(a, 'cumsum', axis, dtype, out)
return cumsum(axis, dtype, out)
def cumproduct(a, axis=None, dtype=None, out=None):
try:
cumprod = a.cumprod
except AttributeError:
return _wrapit(a, 'cumprod', axis, dtype, out)
return cumprod(axis, dtype, out)
def ptp(a, axis=None, out=None):
try:
ptp = a.ptp
except AttributeError:
return _wrapit(a, 'ptp', axis, out)
return ptp(axis, out)
def amax(a, axis=None, out=None):
try:
amax = a.max
except AttributeError:
return _wrapit(a, 'max', axis, out)
return amax(axis, out)
def amin(a, axis=None, out=None):
try:
amin = a.min
except AttributeError:
return _wrapit(a, 'min', axis, out)
return amin(axis, out)
def alen(a):
try:
return len(a)
except TypeError:
return len(array(a,ndmin=1))
def prod(a, axis=None, dtype=None, out=None):
try:
prod = a.prod
except AttributeError:
return _wrapit(a, 'prod', axis, dtype, out)
return prod(axis, dtype, out)
def cumprod(a, axis=None, dtype=None, out=None):
try:
cumprod = a.cumprod
except AttributeError:
return _wrapit(a, 'cumprod', axis, dtype, out)
return cumprod(axis, dtype, out)
def ndim(a):
try:
return a.ndim
except AttributeError:
return asarray(a).ndim
def rank(a):
try:
return a.ndim
except AttributeError:
return asarray(a).ndim
def size(a, axis=None):
if axis is None:
try:
return a.size
except AttributeError:
return asarray(a).size
else:
try:
return a.shape[axis]
except AttributeError:
return asarray(a).shape[axis]
def around(a, decimals=0, out=None):
try:
round = a.round
except AttributeError:
return _wrapit(a, 'round', decimals, out)
return round(decimals, out)
def round_(a, decimals=0, out=None):
try:
round = a.round
except AttributeError:
return _wrapit(a, 'round', decimals, out)
return round(decimals, out)
def mean(a, axis=None, dtype=None, out=None):
try:
mean = a.mean
except AttributeError:
return _wrapit(a, 'mean', axis, dtype, out)
return mean(axis, dtype, out)
def std(a, axis=None, dtype=None, out=None, ddof=0):
try:
std = a.std
except AttributeError:
return _wrapit(a, 'std', axis, dtype, out, ddof)
return std(axis, dtype, out, ddof)
def var(a, axis=None, dtype=None, out=None, ddof=0):
try:
var = a.var
except AttributeError:
return _wrapit(a, 'var', axis, dtype, out, ddof)
return var(axis, dtype, out, ddof)
| true | true |
f7f586269f133a5ac939c850bf82dc77c738daf0 | 18,321 | py | Python | megatron/data/indexed_dataset.py | sourcery-ai-bot/Megatron-LM | f27f44e2c49d1cb39b2288bef6f7d837e11094cb | [
"MIT"
] | null | null | null | megatron/data/indexed_dataset.py | sourcery-ai-bot/Megatron-LM | f27f44e2c49d1cb39b2288bef6f7d837e11094cb | [
"MIT"
] | null | null | null | megatron/data/indexed_dataset.py | sourcery-ai-bot/Megatron-LM | f27f44e2c49d1cb39b2288bef6f7d837e11094cb | [
"MIT"
] | null | null | null | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# copied from fairseq/fairseq/data/indexed_dataset.py
# Removed IndexedRawTextDataset since it relied on Fairseq dictionary
# other slight modifications to remove fairseq dependencies
# Added document index to index file and made it accessible.
# An empty sentence no longer separates documents.
from functools import lru_cache
import os
import shutil
import struct
from itertools import accumulate
import numpy as np
import torch
from megatron import print_rank_0
def __best_fitting_dtype(vocab_size=None):
    """Smallest integer dtype able to hold token ids for `vocab_size`.

    uint16 is used for vocabularies below 65500 (a safety margin under the
    65536 maximum); unknown or larger vocabularies fall back to int32.
    """
    if vocab_size is None or vocab_size >= 65500:
        return np.int32
    return np.uint16
def get_available_dataset_impl():
    """Return the names of the supported indexed-dataset implementations."""
    impls = ['lazy', 'cached', 'mmap']
    return impls
def infer_dataset_impl(path):
    """Guess which implementation wrote `path` by peeking at the index magic.

    Returns 'cached', 'mmap', or None (unknown magic or missing files).
    """
    if not IndexedDataset.exists(path):
        print(f"Dataset does not exist: {path}")
        print("Path should be a basename that both .idx and .bin can be appended to get full filenames.")
        return None
    with open(index_file_path(path), 'rb') as f:
        magic = f.read(8)
    if magic == IndexedDataset._HDR_MAGIC:
        return 'cached'
    if magic == MMapIndexedDataset.Index._HDR_MAGIC[:8]:
        return 'mmap'
    return None
def make_builder(out_file, impl, vocab_size=None):
    """Create a dataset builder writing to `out_file` for the given impl.

    'mmap' builders store token ids in the smallest dtype that fits the
    vocabulary; every other impl uses the classic int32 IndexedDataset
    builder.
    """
    if impl == 'mmap':
        return MMapIndexedDatasetBuilder(out_file, dtype=__best_fitting_dtype(vocab_size))
    return IndexedDatasetBuilder(out_file)
def make_dataset(path, impl, skip_warmup=False):
    """Open the dataset at basename `path` with the requested implementation.

    `impl` is one of 'lazy', 'cached', 'mmap', or 'infer' (sniff the index
    header).  Returns None, after printing a hint, when the files are
    missing or the implementation name is unknown.
    """
    if not IndexedDataset.exists(path):
        print(f"Dataset does not exist: {path}")
        print("Path should be a basename that both .idx and .bin can be appended to get full filenames.")
        return None
    if impl == 'infer':
        impl = infer_dataset_impl(path)
    if impl == 'lazy' and IndexedDataset.exists(path):
        return IndexedDataset(path)
    if impl == 'cached' and IndexedDataset.exists(path):
        return IndexedCachedDataset(path)
    if impl == 'mmap' and MMapIndexedDataset.exists(path):
        return MMapIndexedDataset(path, skip_warmup)
    print(f"Unknown dataset implementation: {impl}")
    return None
def dataset_exists(path, impl):
    """True when both index and data files for `path` exist for `impl`."""
    dataset_cls = MMapIndexedDataset if impl == 'mmap' else IndexedDataset
    return dataset_cls.exists(path)
def read_longs(f, n):
    """Read `n` int64 values (native byte order) from binary stream `f`."""
    out = np.empty(n, dtype=np.int64)
    f.readinto(out)
    return out
def write_longs(f, a):
    """Write the values of `a` to binary stream `f` as int64 raw bytes."""
    data = np.array(a, dtype=np.int64)  # copy also guarantees contiguity
    f.write(data)
# Mapping from the 1-byte dtype code stored in index headers to the numpy
# dtype of the payload.  Codes are part of the on-disk format: never
# renumber them.
dtypes = {
    1: np.uint8,
    2: np.int8,
    3: np.int16,
    4: np.int32,
    5: np.int64,
    # Code 6 was `np.float` (an alias of builtin float, i.e. float64);
    # the alias was removed in NumPy 1.24, so spell the dtype explicitly.
    6: np.float64,
    7: np.double,
    8: np.uint16
}
def code(dtype):
    """Return the on-disk code for `dtype`; raise ValueError if unsupported."""
    for key, value in dtypes.items():
        if value == dtype:
            return key
    raise ValueError(dtype)
def index_file_path(prefix_path):
    """Path of the index (.idx) file for a dataset basename."""
    return f"{prefix_path}.idx"
def data_file_path(prefix_path):
    """Path of the raw data (.bin) file for a dataset basename."""
    return f"{prefix_path}.bin"
def create_doc_idx(sizes):
    """Rebuild document boundaries from per-sentence sizes.

    A zero-length sentence marks the end of a document.  The returned list
    holds the index of the first sentence of each document, starting at 0.
    """
    doc_idx = [0]
    doc_idx.extend(i + 1 for i, s in enumerate(sizes) if s == 0)
    return doc_idx
class IndexedDataset(torch.utils.data.Dataset):
    """Loader for IndexedDataset.

    Reads the legacy two-file format: `<path>.idx` holds a header plus
    int64 dim-offset / data-offset / size / document-index tables, and
    `<path>.bin` holds the raw token payload.  Items are read lazily from
    disk on each access.
    """

    # 8-byte magic written at the start of every legacy .idx file.
    _HDR_MAGIC = b'TNTIDX\x00\x00'

    def __init__(self, path):
        super().__init__()
        self.path = path
        self.data_file = None  # .bin handle, opened lazily on first access
        self.read_index(path)

    def read_index(self, path):
        """Parse the .idx header and load the offset/size tables."""
        with open(index_file_path(path), 'rb') as f:
            magic = f.read(8)
            assert magic == self._HDR_MAGIC, (
                'Index file doesn\'t match expected format. '
                'Make sure that --dataset-impl is configured properly.'
            )
            version = f.read(8)
            assert struct.unpack('<Q', version) == (1,)
            code, self.element_size = struct.unpack('<QQ', f.read(16))
            self.dtype = dtypes[code]
            self._len, self.s = struct.unpack('<QQ', f.read(16))
            # Fix: unpack the scalar.  Previously the 1-tuple returned by
            # struct.unpack was stored directly and only worked because
            # np.empty accepts shape tuples; this also matches how
            # MMapIndexedDataset.Index reads its counts.
            self.doc_count = struct.unpack('<Q', f.read(8))[0]
            self.dim_offsets = read_longs(f, self._len + 1)
            self.data_offsets = read_longs(f, self._len + 1)
            self.sizes = read_longs(f, self.s)
            self.doc_idx = read_longs(f, self.doc_count)

    def read_data(self, path):
        # Unbuffered so seek()/readinto() positions are exact.
        self.data_file = open(data_file_path(path), 'rb', buffering=0)

    def check_index(self, i):
        if i < 0 or i >= self._len:
            raise IndexError('index out of range')

    def __del__(self):
        if self.data_file:
            self.data_file.close()

    # @lru_cache(maxsize=8)
    def __getitem__(self, idx):
        """Return one item (int index) or a list of items (unit-step slice)."""
        if not self.data_file:
            self.read_data(self.path)
        if isinstance(idx, int):
            i = idx
            self.check_index(i)
            tensor_size = self.sizes[self.dim_offsets[i]:self.dim_offsets[i + 1]]
            a = np.empty(tensor_size, dtype=self.dtype)
            self.data_file.seek(self.data_offsets[i] * self.element_size)
            self.data_file.readinto(a)
            return a
        elif isinstance(idx, slice):
            start, stop, step = idx.indices(len(self))
            if step != 1:
                raise ValueError("Slices into indexed_dataset must be contiguous")
            sizes = self.sizes[self.dim_offsets[start]:self.dim_offsets[stop]]
            size = sum(sizes)
            # One contiguous read, then split into per-item views.
            a = np.empty(size, dtype=self.dtype)
            self.data_file.seek(self.data_offsets[start] * self.element_size)
            self.data_file.readinto(a)
            offsets = list(accumulate(sizes))
            return np.split(a, offsets[:-1])

    def __len__(self):
        return self._len

    def num_tokens(self, index):
        return self.sizes[index]

    def size(self, index):
        return self.sizes[index]

    @staticmethod
    def exists(path):
        return (
            os.path.exists(index_file_path(path)) and os.path.exists(data_file_path(path))
        )

    @property
    def supports_prefetch(self):
        return False  # avoid prefetching to save memory
class IndexedCachedDataset(IndexedDataset):
    """IndexedDataset variant that serves items from a prefetched RAM cache.

    `prefetch(indices)` must be called with every index that will later be
    requested; `__getitem__` raises KeyError for indices never prefetched.
    """
    def __init__(self, path):
        super().__init__(path)
        # `cache` is one flat array holding all prefetched items back to back;
        # `cache_index` maps item index -> element offset into `cache`.
        self.cache = None
        self.cache_index = {}
    @property
    def supports_prefetch(self):
        return True
    def prefetch(self, indices):
        """Load the given item indices into the in-memory cache."""
        # Already fully cached: nothing to do.
        if all(i in self.cache_index for i in indices):
            return
        if not self.data_file:
            self.read_data(self.path)
        # Sorted + deduplicated so the .bin file is read mostly sequentially.
        indices = sorted(set(indices))
        total_size = sum(
            self.data_offsets[i + 1] - self.data_offsets[i] for i in indices
        )
        # NOTE: any previously cached entries are discarded here.
        self.cache = np.empty(total_size, dtype=self.dtype)
        ptx = 0
        self.cache_index.clear()
        for i in indices:
            self.cache_index[i] = ptx
            size = self.data_offsets[i + 1] - self.data_offsets[i]
            # Read directly into the cache slice (zero extra copy).
            a = self.cache[ptx: ptx + size]
            self.data_file.seek(self.data_offsets[i] * self.element_size)
            self.data_file.readinto(a)
            ptx += size
        if self.data_file:
            # close and delete data file after prefetch so we can pickle
            self.data_file.close()
            self.data_file = None
    # @lru_cache(maxsize=8)
    def __getitem__(self, idx):
        """Return a copy of item `idx` from the cache (int or slice)."""
        if isinstance(idx, int):
            i = idx
            self.check_index(i)
            tensor_size = self.sizes[self.dim_offsets[i]:self.dim_offsets[i + 1]]
            a = np.empty(tensor_size, dtype=self.dtype)
            # KeyError here means the index was never prefetched.
            ptx = self.cache_index[i]
            np.copyto(a, self.cache[ptx: ptx + a.size])
            return a
        elif isinstance(idx, slice):
            # Delegate element-wise so each item goes through the cache path.
            return [self[i] for i in range(*idx.indices(len(self)))]
class IndexedDatasetBuilder(object):
    """Writes the legacy IndexedDataset two-file format.

    Items are appended to the .bin payload as they arrive; `finalize()`
    writes the .idx header plus the dim-offset/data-offset/size/doc-index
    tables.
    """

    # Bytes per element for each supported dtype.
    # Fix: the old `np.float: 4` entry was doubly wrong — the alias was
    # removed in NumPy 1.24, and it meant float64 (8 bytes, already covered
    # by np.double).  float32 is the 4-byte floating type.
    element_sizes = {
        np.uint8: 1,
        np.int8: 1,
        np.int16: 2,
        np.int32: 4,
        np.int64: 8,
        np.float32: 4,
        np.double: 8
    }

    def __init__(self, out_file, dtype=np.int32):
        self.out_file = open(out_file, 'wb')
        self.dtype = dtype
        self.data_offsets = [0]  # element offset where each item starts
        self.dim_offsets = [0]   # running offset into self.sizes per item
        self.sizes = []          # flattened per-item tensor shapes
        self.element_size = self.element_sizes[self.dtype]
        self.doc_idx = [0]       # item count at each document boundary

    def add_item(self, tensor):
        """Append one torch tensor to the payload and record its offsets."""
        nbytes = self.out_file.write(np.array(tensor.numpy(), dtype=self.dtype))
        # Integer division: nbytes is always a whole multiple of
        # element_size, and true division made the offsets float.
        self.data_offsets.append(self.data_offsets[-1] + nbytes // self.element_size)
        self.sizes.extend(tensor.size())
        self.dim_offsets.append(self.dim_offsets[-1] + len(tensor.size()))

    def end_document(self):
        """Mark the end of the current document."""
        self.doc_idx.append(len(self.sizes))

    def merge_file_(self, another_file):
        """Append another dataset with the same dtype (payload + tables).

        NOTE(review): the other dataset's doc_idx is not carried over, so
        its document boundaries are lost — behavior preserved from the
        original implementation.
        """
        index = IndexedDataset(another_file)
        assert index.dtype == self.dtype
        # Shift the incoming offset tables past what we already hold.
        begin = self.data_offsets[-1]
        for offset in index.data_offsets[1:]:
            self.data_offsets.append(begin + offset)
        self.sizes.extend(index.sizes)
        begin = self.dim_offsets[-1]
        for dim_offset in index.dim_offsets[1:]:
            self.dim_offsets.append(begin + dim_offset)
        # Raw byte copy of the other payload onto the end of ours.
        with open(data_file_path(another_file), 'rb') as f:
            while True:
                data = f.read(1024)
                if data:
                    self.out_file.write(data)
                else:
                    break

    def finalize(self, index_file):
        """Close the payload and write the index file."""
        self.out_file.close()
        with open(index_file, 'wb') as index:
            index.write(b'TNTIDX\x00\x00')     # magic
            index.write(struct.pack('<Q', 1))  # format version
            index.write(struct.pack('<QQ', code(self.dtype), self.element_size))
            index.write(struct.pack('<QQ', len(self.data_offsets) - 1, len(self.sizes)))
            index.write(struct.pack('<Q', len(self.doc_idx)))
            write_longs(index, self.dim_offsets)
            write_longs(index, self.data_offsets)
            write_longs(index, self.sizes)
            write_longs(index, self.doc_idx)
def _warmup_mmap_file(path):
    """Sequentially read `path` to pull its pages into the OS page cache."""
    chunk_size = 100 * 1024 * 1024  # 100 MiB per read
    with open(path, 'rb') as stream:
        while stream.read(chunk_size):
            pass
class MMapIndexedDataset(torch.utils.data.Dataset):
    class Index(object):
        """On-disk index for MMapIndexedDataset.

        Layout: 9-byte magic, <Q version, <B dtype code, <Q sequence count,
        <Q document count, then three packed arrays: int32 sizes, int64
        byte pointers, int64 document index.
        """
        _HDR_MAGIC = b'MMIDIDX\x00\x00'
        @classmethod
        def writer(cls, path, dtype):
            """Return a context manager that writes an index file at `path`."""
            class _Writer(object):
                def __enter__(self):
                    self._file = open(path, 'wb')
                    # Header: magic, format version, payload dtype code.
                    self._file.write(cls._HDR_MAGIC)
                    self._file.write(struct.pack('<Q', 1))
                    self._file.write(struct.pack('<B', code(dtype)))
                    return self
                @staticmethod
                def _get_pointers(sizes):
                    # Byte offset of each sequence start in the .bin payload
                    # (exclusive prefix sum of size * itemsize).
                    dtype_size = dtype().itemsize
                    address = 0
                    pointers = []
                    for size in sizes:
                        pointers.append(address)
                        address += size * dtype_size
                    return pointers
                def write(self, sizes, doc_idx):
                    """Write counts and the three tables after the header."""
                    pointers = self._get_pointers(sizes)
                    # Counts first, so the reader knows how much to frombuffer.
                    self._file.write(struct.pack('<Q', len(sizes)))
                    self._file.write(struct.pack('<Q', len(doc_idx)))
                    sizes = np.array(sizes, dtype=np.int32)
                    self._file.write(sizes.tobytes(order='C'))
                    del sizes
                    pointers = np.array(pointers, dtype=np.int64)
                    self._file.write(pointers.tobytes(order='C'))
                    del pointers
                    doc_idx = np.array(doc_idx, dtype=np.int64)
                    self._file.write(doc_idx.tobytes(order='C'))
                def __exit__(self, exc_type, exc_val, exc_tb):
                    self._file.close()
            return _Writer()
        def __init__(self, path, skip_warmup=False):
            """Parse the header of `path` and memory-map the three tables."""
            with open(path, 'rb') as stream:
                magic_test = stream.read(9)
                assert self._HDR_MAGIC == magic_test, (
                    'Index file doesn\'t match expected format. '
                    'Make sure that --dataset-impl is configured properly.'
                )
                version = struct.unpack('<Q', stream.read(8))
                assert (1,) == version
                dtype_code, = struct.unpack('<B', stream.read(1))
                self._dtype = dtypes[dtype_code]
                self._dtype_size = self._dtype().itemsize
                self._len = struct.unpack('<Q', stream.read(8))[0]
                self._doc_count = struct.unpack('<Q', stream.read(8))[0]
                # Tables begin right after the fixed-size header.
                offset = stream.tell()
            if not skip_warmup:
                print_rank_0("    warming up index mmap file...")
                _warmup_mmap_file(path)
            # NOTE: despite the name, this buffer maps the *index* file.
            self._bin_buffer_mmap = np.memmap(path, mode='r', order='C')
            self._bin_buffer = memoryview(self._bin_buffer_mmap)
            print_rank_0("    reading sizes...")
            self._sizes = np.frombuffer(
                self._bin_buffer,
                dtype=np.int32,
                count=self._len,
                offset=offset)
            print_rank_0("    reading pointers...")
            self._pointers = np.frombuffer(self._bin_buffer, dtype=np.int64, count=self._len,
                                           offset=offset + self._sizes.nbytes)
            print_rank_0("    reading document index...")
            self._doc_idx = np.frombuffer(self._bin_buffer, dtype=np.int64, count=self._doc_count,
                                          offset=offset + self._sizes.nbytes + self._pointers.nbytes)
        def __del__(self):
            # Close the mmap explicitly; dropping the reference alone may
            # keep the file handle alive until garbage collection.
            self._bin_buffer_mmap._mmap.close()
            del self._bin_buffer_mmap
        @property
        def dtype(self):
            # numpy dtype of the .bin payload elements.
            return self._dtype
        @property
        def sizes(self):
            # Per-sequence element counts (int32 array view).
            return self._sizes
        @property
        def doc_idx(self):
            # Sequence index of each document start (int64 array view).
            return self._doc_idx
        # NOTE(review): lru_cache on an instance method keeps `self` alive in
        # the cache for the process lifetime; preserved as-is.
        @lru_cache(maxsize=8)
        def __getitem__(self, i):
            # (byte pointer, element count) of sequence i.
            return self._pointers[i], self._sizes[i]
        def __len__(self):
            return self._len
def __init__(self, path, skip_warmup=False):
super().__init__()
self._path = None
self._index = None
self._bin_buffer = None
self._do_init(path, skip_warmup)
def __getstate__(self):
return self._path
def __setstate__(self, state):
self._do_init(state)
def _do_init(self, path, skip_warmup):
self._path = path
self._index = self.Index(index_file_path(self._path), skip_warmup)
if not skip_warmup:
print_rank_0(" warming up data mmap file...")
_warmup_mmap_file(data_file_path(self._path))
print_rank_0(" creating numpy buffer of mmap...")
self._bin_buffer_mmap = np.memmap(data_file_path(self._path), mode='r', order='C')
print_rank_0(" creating memory view of numpy buffer...")
self._bin_buffer = memoryview(self._bin_buffer_mmap)
def __del__(self):
self._bin_buffer_mmap._mmap.close()
del self._bin_buffer_mmap
del self._index
def __len__(self):
return len(self._index)
# @lru_cache(maxsize=8)
def __getitem__(self, idx):
if isinstance(idx, int):
ptr, size = self._index[idx]
np_array = np.frombuffer(self._bin_buffer, dtype=self._index.dtype,
count=size, offset=ptr)
return np_array
elif isinstance(idx, slice):
start, stop, step = idx.indices(len(self))
if step != 1:
raise ValueError("Slices into indexed_dataset must be contiguous")
ptr = self._index._pointers[start]
sizes = self._index._sizes[idx]
offsets = list(accumulate(sizes))
total_size = sum(sizes)
np_array = np.frombuffer(self._bin_buffer, dtype=self._index.dtype,
count=total_size, offset=ptr)
return np.split(np_array, offsets[:-1])
def get(self, idx, offset=0, length=None):
""" Retrieves a single item from the dataset with the option to only
return a portion of the item.
get(idx) is the same as [idx] but get() does not support slicing.
"""
ptr, size = self._index[idx]
if length is None:
length = size - offset
ptr += offset * np.dtype(self._index.dtype).itemsize
return np.frombuffer(self._bin_buffer, dtype=self._index.dtype,
count=length, offset=ptr)
@property
def sizes(self):
return self._index.sizes
@property
def doc_idx(self):
return self._index.doc_idx
def get_doc_idx(self):
return self._index._doc_idx
def set_doc_idx(self, doc_idx_):
self._index._doc_idx = doc_idx_
@property
def supports_prefetch(self):
return False
@staticmethod
def exists(path):
return (
os.path.exists(index_file_path(path)) and os.path.exists(data_file_path(path))
)
class MMapIndexedDatasetBuilder(object):
    """Incrementally writes an MMapIndexedDataset: raw item bytes stream to
    *out_file* (the ``.bin``) while sizes and document boundaries are kept
    in memory until finalize() emits the ``.idx`` sidecar."""
    def __init__(self, out_file, dtype=np.int64):
        self._data_file = open(out_file, 'wb')
        self._dtype = dtype
        self._sizes = []  # number of elements in each added item
        self._doc_idx = [0]  # item index at which each document starts
    def add_item(self, tensor):
        """Append one tensor's data, converted to the builder dtype."""
        np_array = np.array(tensor.numpy(), dtype=self._dtype)
        self._data_file.write(np_array.tobytes(order='C'))
        self._sizes.append(np_array.size)
    def end_document(self):
        """Mark the end of the current document."""
        self._doc_idx.append(len(self._sizes))
    def merge_file_(self, another_file):
        """Append another on-disk dataset with the same dtype."""
        # Concatenate index
        index = MMapIndexedDataset.Index(index_file_path(another_file))
        assert index.dtype == self._dtype
        for size in index.sizes:
            self._sizes.append(size)
        # Concatenate data
        with open(data_file_path(another_file), 'rb') as f:
            shutil.copyfileobj(f, self._data_file)
    def finalize(self, index_file):
        """Close the data file and write the ``.idx`` sidecar."""
        self._data_file.close()
        with MMapIndexedDataset.Index.writer(index_file, self._dtype) as index:
            index.write(self._sizes, self._doc_idx)
| 32.484043 | 105 | 0.582828 |
from functools import lru_cache
import os
import shutil
import struct
from itertools import accumulate
import numpy as np
import torch
from megatron import print_rank_0
def __best_fitting_dtype(vocab_size=None):
    """Pick the smallest integer dtype able to hold token ids for
    *vocab_size* symbols; falls back to int32 when the size is unknown
    or too large for uint16."""
    if vocab_size is not None and vocab_size < 65500:
        return np.uint16
    return np.int32
def get_available_dataset_impl():
    """Return the names of the supported dataset implementations."""
    impls = ['lazy', 'cached', 'mmap']
    return impls
def infer_dataset_impl(path):
    """Detect which implementation wrote the dataset at *path* by reading
    the magic bytes of its index file; returns 'cached', 'mmap' or None."""
    if not IndexedDataset.exists(path):
        print(f"Dataset does not exist: {path}")
        print("Path should be a basename that both .idx and .bin can be appended to get full filenames.")
        return None
    with open(index_file_path(path), 'rb') as stream:
        header = stream.read(8)
    if header == IndexedDataset._HDR_MAGIC:
        return 'cached'
    if header == MMapIndexedDataset.Index._HDR_MAGIC[:8]:
        return 'mmap'
    return None
def make_builder(out_file, impl, vocab_size=None):
    """Create the dataset builder matching *impl*; for the mmap format the
    element dtype is sized to fit *vocab_size*."""
    if impl != 'mmap':
        return IndexedDatasetBuilder(out_file)
    return MMapIndexedDatasetBuilder(out_file, dtype=__best_fitting_dtype(vocab_size))
def make_dataset(path, impl, skip_warmup=False):
    """Instantiate the dataset reader for *impl* ('lazy', 'cached', 'mmap'
    or 'infer'); prints a diagnostic and returns None on failure."""
    if not IndexedDataset.exists(path):
        print(f"Dataset does not exist: {path}")
        print("Path should be a basename that both .idx and .bin can be appended to get full filenames.")
        return None
    resolved = infer_dataset_impl(path) if impl == 'infer' else impl
    if resolved == 'lazy' and IndexedDataset.exists(path):
        return IndexedDataset(path)
    if resolved == 'cached' and IndexedDataset.exists(path):
        return IndexedCachedDataset(path)
    if resolved == 'mmap' and MMapIndexedDataset.exists(path):
        return MMapIndexedDataset(path, skip_warmup)
    print(f"Unknown dataset implementation: {resolved}")
    return None
def dataset_exists(path, impl):
    """True when both the .idx and .bin files for *path* exist for the
    given implementation."""
    dataset_cls = MMapIndexedDataset if impl == 'mmap' else IndexedDataset
    return dataset_cls.exists(path)
def read_longs(f, n):
    """Read *n* little-endian int64 values from binary stream *f* into a
    fresh writable numpy array."""
    buf = np.empty(n, dtype=np.int64)
    f.readinto(buf)  # fills the array's buffer in place, no extra copy
    return buf
def write_longs(f, a):
    """Write the values of *a* to binary stream *f* as int64."""
    buf = np.array(a, dtype=np.int64)
    f.write(buf)
# Mapping from the on-disk type code to the numpy dtype of the stored data.
# Code 6 used to be `np.float` (an alias of builtin float / float64) which
# was removed in NumPy 1.24 and crashed this module at import time; it is
# now np.float64. Code 7 (np.double) is the same 64-bit type, so code()
# resolves float64 to 6 — readers accept either code.
dtypes = {
    1: np.uint8,
    2: np.int8,
    3: np.int16,
    4: np.int32,
    5: np.int64,
    6: np.float64,
    7: np.double,
    8: np.uint16
}


def code(dtype):
    """Return the on-disk type code for *dtype*.

    Raises ValueError when the dtype is not part of the format.
    """
    for type_code, np_type in dtypes.items():
        if np_type == dtype:
            return type_code
    raise ValueError(dtype)
def index_file_path(prefix_path):
    """Path of the index (.idx) file for the dataset basename."""
    return f"{prefix_path}.idx"
def data_file_path(prefix_path):
    """Path of the raw data (.bin) file for the dataset basename."""
    return f"{prefix_path}.bin"
def create_doc_idx(sizes):
    """Build the document index from per-item sizes: a size of zero marks a
    document boundary, and each entry is the item index where a document
    starts (always beginning with 0)."""
    return [0] + [position + 1
                  for position, size in enumerate(sizes)
                  if size == 0]
class IndexedDataset(torch.utils.data.Dataset):
    """Loader for the legacy TNT indexed format: the ``.idx`` sidecar is
    parsed eagerly while items are read from the ``.bin`` file on demand."""
    # Magic bytes identifying the legacy on-disk format.
    _HDR_MAGIC = b'TNTIDX\x00\x00'
    def __init__(self, path):
        super().__init__()
        self.path = path
        self.data_file = None  # opened lazily on first __getitem__
        self.read_index(path)
    def read_index(self, path):
        """Parse the .idx file: header, offsets, sizes and document index."""
        with open(index_file_path(path), 'rb') as f:
            magic = f.read(8)
            assert magic == self._HDR_MAGIC, (
                'Index file doesn\'t match expected format. '
                'Make sure that --dataset-impl is configured properly.'
            )
            version = f.read(8)
            assert struct.unpack('<Q', version) == (1,)
            # NOTE(review): local `code` shadows the module-level code() helper.
            code, self.element_size = struct.unpack('<QQ', f.read(16))
            self.dtype = dtypes[code]
            self._len, self.s = struct.unpack('<QQ', f.read(16))
            # struct.unpack yields a 1-tuple; read_longs passes it to
            # np.empty where it acts as a shape, giving the right length.
            self.doc_count = struct.unpack('<Q', f.read(8))
            self.dim_offsets = read_longs(f, self._len + 1)
            self.data_offsets = read_longs(f, self._len + 1)
            self.sizes = read_longs(f, self.s)
            self.doc_idx = read_longs(f, self.doc_count)
    def read_data(self, path):
        # buffering=0: raw unbuffered handle; we always seek before readinto.
        self.data_file = open(data_file_path(path), 'rb', buffering=0)
    def check_index(self, i):
        """Raise IndexError when *i* is outside [0, len)."""
        if i < 0 or i >= self._len:
            raise IndexError('index out of range')
    def __del__(self):
        if self.data_file:
            self.data_file.close()
    # @lru_cache(maxsize=8)
    def __getitem__(self, idx):
        """Return one item as a numpy array, or a list of arrays for a
        contiguous slice."""
        if not self.data_file:
            self.read_data(self.path)
        if isinstance(idx, int):
            i = idx
            self.check_index(i)
            tensor_size = self.sizes[self.dim_offsets[i]:self.dim_offsets[i + 1]]
            a = np.empty(tensor_size, dtype=self.dtype)
            self.data_file.seek(self.data_offsets[i] * self.element_size)
            self.data_file.readinto(a)
            return a
        elif isinstance(idx, slice):
            start, stop, step = idx.indices(len(self))
            if step != 1:
                raise ValueError("Slices into indexed_dataset must be contiguous")
            sizes = self.sizes[self.dim_offsets[start]:self.dim_offsets[stop]]
            size = sum(sizes)
            a = np.empty(size, dtype=self.dtype)
            self.data_file.seek(self.data_offsets[start] * self.element_size)
            self.data_file.readinto(a)
            offsets = list(accumulate(sizes))
            # Split the flat buffer back into one array per item.
            return np.split(a, offsets[:-1])
    def __len__(self):
        return self._len
    def num_tokens(self, index):
        return self.sizes[index]
    def size(self, index):
        return self.sizes[index]
    @staticmethod
    def exists(path):
        # Both the .idx and the .bin file must be present.
        return (
            os.path.exists(index_file_path(path)) and os.path.exists(data_file_path(path))
        )
    @property
    def supports_prefetch(self):
        return False  # avoid prefetching to save memory
class IndexedCachedDataset(IndexedDataset):
    """IndexedDataset variant that prefetches requested items into a single
    contiguous in-memory buffer; __getitem__ then serves from that cache."""
    def __init__(self, path):
        super().__init__(path)
        self.cache = None  # flat numpy buffer holding all prefetched items
        self.cache_index = {}  # item index -> offset of its data in `cache`
    @property
    def supports_prefetch(self):
        return True
    def prefetch(self, indices):
        """Load every item in *indices* into the in-memory cache, replacing
        any previously cached items."""
        if all(i in self.cache_index for i in indices):
            return  # everything requested is already cached
        if not self.data_file:
            self.read_data(self.path)
        indices = sorted(set(indices))
        total_size = sum(
            self.data_offsets[i + 1] - self.data_offsets[i] for i in indices
        )
        self.cache = np.empty(total_size, dtype=self.dtype)
        ptx = 0
        self.cache_index.clear()
        for i in indices:
            self.cache_index[i] = ptx
            size = self.data_offsets[i + 1] - self.data_offsets[i]
            a = self.cache[ptx: ptx + size]  # writable view into the cache
            self.data_file.seek(self.data_offsets[i] * self.element_size)
            self.data_file.readinto(a)
            ptx += size
        if self.data_file:
            # close and delete data file after prefetch so we can pickle
            self.data_file.close()
            self.data_file = None
    # @lru_cache(maxsize=8)
    def __getitem__(self, idx):
        """Serve item(s) from the cache; items must have been prefetched
        (a missing index raises KeyError)."""
        if isinstance(idx, int):
            i = idx
            self.check_index(i)
            tensor_size = self.sizes[self.dim_offsets[i]:self.dim_offsets[i + 1]]
            a = np.empty(tensor_size, dtype=self.dtype)
            ptx = self.cache_index[i]
            np.copyto(a, self.cache[ptx: ptx + a.size])
            return a
        elif isinstance(idx, slice):
            return [self[i] for i in range(*idx.indices(len(self)))]
class IndexedDatasetBuilder(object):
    """Writes the legacy indexed dataset format: raw element data streams to
    *out_file* (the ``.bin``) while offsets/sizes accumulate in memory until
    finalize() writes the ``.idx`` sidecar.

    Fixes over the previous version (interface unchanged):
    - ``np.float`` (removed in NumPy 1.24) no longer appears as a key, so
      the class imports on modern NumPy; ``np.uint16`` — a dtype the format
      supports — was missing from the table and is now present.
    - offsets use integer division, so ``data_offsets`` stays integral
      under Python 3 instead of silently becoming floats.
    - the builtin name ``bytes`` is no longer shadowed.
    - merge_file_ streams data with shutil.copyfileobj instead of a manual
      1 KiB read loop.
    """
    # Bytes per element for every dtype the format supports. The old table
    # listed np.float (an alias of 64-bit float) at 4 bytes, which would
    # have corrupted offsets; float64 entries are now 8 bytes.
    element_sizes = {
        np.uint8: 1,
        np.int8: 1,
        np.int16: 2,
        np.int32: 4,
        np.int64: 8,
        np.uint16: 2,
        np.float32: 4,
        np.float64: 8,  # also covers np.double (same type)
        float: 8,
    }

    def __init__(self, out_file, dtype=np.int32):
        """Open *out_file* for writing items of *dtype*."""
        self.out_file = open(out_file, 'wb')
        self.dtype = dtype
        self.data_offsets = [0]  # element offset of each item in the .bin file
        self.dim_offsets = [0]  # running offset into `sizes` per item
        self.sizes = []  # flattened per-dimension sizes of all items
        self.element_size = self.element_sizes[self.dtype]
        self.doc_idx = [0]  # item index at which each document starts

    def add_item(self, tensor):
        """Append one tensor, recording its element offset and shape."""
        n_bytes = self.out_file.write(np.array(tensor.numpy(), dtype=self.dtype))
        # Integer division: offsets are element counts and must stay ints.
        self.data_offsets.append(self.data_offsets[-1] + n_bytes // self.element_size)
        self.sizes.extend(tensor.size())
        self.dim_offsets.append(self.dim_offsets[-1] + len(tensor.size()))

    def end_document(self):
        """Mark the end of the current document."""
        self.doc_idx.append(len(self.sizes))

    def merge_file_(self, another_file):
        """Append another on-disk dataset with the same dtype.

        NOTE(review): like the previous version, this does not merge the
        other file's document index — document boundaries of the merged
        data are dropped.
        """
        index = IndexedDataset(another_file)
        assert index.dtype == self.dtype
        begin = self.data_offsets[-1]
        for offset in index.data_offsets[1:]:
            self.data_offsets.append(begin + offset)
        self.sizes.extend(index.sizes)
        begin = self.dim_offsets[-1]
        for dim_offset in index.dim_offsets[1:]:
            self.dim_offsets.append(begin + dim_offset)
        # Stream the raw data across in chunks.
        with open(data_file_path(another_file), 'rb') as f:
            shutil.copyfileobj(f, self.out_file)

    def finalize(self, index_file):
        """Close the data file and write the ``.idx`` sidecar."""
        self.out_file.close()
        with open(index_file, 'wb') as index:
            index.write(b'TNTIDX\x00\x00')  # format magic
            index.write(struct.pack('<Q', 1))  # version
            index.write(struct.pack('<QQ', code(self.dtype), self.element_size))
            index.write(struct.pack('<QQ', len(self.data_offsets) - 1, len(self.sizes)))
            index.write(struct.pack('<Q', len(self.doc_idx)))
            write_longs(index, self.dim_offsets)
            write_longs(index, self.data_offsets)
            write_longs(index, self.sizes)
            write_longs(index, self.doc_idx)
def _warmup_mmap_file(path):
with open(path, 'rb') as stream:
while stream.read(100 * 1024 * 1024):
pass
class MMapIndexedDataset(torch.utils.data.Dataset):
    """Memory-mapped indexed dataset: raw token data lives in a ``.bin``
    file and per-item pointers/sizes in a ``.idx`` sidecar, both accessed
    through ``np.memmap`` so items are served without loading the whole
    file into memory."""
    class Index(object):
        """Reader/writer for the ``.idx`` sidecar of the mmap format."""
        # Magic bytes identifying the mmap index format on disk.
        _HDR_MAGIC = b'MMIDIDX\x00\x00'
        @classmethod
        def writer(cls, path, dtype):
            """Return a context manager that writes an index file for items
            of *dtype* to *path*."""
            class _Writer(object):
                def __enter__(self):
                    self._file = open(path, 'wb')
                    # Header: magic, format version, dtype code.
                    self._file.write(cls._HDR_MAGIC)
                    self._file.write(struct.pack('<Q', 1))
                    self._file.write(struct.pack('<B', code(dtype)))
                    return self
                @staticmethod
                def _get_pointers(sizes):
                    # Byte offset of each item: running sum of previous item
                    # sizes times the element size. `dtype` is closed over
                    # from the enclosing writer() call.
                    dtype_size = dtype().itemsize
                    address = 0
                    pointers = []
                    for size in sizes:
                        pointers.append(address)
                        address += size * dtype_size
                    return pointers
                def write(self, sizes, doc_idx):
                    """Write counts, then sizes (int32), pointers (int64)
                    and the document index (int64)."""
                    pointers = self._get_pointers(sizes)
                    self._file.write(struct.pack('<Q', len(sizes)))
                    self._file.write(struct.pack('<Q', len(doc_idx)))
                    sizes = np.array(sizes, dtype=np.int32)
                    self._file.write(sizes.tobytes(order='C'))
                    del sizes  # release each temporary before building the next
                    pointers = np.array(pointers, dtype=np.int64)
                    self._file.write(pointers.tobytes(order='C'))
                    del pointers
                    doc_idx = np.array(doc_idx, dtype=np.int64)
                    self._file.write(doc_idx.tobytes(order='C'))
                def __exit__(self, exc_type, exc_val, exc_tb):
                    self._file.close()
            return _Writer()
        def __init__(self, path, skip_warmup=False):
            """Parse the index header, then memory-map the sizes, pointers
            and document-index arrays that follow it."""
            with open(path, 'rb') as stream:
                magic_test = stream.read(9)
                assert self._HDR_MAGIC == magic_test, (
                    'Index file doesn\'t match expected format. '
                    'Make sure that --dataset-impl is configured properly.'
                )
                version = struct.unpack('<Q', stream.read(8))
                assert (1,) == version
                dtype_code, = struct.unpack('<B', stream.read(1))
                self._dtype = dtypes[dtype_code]
                self._dtype_size = self._dtype().itemsize
                self._len = struct.unpack('<Q', stream.read(8))[0]
                self._doc_count = struct.unpack('<Q', stream.read(8))[0]
                offset = stream.tell()  # arrays start right after the header
            if not skip_warmup:
                print_rank_0(" warming up index mmap file...")
                _warmup_mmap_file(path)
            self._bin_buffer_mmap = np.memmap(path, mode='r', order='C')
            self._bin_buffer = memoryview(self._bin_buffer_mmap)
            print_rank_0(" reading sizes...")
            self._sizes = np.frombuffer(
                self._bin_buffer,
                dtype=np.int32,
                count=self._len,
                offset=offset)
            print_rank_0(" reading pointers...")
            self._pointers = np.frombuffer(self._bin_buffer, dtype=np.int64, count=self._len,
                                           offset=offset + self._sizes.nbytes)
            print_rank_0(" reading document index...")
            self._doc_idx = np.frombuffer(self._bin_buffer, dtype=np.int64, count=self._doc_count,
                                          offset=offset + self._sizes.nbytes + self._pointers.nbytes)
        def __del__(self):
            # Close the underlying mmap explicitly so the file handle is
            # released deterministically.
            self._bin_buffer_mmap._mmap.close()
            del self._bin_buffer_mmap
        @property
        def dtype(self):
            return self._dtype
        @property
        def sizes(self):
            return self._sizes
        @property
        def doc_idx(self):
            return self._doc_idx
        @lru_cache(maxsize=8)
        def __getitem__(self, i):
            # (byte pointer, element count) pair for item *i*.
            return self._pointers[i], self._sizes[i]
        def __len__(self):
            return self._len
    def __init__(self, path, skip_warmup=False):
        super().__init__()
        self._path = None
        self._index = None
        self._bin_buffer = None
        self._do_init(path, skip_warmup)
    def __getstate__(self):
        # Pickle only the path; mmaps are re-created on unpickling.
        return self._path
    def __setstate__(self, state):
        # NOTE(review): _do_init requires a skip_warmup argument, so
        # unpickling raises TypeError here — skip_warmup needs a default.
        self._do_init(state)
    def _do_init(self, path, skip_warmup):
        """Open the index and memory-map the ``.bin`` data file."""
        self._path = path
        self._index = self.Index(index_file_path(self._path), skip_warmup)
        if not skip_warmup:
            print_rank_0(" warming up data mmap file...")
            _warmup_mmap_file(data_file_path(self._path))
        print_rank_0(" creating numpy buffer of mmap...")
        self._bin_buffer_mmap = np.memmap(data_file_path(self._path), mode='r', order='C')
        print_rank_0(" creating memory view of numpy buffer...")
        self._bin_buffer = memoryview(self._bin_buffer_mmap)
    def __del__(self):
        self._bin_buffer_mmap._mmap.close()
        del self._bin_buffer_mmap
        del self._index
    def __len__(self):
        return len(self._index)
    def __getitem__(self, idx):
        """Return item *idx* as a read-only array view into the mmap, or a
        list of such arrays for a contiguous slice."""
        if isinstance(idx, int):
            ptr, size = self._index[idx]
            np_array = np.frombuffer(self._bin_buffer, dtype=self._index.dtype,
                                     count=size, offset=ptr)
            return np_array
        elif isinstance(idx, slice):
            start, stop, step = idx.indices(len(self))
            if step != 1:
                raise ValueError("Slices into indexed_dataset must be contiguous")
            ptr = self._index._pointers[start]
            sizes = self._index._sizes[idx]
            offsets = list(accumulate(sizes))
            total_size = sum(sizes)
            np_array = np.frombuffer(self._bin_buffer, dtype=self._index.dtype,
                                     count=total_size, offset=ptr)
            # Split the flat buffer back into one array per item.
            return np.split(np_array, offsets[:-1])
    def get(self, idx, offset=0, length=None):
        """Retrieve a single item, optionally only a portion of it.

        get(idx) is the same as [idx], but get() does not support slicing.
        """
        ptr, size = self._index[idx]
        if length is None:
            length = size - offset
        # Convert the element offset into a byte offset.
        ptr += offset * np.dtype(self._index.dtype).itemsize
        return np.frombuffer(self._bin_buffer, dtype=self._index.dtype,
                             count=length, offset=ptr)
    @property
    def sizes(self):
        return self._index.sizes
    @property
    def doc_idx(self):
        return self._index.doc_idx
    def get_doc_idx(self):
        return self._index._doc_idx
    def set_doc_idx(self, doc_idx_):
        self._index._doc_idx = doc_idx_
    @property
    def supports_prefetch(self):
        return False
    @staticmethod
    def exists(path):
        # Both the .idx and the .bin file must be present.
        return (
            os.path.exists(index_file_path(path)) and os.path.exists(data_file_path(path))
        )
class MMapIndexedDatasetBuilder(object):
    """Builds an MMapIndexedDataset: streams raw item bytes to the ``.bin``
    file while sizes and document boundaries accumulate in memory for the
    ``.idx`` sidecar written by finalize()."""

    def __init__(self, out_file, dtype=np.int64):
        self._data_file = open(out_file, 'wb')
        self._dtype = dtype
        self._sizes = []
        self._doc_idx = [0]

    def add_item(self, tensor):
        """Convert *tensor* to the builder dtype and append its raw bytes."""
        flat = np.array(tensor.numpy(), dtype=self._dtype)
        self._data_file.write(flat.tobytes(order='C'))
        self._sizes.append(flat.size)

    def end_document(self):
        """Record that the current document ends after the last added item."""
        self._doc_idx.append(len(self._sizes))

    def merge_file_(self, another_file):
        """Append another on-disk dataset of identical dtype."""
        other_index = MMapIndexedDataset.Index(index_file_path(another_file))
        assert other_index.dtype == self._dtype
        self._sizes.extend(other_index.sizes)
        with open(data_file_path(another_file), 'rb') as src:
            shutil.copyfileobj(src, self._data_file)

    def finalize(self, index_file):
        """Close the data stream and emit the sidecar index file."""
        self._data_file.close()
        with MMapIndexedDataset.Index.writer(index_file, self._dtype) as writer:
            writer.write(self._sizes, self._doc_idx)
| true | true |
f7f5873669ac7813b11f6043827be50764aa2778 | 2,215 | py | Python | libs/jieba/cut_text.py | dnxbjyj/python-basic | 14cba06cd84715ebb8acc6dd3cef84789316d81e | [
"MIT"
] | 3 | 2018-03-19T07:57:10.000Z | 2021-07-05T08:55:14.000Z | libs/jieba/cut_text.py | dnxbjyj/python-basic | 14cba06cd84715ebb8acc6dd3cef84789316d81e | [
"MIT"
] | 6 | 2020-03-24T15:40:18.000Z | 2021-12-13T19:46:09.000Z | libs/jieba/cut_text.py | dnxbjyj/python-basic | 14cba06cd84715ebb8acc6dd3cef84789316d81e | [
"MIT"
] | 4 | 2018-03-29T21:59:55.000Z | 2019-12-16T14:56:38.000Z | # coding:utf-8
import jieba.posseg as psg
from collections import Counter
import sys
def cut_and_cache(text):
    '''
    Segment *text* with jieba, keep part-of-speech tags, and cache the
    result to a file.

    Because the corpus is large, the segmentation result is written once
    to cut_result.txt (one token per line, "word<TAB>flag") so later runs
    can reload it instead of re-segmenting. Only tokens at least two
    characters long are kept.

    :param text: the text to segment
    :return: None
    '''
    words_with_attr = [(x.word, x.flag) for x in psg.cut(text) if len(x.word) >= 2]
    with open('cut_result.txt', 'w+') as f:
        for x in words_with_attr:
            f.write('{0}\t{1}\n'.format(x[0], x[1]))
def read_cut_result_from_cache_file(filename='cut_result.txt'):
    '''
    Load the cached segmentation result as a list of (word, flag) tuples.

    Lines that do not contain both a word and a part-of-speech flag are
    skipped.

    :param filename: cache file to read; defaults to cut_result.txt for
        backward compatibility with the original behaviour
    :return: list of (word, flag) tuples
    '''
    words_with_attr = []
    with open(filename, 'r') as f:
        for line in f.readlines():
            pair = line.split()
            if len(pair) < 2:
                continue
            words_with_attr.append((pair[0], pair[1]))
    return words_with_attr
def build_attr_dict(words_with_attr):
    '''
    Build a {word: flag} dictionary from the (word, flag) pair list.

    When a word appears more than once, the last flag wins (same as
    repeated assignment).

    :param words_with_attr: list of (word, flag) tuples
    :return: dict mapping each word to its part-of-speech flag
    '''
    return dict(words_with_attr)
def get_topn_words(words, topn, filename='top_n_result.txt'):
    '''
    Write the *topn* most frequent words to *filename*, one "word,count"
    pair per line, most frequent first.

    :param words: iterable of words
    :param topn: number of top entries to keep
    :param filename: output file; defaults to top_n_result.txt for
        backward compatibility with the original behaviour
    :return: None
    '''
    most_common = Counter(words).most_common(topn)
    with open(filename, 'w+') as f:
        for word, count in most_common:
            f.write('{0},{1}\n'.format(word, count))
def main():
    # Force utf-8 as the default encoding so Chinese text does not raise
    # (Python 2 only: reload/setdefaultencoding do not exist on Python 3).
    reload(sys)
    sys.setdefaultencoding('utf-8')
    # Read the corpus text.
    text = open('./merge.txt').read()
    # Segment and cache; only needed on the first run, then keep commented.
    # cut_and_cache(text)
    # Load the cached (word, flag) segmentation result.
    words_with_attr = read_cut_result_from_cache_file()
    # Part-of-speech tags to filter out.
    stop_attr = ['a', 'ad', 'b', 'c', 'd', 'f', 'df', 'm', 'mq', 'p', 'r', 'rr', 's', 't', 'u', 'v', 'z']
    # Drop words whose tag is in the stop list.
    words = [x[0] for x in words_with_attr if x[1] not in stop_attr]
    # Write the 100 most frequent remaining words to top_n_result.txt.
    get_topn_words(words=words, topn=100)
    print 'all is finished!'
# Script entry point.
if __name__ == '__main__':
    main()
import jieba.posseg as psg
from collections import Counter
import sys
def cut_and_cache(text):
'''
对文本分词并标注词性,并缓存到文件
将文本分词,并附带上词性,因为数据量比较大,防止每次运行脚本都花大量时间,所以第一次分词后就将结果存入文件cut_result.txt中
相当于做一个缓存,格式为每个词占一行,每一行的内容为:
词,词性
:param text: 要进行分词的文本
:return: None
'''
words_with_attr = [(x.word, x.flag) for x in psg.cut(text) if len(x.word) >= 2]
with open('cut_result.txt', 'w+') as f:
for x in words_with_attr:
f.write('{0}\t{1}\n'.format(x[0], x[1]))
def read_cut_result_from_cache_file():
'''
从cut_result.txt中读取带词性的分词结果列表
:return: 带词性的分词结果列表
'''
words_with_attr = []
with open('cut_result.txt', 'r') as f:
for x in f.readlines():
pair = x.split()
if len(pair) < 2:
continue
words_with_attr.append((pair[0], pair[1]))
return words_with_attr
def build_attr_dict(words_with_attr):
'''
将分词列表的词性构建成一个字典,以便后面使用,格式为:
{词:词性}
:param words_with_attr:
:return:
'''
attr_dict = {}
for x in words_with_attr:
attr_dict[x[0]] = x[1]
return attr_dict
def get_topn_words(words, topn):
'''
统计在分词表中出现次数排名前topn的词的列表,并将结果输出到文件top_n_result.txt中,每行一个词,格式为:
词,出现次数
:param words:
:param topn:
:return:
'''
c = Counter(words).most_common(topn)
with open('top_n_result.txt', 'w+') as f:
for x in c:
f.write('{0},{1}\n'.format(x[0], x[1]))
def main():
reload(sys)
sys.setdefaultencoding('utf-8')
text = open('./merge.txt').read()
words_with_attr = read_cut_result_from_cache_file()
stop_attr = ['a', 'ad', 'b', 'c', 'd', 'f', 'df', 'm', 'mq', 'p', 'r', 'rr', 's', 't', 'u', 'v', 'z']
words = [x[0] for x in words_with_attr if x[1] not in stop_attr]
get_topn_words(words=words, topn=100)
print 'all is finished!'
if __name__ == '__main__':
main() | false | true |
f7f587622de60fe300b5b2617de3c56677e5e899 | 560 | py | Python | nemde/tests/test_io_casefile.py | akxen/nemde | 23afcdf128352f12d3074194a1321a8f810f4407 | [
"Apache-2.0"
] | null | null | null | nemde/tests/test_io_casefile.py | akxen/nemde | 23afcdf128352f12d3074194a1321a8f810f4407 | [
"Apache-2.0"
] | null | null | null | nemde/tests/test_io_casefile.py | akxen/nemde | 23afcdf128352f12d3074194a1321a8f810f4407 | [
"Apache-2.0"
] | 1 | 2022-01-16T01:48:54.000Z | 2022-01-16T01:48:54.000Z | """
Test loading of casefiles
"""
import os
import pytest
import context
from nemde.io.casefile import load_xml_from_archive
@pytest.mark.skip(reason='Not including casefile archive in container')
def test_load_casefile_from_database():
    """Load casefile from a database"""
    # Target year/month come from the environment so CI can select fixtures.
    year = int(os.environ['TEST_YEAR'])
    month = int(os.environ['TEST_MONTH'])
    casefile = load_xml_from_archive(data_dir=os.environ['CASEFILE_DIR'],
                                     year=year, month=month, day=1, interval=1)
    # The archive loader returns the raw XML document as bytes.
    assert isinstance(casefile, bytes)
| 23.333333 | 79 | 0.694643 |
import os
import pytest
import context
from nemde.io.casefile import load_xml_from_archive
@pytest.mark.skip(reason='Not including casefile archive in container')
def test_load_casefile_from_database():
year = int(os.environ['TEST_YEAR'])
month = int(os.environ['TEST_MONTH'])
casefile = load_xml_from_archive(data_dir=os.environ['CASEFILE_DIR'],
year=year, month=month, day=1, interval=1)
assert isinstance(casefile, bytes)
| true | true |
f7f587ad39dc444cf6f490e5a93aee8bcd5f1ceb | 1,967 | py | Python | tests/test_model_form_field.py | mfogel/django-timezone-field | 66337bcea5f9eb66923e0c85239dc1a652c93472 | [
"BSD-2-Clause"
] | 263 | 2015-01-10T01:07:22.000Z | 2022-03-25T08:56:31.000Z | tests/test_model_form_field.py | mfogel/django-timezone-field | 66337bcea5f9eb66923e0c85239dc1a652c93472 | [
"BSD-2-Clause"
] | 67 | 2015-02-06T01:15:40.000Z | 2022-03-17T16:51:48.000Z | tests/test_model_form_field.py | mfogel/django-timezone-field | 66337bcea5f9eb66923e0c85239dc1a652c93472 | [
"BSD-2-Clause"
] | 74 | 2015-01-09T02:35:40.000Z | 2022-03-04T21:44:20.000Z | import pytest
@pytest.mark.django_db
def test_valid_with_defaults(Model, ModelForm, pst, pst_tz, gmt, gmt_tz):
    """Submitting only the required field saves every other field at its
    declared default."""
    # seems there should be a better way to get a form's default values...?
    # http://stackoverflow.com/questions/7399490/
    data = dict(
        (field_name, field.initial)
        for field_name, field in ModelForm().fields.items()
    )
    data.update({'tz': gmt})
    form = ModelForm(data=data)
    assert form.is_valid()
    form.save()
    assert Model.objects.count() == 1
    m = Model.objects.get()
    assert m.tz == gmt_tz
    assert m.tz_opt is None
    assert m.tz_opt_default == pst_tz
    assert m.tz_gmt_offset is None
@pytest.mark.django_db
def test_valid_specify_all(Model, ModelForm, utc, pst, gmt, utc_tz, gmt_tz, pst_tz):
    """A form with every timezone field supplied saves all of them."""
    payload = {
        'tz': utc,
        'tz_opt': pst,
        'tz_opt_default': gmt,
        'tz_gmt_offset': utc,
    }
    form = ModelForm(payload)
    assert form.is_valid()
    form.save()
    assert Model.objects.count() == 1
    saved = Model.objects.get()
    assert saved.tz == utc_tz
    assert saved.tz_opt == pst_tz
    assert saved.tz_opt_default == gmt_tz
    assert saved.tz_gmt_offset == utc_tz
@pytest.mark.parametrize('tz, error_keyword', [
    [None, 'required'],
    [pytest.lazy_fixture('invalid_tz'), 'choice'],
    [pytest.lazy_fixture('uncommon_tz'), 'choice'],
])
def test_invalid_not_blank(ModelForm, tz, error_keyword):
    """Missing or unrecognised timezones surface the expected form error."""
    form = ModelForm({'tz': tz})
    assert not form.is_valid()
    # Exact error strings vary across Django versions, so match a keyword.
    assert any(error_keyword in e for e in form.errors['tz'])
def test_default_human_readable_choices_dont_have_underscores(ModelForm, pst_tz):
    """The display label for a zone replaces underscores with spaces."""
    choices = ModelForm().fields['tz'].choices
    labels = [label for value, label in choices if value == pst_tz]
    assert labels[0] == 'America/Los Angeles'
def test_display_gmt_offsets(ModelForm, pst_tz):
    """With GMT offsets enabled, the label is prefixed by the UTC offset."""
    form = ModelForm({'tz_gmt_offset': pst_tz})
    labels = [label for value, label in form.fields['tz_gmt_offset'].choices
              if value == pst_tz]
    assert labels[0] == 'GMT-08:00 America/Los Angeles'
| 30.261538 | 84 | 0.663955 | import pytest
@pytest.mark.django_db
def test_valid_with_defaults(Model, ModelForm, pst, pst_tz, gmt, gmt_tz):
# http://stackoverflow.com/questions/7399490/
data = dict(
(field_name, field.initial)
for field_name, field in ModelForm().fields.items()
)
data.update({'tz': gmt})
form = ModelForm(data=data)
assert form.is_valid()
form.save()
assert Model.objects.count() == 1
m = Model.objects.get()
assert m.tz == gmt_tz
assert m.tz_opt is None
assert m.tz_opt_default == pst_tz
assert m.tz_gmt_offset is None
@pytest.mark.django_db
def test_valid_specify_all(Model, ModelForm, utc, pst, gmt, utc_tz, gmt_tz, pst_tz):
form = ModelForm({
'tz': utc,
'tz_opt': pst,
'tz_opt_default': gmt,
'tz_gmt_offset': utc,
})
assert form.is_valid()
form.save()
assert Model.objects.count() == 1
m = Model.objects.get()
assert m.tz == utc_tz
assert m.tz_opt == pst_tz
assert m.tz_opt_default == gmt_tz
assert m.tz_gmt_offset == utc_tz
@pytest.mark.parametrize('tz, error_keyword', [
[None, 'required'],
[pytest.lazy_fixture('invalid_tz'), 'choice'],
[pytest.lazy_fixture('uncommon_tz'), 'choice'],
])
def test_invalid_not_blank(ModelForm, tz, error_keyword):
form = ModelForm({'tz': tz})
assert not form.is_valid()
assert any(error_keyword in e for e in form.errors['tz'])
def test_default_human_readable_choices_dont_have_underscores(ModelForm, pst_tz):
form = ModelForm()
pst_choice = [c for c in form.fields['tz'].choices if c[0] == pst_tz]
assert pst_choice[0][1] == 'America/Los Angeles'
def test_display_gmt_offsets(ModelForm, pst_tz):
form = ModelForm({'tz_gmt_offset': pst_tz})
c = [c for c in form.fields['tz_gmt_offset'].choices if c[0] == pst_tz]
assert c[0][1] == 'GMT-08:00 America/Los Angeles'
| true | true |
f7f58862b8bc91b085ff5d83efcab50cf1a57351 | 5,976 | py | Python | tests/test_grouping.py | NoraRo/trimesh | ac23bba6ef6de584f822dc00ac0e941e6065ae73 | [
"MIT"
] | null | null | null | tests/test_grouping.py | NoraRo/trimesh | ac23bba6ef6de584f822dc00ac0e941e6065ae73 | [
"MIT"
] | null | null | null | tests/test_grouping.py | NoraRo/trimesh | ac23bba6ef6de584f822dc00ac0e941e6065ae73 | [
"MIT"
] | 1 | 2019-05-31T03:37:21.000Z | 2019-05-31T03:37:21.000Z | try:
from . import generic as g
except BaseException:
import generic as g
class GroupTests(g.unittest.TestCase):
    """Exercises trimesh.grouping: unique rows, blocks, runs, clustering,
    row grouping and vector grouping.

    NumPy compatibility fixes: the removed ``np.float``/``np.int`` aliases
    (gone since NumPy 1.24) are replaced by the builtin ``float``/``int``,
    which produce identical dtypes, and the ``ndarray.ptp`` method (removed
    in NumPy 2.0) is replaced by the ``np.ptp`` function.
    """

    def test_unique_rows(self):
        """unique_rows collapses duplicated rows for float and int data."""
        count = 10000
        subset = int(count / 10)
        # check unique_rows on float data
        data = g.np.arange(count * 3).reshape((-1, 3)).astype(float)
        data[:subset] = data[0]
        unique, inverse = g.trimesh.grouping.unique_rows(data)
        assert (inverse[:subset] == 0).all()
        assert len(unique) == count - subset + 1
        # check the bitbanging path of hashable rows on small integers
        data = data[:, :2].astype(int)
        unique, inverse = g.trimesh.grouping.unique_rows(data)
        assert (inverse[:subset] == 0).all()
        assert len(unique) == count - subset + 1

    def test_blocks(self):
        """blocks() honours min_len/max_len and the only_nonzero flag."""
        blocks = g.trimesh.grouping.blocks
        count = 100
        subset = int(count / 10)
        a = g.np.zeros(count, dtype=int)
        result = blocks(a, min_len=0, only_nonzero=False)
        assert len(result) == 1
        assert len(result[0]) == count
        result = blocks(a, min_len=0, only_nonzero=True)
        assert len(result) == 0
        result = blocks(a, min_len=count + 1, only_nonzero=False)
        assert len(result) == 0
        result = blocks(a, max_len=count - 1, only_nonzero=False)
        assert len(result) == 0
        result = blocks(a, max_len=count + 1, only_nonzero=False)
        assert len(result) == 1
        assert len(result[0]) == count
        a[:subset] = True
        result = blocks(a, only_nonzero=False)
        assert len(result) == 2
        assert set(range(subset)) == set(result[0])
        assert set(range(subset, count)) == set(result[1])
        assert sum(len(i) for i in result) == count
        result = blocks(a, only_nonzero=True)
        assert len(result) == 1
        assert set(range(subset)) == set(result[0])
        assert all(a[i].all() for i in result)
        a[0] = False
        result = blocks(a, min_len=1, only_nonzero=True)
        assert len(result) == 1
        assert set(range(1, subset)) == set(result[0])
        assert all(a[i].all() for i in result)
        result = blocks(a, min_len=1, only_nonzero=False)
        assert len(result) == 3
        assert sum(len(i) for i in result) == count
        a[2] = False
        result = blocks(a, min_len=1, only_nonzero=True)
        assert len(result) == 2
        assert set(result[0]) == set([1])
        assert all(a[i].all() for i in result)

    def test_runs(self):
        """merge_runs collapses consecutive duplicates; unique_ordered
        removes all duplicates while keeping first-seen order."""
        a = g.np.array([-1, -1, -1, 0, 0, 1, 1, 2,
                        0, 3, 3, 4, 4, 5, 5, 6,
                        6, 7, 7, 8, 8, 9, 9, 9],
                       dtype=int)
        r = g.trimesh.grouping.merge_runs(a)
        u = g.trimesh.grouping.unique_ordered(a)
        self.assertTrue((g.np.diff(r) != 0).all())
        self.assertTrue((g.np.diff(u) != 0).all())
        self.assertTrue(r.size == 12)
        self.assertTrue(u.size == 11)

    def test_cluster(self):
        """Smoke test: clusters and group_distance run on random points."""
        a = (g.np.random.random((10000, 3)) * 5).astype(int)
        r = g.trimesh.grouping.clusters(a, .01)
        r = g.trimesh.grouping.group_distance(a, .01)

    def test_unique_float(self):
        """unique_float deduplicates and returns consistent index/inverse."""
        a = g.np.arange(100) / 2.0
        t = g.np.tile(a, 2).flatten()
        unique = g.trimesh.grouping.unique_float(t)
        assert g.np.allclose(unique, a)
        unique, index, inverse = g.trimesh.grouping.unique_float(t,
                                                                 return_index=True,
                                                                 return_inverse=True)
        assert g.np.allclose(unique[inverse], t)
        assert g.np.allclose(unique, t[index])

    def test_group_rows(self):
        """group_rows pairs identical rows and honours require_count."""
        a = g.np.arange(100) / 2.0
        b = g.np.tile(a, 3).reshape((-1, 3))
        c = g.np.vstack((b, b))
        gr = g.trimesh.grouping.group_rows(c)
        assert gr.shape == (100, 2)
        # rows grouped together must be identical (zero peak-to-peak)
        assert g.np.allclose(g.np.ptp(c[gr], axis=1), 0.0)
        gr = g.trimesh.grouping.group_rows(c, require_count=2)
        assert gr.shape == (100, 2)
        assert g.np.allclose(g.np.ptp(c[gr], axis=1), 0.0)
        c = g.np.vstack((c, [1, 2, 3]))
        gr = g.trimesh.grouping.group_rows(c, require_count=2)
        grd = g.trimesh.grouping.group_rows(c)
        # should discard the single element
        assert gr.shape == (100, 2)
        # should get the single element correctly
        assert len(grd) == 101
        assert sum(1 for i in grd if len(i) == 2) == 100
        assert g.np.allclose(g.np.ptp(c[gr], axis=1), 0.0)

    def test_group_vector(self):
        """group_vectors merges parallel (and optionally antiparallel)
        unit vectors."""
        x = g.np.linspace(-100, 100, 100)
        vec = g.np.column_stack((x,
                                 g.np.ones(len(x)),
                                 g.np.zeros(len(x))))
        vec = g.trimesh.unitize(vec)
        uv, ui = g.trimesh.grouping.group_vectors(vec)
        assert g.np.allclose(uv, vec)
        assert len(vec) == len(ui)
        assert g.np.allclose(uv[ui.flatten()], vec)
        vec = g.np.vstack((vec, -vec))
        uv, ui = g.trimesh.grouping.group_vectors(vec)
        assert g.np.allclose(uv, vec)
        assert len(ui) == len(vec)
        uv, ui = g.trimesh.grouping.group_vectors(vec,
                                                  include_negative=True)
        # since we included negative vectors, there should
        # be half the number of unique vectors and 2 indexes per vector
        assert ui.shape == (100, 2)
        assert uv.shape == (100, 3)
        assert g.np.allclose(uv, vec[:100])

    def test_boolean_rows(self):
        """boolean_rows with intersect1d finds the one shared row."""
        a = g.np.arange(10).reshape((-1, 2))
        b = g.np.arange(10).reshape((-1, 2)) + 8
        # should have one overlapping row
        intersection = g.trimesh.grouping.boolean_rows(
            a, b, g.np.intersect1d)
        assert g.np.allclose(intersection.ravel(), [8, 9])
if __name__ == '__main__':
g.trimesh.util.attach_to_log()
g.unittest.main()
| 34.148571 | 85 | 0.547858 | try:
from . import generic as g
except BaseException:
import generic as g
class GroupTests(g.unittest.TestCase):
def test_unique_rows(self):
count = 10000
subset = int(count / 10)
data = g.np.arange(count * 3).reshape((-1, 3)).astype(g.np.float)
data[:subset] = data[0]
unique, inverse = g.trimesh.grouping.unique_rows(data)
assert (inverse[:subset] == 0).all()
assert len(unique) == count - subset + 1
data = data[:, :2].astype(int)
unique, inverse = g.trimesh.grouping.unique_rows(data)
assert (inverse[:subset] == 0).all()
assert len(unique) == count - subset + 1
def test_blocks(self):
blocks = g.trimesh.grouping.blocks
count = 100
subset = int(count / 10)
a = g.np.zeros(count, dtype=g.np.int)
result = blocks(a, min_len=0, only_nonzero=False)
assert len(result) == 1
assert len(result[0]) == count
result = blocks(a, min_len=0, only_nonzero=True)
assert len(result) == 0
result = blocks(a, min_len=count + 1, only_nonzero=False)
assert len(result) == 0
result = blocks(a, max_len=count - 1, only_nonzero=False)
assert len(result) == 0
result = blocks(a, max_len=count + 1, only_nonzero=False)
assert len(result) == 1
assert len(result[0]) == count
a[:subset] = True
result = blocks(a, only_nonzero=False)
assert len(result) == 2
assert set(range(subset)) == set(result[0])
assert set(range(subset, count)) == set(result[1])
assert sum(len(i) for i in result) == count
result = blocks(a, only_nonzero=True)
assert len(result) == 1
assert set(range(subset)) == set(result[0])
assert all(a[i].all() for i in result)
a[0] = False
result = blocks(a, min_len=1, only_nonzero=True)
assert len(result) == 1
assert set(range(1, subset)) == set(result[0])
assert all(a[i].all() for i in result)
result = blocks(a, min_len=1, only_nonzero=False)
assert len(result) == 3
assert sum(len(i) for i in result) == count
a[2] = False
result = blocks(a, min_len=1, only_nonzero=True)
assert len(result) == 2
assert set(result[0]) == set([1])
assert all(a[i].all() for i in result)
def test_runs(self):
a = g.np.array([-1, -1, -1, 0, 0, 1, 1, 2,
0, 3, 3, 4, 4, 5, 5, 6,
6, 7, 7, 8, 8, 9, 9, 9],
dtype=g.np.int)
r = g.trimesh.grouping.merge_runs(a)
u = g.trimesh.grouping.unique_ordered(a)
self.assertTrue((g.np.diff(r) != 0).all())
self.assertTrue((g.np.diff(u) != 0).all())
self.assertTrue(r.size == 12)
self.assertTrue(u.size == 11)
def test_cluster(self):
a = (g.np.random.random((10000, 3)) * 5).astype(int)
r = g.trimesh.grouping.clusters(a, .01)
r = g.trimesh.grouping.group_distance(a, .01)
def test_unique_float(self):
a = g.np.arange(100) / 2.0
t = g.np.tile(a, 2).flatten()
unique = g.trimesh.grouping.unique_float(t)
assert g.np.allclose(unique, a)
unique, index, inverse = g.trimesh.grouping.unique_float(t,
return_index=True,
return_inverse=True)
assert g.np.allclose(unique[inverse], t)
assert g.np.allclose(unique, t[index])
def test_group_rows(self):
a = g.np.arange(100) / 2.0
b = g.np.tile(a, 3).reshape((-1, 3))
c = g.np.vstack((b, b))
gr = g.trimesh.grouping.group_rows(c)
assert gr.shape == (100, 2)
assert g.np.allclose(c[gr].ptp(axis=1), 0.0)
gr = g.trimesh.grouping.group_rows(c, require_count=2)
assert gr.shape == (100, 2)
assert g.np.allclose(c[gr].ptp(axis=1), 0.0)
c = g.np.vstack((c, [1, 2, 3]))
gr = g.trimesh.grouping.group_rows(c, require_count=2)
grd = g.trimesh.grouping.group_rows(c)
assert gr.shape == (100, 2)
assert len(grd) == 101
assert sum(1 for i in grd if len(i) == 2) == 100
assert g.np.allclose(c[gr].ptp(axis=1), 0.0)
def test_group_vector(self):
x = g.np.linspace(-100, 100, 100)
vec = g.np.column_stack((x,
g.np.ones(len(x)),
g.np.zeros(len(x))))
vec = g.trimesh.unitize(vec)
uv, ui = g.trimesh.grouping.group_vectors(vec)
assert g.np.allclose(uv, vec)
assert len(vec) == len(ui)
assert g.np.allclose(uv[ui.flatten()], vec)
vec = g.np.vstack((vec, -vec))
uv, ui = g.trimesh.grouping.group_vectors(vec)
assert g.np.allclose(uv, vec)
assert len(ui) == len(vec)
uv, ui = g.trimesh.grouping.group_vectors(vec,
include_negative=True)
assert ui.shape == (100, 2)
assert uv.shape == (100, 3)
assert g.np.allclose(uv, vec[:100])
def test_boolean_rows(self):
a = g.np.arange(10).reshape((-1, 2))
b = g.np.arange(10).reshape((-1, 2)) + 8
intersection = g.trimesh.grouping.boolean_rows(
a, b, g.np.intersect1d)
assert g.np.allclose(intersection.ravel(), [8, 9])
if __name__ == '__main__':
g.trimesh.util.attach_to_log()
g.unittest.main()
| true | true |
f7f5886c92cc4500bda948ea7952ec1d46de279f | 961 | py | Python | Examples/Demos.py | SimpleITK/SimpleITK-MICCAI-2011-Tutorial | c8cffa8888fda71b9e4f2fdb3e10c2c66dba8371 | [
"CC-BY-3.0"
] | 25 | 2015-03-08T16:24:13.000Z | 2021-07-23T02:44:04.000Z | Examples/Demos.py | SimpleITK/SimpleITK-MICCAI-2011-Tutorial | c8cffa8888fda71b9e4f2fdb3e10c2c66dba8371 | [
"CC-BY-3.0"
] | null | null | null | Examples/Demos.py | SimpleITK/SimpleITK-MICCAI-2011-Tutorial | c8cffa8888fda71b9e4f2fdb3e10c2c66dba8371 | [
"CC-BY-3.0"
] | 4 | 2015-01-29T21:29:40.000Z | 2022-03-11T08:14:07.000Z | import IPython.lib.demo as ipd
# To use, run ipython, then
#
# In [1]: %run Demos.py
# In [2]: d = ImageDemo()
# In [3]: d()
# In [4]: d()
def ImageDemo ():
return ipd.ClearIPDemo ( 'BasicTutorial1/Image.py' )
def InputOutputDemo ():
return ipd.ClearIPDemo ( 'BasicTutorial1/InputOutput.py' )
def MemoryManagementDemo ():
return ipd.ClearIPDemo ( 'BasicTutorial1/MemoryManagement.py' )
def FiltersDemo ():
return ipd.ClearIPDemo ( 'BasicTutorial2/Filters.py' )
def MorphologyDemo ():
return ipd.ClearIPDemo ( 'BasicTutorial2/Morphology.py' )
def MeasureRegionsDemo ():
return ipd.ClearIPDemo ( 'InteractiveTutorial/MeasureRegions.py' )
def BorderChangeDemo ():
return ipd.ClearIPDemo ( 'InteractiveTutorial/05-01-BorderChange.py' )
def NumpyDemo ():
return ipd.ClearIPDemo ( 'InteractiveTutorial/05-02-Numpy.py' )
def RidgeDetectionDemo ():
return ipd.ClearIPDemo ( 'InteractiveTutorial/05-04-RidgeDetection.py' )
| 25.972973 | 76 | 0.712799 | import IPython.lib.demo as ipd
def ImageDemo ():
return ipd.ClearIPDemo ( 'BasicTutorial1/Image.py' )
def InputOutputDemo ():
return ipd.ClearIPDemo ( 'BasicTutorial1/InputOutput.py' )
def MemoryManagementDemo ():
return ipd.ClearIPDemo ( 'BasicTutorial1/MemoryManagement.py' )
def FiltersDemo ():
return ipd.ClearIPDemo ( 'BasicTutorial2/Filters.py' )
def MorphologyDemo ():
return ipd.ClearIPDemo ( 'BasicTutorial2/Morphology.py' )
def MeasureRegionsDemo ():
return ipd.ClearIPDemo ( 'InteractiveTutorial/MeasureRegions.py' )
def BorderChangeDemo ():
return ipd.ClearIPDemo ( 'InteractiveTutorial/05-01-BorderChange.py' )
def NumpyDemo ():
return ipd.ClearIPDemo ( 'InteractiveTutorial/05-02-Numpy.py' )
def RidgeDetectionDemo ():
return ipd.ClearIPDemo ( 'InteractiveTutorial/05-04-RidgeDetection.py' )
| true | true |
f7f589ed57b8383158a5429826280a32ecb8c87c | 16,228 | py | Python | hou_parm.py | rbland/HouNodeLib | 50d7c36bddb58f2e2c6be1fa005973fba0b39084 | [
"Unlicense"
] | null | null | null | hou_parm.py | rbland/HouNodeLib | 50d7c36bddb58f2e2c6be1fa005973fba0b39084 | [
"Unlicense"
] | null | null | null | hou_parm.py | rbland/HouNodeLib | 50d7c36bddb58f2e2c6be1fa005973fba0b39084 | [
"Unlicense"
] | null | null | null | # The MetaHouParm and HouParm class..
import os
import re
import hou
class MetaHouParm(type):
"""
The MetaHouParm class is a metaclass that keeps track of which
classes support which parameter types.
"""
PARM_TYPE_TO_CLASS = dict()
def __new__(cls, class_name, bases, class_dict):
"""
Called when a class is created that uses this metaclass
"""
# create the new class
new_cls = type.__new__(cls, class_name, bases, class_dict)
supported_parm_types = class_dict.get('SUPPORTED_TYPES', [])
for parm_type in supported_parm_types:
cls.PARM_TYPE_TO_CLASS[parm_type] = new_cls
return new_cls
@classmethod
def get_node_parm(mcls, hou_node, parm_name):
"""
Instantiates and returns a HouParm object
to represent the specified node paramter.
"""
sesi_tuple = hou_node.parmTuple(parm_name)
if sesi_tuple and len(sesi_tuple) > 1:
parm_template = sesi_tuple.parmTemplate()
return NodeParmTuple(hou_node, parm_name,
parm_template, sesi_tuple)
sesi_parm = hou_node.parm(parm_name)
if not sesi_parm:
return None
parm_template = sesi_parm.parmTemplate()
parm_type = parm_template.type()
cls = mcls.PARM_TYPE_TO_CLASS.get(parm_type, HouParm)
cls = cls.get_class_for_parm(parm_template)
return cls(hou_node, parm_name, parm_template, sesi_parm)
class HouParm(object):
"""
The base class to represent a Houdini node parameter.
"""
__metaclass__ = MetaHouParm
SUPPORTED_TYPES = []
CAST_TYPE = None
def __init__(self, hou_node, parm_name,
parm_template, sesi_parm=None):
"""
Initializer for a Node parameter.
"""
self._hou_node = hou_node
self._parm_name = parm_name
self._parm_template = parm_template
self._sesi_parm = sesi_parm
self._cast_type = None
self._parm_method_names = []
self.update_node_methods()
@classmethod
def get_class_for_parm(cls, parm_template):
"""
Virtual method to control what HouParm class gets used
for the specified parmater template. After the class is identied
by the SUPPORTED_TYPES list.
"""
return cls
def update_node_methods(self, parm_method_names=None):
"""
Updates the list of SESI parm method names
that can be called vicariously on this HouParm instance.
"""
if parm_method_names == None:
parm_method_names = dir(self._sesi_parm)
self._parm_method_names = parm_method_names
def __getattr__(self, key):
"""
Overrides the attribute retrieval method for HouParm instances.
This method is only called if a python attribute or method of the
requested name was not found.
"""
# check for a method on the parm tuple for the requested attribute name
#
if not key.startswith('_'):
method_names = self.__dict__.get('_parm_method_names', None)
sesi_parm = self.__dict__.get('_sesi_parm', None)
if sesi_parm and method_names and key in method_names:
return getattr(sesi_parm, key)
# no valid attribute could be found
raise AttributeError('Unknown attribute "%s"' % key)
def get_value(self):
"""
Returns the value stored by this parameter object.
"""
return self._sesi_parm.eval()
def set_value(self, value):
"""
Sets the value stored by this parameter object.
"""
print value
if self.CAST_TYPE:
value = self.CAST_TYPE(value)
self._sesi_parm.set(value)
def __nonzero__(self):
"""
Handles cast to boolean.
"""
return bool(self.eval())
def __float__(self):
"""
Handles cast to float.
"""
return self.evalAsFloat()
def __int__(self):
"""
Handles cast to integer.
"""
return self.evalAsInt()
def __str__(self):
"""
Handles cast to string.
"""
return self.evalAsString()
def __unicode(self):
"""
Handles cast to unicode.
"""
return unicode(self.evalAsString())
def __coerce__(self, other):
"""
Handles cast to string.
"""
if other == None:
return (True, None)
# coercion to primitive types
#
if isinstance(other, float):
return (float(self), other)
if isinstance(other, int):
return (int(self), other)
if isinstance(other, bool):
return (bool(self), other)
if isinstance(other, str):
return (str(self), other)
if isinstance(other, unicode):
return (unicode(self), other)
# coercion between parm objects
#
if isinstance(other, FloatNodeParm):
return (float(self), float(other))
if isinstance(other, IntNodeParm):
return (int(self), int(other))
if isinstance(other, ToggleNodeParm):
return (bool(self), bool(other))
if isinstance(other, MenuNodeParm):
return (str(self), str(other))
if isinstance(other, StringNodeParm):
return (str(self), str(other))
if isinstance(other, HouParm):
return (id(self), id(other))
# unknown type to coerce to
return (self, False)
def __repr__(self):
return '<%s path "%s" value "%s" at %d>' % (self.__class__.__name__,
self._sesi_parm.path(),
str(self.get_value()), id(self))
class NodeParmTuple(object):
"""
This class represents a multi-parm tuple
on a HouNode (e.g. t,r,s on a geo node).
"""
def __init__(self, hou_node, tuple_name,
parm_template, sesi_parm_tuple):
"""
Initializer for a Node parameter tuple.
"""
self._hou_node = hou_node
self._tuple_name = tuple_name
self._parm_template = parm_template
self._sesi_parm_tuple = sesi_parm_tuple
self._tuple_method_names = []
self._sub_parms = [hou_node.get_node_parm(parm.name())
for parm in sesi_parm_tuple]
self.update_parm_methods()
def update_parm_methods(self, parm_method_names=None):
"""
Updates the list of SESI parm tuple method names
that can be called vicariously on a NodeParmTuple instance.
"""
if parm_method_names == None:
parm_method_names = dir(self._sesi_parm_tuple)
self._parm_method_names = parm_method_names
def get_value(self):
return self._sesi_parm_tuple.eval()
def set_value(self, *args):
if len(args) == 1:
args = args[0]
if isinstance(args, NodeParmTuple):
args = args.get_value()
return self._sesi_parm_tuple.set(*args)
def __len__(self):
return len(self._sub_parms)
def __getitem__(self, index):
return self._sub_parms[index]
def __setitem__(self, index, value):
return self._sub_parms[index].set_value(value)
def __iter__(self, index):
return iter(self._sub_parms)
def __repr__(self):
path = '%s/%s' % (self._hou_node.path(), self._tuple_name)
return '<%s path "%s" value "%s" at %d>' % (self.__class__.__name__,
path, str(self.get_value()),
id(self))
class NumericNodeParm(HouParm):
# override common math operators
def __add__(self, other):
return self.get_value() + self.CAST_TYPE(other)
def __sub__(self, other):
return self.get_value() - self.CAST_TYPE(other)
def __mul__(self, other):
return self.get_value() * self.CAST_TYPE(other)
def __floordiv__(self, other):
return self.get_value() // self.CAST_TYPE(other)
def __mod__(self, other):
return self.get_value() % self.CAST_TYPE(other)
def __pow__(self, other):
return self.get_value() ** self.CAST_TYPE(other)
def __lshift__(self, other):
return self.get_value() << other
def __rshift__(self, other):
return self.get_value() >> other
def __and__(self, other):
return self.get_value() & self.CAST_TYPE(other)
def __xor__(self, other):
return self.get_value() ^ self.CAST_TYPE(other)
def __or__(self, other):
return self.get_value() | self.CAST_TYPE(other)
def __div__(self, other):
return self.get_value() / self.CAST_TYPE(other)
def __truediv__(self, other):
return self.get_value() / self.CAST_TYPE(other)
# right side operations
def __radd__(self, other):
return self.CAST_TYPE(other) + self.get_value()
def __rsub__(self, other):
return self.CAST_TYPE(other) - self.get_value()
def __rmul__(self, other):
return self.CAST_TYPE(other) * self.get_value()
def __rdiv__(self, other):
return self.CAST_TYPE(other) / self.get_value()
def __rtruediv__(self, other):
return self.CAST_TYPE(other) / self.get_value()
def __rfloordiv__(self, other):
return self.CAST_TYPE(other) // self.get_value()
def __rmod__(self, other):
return self.CAST_TYPE(other) % self.get_value()
def __rpow__(self, other):
return self.CAST_TYPE(other) ** self.get_value()
def __rlshift__(self, other):
return other << self.get_value()
def __rrshift__(self, other):
return other >> self.get_value()
def __rand__(self, other):
return self.CAST_TYPE(other) & self.get_value()
def __rxor__(self, other):
return self.CAST_TYPE(other) ^ self.get_value()
def __ror__(self, other):
return self.CAST_TYPE(other) | self.get_value()
# inplace operations
def __iadd__(self, other):
return self.get_value() + self.CAST_TYPE(other)
def __isub__(self, other):
return self.get_value() - self.CAST_TYPE(other)
def __imul__(self, other):
return self.get_value() * self.CAST_TYPE(other)
def __idiv__(self, other):
return self.get_value() / self.CAST_TYPE(other)
def __itruediv__(self, other):
return self.get_value() / self.CAST_TYPE(other)
def __ifloordiv__(self, other):
return self.get_value() // self.CAST_TYPE(other)
def __imod__(self, other):
return self.get_value() % self.CAST_TYPE(other)
def __ipow__(self, other):
return self.get_value() ** self.CAST_TYPE(other)
def __ilshift__(self, other):
return self.get_value() << other
def __irshift__(self, other):
return self.get_value() >> other
def __iand__(self, other):
return self.get_value() & self.CAST_TYPE(other)
def __ixor__(self, other):
return self.get_value() ^ self.CAST_TYPE(other)
def __ior__(self, other):
return self.get_value() | self.CAST_TYPE(other)
def __neg__(self):
return -self.get_value()
def __pos__(self):
return +self.get_value()
def __abs__(self):
return abs(self.get_value())
def __invert__(self):
return ~self.get_value()
class IntNodeParm(NumericNodeParm):
SUPPORTED_TYPES = [hou.parmTemplateType.Int]
CAST_TYPE = int
class ToggleNodeParm(NumericNodeParm):
SUPPORTED_TYPES = [hou.parmTemplateType.Toggle]
CAST_TYPE = bool
class FloatNodeParm(NumericNodeParm):
SUPPORTED_TYPES = [hou.parmTemplateType.Float]
CAST_TYPE = float
class MenuNodeParm(HouParm):
SUPPORTED_TYPES = [hou.parmTemplateType.Menu]
class StringNodeParm(HouParm):
SUPPORTED_TYPES = [hou.parmTemplateType.String]
CAST_TYPE = str
@classmethod
def get_class_for_parm(cls, parm_template):
"""
Get a HouParm class based on the parameters string type.
"""
if not isinstance(parm_template, hou.StringParmTemplate):
raise Exception('Unknown parmeter template type "%s"'
% type(parm_template).__name__)
string_type = parm_template.stringType()
if string_type == hou.stringParmType.Regular:
return StringNodeParm
elif string_type == hou.stringParmType.FileReference:
return FileReferenceNodeParm
elif string_type == hou.stringParmType.NodeReference:
return NodeReferenceParm
elif string_type == hou.stringParmType.NodeReferenceList:
return NodeListReferenceParm
return cls
def expand(self, ignore_frame=False, ignore_names=None):
"""
Expands expression globals in this string parameter
optionally ignoring specific global variables.
"""
self._ignore_frame = ignore_frame
self._ignore_names = ignore_names
path = self._sesi_parm.unexpandedString()
return re.sub(r'\${?([a-zA-Z0-9_]+)}?', self._replace_var, path)
def _replace_var(self, match_obj):
"""
Replaces global variables in a path except frame place holders.
"""
original_str = match_obj.group(0)
var_name = match_obj.group(1)
if self._ignore_frame and re.match('F[0-9]?$', var_name):
return original_str
if self._ignore_names and var_name in self._ignore_names:
return original_str
if var_name:
value, err = hou.hscript('echo $%s' % var_name)
value = str(value).rstrip('\n')
if value and not err:
# remove trailing new line
return value
return original_str
class FileReferenceNodeParm(StringNodeParm):
"""
A parameter that references a file or file path.
"""
def create_directory(self):
"""
Creates any missing directories in this parameter file path.
"""
path = self._sesi_parm.evalAsString()
dir, file = os.path.split(path)
if '.' in file:
path = dir
if not os.path.exists(path):
os.makedirs(path)
def expand_path(self):
return self.expand(True)
class NodeReferenceParm(StringNodeParm):
"""
A parameter that references another Houdini node.
"""
def get_node(self):
value = self._sesi_parm.evalAsString()
return hou.node(value)
def get_hou_node(self):
from hou_node import get_hou_node
value = self._sesi_parm.evalAsString()
return get_hou_node(hou.node(value))
class NodeListReferenceParm(StringNodeParm):
"""
A multiple node reference parameter.
"""
def get_nodes(self):
str_value = self._sesi_parm.evalAsString()
values = str_value.split()
nodes = []
for value in values:
if value.startswith('@'):
node_bundle = hou.nodeBundle(value)
if node_bundle != None:
nodes.extend(node_bundle.nodes())
else:
node = hou.node(value)
if node:
nodes.append(node)
return nodes
def get_hou_nodes(self):
from hou_node import get_hou_node
return [get_hou_node(node) for node in self.get_nodes()]
| 35.35512 | 85 | 0.584607 |
import os
import re
import hou
class MetaHouParm(type):
"""
The MetaHouParm class is a metaclass that keeps track of which
classes support which parameter types.
"""
PARM_TYPE_TO_CLASS = dict()
def __new__(cls, class_name, bases, class_dict):
"""
Called when a class is created that uses this metaclass
"""
new_cls = type.__new__(cls, class_name, bases, class_dict)
supported_parm_types = class_dict.get('SUPPORTED_TYPES', [])
for parm_type in supported_parm_types:
cls.PARM_TYPE_TO_CLASS[parm_type] = new_cls
return new_cls
@classmethod
def get_node_parm(mcls, hou_node, parm_name):
"""
Instantiates and returns a HouParm object
to represent the specified node paramter.
"""
sesi_tuple = hou_node.parmTuple(parm_name)
if sesi_tuple and len(sesi_tuple) > 1:
parm_template = sesi_tuple.parmTemplate()
return NodeParmTuple(hou_node, parm_name,
parm_template, sesi_tuple)
sesi_parm = hou_node.parm(parm_name)
if not sesi_parm:
return None
parm_template = sesi_parm.parmTemplate()
parm_type = parm_template.type()
cls = mcls.PARM_TYPE_TO_CLASS.get(parm_type, HouParm)
cls = cls.get_class_for_parm(parm_template)
return cls(hou_node, parm_name, parm_template, sesi_parm)
class HouParm(object):
"""
The base class to represent a Houdini node parameter.
"""
__metaclass__ = MetaHouParm
SUPPORTED_TYPES = []
CAST_TYPE = None
def __init__(self, hou_node, parm_name,
parm_template, sesi_parm=None):
"""
Initializer for a Node parameter.
"""
self._hou_node = hou_node
self._parm_name = parm_name
self._parm_template = parm_template
self._sesi_parm = sesi_parm
self._cast_type = None
self._parm_method_names = []
self.update_node_methods()
@classmethod
def get_class_for_parm(cls, parm_template):
"""
Virtual method to control what HouParm class gets used
for the specified parmater template. After the class is identied
by the SUPPORTED_TYPES list.
"""
return cls
def update_node_methods(self, parm_method_names=None):
"""
Updates the list of SESI parm method names
that can be called vicariously on this HouParm instance.
"""
if parm_method_names == None:
parm_method_names = dir(self._sesi_parm)
self._parm_method_names = parm_method_names
def __getattr__(self, key):
"""
Overrides the attribute retrieval method for HouParm instances.
This method is only called if a python attribute or method of the
requested name was not found.
"""
if not key.startswith('_'):
method_names = self.__dict__.get('_parm_method_names', None)
sesi_parm = self.__dict__.get('_sesi_parm', None)
if sesi_parm and method_names and key in method_names:
return getattr(sesi_parm, key)
raise AttributeError('Unknown attribute "%s"' % key)
def get_value(self):
"""
Returns the value stored by this parameter object.
"""
return self._sesi_parm.eval()
def set_value(self, value):
"""
Sets the value stored by this parameter object.
"""
print value
if self.CAST_TYPE:
value = self.CAST_TYPE(value)
self._sesi_parm.set(value)
def __nonzero__(self):
"""
Handles cast to boolean.
"""
return bool(self.eval())
def __float__(self):
"""
Handles cast to float.
"""
return self.evalAsFloat()
def __int__(self):
"""
Handles cast to integer.
"""
return self.evalAsInt()
def __str__(self):
"""
Handles cast to string.
"""
return self.evalAsString()
def __unicode(self):
"""
Handles cast to unicode.
"""
return unicode(self.evalAsString())
def __coerce__(self, other):
"""
Handles cast to string.
"""
if other == None:
return (True, None)
if isinstance(other, float):
return (float(self), other)
if isinstance(other, int):
return (int(self), other)
if isinstance(other, bool):
return (bool(self), other)
if isinstance(other, str):
return (str(self), other)
if isinstance(other, unicode):
return (unicode(self), other)
if isinstance(other, FloatNodeParm):
return (float(self), float(other))
if isinstance(other, IntNodeParm):
return (int(self), int(other))
if isinstance(other, ToggleNodeParm):
return (bool(self), bool(other))
if isinstance(other, MenuNodeParm):
return (str(self), str(other))
if isinstance(other, StringNodeParm):
return (str(self), str(other))
if isinstance(other, HouParm):
return (id(self), id(other))
return (self, False)
def __repr__(self):
return '<%s path "%s" value "%s" at %d>' % (self.__class__.__name__,
self._sesi_parm.path(),
str(self.get_value()), id(self))
class NodeParmTuple(object):
"""
This class represents a multi-parm tuple
on a HouNode (e.g. t,r,s on a geo node).
"""
def __init__(self, hou_node, tuple_name,
parm_template, sesi_parm_tuple):
"""
Initializer for a Node parameter tuple.
"""
self._hou_node = hou_node
self._tuple_name = tuple_name
self._parm_template = parm_template
self._sesi_parm_tuple = sesi_parm_tuple
self._tuple_method_names = []
self._sub_parms = [hou_node.get_node_parm(parm.name())
for parm in sesi_parm_tuple]
self.update_parm_methods()
def update_parm_methods(self, parm_method_names=None):
"""
Updates the list of SESI parm tuple method names
that can be called vicariously on a NodeParmTuple instance.
"""
if parm_method_names == None:
parm_method_names = dir(self._sesi_parm_tuple)
self._parm_method_names = parm_method_names
def get_value(self):
return self._sesi_parm_tuple.eval()
def set_value(self, *args):
if len(args) == 1:
args = args[0]
if isinstance(args, NodeParmTuple):
args = args.get_value()
return self._sesi_parm_tuple.set(*args)
def __len__(self):
return len(self._sub_parms)
def __getitem__(self, index):
return self._sub_parms[index]
def __setitem__(self, index, value):
return self._sub_parms[index].set_value(value)
def __iter__(self, index):
return iter(self._sub_parms)
def __repr__(self):
path = '%s/%s' % (self._hou_node.path(), self._tuple_name)
return '<%s path "%s" value "%s" at %d>' % (self.__class__.__name__,
path, str(self.get_value()),
id(self))
class NumericNodeParm(HouParm):
def __add__(self, other):
return self.get_value() + self.CAST_TYPE(other)
def __sub__(self, other):
return self.get_value() - self.CAST_TYPE(other)
def __mul__(self, other):
return self.get_value() * self.CAST_TYPE(other)
def __floordiv__(self, other):
return self.get_value() // self.CAST_TYPE(other)
def __mod__(self, other):
return self.get_value() % self.CAST_TYPE(other)
def __pow__(self, other):
return self.get_value() ** self.CAST_TYPE(other)
def __lshift__(self, other):
return self.get_value() << other
def __rshift__(self, other):
return self.get_value() >> other
def __and__(self, other):
return self.get_value() & self.CAST_TYPE(other)
def __xor__(self, other):
return self.get_value() ^ self.CAST_TYPE(other)
def __or__(self, other):
return self.get_value() | self.CAST_TYPE(other)
def __div__(self, other):
return self.get_value() / self.CAST_TYPE(other)
def __truediv__(self, other):
return self.get_value() / self.CAST_TYPE(other)
def __radd__(self, other):
return self.CAST_TYPE(other) + self.get_value()
def __rsub__(self, other):
return self.CAST_TYPE(other) - self.get_value()
def __rmul__(self, other):
return self.CAST_TYPE(other) * self.get_value()
def __rdiv__(self, other):
return self.CAST_TYPE(other) / self.get_value()
def __rtruediv__(self, other):
return self.CAST_TYPE(other) / self.get_value()
def __rfloordiv__(self, other):
return self.CAST_TYPE(other) // self.get_value()
def __rmod__(self, other):
return self.CAST_TYPE(other) % self.get_value()
def __rpow__(self, other):
return self.CAST_TYPE(other) ** self.get_value()
def __rlshift__(self, other):
return other << self.get_value()
def __rrshift__(self, other):
return other >> self.get_value()
def __rand__(self, other):
return self.CAST_TYPE(other) & self.get_value()
def __rxor__(self, other):
return self.CAST_TYPE(other) ^ self.get_value()
def __ror__(self, other):
return self.CAST_TYPE(other) | self.get_value()
def __iadd__(self, other):
return self.get_value() + self.CAST_TYPE(other)
def __isub__(self, other):
return self.get_value() - self.CAST_TYPE(other)
def __imul__(self, other):
return self.get_value() * self.CAST_TYPE(other)
def __idiv__(self, other):
return self.get_value() / self.CAST_TYPE(other)
def __itruediv__(self, other):
return self.get_value() / self.CAST_TYPE(other)
def __ifloordiv__(self, other):
return self.get_value() // self.CAST_TYPE(other)
def __imod__(self, other):
return self.get_value() % self.CAST_TYPE(other)
def __ipow__(self, other):
return self.get_value() ** self.CAST_TYPE(other)
def __ilshift__(self, other):
return self.get_value() << other
def __irshift__(self, other):
return self.get_value() >> other
def __iand__(self, other):
return self.get_value() & self.CAST_TYPE(other)
def __ixor__(self, other):
return self.get_value() ^ self.CAST_TYPE(other)
def __ior__(self, other):
return self.get_value() | self.CAST_TYPE(other)
def __neg__(self):
return -self.get_value()
def __pos__(self):
return +self.get_value()
def __abs__(self):
return abs(self.get_value())
def __invert__(self):
return ~self.get_value()
class IntNodeParm(NumericNodeParm):
SUPPORTED_TYPES = [hou.parmTemplateType.Int]
CAST_TYPE = int
class ToggleNodeParm(NumericNodeParm):
SUPPORTED_TYPES = [hou.parmTemplateType.Toggle]
CAST_TYPE = bool
class FloatNodeParm(NumericNodeParm):
SUPPORTED_TYPES = [hou.parmTemplateType.Float]
CAST_TYPE = float
class MenuNodeParm(HouParm):
SUPPORTED_TYPES = [hou.parmTemplateType.Menu]
class StringNodeParm(HouParm):
SUPPORTED_TYPES = [hou.parmTemplateType.String]
CAST_TYPE = str
@classmethod
def get_class_for_parm(cls, parm_template):
"""
Get a HouParm class based on the parameters string type.
"""
if not isinstance(parm_template, hou.StringParmTemplate):
raise Exception('Unknown parmeter template type "%s"'
% type(parm_template).__name__)
string_type = parm_template.stringType()
if string_type == hou.stringParmType.Regular:
return StringNodeParm
elif string_type == hou.stringParmType.FileReference:
return FileReferenceNodeParm
elif string_type == hou.stringParmType.NodeReference:
return NodeReferenceParm
elif string_type == hou.stringParmType.NodeReferenceList:
return NodeListReferenceParm
return cls
def expand(self, ignore_frame=False, ignore_names=None):
"""
Expands expression globals in this string parameter
optionally ignoring specific global variables.
"""
self._ignore_frame = ignore_frame
self._ignore_names = ignore_names
path = self._sesi_parm.unexpandedString()
return re.sub(r'\${?([a-zA-Z0-9_]+)}?', self._replace_var, path)
def _replace_var(self, match_obj):
"""
Replaces global variables in a path except frame place holders.
"""
original_str = match_obj.group(0)
var_name = match_obj.group(1)
if self._ignore_frame and re.match('F[0-9]?$', var_name):
return original_str
if self._ignore_names and var_name in self._ignore_names:
return original_str
if var_name:
value, err = hou.hscript('echo $%s' % var_name)
value = str(value).rstrip('\n')
if value and not err:
return value
return original_str
class FileReferenceNodeParm(StringNodeParm):
"""
A parameter that references a file or file path.
"""
def create_directory(self):
"""
Creates any missing directories in this parameter file path.
"""
path = self._sesi_parm.evalAsString()
dir, file = os.path.split(path)
if '.' in file:
path = dir
if not os.path.exists(path):
os.makedirs(path)
def expand_path(self):
return self.expand(True)
class NodeReferenceParm(StringNodeParm):
"""
A parameter that references another Houdini node.
"""
def get_node(self):
value = self._sesi_parm.evalAsString()
return hou.node(value)
def get_hou_node(self):
from hou_node import get_hou_node
value = self._sesi_parm.evalAsString()
return get_hou_node(hou.node(value))
class NodeListReferenceParm(StringNodeParm):
    """
    A multiple node reference parameter.

    The parameter value is a whitespace-separated list of node paths;
    entries starting with '@' are treated as node bundle references.
    """
    def get_nodes(self):
        """
        Returns the hou.Node objects referenced by this parameter,
        expanding '@bundle' entries into their member nodes.
        Unresolvable entries are silently skipped.
        """
        str_value = self._sesi_parm.evalAsString()
        values = str_value.split()
        nodes = []
        for value in values:
            if value.startswith('@'):
                node_bundle = hou.nodeBundle(value)
                # Identity test instead of the non-idiomatic `!= None`.
                if node_bundle is not None:
                    nodes.extend(node_bundle.nodes())
            else:
                node = hou.node(value)
                if node:
                    nodes.append(node)
        return nodes
    def get_hou_nodes(self):
        """Returns the referenced nodes wrapped via hou_node.get_hou_node."""
        from hou_node import get_hou_node
        return [get_hou_node(node) for node in self.get_nodes()]
| false | true |
f7f58a9be17de85b79d5dbeed2f7997e9169472e | 1,060 | py | Python | extraPackages/matplotlib-3.0.3/examples/pie_and_polar_charts/polar_legend.py | dolboBobo/python3_ios | 877f8c2c5890f26292ddd14909bea62a04fe2889 | [
"BSD-3-Clause"
] | 130 | 2018-02-03T10:25:54.000Z | 2022-03-25T22:27:22.000Z | extraPackages/matplotlib-3.0.2/examples/pie_and_polar_charts/polar_legend.py | spacetime314/python3_ios | e149f1bc2e50046c8810f83dae7739a8dea939ee | [
"BSD-3-Clause"
] | 9 | 2018-12-14T07:31:42.000Z | 2020-12-09T20:29:28.000Z | extraPackages/matplotlib-3.0.2/examples/pie_and_polar_charts/polar_legend.py | spacetime314/python3_ios | e149f1bc2e50046c8810f83dae7739a8dea939ee | [
"BSD-3-Clause"
] | 64 | 2018-04-25T08:51:57.000Z | 2022-01-29T14:13:57.000Z | """
============
Polar Legend
============
Demo of a legend on a polar-axis plot.
"""
import matplotlib.pyplot as plt
import numpy as np
# radar green, solid grid lines
plt.rc('grid', color='#316931', linewidth=1, linestyle='-')
plt.rc('xtick', labelsize=15)
plt.rc('ytick', labelsize=15)
# force square figure and square axes looks better for polar, IMO
fig = plt.figure(figsize=(8, 8))
ax = fig.add_axes([0.1, 0.1, 0.8, 0.8],
                  projection='polar', facecolor='#d5de9c')
# Spiral: the angle grows linearly with the radius (theta = 2*pi*r).
r = np.arange(0, 3.0, 0.01)
theta = 2 * np.pi * r
ax.plot(theta, r, color='#ee8d18', lw=3, label='a line')
# Same radii at half the angular rate give a second spiral.
ax.plot(0.5 * theta, r, color='blue', ls='--', lw=3, label='another line')
# The legend picks up the `label` kwargs from the plot calls above.
ax.legend()
plt.show()
#############################################################################
#
# ------------
#
# References
# """"""""""
#
# The use of the following functions, methods, classes and modules is shown
# in this example:
import matplotlib
matplotlib.axes.Axes.plot
matplotlib.axes.Axes.legend
matplotlib.projections.polar
matplotlib.projections.polar.PolarAxes
# Demo of a legend on a polar-axis plot.
import matplotlib.pyplot as plt
import numpy as np
# Style the grid and tick labels before any axes are created.
plt.rc('grid', color='#316931', linewidth=1, linestyle='-')
plt.rc('xtick', labelsize=15)
plt.rc('ytick', labelsize=15)
# Square figure with square polar axes.
fig = plt.figure(figsize=(8, 8))
ax = fig.add_axes([0.1, 0.1, 0.8, 0.8],
                  projection='polar', facecolor='#d5de9c')
# Spiral: the angle grows linearly with the radius (theta = 2*pi*r).
r = np.arange(0, 3.0, 0.01)
theta = 2 * np.pi * r
ax.plot(theta, r, color='#ee8d18', lw=3, label='a line')
# Same radii at half the angular rate give a second spiral.
ax.plot(0.5 * theta, r, color='blue', ls='--', lw=3, label='another line')
# Legend entries come from the `label` kwargs above.
ax.legend()
plt.show()
| true | true |
f7f58b45440a505c55f2ede98b7a12a7c8c062c0 | 30,778 | py | Python | plugin/documents.py | mitranim/LSP | 3d4aebe1959b308a97e8464e7d278e067dffd545 | [
"MIT"
] | null | null | null | plugin/documents.py | mitranim/LSP | 3d4aebe1959b308a97e8464e7d278e067dffd545 | [
"MIT"
] | null | null | null | plugin/documents.py | mitranim/LSP | 3d4aebe1959b308a97e8464e7d278e067dffd545 | [
"MIT"
] | null | null | null | from .code_actions import actions_manager
from .code_actions import CodeActionsByConfigName
from .completion import LspResolveDocsCommand
from .completion import resolve
from .core.css import css
from .core.logging import debug
from .core.protocol import Diagnostic
from .core.protocol import DocumentHighlightKind
from .core.protocol import Range
from .core.protocol import Request
from .core.registry import best_session
from .core.registry import windows
from .core.sessions import Session
from .core.settings import userprefs
from .core.signature_help import create_signature_help
from .core.signature_help import SignatureHelp
from .core.types import basescope2languageid
from .core.types import debounced
from .core.typing import Any, Callable, Optional, Dict, Generator, Iterable, List, Tuple, Union
from .core.views import DIAGNOSTIC_SEVERITY
from .core.views import document_color_params
from .core.views import format_completion
from .core.views import FORMAT_MARKUP_CONTENT
from .core.views import FORMAT_STRING
from .core.views import lsp_color_to_phantom
from .core.views import make_command_link
from .core.views import minihtml
from .core.views import range_to_region
from .core.views import region_to_range
from .core.views import text_document_position_params
from .core.windows import AbstractViewListener
from .core.windows import WindowManager
from .diagnostics import filter_by_range
from .diagnostics import view_diagnostics
from .session_buffer import SessionBuffer
from .session_view import SessionView
from weakref import WeakSet
from weakref import WeakValueDictionary
import html
import mdpopups
import sublime
import sublime_plugin
import webbrowser
# Word-boundary classification mask for View.classify()-style calls.
# NOTE(review): 515 mirrors Sublime's internal word mask — confirm the
# exact flag composition against the sublime API constants.
SUBLIME_WORD_MASK = 515
# Human-readable names for LSP DocumentHighlightKind values; used below to
# build per-kind region keys ("lsp_highlight_<name>").
_kind2name = {
    DocumentHighlightKind.Unknown: "unknown",
    DocumentHighlightKind.Text: "text",
    DocumentHighlightKind.Read: "read",
    DocumentHighlightKind.Write: "write"
}
def is_regular_view(v: sublime.View) -> bool:
    """
    Check whether the view is an ordinary file view: not a transient
    quick-panel (CTRL+P) preview, backed by a file name on disk, and not
    a special widget such as a console, output panel or find-in-files
    panel.
    """
    if v.sheet().is_transient():
        return False
    if not v.file_name():
        return False
    return v.element() is None
def previous_non_whitespace_char(view: sublime.View, pt: int) -> str:
    """
    Return the character just before ``pt``.  If that character is
    whitespace, jump back to the previous class boundary (``~0`` enables
    every class flag in find_by_class) and return the character before it.
    """
    ch = view.substr(pt - 1)
    if not ch.isspace():
        return ch
    return view.substr(view.find_by_class(pt, False, ~0) - 1)
class ColorSchemeScopeRenderer:
    """
    Renders signature-help fragments as HTML, colored with the view's
    color-scheme styles for a few well-known scopes.
    """

    _SCOPES = ("entity.name.function", "variable.parameter", "punctuation")

    def __init__(self, view: sublime.View) -> None:
        self._view = view
        # Resolve each scope's style once up front.
        self._scope_styles = {
            scope: mdpopups.scope2style(view, scope) for scope in self._SCOPES
        }  # type: dict

    def function(self, content: str, escape: bool = True) -> str:
        """Wrap ``content`` in the function-name scope color."""
        return self._wrap_with_scope_style(content, "entity.name.function", escape=escape)

    def punctuation(self, content: str) -> str:
        """Wrap ``content`` in the punctuation scope color."""
        return self._wrap_with_scope_style(content, "punctuation")

    def parameter(self, content: str, emphasize: bool = False) -> str:
        """Wrap ``content`` in the parameter scope color, optionally emphasized."""
        return self._wrap_with_scope_style(content, "variable.parameter", emphasize)

    def markup(self, content: Union[str, Dict[str, str]]) -> str:
        """Convert a plain string or MarkupContent payload to minihtml."""
        return minihtml(self._view, content, allowed_formats=FORMAT_STRING | FORMAT_MARKUP_CONTENT)

    def _wrap_with_scope_style(self, content: str, scope: str, emphasize: bool = False, escape: bool = True) -> str:
        color = self._scope_styles[scope]["color"]
        extra_css = 'font-weight: bold; text-decoration: underline;' if emphasize else ''
        if escape:
            content = html.escape(content, quote=False)
        return '<span style="color: {};{}">{}</span>'.format(color, extra_css, content)
class TextChangeListener(sublime_plugin.TextChangeListener):
    """
    Buffer-level listener that fans text-change / reload / revert events
    out to every DocumentSyncListener attached to the buffer.
    """

    # buffer_id -> attached listener; weak values so an entry disappears
    # as soon as its listener is garbage-collected.
    ids_to_listeners = WeakValueDictionary()  # type: WeakValueDictionary[int, TextChangeListener]

    @classmethod
    def is_applicable(cls, buffer: sublime.Buffer) -> bool:
        # Only track buffers whose primary view is a regular file view.
        v = buffer.primary_view()
        return v is not None and is_regular_view(v)

    def __init__(self) -> None:
        super().__init__()
        # Interested DocumentSyncListeners; weak refs so a closed view's
        # listener is never kept alive by this set.
        self.view_listeners = WeakSet()  # type: WeakSet[DocumentSyncListener]

    def attach(self, buffer: sublime.Buffer) -> None:
        # Register in the global id map on attach.
        super().attach(buffer)
        self.ids_to_listeners[self.buffer.buffer_id] = self

    def detach(self) -> None:
        # Unregister before detaching from the buffer.
        self.ids_to_listeners.pop(self.buffer.buffer_id, None)
        super().detach()

    def on_text_changed(self, changes: Iterable[sublime.TextChange]) -> None:
        """
        Forward a change set to all attached view listeners on the async
        thread.  The change count and the listener set are snapshotted on
        this thread so the deferred callback sees a consistent view.
        """
        view = self.buffer.primary_view()
        if not view:
            return
        change_count = view.change_count()
        frozen_listeners = WeakSet(self.view_listeners)

        def notify() -> None:
            for listener in list(frozen_listeners):
                listener.on_text_changed_async(change_count, changes)
        sublime.set_timeout_async(notify)

    def on_reload_async(self) -> None:
        # Iterate over a copy: handlers may mutate the weak set.
        for listener in list(self.view_listeners):
            listener.on_reload_async()

    def on_revert_async(self) -> None:
        for listener in list(self.view_listeners):
            listener.on_revert_async()

    def __repr__(self) -> str:
        return "TextChangeListener({})".format(self.buffer.buffer_id)
class DocumentSyncListener(sublime_plugin.ViewEventListener, AbstractViewListener):
    """
    Per-view listener connecting a Sublime Text view to its language
    server sessions.  Forwards lifecycle, text and selection events to the
    attached SessionViews and drives the view-level LSP features:
    signature help, code actions, document color boxes, document
    highlights, completions and the active-diagnostic status message.
    """

    # Region key for the code-action lightbulb / annotation decorations.
    CODE_ACTIONS_KEY = "lsp_code_action"
    # Status-bar key for the diagnostic message under the caret.
    ACTIVE_DIAGNOSTIC = "lsp_active_diagnostic"
    # Debounce delays in milliseconds (passed as after_ms) before the
    # corresponding request is sent.
    code_actions_debounce_time = 800
    color_boxes_debounce_time = 500
    highlights_debounce_time = 300

    @classmethod
    def applies_to_primary_view_only(cls) -> bool:
        # Cloned views get their own listener instance too.
        return False

    def __init__(self, view: sublime.View) -> None:
        super().__init__(view)
        self._manager = None  # type: Optional[WindowManager]
        self._session_views = {}  # type: Dict[str, SessionView]
        # Last observed first selection; Region(-1, -1) means "unknown".
        self._stored_region = sublime.Region(-1, -1)
        self._color_phantoms = sublime.PhantomSet(self.view, "lsp_color")
        self._sighelp = None  # type: Optional[SignatureHelp]
        self._sighelp_renderer = ColorSchemeScopeRenderer(self.view)
        self._language_id = ""
        self._registered = False

    def __del__(self) -> None:
        # Strip server-injected auto-complete triggers and remove every
        # decoration / status entry this listener added to the view.
        settings = self.view.settings()
        triggers = settings.get("auto_complete_triggers") or []  # type: List[Dict[str, str]]
        triggers = [trigger for trigger in triggers if 'server' not in trigger]
        settings.set("auto_complete_triggers", triggers)
        self._stored_region = sublime.Region(-1, -1)
        self._color_phantoms.update([])
        self.view.erase_status(AbstractViewListener.TOTAL_ERRORS_AND_WARNINGS_STATUS_KEY)
        self._clear_highlight_regions()
        self._clear_session_views_async()

    # --- Implements AbstractViewListener ------------------------------------------------------------------------------

    def on_session_initialized_async(self, session: Session) -> None:
        """Create a SessionView for a newly initialized session, hook this
        listener into the buffer's TextChangeListener, and kick off color
        boxes if that capability is enabled."""
        assert not self.view.is_loading()
        added = False
        if session.config.name not in self._session_views:
            self._session_views[session.config.name] = SessionView(self, session)
            buf = self.view.buffer()
            if buf:
                text_change_listener = TextChangeListener.ids_to_listeners.get(buf.buffer_id)
                if text_change_listener:
                    text_change_listener.view_listeners.add(self)
            self.view.settings().set("lsp_active", True)
            added = True
        if added:
            if "colorProvider" not in userprefs().disabled_capabilities:
                self._do_color_boxes_async()

    def on_session_shutdown_async(self, session: Session) -> None:
        """Drop the SessionView for a session that went away; clear the
        lsp_active marker when it was the last one."""
        removed_session = self._session_views.pop(session.config.name, None)
        if removed_session:
            if not self._session_views:
                self.view.settings().erase("lsp_active")
                self._registered = False
        else:
            # SessionView was likely not created for this config so remove status here.
            session.config.erase_view_status(self.view)

    def diagnostics_panel_contribution_async(self) -> List[str]:
        """Collect this view's diagnostics-panel lines, ordered by severity."""
        result = []  # type: List[str]
        # Sort by severity
        for severity in range(1, len(DIAGNOSTIC_SEVERITY) + 1):
            for sb in self.session_buffers_async():
                data = sb.data_per_severity.get(severity)
                if data:
                    result.extend(data.panel_contribution)
        return result

    def diagnostics_async(self) -> Generator[Tuple[SessionBuffer, List[Diagnostic]], None, None]:
        # Yields (session buffer, its diagnostics) pairs.
        for sb in self.session_buffers_async():
            yield sb, sb.diagnostics

    def on_diagnostics_updated_async(self) -> None:
        # Refresh the code-action decoration and the status-bar message.
        self._clear_code_actions_annotation()
        self._do_code_actions()
        self._update_diagnostic_in_status_bar_async()

    def _update_diagnostic_in_status_bar_async(self) -> None:
        """Show the first diagnostic intersecting the caret in the status
        bar, or clear the entry when there is none."""
        if userprefs().show_diagnostics_in_view_status:
            r = self._get_current_range_async()
            if r is not None:
                diags_by_config_name, _ = self.diagnostics_intersecting_range_async(r)
                if diags_by_config_name:
                    for diags in diags_by_config_name.values():
                        diag = next(iter(diags), None)
                        if diag:
                            self.view.set_status(self.ACTIVE_DIAGNOSTIC, diag.message)
                            return
        self.view.erase_status(self.ACTIVE_DIAGNOSTIC)

    def session_views_async(self) -> Generator[SessionView, None, None]:
        yield from self._session_views.values()

    def session_buffers_async(self) -> Generator[SessionBuffer, None, None]:
        for sv in self.session_views_async():
            yield sv.session_buffer

    def on_text_changed_async(self, change_count: int, changes: Iterable[sublime.TextChange]) -> None:
        """Forward buffer changes to the session views (primary view only)
        and re-schedule the selection-dependent features."""
        different, current_region = self._update_stored_region_async()
        if self.view.is_primary():
            for sv in self.session_views_async():
                sv.on_text_changed_async(change_count, changes)
        if not different:
            return
        if "documentHighlight" not in userprefs().disabled_capabilities:
            self._clear_highlight_regions()
            self._when_selection_remains_stable_async(self._do_highlights_async, current_region,
                                                      after_ms=self.highlights_debounce_time)
        if "colorProvider" not in userprefs().disabled_capabilities:
            self._when_selection_remains_stable_async(self._do_color_boxes_async, current_region,
                                                      after_ms=self.color_boxes_debounce_time)
        if "signatureHelp" not in userprefs().disabled_capabilities:
            self._do_signature_help(manual=False)

    def on_revert_async(self) -> None:
        if self.view.is_primary():
            for sv in self.session_views_async():
                sv.on_revert_async()

    def on_reload_async(self) -> None:
        if self.view.is_primary():
            for sv in self.session_views_async():
                sv.on_reload_async()

    def get_language_id(self) -> str:
        # Set by _register_async from the view's base scope.
        return self._language_id

    # --- Callbacks from Sublime Text ----------------------------------------------------------------------------------

    def on_load_async(self) -> None:
        if not self._registered and is_regular_view(self.view):
            self._register_async()

    def on_activated_async(self) -> None:
        if not self._registered and not self.view.is_loading() and is_regular_view(self.view):
            self._register_async()

    def on_selection_modified_async(self) -> None:
        # Re-schedule highlights, code actions and the status-bar message
        # only when the first selection actually moved.
        different, current_region = self._update_stored_region_async()
        if different:
            if "documentHighlight" not in userprefs().disabled_capabilities:
                if not self._is_in_higlighted_region(current_region.b):
                    self._clear_highlight_regions()
                self._when_selection_remains_stable_async(self._do_highlights_async, current_region,
                                                          after_ms=self.highlights_debounce_time)
            self._clear_code_actions_annotation()
            self._when_selection_remains_stable_async(self._do_code_actions, current_region,
                                                      after_ms=self.code_actions_debounce_time)
            self._update_diagnostic_in_status_bar_async()

    def on_post_save_async(self) -> None:
        if self.view.is_primary():
            for sv in self.session_views_async():
                sv.on_post_save_async()

    def on_close(self) -> None:
        self._clear_session_views_async()

    def on_query_context(self, key: str, operator: int, operand: Any, match_all: bool) -> bool:
        """Answer keybinding-context queries: capability checks,
        session presence, and signature-help popup navigation."""
        if key == "lsp.session_with_capability" and operator == sublime.OP_EQUAL and isinstance(operand, str):
            capabilities = [s.strip() for s in operand.split("|")]
            for capability in capabilities:
                if any(self.sessions(capability)):
                    return True
            return False
        elif key in ("lsp.sessions", "setting.lsp_active"):
            return bool(self._session_views)
        elif key == "lsp.signature_help":
            if not self.view.is_popup_visible():
                if operand == 0:
                    sublime.set_timeout_async(lambda: self._do_signature_help(manual=True))
                    return True
            elif self._sighelp and self._sighelp.has_multiple_signatures() and not self.view.is_auto_complete_visible():
                # We use the "operand" for the number -1 or +1. See the keybindings.
                self._sighelp.select_signature(operand)
                self._update_sighelp_popup(self._sighelp.build_popup_content(self._sighelp_renderer))
                return True  # We handled this keybinding.
        return False

    def on_hover(self, point: int, hover_zone: int) -> None:
        # Only trigger hover over actual text, and never on top of an
        # already-visible popup.
        if (hover_zone != sublime.HOVER_TEXT
                or self.view.is_popup_visible()
                or "hover" in userprefs().disabled_capabilities):
            return
        self.view.run_command("lsp_hover", {"point": point})

    def on_post_text_command(self, command_name: str, args: Optional[Dict[str, Any]]) -> None:
        if command_name in ("next_field", "prev_field") and args is None:
            if "signatureHelp" not in userprefs().disabled_capabilities:
                sublime.set_timeout_async(lambda: self._do_signature_help(manual=True))
        if not self.view.is_popup_visible():
            return
        if command_name in ["hide_auto_complete", "move", "commit_completion"] or 'delete' in command_name:
            # hide the popup when `esc` or arrows are pressed pressed
            self.view.hide_popup()

    def on_query_completions(self, prefix: str, locations: List[int]) -> Optional[sublime.CompletionList]:
        """Return an async CompletionList that is resolved on the worker
        thread once the server responds."""
        if "completion" in userprefs().disabled_capabilities:
            return None
        promise = sublime.CompletionList()
        sublime.set_timeout_async(lambda: self._on_query_completions_async(promise, locations[0]))
        return promise

    # --- textDocument/signatureHelp -----------------------------------------------------------------------------------

    def _do_signature_help(self, manual: bool) -> None:
        # NOTE: We take the beginning of the region to check the previous char (see last_char variable). This is for
        # when a language server inserts a snippet completion.
        pos = self._stored_region.a
        if pos == -1:
            return
        if not self.view.match_selector(pos, self.view.settings().get("auto_complete_selector") or ""):  # ???
            return
        session = self.session("signatureHelpProvider")
        if not session:
            return
        triggers = session.get_capability("signatureHelpProvider.triggerCharacters") or []
        if not manual and not triggers:
            return
        last_char = previous_non_whitespace_char(self.view, pos)
        if manual or last_char in triggers:
            self.purge_changes_async()
            params = text_document_position_params(self.view, pos)
            assert session
            session.send_request_async(
                Request.signatureHelp(params, self.view), lambda resp: self._on_signature_help(resp, pos))
        else:
            # TODO: Refactor popup usage to a common class. We now have sigHelp, completionDocs, hover, and diags
            # all using a popup. Most of these systems assume they have exclusive access to a popup, while in
            # reality there is only one popup per view.
            self.view.hide_popup()
            self._sighelp = None

    def _on_signature_help(self, response: Optional[Dict], point: int) -> None:
        # Runs on the worker thread; rendering happens on the UI thread.
        self._sighelp = create_signature_help(response)
        if self._sighelp:
            content = self._sighelp.build_popup_content(self._sighelp_renderer)

            def render_sighelp_on_main_thread() -> None:
                if self.view.is_popup_visible():
                    self._update_sighelp_popup(content)
                else:
                    self._show_sighelp_popup(content, point)
            sublime.set_timeout(render_sighelp_on_main_thread)

    def _show_sighelp_popup(self, content: str, point: int) -> None:
        # TODO: There are a bunch of places in the code where we assume we have exclusive access to a popup. The reality
        # is that there is really only one popup per view. Refactor everything that interacts with the popup to a common
        # class.
        flags = 0
        flags |= sublime.HIDE_ON_MOUSE_MOVE_AWAY
        flags |= sublime.COOPERATE_WITH_AUTO_COMPLETE
        mdpopups.show_popup(self.view,
                            content,
                            css=css().popups,
                            md=True,
                            flags=flags,
                            location=point,
                            wrapper_class=css().popups_classname,
                            max_width=800,
                            on_hide=self._on_sighelp_hide,
                            on_navigate=self._on_sighelp_navigate)
        self._visible = True

    def _update_sighelp_popup(self, content: str) -> None:
        mdpopups.update_popup(self.view,
                              content,
                              css=css().popups,
                              md=True,
                              wrapper_class=css().popups_classname)

    def _on_sighelp_hide(self) -> None:
        self._visible = False

    def _on_sighelp_navigate(self, href: str) -> None:
        webbrowser.open_new_tab(href)

    # --- textDocument/codeAction --------------------------------------------------------------------------------------

    def _do_code_actions(self) -> None:
        # Request code actions for the stored selection, widened to the
        # range of any intersecting diagnostics.
        stored_range = region_to_range(self.view, self._stored_region)
        diagnostics_by_config, extended_range = filter_by_range(view_diagnostics(self.view), stored_range)
        actions_manager.request_for_range_async(self.view, extended_range, diagnostics_by_config, self._on_code_actions)

    def _on_code_actions(self, responses: CodeActionsByConfigName) -> None:
        """Decorate the caret with a lightbulb icon or an inline
        annotation link, depending on the user preference."""
        action_count = sum(map(len, responses.values()))
        if action_count == 0:
            return
        regions = [sublime.Region(self._stored_region.b, self._stored_region.a)]
        scope = ""
        icon = ""
        flags = sublime.DRAW_NO_FILL | sublime.DRAW_NO_OUTLINE
        annotations = []
        annotation_color = ""
        if userprefs().show_code_actions == 'bulb':
            scope = 'markup.changed'
            icon = 'Packages/LSP/icons/lightbulb.png'
        else:  # 'annotation'
            suffix = 's' if action_count > 1 else ''
            code_actions_link = make_command_link('lsp_code_actions', '{} code action{}'.format(action_count, suffix))
            annotations = ["<div class=\"actions\">{}</div>".format(code_actions_link)]
            annotation_color = '#2196F3'
        self.view.add_regions(self.CODE_ACTIONS_KEY, regions, scope, icon, flags, annotations, annotation_color)

    def _clear_code_actions_annotation(self) -> None:
        self.view.erase_regions(self.CODE_ACTIONS_KEY)

    # --- textDocument/documentColor -----------------------------------------------------------------------------------

    def _do_color_boxes_async(self) -> None:
        session = self.session("colorProvider")
        if session:
            session.send_request_async(
                Request.documentColor(document_color_params(self.view), self.view), self._on_color_boxes)

    def _on_color_boxes(self, response: Any) -> None:
        # Replace all color phantoms with the fresh set from the server.
        color_infos = response if response else []
        self._color_phantoms.update([lsp_color_to_phantom(self.view, color_info) for color_info in color_infos])

    # --- textDocument/documentHighlight -------------------------------------------------------------------------------

    def _clear_highlight_regions(self) -> None:
        for kind in userprefs().document_highlight_scopes.keys():
            self.view.erase_regions("lsp_highlight_{}".format(kind))

    def _is_in_higlighted_region(self, point: int) -> bool:
        # (sic) "higlighted" — name kept for compatibility with callers.
        for kind in userprefs().document_highlight_scopes.keys():
            regions = self.view.get_regions("lsp_highlight_{}".format(kind))
            for r in regions:
                if r.contains(point):
                    return True
        return False

    def _do_highlights_async(self) -> None:
        if not len(self.view.sel()):
            return
        point = self.view.sel()[0].b
        session = self.session("documentHighlightProvider", point)
        if session:
            params = text_document_position_params(self.view, point)
            request = Request.documentHighlight(params, self.view)
            session.send_request_async(request, self._on_highlights)

    def _on_highlights(self, response: Optional[List]) -> None:
        """Group the server's highlights by kind and draw them as regions
        on the UI thread."""
        if not response:
            self._clear_highlight_regions()
            return
        kind2regions = {}  # type: Dict[str, List[sublime.Region]]
        # Pre-seed all four kinds (assumes the kind constants are 0..3,
        # matching the _kind2name keys).
        for kind in range(0, 4):
            kind2regions[_kind2name[kind]] = []
        for highlight in response:
            r = range_to_region(Range.from_lsp(highlight["range"]), self.view)
            kind = highlight.get("kind", DocumentHighlightKind.Unknown)
            if kind is not None:
                kind2regions[_kind2name[kind]].append(r)

        def render_highlights_on_main_thread() -> None:
            self._clear_highlight_regions()
            flags = userprefs().document_highlight_style_to_add_regions_flags()
            for kind_str, regions in kind2regions.items():
                if regions:
                    scope = userprefs().document_highlight_scopes.get(kind_str, None)
                    if scope:
                        self.view.add_regions("lsp_highlight_{}".format(kind_str), regions, scope=scope, flags=flags)
        sublime.set_timeout(render_highlights_on_main_thread)

    # --- textDocument/complete ----------------------------------------------------------------------------------------

    def _on_query_completions_async(self, promise: sublime.CompletionList, location: int) -> None:
        # Resolve the promise with an empty list when no session offers
        # completions, otherwise forward the request to the server.
        session = self.session('completionProvider', location)
        if not session:
            resolve(promise, [])
            return
        self.purge_changes_async()
        can_resolve_completion_items = bool(session.get_capability('completionProvider.resolveProvider'))
        config_name = session.config.name
        session.send_request_async(
            Request.complete(text_document_position_params(self.view, location), self.view),
            lambda res: self._on_complete_result(res, promise, can_resolve_completion_items, config_name),
            lambda res: self._on_complete_error(res, promise))

    def _on_complete_result(self, response: Optional[Union[dict, List]], completion_list: sublime.CompletionList,
                            can_resolve_completion_items: bool, session_name: str) -> None:
        """Convert the server's completion response (list or
        CompletionList) into Sublime completion items and resolve the
        pending promise."""
        response_items = []  # type: List[Dict]
        flags = 0
        prefs = userprefs()
        if prefs.inhibit_snippet_completions:
            flags |= sublime.INHIBIT_EXPLICIT_COMPLETIONS
        if prefs.inhibit_word_completions:
            flags |= sublime.INHIBIT_WORD_COMPLETIONS
        if isinstance(response, dict):
            response_items = response["items"] or []
            if response.get("isIncomplete", False):
                flags |= sublime.DYNAMIC_COMPLETIONS
        elif isinstance(response, list):
            response_items = response
        # LSP sort order: sortText falls back to the label.
        response_items = sorted(response_items, key=lambda item: item.get("sortText") or item["label"])
        LspResolveDocsCommand.completions = response_items
        items = [format_completion(response_item, index, can_resolve_completion_items, session_name)
                 for index, response_item in enumerate(response_items)]
        if items:
            flags |= sublime.INHIBIT_REORDER
        resolve(completion_list, items, flags)

    def _on_complete_error(self, error: dict, completion_list: sublime.CompletionList) -> None:
        # Resolve with nothing and surface the error in the status bar.
        resolve(completion_list, [])
        LspResolveDocsCommand.completions = []
        sublime.status_message('Completion error: ' + str(error.get('message')))

    # --- Public utility methods ---------------------------------------------------------------------------------------

    @property
    def manager(self) -> WindowManager:  # TODO: Return type is an Optional[WindowManager] !
        # Lazily looked up from the view's window.
        if not self._manager:
            window = self.view.window()
            if window:
                self._manager = windows.lookup(window)
        return self._manager  # type: ignore

    def sessions(self, capability: Optional[str]) -> Generator[Session, None, None]:
        # All sessions, or only those advertising the given capability.
        for sb in self.session_buffers_async():
            if capability is None or sb.has_capability(capability):
                yield sb.session

    def session(self, capability: str, point: Optional[int] = None) -> Optional[Session]:
        # Best matching session for the capability at the given point.
        return best_session(self.view, self.sessions(capability), point)

    def get_capability_async(self, session: Session, capability_path: str) -> Optional[Any]:
        for sv in self.session_views_async():
            if sv.session == session:
                return sv.get_capability(capability_path)
        return None

    def has_capability_async(self, session: Session, capability_path: str) -> bool:
        for sv in self.session_views_async():
            if sv.session == session:
                return sv.has_capability(capability_path)
        return False

    def purge_changes_async(self) -> None:
        # Flush pending didChange notifications before issuing a request.
        for sv in self.session_views_async():
            sv.purge_changes_async()

    def trigger_on_pre_save_async(self) -> None:
        for sv in self.session_views_async():
            sv.on_pre_save_async(self.view.file_name() or "")

    def diagnostics_intersecting_range_async(self, r: Range) -> Tuple[Dict[str, List[Diagnostic]], Range]:
        return filter_by_range(self.diagnostics_async(), r)

    def sum_total_errors_and_warnings_async(self) -> Tuple[int, int]:
        # Aggregate counts over all session buffers of this view.
        errors = 0
        warnings = 0
        for sb in self.session_buffers_async():
            errors += sb.total_errors
            warnings += sb.total_warnings
        return errors, warnings

    # --- Private utility methods --------------------------------------------------------------------------------------

    def _when_selection_remains_stable_async(self, f: Callable[[], None], r: sublime.Region, after_ms: int) -> None:
        # Run `f` after `after_ms` ms, but only if the stored region has
        # not changed in the meantime.
        debounced(f, after_ms, lambda: self._stored_region == r, async_thread=True)

    def _register_async(self) -> None:
        """Register this listener with the window manager once the view
        has a syntax and a buffer, and wake up any clone listeners."""
        syntax = self.view.syntax()
        if not syntax:
            debug("view", self.view.id(), "has no syntax")
            return
        self._language_id = basescope2languageid(syntax.scope)
        buf = self.view.buffer()
        if not buf:
            debug("not tracking bufferless view", self.view.id())
            return
        text_change_listener = TextChangeListener.ids_to_listeners.get(buf.buffer_id)
        if not text_change_listener:
            debug("couldn't find a text change listener for", self)
            return
        self._registered = True
        self.manager.register_listener_async(self)
        views = buf.views()
        if not isinstance(views, list):
            debug("skipping clone checks for", self)
            return
        self_id = self.view.id()
        for view in views:
            view_id = view.id()
            if view_id == self_id:
                continue
            listeners = list(sublime_plugin.view_event_listeners[view_id])
            for listener in listeners:
                if isinstance(listener, DocumentSyncListener):
                    debug("also registering", listener)
                    listener.on_load_async()

    def _update_stored_region_async(self) -> Tuple[bool, sublime.Region]:
        """
        Stores the current first selection in a variable.
        Note that due to this function (supposedly) running in the async worker thread of ST, it can happen that the
        view is already closed. In that case it returns Region(-1, -1). It also returns that value if there's no first
        selection.
        :returns:    A tuple with two elements. The second element is the new region, the first element signals whether
                     the previous region was different from the newly stored region.
        """
        current_region = self._get_current_region_async()
        if current_region is not None:
            if self._stored_region != current_region:
                self._stored_region = current_region
                return True, current_region
        return False, sublime.Region(-1, -1)

    def _get_current_region_async(self) -> Optional[sublime.Region]:
        # EAFP: the selection may be empty or the view already closed.
        try:
            return self.view.sel()[0]
        except IndexError:
            return None

    def _get_current_range_async(self) -> Optional[Range]:
        region = self._get_current_region_async()
        if region is None:
            return None
        return region_to_range(self.view, region)

    def _clear_session_views_async(self) -> None:
        # Drop the SessionViews on the async thread so their teardown
        # happens off the UI thread.
        session_views = self._session_views

        def clear_async() -> None:
            nonlocal session_views
            session_views.clear()
        sublime.set_timeout_async(clear_async)

    def __repr__(self) -> str:
        return "ViewListener({})".format(self.view.id())
| 45.195301 | 120 | 0.637371 | from .code_actions import actions_manager
from .code_actions import CodeActionsByConfigName
from .completion import LspResolveDocsCommand
from .completion import resolve
from .core.css import css
from .core.logging import debug
from .core.protocol import Diagnostic
from .core.protocol import DocumentHighlightKind
from .core.protocol import Range
from .core.protocol import Request
from .core.registry import best_session
from .core.registry import windows
from .core.sessions import Session
from .core.settings import userprefs
from .core.signature_help import create_signature_help
from .core.signature_help import SignatureHelp
from .core.types import basescope2languageid
from .core.types import debounced
from .core.typing import Any, Callable, Optional, Dict, Generator, Iterable, List, Tuple, Union
from .core.views import DIAGNOSTIC_SEVERITY
from .core.views import document_color_params
from .core.views import format_completion
from .core.views import FORMAT_MARKUP_CONTENT
from .core.views import FORMAT_STRING
from .core.views import lsp_color_to_phantom
from .core.views import make_command_link
from .core.views import minihtml
from .core.views import range_to_region
from .core.views import region_to_range
from .core.views import text_document_position_params
from .core.windows import AbstractViewListener
from .core.windows import WindowManager
from .diagnostics import filter_by_range
from .diagnostics import view_diagnostics
from .session_buffer import SessionBuffer
from .session_view import SessionView
from weakref import WeakSet
from weakref import WeakValueDictionary
import html
import mdpopups
import sublime
import sublime_plugin
import webbrowser
# Word-boundary classification mask for View.classify()-style calls.
# NOTE(review): 515 mirrors Sublime's internal word mask — confirm the
# exact flag composition against the sublime API constants.
SUBLIME_WORD_MASK = 515
# Human-readable names for LSP DocumentHighlightKind values, used to build
# per-kind highlight region keys.
_kind2name = {
    DocumentHighlightKind.Unknown: "unknown",
    DocumentHighlightKind.Text: "text",
    DocumentHighlightKind.Read: "read",
    DocumentHighlightKind.Write: "write"
}
def is_regular_view(v: sublime.View) -> bool:
    # Regular = not a transient quick-panel preview, backed by a file name
    # on disk, and not a special widget (console, output panel, etc.).
    return not v.sheet().is_transient() and bool(v.file_name()) and v.element() is None
def previous_non_whitespace_char(view: sublime.View, pt: int) -> str:
    """
    Return the character just before ``pt``; when that character is
    whitespace, jump back to the previous class boundary (``~0`` enables
    every class flag in find_by_class) and return the character before it.
    """
    prev = view.substr(pt - 1)
    if prev.isspace():
        return view.substr(view.find_by_class(pt, False, ~0) - 1)
    return prev
class ColorSchemeScopeRenderer:
    """
    Renders signature-help fragments as HTML, colored with the view's
    color-scheme styles for a few well-known scopes.
    """
    def __init__(self, view: sublime.View) -> None:
        # scope name -> style dict resolved from the view's color scheme.
        self._scope_styles = {}
        self._view = view
        for scope in ["entity.name.function", "variable.parameter", "punctuation"]:
            self._scope_styles[scope] = mdpopups.scope2style(view, scope)
    def function(self, content: str, escape: bool = True) -> str:
        """Wrap ``content`` in the function-name scope color."""
        return self._wrap_with_scope_style(content, "entity.name.function", escape=escape)
    def punctuation(self, content: str) -> str:
        """Wrap ``content`` in the punctuation scope color."""
        return self._wrap_with_scope_style(content, "punctuation")
    def parameter(self, content: str, emphasize: bool = False) -> str:
        """Wrap ``content`` in the parameter scope color, optionally emphasized."""
        return self._wrap_with_scope_style(content, "variable.parameter", emphasize)
    def markup(self, content: Union[str, Dict[str, str]]) -> str:
        """Convert a plain string or MarkupContent payload to minihtml."""
        return minihtml(self._view, content, allowed_formats=FORMAT_STRING | FORMAT_MARKUP_CONTENT)
    def _wrap_with_scope_style(self, content: str, scope: str, emphasize: bool = False, escape: bool = True) -> str:
        # HTML-escape by default; `emphasize` adds bold + underline.
        color = self._scope_styles[scope]["color"]
        additional_styles = 'font-weight: bold; text-decoration: underline;' if emphasize else ''
        content = html.escape(content, quote=False) if escape else content
        return '<span style="color: {};{}">{}</span>'.format(color, additional_styles, content)
class TextChangeListener(sublime_plugin.TextChangeListener):
    """Buffer-level listener that fans text changes out to the per-view
    DocumentSyncListeners registered for the same buffer."""

    # buffer_id -> listener; weak values so a listener dies with its buffer.
    ids_to_listeners = WeakValueDictionary()

    @classmethod
    def is_applicable(cls, buffer: sublime.Buffer) -> bool:
        # Only track buffers whose primary view is a regular, file-backed view.
        v = buffer.primary_view()
        return v is not None and is_regular_view(v)

    def __init__(self) -> None:
        super().__init__()
        # View listeners interested in this buffer; weak so closed views
        # are never kept alive by this set.
        self.view_listeners = WeakSet()

    def attach(self, buffer: sublime.Buffer) -> None:
        super().attach(buffer)
        self.ids_to_listeners[self.buffer.buffer_id] = self

    def detach(self) -> None:
        self.ids_to_listeners.pop(self.buffer.buffer_id, None)
        super().detach()

    def on_text_changed(self, changes: Iterable[sublime.TextChange]) -> None:
        view = self.buffer.primary_view()
        if not view:
            return
        # Snapshot the change count and listener set on this thread, then
        # notify on the async thread with that consistent snapshot.
        change_count = view.change_count()
        frozen_listeners = WeakSet(self.view_listeners)

        def notify() -> None:
            for listener in list(frozen_listeners):
                listener.on_text_changed_async(change_count, changes)

        sublime.set_timeout_async(notify)

    def on_reload_async(self) -> None:
        # list() guards against the weak set mutating during iteration.
        for listener in list(self.view_listeners):
            listener.on_reload_async()

    def on_revert_async(self) -> None:
        for listener in list(self.view_listeners):
            listener.on_revert_async()

    def __repr__(self) -> str:
        return "TextChangeListener({})".format(self.buffer.buffer_id)
class DocumentSyncListener(sublime_plugin.ViewEventListener, AbstractViewListener):
    """Per-view listener wiring a Sublime view to its LSP sessions."""

    CODE_ACTIONS_KEY = "lsp_code_action"  # region key for code-action annotations
    ACTIVE_DIAGNOSTIC = "lsp_active_diagnostic"  # status bar key
    # Debounce intervals (ms) applied before reacting to caret movement.
    code_actions_debounce_time = 800
    color_boxes_debounce_time = 500
    highlights_debounce_time = 300

    @classmethod
    def applies_to_primary_view_only(cls) -> bool:
        # Attach to every view of a buffer, clones included.
        return False
    def __init__(self, view: sublime.View) -> None:
        super().__init__(view)
        self._manager = None  # WindowManager, resolved lazily via the `manager` property
        self._session_views = {}  # config name -> SessionView
        self._stored_region = sublime.Region(-1, -1)  # last observed primary selection
        self._color_phantoms = sublime.PhantomSet(self.view, "lsp_color")
        self._sighelp = None  # active signature-help state, None when inactive
        self._sighelp_renderer = ColorSchemeScopeRenderer(self.view)
        self._language_id = ""
        self._registered = False  # True once _register_async has run

    def __del__(self) -> None:
        """Best-effort cleanup of everything this listener added to the view."""
        settings = self.view.settings()
        # Drop only the auto-complete triggers contributed by LSP servers.
        triggers = settings.get("auto_complete_triggers") or []
        triggers = [trigger for trigger in triggers if 'server' not in trigger]
        settings.set("auto_complete_triggers", triggers)
        self._stored_region = sublime.Region(-1, -1)
        self._color_phantoms.update([])
        self.view.erase_status(AbstractViewListener.TOTAL_ERRORS_AND_WARNINGS_STATUS_KEY)
        self._clear_highlight_regions()
        self._clear_session_views_async()
    def on_session_initialized_async(self, session: Session) -> None:
        """Create a SessionView for *session* and, on first attach, hook this
        view into its buffer's TextChangeListener."""
        assert not self.view.is_loading()
        added = False
        if session.config.name not in self._session_views:
            self._session_views[session.config.name] = SessionView(self, session)
            buf = self.view.buffer()
            if buf:
                text_change_listener = TextChangeListener.ids_to_listeners.get(buf.buffer_id)
                if text_change_listener:
                    text_change_listener.view_listeners.add(self)
            self.view.settings().set("lsp_active", True)
            added = True
        if added:
            # Kick off an initial documentColor request right away.
            if "colorProvider" not in userprefs().disabled_capabilities:
                self._do_color_boxes_async()

    def on_session_shutdown_async(self, session: Session) -> None:
        removed_session = self._session_views.pop(session.config.name, None)
        if removed_session:
            if not self._session_views:
                # Last session gone: deactivate and allow re-registration later.
                self.view.settings().erase("lsp_active")
                self._registered = False
        else:
            # No SessionView was ever created (shutdown before initialize
            # finished); still clear any view status the config set.
            session.config.erase_view_status(self.view)
def diagnostics_panel_contribution_async(self) -> List[str]:
result = []
for severity in range(1, len(DIAGNOSTIC_SEVERITY) + 1):
for sb in self.session_buffers_async():
data = sb.data_per_severity.get(severity)
if data:
result.extend(data.panel_contribution)
return result
    def diagnostics_async(self) -> Generator[Tuple[SessionBuffer, List[Diagnostic]], None, None]:
        """Yield (session buffer, its diagnostics) pairs for this view."""
        for sb in self.session_buffers_async():
            yield sb, sb.diagnostics

    def on_diagnostics_updated_async(self) -> None:
        # Diagnostics changed: refresh code-action annotations and the
        # status-bar message for the current caret position.
        self._clear_code_actions_annotation()
        self._do_code_actions()
        self._update_diagnostic_in_status_bar_async()
    def _update_diagnostic_in_status_bar_async(self) -> None:
        """Show the first diagnostic intersecting the caret in the status bar,
        or clear the status entry when there is none."""
        if userprefs().show_diagnostics_in_view_status:
            r = self._get_current_range_async()
            if r is not None:
                diags_by_config_name, _ = self.diagnostics_intersecting_range_async(r)
                if diags_by_config_name:
                    for diags in diags_by_config_name.values():
                        diag = next(iter(diags), None)
                        if diag:
                            self.view.set_status(self.ACTIVE_DIAGNOSTIC, diag.message)
                            return
            # Nothing under the caret: clear any stale status text.
            self.view.erase_status(self.ACTIVE_DIAGNOSTIC)
def session_views_async(self) -> Generator[SessionView, None, None]:
yield from self._session_views.values()
def session_buffers_async(self) -> Generator[SessionBuffer, None, None]:
for sv in self.session_views_async():
yield sv.session_buffer
    def on_text_changed_async(self, change_count: int, changes: Iterable[sublime.TextChange]) -> None:
        """Forward buffer changes to session views, then refresh highlights,
        color boxes and signature help once the selection settles."""
        different, current_region = self._update_stored_region_async()
        # Only the primary view forwards didChange -- clones share the buffer.
        if self.view.is_primary():
            for sv in self.session_views_async():
                sv.on_text_changed_async(change_count, changes)
        if not different:
            return
        if "documentHighlight" not in userprefs().disabled_capabilities:
            self._clear_highlight_regions()
            self._when_selection_remains_stable_async(self._do_highlights_async, current_region,
                                                      after_ms=self.highlights_debounce_time)
        if "colorProvider" not in userprefs().disabled_capabilities:
            self._when_selection_remains_stable_async(self._do_color_boxes_async, current_region,
                                                      after_ms=self.color_boxes_debounce_time)
        if "signatureHelp" not in userprefs().disabled_capabilities:
            self._do_signature_help(manual=False)
def on_revert_async(self) -> None:
if self.view.is_primary():
for sv in self.session_views_async():
sv.on_revert_async()
def on_reload_async(self) -> None:
if self.view.is_primary():
for sv in self.session_views_async():
sv.on_reload_async()
    def get_language_id(self) -> str:
        """Return the LSP language id derived from the view's syntax."""
        return self._language_id

    def on_load_async(self) -> None:
        # A file that was still loading has finished: register now.
        if not self._registered and is_regular_view(self.view):
            self._register_async()

    def on_activated_async(self) -> None:
        # Covers views that became visible already loaded (clones, restores).
        if not self._registered and not self.view.is_loading() and is_regular_view(self.view):
            self._register_async()
    def on_selection_modified_async(self) -> None:
        """Debounced reactions to caret movement: document highlights,
        code actions and the status-bar diagnostic."""
        different, current_region = self._update_stored_region_async()
        if different:
            if "documentHighlight" not in userprefs().disabled_capabilities:
                # Keep existing highlights while the caret stays inside one.
                if not self._is_in_higlighted_region(current_region.b):
                    self._clear_highlight_regions()
                self._when_selection_remains_stable_async(self._do_highlights_async, current_region,
                                                          after_ms=self.highlights_debounce_time)
            self._clear_code_actions_annotation()
            self._when_selection_remains_stable_async(self._do_code_actions, current_region,
                                                      after_ms=self.code_actions_debounce_time)
            self._update_diagnostic_in_status_bar_async()

    def on_post_save_async(self) -> None:
        # Only the primary view notifies servers about the save.
        if self.view.is_primary():
            for sv in self.session_views_async():
                sv.on_post_save_async()

    def on_close(self) -> None:
        self._clear_session_views_async()
    def on_query_context(self, key: str, operator: int, operand: Any, match_all: bool) -> bool:
        """Answer key-binding context queries for LSP-specific contexts."""
        if key == "lsp.session_with_capability" and operator == sublime.OP_EQUAL and isinstance(operand, str):
            # operand is a "|"-separated list of capabilities; any match wins.
            capabilities = [s.strip() for s in operand.split("|")]
            for capability in capabilities:
                if any(self.sessions(capability)):
                    return True
            return False
        elif key in ("lsp.sessions", "setting.lsp_active"):
            return bool(self._session_views)
        elif key == "lsp.signature_help":
            if not self.view.is_popup_visible():
                if operand == 0:
                    # No popup yet: trigger signature help manually.
                    sublime.set_timeout_async(lambda: self._do_signature_help(manual=True))
                    return True
            elif self._sighelp and self._sighelp.has_multiple_signatures() and not self.view.is_auto_complete_visible():
                # Popup visible with several signatures: cycle via operand (+/-1).
                self._sighelp.select_signature(operand)
                self._update_sighelp_popup(self._sighelp.build_popup_content(self._sighelp_renderer))
                return True
        return False
def on_hover(self, point: int, hover_zone: int) -> None:
if (hover_zone != sublime.HOVER_TEXT
or self.view.is_popup_visible()
or "hover" in userprefs().disabled_capabilities):
return
self.view.run_command("lsp_hover", {"point": point})
    def on_post_text_command(self, command_name: str, args: Optional[Dict[str, Any]]) -> None:
        """Re-trigger signature help while tabbing between snippet fields and
        hide popups invalidated by the executed command."""
        if command_name in ("next_field", "prev_field") and args is None:
            if "signatureHelp" not in userprefs().disabled_capabilities:
                sublime.set_timeout_async(lambda: self._do_signature_help(manual=True))
        if not self.view.is_popup_visible():
            return
        if command_name in ["hide_auto_complete", "move", "commit_completion"] or 'delete' in command_name:
            # The popup's anchor no longer matches the buffer contents.
            self.view.hide_popup()

    def on_query_completions(self, prefix: str, locations: List[int]) -> Optional[sublime.CompletionList]:
        """Return a deferred CompletionList resolved on the async thread."""
        if "completion" in userprefs().disabled_capabilities:
            return None
        promise = sublime.CompletionList()
        sublime.set_timeout_async(lambda: self._on_query_completions_async(promise, locations[0]))
        return promise
def _do_signature_help(self, manual: bool) -> None:
pos = self._stored_region.a
if pos == -1:
return
if not self.view.match_selector(pos, self.view.settings().get("auto_complete_selector") or ""):
return
session = self.session("signatureHelpProvider")
if not session:
return
triggers = session.get_capability("signatureHelpProvider.triggerCharacters") or []
if not manual and not triggers:
return
last_char = previous_non_whitespace_char(self.view, pos)
if manual or last_char in triggers:
self.purge_changes_async()
params = text_document_position_params(self.view, pos)
assert session
session.send_request_async(
Request.signatureHelp(params, self.view), lambda resp: self._on_signature_help(resp, pos))
else:
self.view.hide_popup()
self._sighelp = None
    def _on_signature_help(self, response: Optional[Dict], point: int) -> None:
        """signatureHelp response handler: build the popup content off-thread,
        then show or update the popup on the UI thread."""
        self._sighelp = create_signature_help(response)
        if self._sighelp:
            content = self._sighelp.build_popup_content(self._sighelp_renderer)

            def render_sighelp_on_main_thread() -> None:
                if self.view.is_popup_visible():
                    self._update_sighelp_popup(content)
                else:
                    self._show_sighelp_popup(content, point)

            sublime.set_timeout(render_sighelp_on_main_thread)

    def _show_sighelp_popup(self, content: str, point: int) -> None:
        flags = 0
        flags |= sublime.HIDE_ON_MOUSE_MOVE_AWAY
        flags |= sublime.COOPERATE_WITH_AUTO_COMPLETE
        mdpopups.show_popup(self.view,
                            content,
                            css=css().popups,
                            md=True,
                            flags=flags,
                            location=point,
                            wrapper_class=css().popups_classname,
                            max_width=800,
                            on_hide=self._on_sighelp_hide,
                            on_navigate=self._on_sighelp_navigate)
        self._visible = True

    def _update_sighelp_popup(self, content: str) -> None:
        mdpopups.update_popup(self.view,
                              content,
                              css=css().popups,
                              md=True,
                              wrapper_class=css().popups_classname)

    def _on_sighelp_hide(self) -> None:
        self._visible = False

    def _on_sighelp_navigate(self, href: str) -> None:
        # Links in signature docs open in the system browser.
        webbrowser.open_new_tab(href)
    def _do_code_actions(self) -> None:
        """Request code actions for the stored selection together with the
        diagnostics overlapping it."""
        stored_range = region_to_range(self.view, self._stored_region)
        diagnostics_by_config, extended_range = filter_by_range(view_diagnostics(self.view), stored_range)
        actions_manager.request_for_range_async(self.view, extended_range, diagnostics_by_config, self._on_code_actions)

    def _on_code_actions(self, responses: CodeActionsByConfigName) -> None:
        """Annotate the view when code actions are available, either as a
        lightbulb gutter icon or as a clickable annotation."""
        action_count = sum(map(len, responses.values()))
        if action_count == 0:
            return
        regions = [sublime.Region(self._stored_region.b, self._stored_region.a)]
        scope = ""
        icon = ""
        flags = sublime.DRAW_NO_FILL | sublime.DRAW_NO_OUTLINE
        annotations = []
        annotation_color = ""
        if userprefs().show_code_actions == 'bulb':
            scope = 'markup.changed'
            icon = 'Packages/LSP/icons/lightbulb.png'
        else:
            # Annotation style: a "N code action(s)" command link at the region.
            suffix = 's' if action_count > 1 else ''
            code_actions_link = make_command_link('lsp_code_actions', '{} code action{}'.format(action_count, suffix))
            annotations = ["<div class=\"actions\">{}</div>".format(code_actions_link)]
            annotation_color = '#2196F3'
        self.view.add_regions(self.CODE_ACTIONS_KEY, regions, scope, icon, flags, annotations, annotation_color)

    def _clear_code_actions_annotation(self) -> None:
        self.view.erase_regions(self.CODE_ACTIONS_KEY)
def _do_color_boxes_async(self) -> None:
session = self.session("colorProvider")
if session:
session.send_request_async(
Request.documentColor(document_color_params(self.view), self.view), self._on_color_boxes)
def _on_color_boxes(self, response: Any) -> None:
color_infos = response if response else []
self._color_phantoms.update([lsp_color_to_phantom(self.view, color_info) for color_info in color_infos])
def _clear_highlight_regions(self) -> None:
for kind in userprefs().document_highlight_scopes.keys():
self.view.erase_regions("lsp_highlight_{}".format(kind))
def _is_in_higlighted_region(self, point: int) -> bool:
for kind in userprefs().document_highlight_scopes.keys():
regions = self.view.get_regions("lsp_highlight_{}".format(kind))
for r in regions:
if r.contains(point):
return True
return False
    def _do_highlights_async(self) -> None:
        """Request document highlights at the first caret, if any session
        supports documentHighlightProvider there."""
        if not len(self.view.sel()):
            return
        point = self.view.sel()[0].b
        session = self.session("documentHighlightProvider", point)
        if session:
            params = text_document_position_params(self.view, point)
            request = Request.documentHighlight(params, self.view)
            session.send_request_async(request, self._on_highlights)
def _on_highlights(self, response: Optional[List]) -> None:
if not response:
self._clear_highlight_regions()
return
kind2regions = {}
for kind in range(0, 4):
kind2regions[_kind2name[kind]] = []
for highlight in response:
r = range_to_region(Range.from_lsp(highlight["range"]), self.view)
kind = highlight.get("kind", DocumentHighlightKind.Unknown)
if kind is not None:
kind2regions[_kind2name[kind]].append(r)
def render_highlights_on_main_thread() -> None:
self._clear_highlight_regions()
flags = userprefs().document_highlight_style_to_add_regions_flags()
for kind_str, regions in kind2regions.items():
if regions:
scope = userprefs().document_highlight_scopes.get(kind_str, None)
if scope:
self.view.add_regions("lsp_highlight_{}".format(kind_str), regions, scope=scope, flags=flags)
sublime.set_timeout(render_highlights_on_main_thread)
    def _on_query_completions_async(self, promise: sublime.CompletionList, location: int) -> None:
        """Send textDocument/completion and resolve *promise* with the result."""
        session = self.session('completionProvider', location)
        if not session:
            resolve(promise, [])
            return
        # Flush pending didChange notifications before querying.
        self.purge_changes_async()
        can_resolve_completion_items = bool(session.get_capability('completionProvider.resolveProvider'))
        config_name = session.config.name
        session.send_request_async(
            Request.complete(text_document_position_params(self.view, location), self.view),
            lambda res: self._on_complete_result(res, promise, can_resolve_completion_items, config_name),
            lambda res: self._on_complete_error(res, promise))
    def _on_complete_result(self, response: Optional[Union[dict, List]], completion_list: sublime.CompletionList,
                            can_resolve_completion_items: bool, session_name: str) -> None:
        """Convert an LSP completion response (CompletionItem list or a
        CompletionList dict) into Sublime completions."""
        response_items = []
        flags = 0
        prefs = userprefs()
        if prefs.inhibit_snippet_completions:
            flags |= sublime.INHIBIT_EXPLICIT_COMPLETIONS
        if prefs.inhibit_word_completions:
            flags |= sublime.INHIBIT_WORD_COMPLETIONS
        if isinstance(response, dict):
            response_items = response["items"] or []
            if response.get("isIncomplete", False):
                # Server wants to be re-queried as the user keeps typing.
                flags |= sublime.DYNAMIC_COMPLETIONS
        elif isinstance(response, list):
            response_items = response
        # Order by the server-provided sortText, falling back to the label.
        response_items = sorted(response_items, key=lambda item: item.get("sortText") or item["label"])
        LspResolveDocsCommand.completions = response_items
        items = [format_completion(response_item, index, can_resolve_completion_items, session_name)
                 for index, response_item in enumerate(response_items)]
        if items:
            # Keep the order established above instead of Sublime's reordering.
            flags |= sublime.INHIBIT_REORDER
        resolve(completion_list, items, flags)

    def _on_complete_error(self, error: dict, completion_list: sublime.CompletionList) -> None:
        # Resolve with nothing so the completion widget does not hang.
        resolve(completion_list, [])
        LspResolveDocsCommand.completions = []
        sublime.status_message('Completion error: ' + str(error.get('message')))
    @property
    def manager(self) -> WindowManager:
        """The WindowManager for this view's window, resolved lazily."""
        if not self._manager:
            window = self.view.window()
            if window:
                self._manager = windows.lookup(window)
        return self._manager
def sessions(self, capability: Optional[str]) -> Generator[Session, None, None]:
for sb in self.session_buffers_async():
if capability is None or sb.has_capability(capability):
yield sb.session
def session(self, capability: str, point: Optional[int] = None) -> Optional[Session]:
return best_session(self.view, self.sessions(capability), point)
def get_capability_async(self, session: Session, capability_path: str) -> Optional[Any]:
for sv in self.session_views_async():
if sv.session == session:
return sv.get_capability(capability_path)
return None
def has_capability_async(self, session: Session, capability_path: str) -> bool:
for sv in self.session_views_async():
if sv.session == session:
return sv.has_capability(capability_path)
return False
    def purge_changes_async(self) -> None:
        """Flush pending didChange notifications on every session view."""
        for sv in self.session_views_async():
            sv.purge_changes_async()

    def trigger_on_pre_save_async(self) -> None:
        for sv in self.session_views_async():
            sv.on_pre_save_async(self.view.file_name() or "")

    def diagnostics_intersecting_range_async(self, r: Range) -> Tuple[Dict[str, List[Diagnostic]], Range]:
        """Return diagnostics overlapping *r* grouped by config name, plus the
        extended range covering them."""
        return filter_by_range(self.diagnostics_async(), r)
def sum_total_errors_and_warnings_async(self) -> Tuple[int, int]:
errors = 0
warnings = 0
for sb in self.session_buffers_async():
errors += sb.total_errors
warnings += sb.total_warnings
return errors, warnings
    def _when_selection_remains_stable_async(self, f: Callable[[], None], r: sublime.Region, after_ms: int) -> None:
        # Run *f* after a delay, but only if the stored selection still
        # equals *r* by then (i.e. the caret has not moved meanwhile).
        debounced(f, after_ms, lambda: self._stored_region == r, async_thread=True)
    def _register_async(self) -> None:
        """Register this view with the window manager once it has a syntax,
        a buffer and a text change listener; then wake up clone views."""
        syntax = self.view.syntax()
        if not syntax:
            debug("view", self.view.id(), "has no syntax")
            return
        self._language_id = basescope2languageid(syntax.scope)
        buf = self.view.buffer()
        if not buf:
            debug("not tracking bufferless view", self.view.id())
            return
        text_change_listener = TextChangeListener.ids_to_listeners.get(buf.buffer_id)
        if not text_change_listener:
            debug("couldn't find a text change listener for", self)
            return
        self._registered = True
        self.manager.register_listener_async(self)
        views = buf.views()
        if not isinstance(views, list):
            debug("skipping clone checks for", self)
            return
        # Register the DocumentSyncListeners of any clone views of this buffer.
        self_id = self.view.id()
        for view in views:
            view_id = view.id()
            if view_id == self_id:
                continue
            listeners = list(sublime_plugin.view_event_listeners[view_id])
            for listener in listeners:
                if isinstance(listener, DocumentSyncListener):
                    debug("also registering", listener)
                    listener.on_load_async()
    def _update_stored_region_async(self) -> Tuple[bool, sublime.Region]:
        """Refresh the cached primary selection.

        Returns (True, region) when the selection changed, otherwise
        (False, Region(-1, -1)).
        """
        current_region = self._get_current_region_async()
        if current_region is not None:
            if self._stored_region != current_region:
                self._stored_region = current_region
                return True, current_region
        return False, sublime.Region(-1, -1)

    def _get_current_region_async(self) -> Optional[sublime.Region]:
        # The selection can be empty, e.g. while the view is closing.
        try:
            return self.view.sel()[0]
        except IndexError:
            return None

    def _get_current_range_async(self) -> Optional[Range]:
        region = self._get_current_region_async()
        if region is None:
            return None
        return region_to_range(self.view, region)
    def _clear_session_views_async(self) -> None:
        # Defer clearing to the async thread so SessionView teardown does not
        # run on the UI thread.
        session_views = self._session_views

        def clear_async() -> None:
            nonlocal session_views
            session_views.clear()

        sublime.set_timeout_async(clear_async)

    def __repr__(self) -> str:
        return "ViewListener({})".format(self.view.id())
| true | true |
f7f58b922259909efa6f4df3c512c9c669ab78e9 | 2,442 | py | Python | pybrain/structure/networks/rbm.py | sveilleux1/pybrain | 1e1de73142c290edb84e29ca7850835f3e7bca8b | [
"BSD-3-Clause"
] | 2,208 | 2015-01-02T02:14:41.000Z | 2022-03-31T04:45:46.000Z | pybrain/structure/networks/rbm.py | sveilleux1/pybrain | 1e1de73142c290edb84e29ca7850835f3e7bca8b | [
"BSD-3-Clause"
] | 91 | 2015-01-08T16:42:16.000Z | 2021-12-11T19:16:35.000Z | pybrain/structure/networks/rbm.py | sveilleux1/pybrain | 1e1de73142c290edb84e29ca7850835f3e7bca8b | [
"BSD-3-Clause"
] | 786 | 2015-01-02T15:18:20.000Z | 2022-02-23T23:42:40.000Z | # -*- coding: utf-8 -*-
__author__ = 'Justin S Bayer, bayer.justin@googlemail.com'
__version__ = '$Id$'
from pybrain.structure import (LinearLayer, SigmoidLayer, FullConnection,
BiasUnit, FeedForwardNetwork)
class Rbm(object):
    """Restricted Boltzmann machine backed by a two-layer FeedForwardNetwork.

    Holds the network plus shortcuts to its visible layer, hidden layer,
    bias unit and the two connections feeding the hidden layer.
    """

    @property
    def params(self):
        """Weights of the visible-to-hidden connection."""
        # NOTE: a stray dead `pass` statement after this return was removed.
        return self.con.params

    @property
    def biasParams(self):
        """Weights of the bias-to-hidden connection."""
        return self.biascon.params

    @property
    def visibleDim(self):
        return self.net.indim

    @property
    def hiddenDim(self):
        return self.net.outdim

    def __init__(self, net):
        """Wrap *net*, which must contain modules named 'visible' and 'hidden'
        plus exactly one BiasUnit connected to the hidden layer."""
        self.net = net
        self.net.sortModules()
        self.bias = [i for i in self.net.modules if isinstance(i, BiasUnit)][0]
        self.biascon = self.net.connections[self.bias][0]
        self.visible = net['visible']
        self.hidden = net['hidden']
        self.con = self.net.connections[self.visible][0]

    @classmethod
    def fromDims(cls, visibledim, hiddendim, params=None, biasParams=None):
        """Return a restricted Boltzmann machine of the given dimensions with the
        given distributions."""
        net = FeedForwardNetwork()
        bias = BiasUnit('bias')
        visible = LinearLayer(visibledim, 'visible')
        hidden = SigmoidLayer(hiddendim, 'hidden')
        con1 = FullConnection(visible, hidden)
        con2 = FullConnection(bias, hidden)
        if params is not None:
            con1.params[:] = params
        if biasParams is not None:
            con2.params[:] = biasParams
        net.addInputModule(visible)
        net.addModule(bias)
        net.addOutputModule(hidden)
        net.addConnection(con1)
        net.addConnection(con2)
        net.sortModules()
        return cls(net)

    @classmethod
    def fromModules(cls, visible, hidden, bias, con, biascon):
        """Build an Rbm from already-constructed modules and connections."""
        net = FeedForwardNetwork()
        net.addInputModule(visible)
        net.addModule(bias)
        net.addOutputModule(hidden)
        net.addConnection(con)
        net.addConnection(biascon)
        net.sortModules()
        return cls(net)

    def invert(self):
        """Return the inverse rbm."""
        # TODO: check if shape is correct
        # NOTE(review): biasParams are not carried over to the inverse -- and
        # the weight matrix is reused untransposed; confirm intended behavior.
        return self.__class__.fromDims(self.hiddenDim, self.visibleDim,
                                       params=self.params)

    def activate(self, inpt):
        """Propagate *inpt* through the network and return the hidden activation."""
        return self.net.activate(inpt)
| 29.780488 | 81 | 0.613841 |
__author__ = 'Justin S Bayer, bayer.justin@googlemail.com'
__version__ = '$Id$'
from pybrain.structure import (LinearLayer, SigmoidLayer, FullConnection,
BiasUnit, FeedForwardNetwork)
class Rbm(object):
@property
def params(self):
return self.con.params
pass
@property
def biasParams(self):
return self.biascon.params
@property
def visibleDim(self):
return self.net.indim
@property
def hiddenDim(self):
return self.net.outdim
def __init__(self, net):
self.net = net
self.net.sortModules()
self.bias = [i for i in self.net.modules if isinstance(i, BiasUnit)][0]
self.biascon = self.net.connections[self.bias][0]
self.visible = net['visible']
self.hidden = net['hidden']
self.con = self.net.connections[self.visible][0]
@classmethod
def fromDims(cls, visibledim, hiddendim, params=None, biasParams=None):
net = FeedForwardNetwork()
bias = BiasUnit('bias')
visible = LinearLayer(visibledim, 'visible')
hidden = SigmoidLayer(hiddendim, 'hidden')
con1 = FullConnection(visible, hidden)
con2 = FullConnection(bias, hidden)
if params is not None:
con1.params[:] = params
if biasParams is not None:
con2.params[:] = biasParams
net.addInputModule(visible)
net.addModule(bias)
net.addOutputModule(hidden)
net.addConnection(con1)
net.addConnection(con2)
net.sortModules()
return cls(net)
@classmethod
def fromModules(cls, visible, hidden, bias, con, biascon):
net = FeedForwardNetwork()
net.addInputModule(visible)
net.addModule(bias)
net.addOutputModule(hidden)
net.addConnection(con)
net.addConnection(biascon)
net.sortModules()
return cls(net)
def invert(self):
return self.__class__.fromDims(self.hiddenDim, self.visibleDim,
params=self.params)
def activate(self, inpt):
return self.net.activate(inpt)
| true | true |
f7f58bc625e88884f49f3c299371408da0cfb542 | 5,174 | py | Python | upydev/commandlib.py | Carglglz/upydev | 6f852e145be63705c9c8306931ffc2adac5cec12 | [
"MIT"
] | 36 | 2019-08-14T15:59:44.000Z | 2022-01-17T17:33:47.000Z | upydev/commandlib.py | Carglglz/upydev | 6f852e145be63705c9c8306931ffc2adac5cec12 | [
"MIT"
] | 21 | 2019-08-15T06:11:38.000Z | 2021-01-30T22:05:24.000Z | upydev/commandlib.py | Carglglz/upydev | 6f852e145be63705c9c8306931ffc2adac5cec12 | [
"MIT"
] | 3 | 2020-05-01T22:32:34.000Z | 2021-01-30T20:16:19.000Z | UID = "from machine import unique_id;\
from ubinascii import hexlify;hexlify(unique_id());gc.collect()"
UPYSH = "from upysh import *;gc.collect()"
HELP = "help();gc.collect()"
MODULES = "help('modules');gc.collect()"
MEM = "from micropython import mem_info;mem_info();gc.collect()"
OS_STAT = "import os;os.stat('{}');gc.collect()"
FILE_STAT = "import os;[(filename,os.stat('{0}'+str(filename))[6]) for filename in os.listdir('{0}')]"
CHECK_DIR = "import os;'{}' in os.listdir('/');gc.collect()"
STAT_FS = "import os;os.statvfs('{}');gc.collect()"
IFCONFIG = "network.WLAN(network.STA_IF).ifconfig()"
SSID = "network.WLAN(network.STA_IF).config('essid')"
BSSID = "network.WLAN(network.STA_IF).config('mac')"
RSSI = "network.WLAN(network.STA_IF).status('rssi')"
NET_INFO = "import network;[{},{},{},{}];gc.collect()".format(IFCONFIG,
SSID,
BSSID,
RSSI)
NET_SCAN = "import network;network.WLAN(network.STA_IF).scan();gc.collect()"
NET_STAT_ON = "import network;network.WLAN(network.STA_IF).active(True);gc.collect()"
NET_STAT_OFF = "import network;network.WLAN(network.STA_IF).active(False);gc.collect()"
NET_STAT_CONN = "import network;network.WLAN(network.STA_IF).connect('{}', '{}');gc.collect()"
NET_STAT = "import network;network.WLAN(network.STA_IF).active();gc.collect()"
AP_ON = "import network;network.WLAN(network.AP_IF).active(True);gc.collect()"
AP_OFF = "import network;network.WLAN(network.AP_IF).active(False);gc.collect()"
AP_STATE = "network.WLAN(network.AP_IF).active()"
AP_SSID = "network.WLAN(network.AP_IF).config('essid')"
AP_CHANNEL = "network.WLAN(network.AP_IF).config('channel')"
AP_AUTHMODE = "network.WLAN(network.AP_IF).config('authmode')"
AP_IFCONFIG = "network.WLAN(network.AP_IF).ifconfig()"
APSTAT = "import network;[{},{},{},{},{}];gc.collect()".format(
AP_STATE, AP_SSID, AP_CHANNEL, AP_AUTHMODE, AP_IFCONFIG)
AP_CONFIG = "network.WLAN(network.AP_IF).config(essid='{}',authmode=network.AUTH_WPA_WPA2_PSK, password='{}')"
AP_SCAN = "import network;network.WLAN(network.AP_IF).status('stations');gc.collect()"
I2C_CONFIG = "from machine import I2C,Pin;i2c = I2C(scl=Pin({}),sda=Pin({}));gc.collect()"
I2C_CONFIG_PYB = "from machine import I2C;i2c = I2C(scl='{}', sda='{}');gc.collect()"
I2C_SCAN = "i2c.scan();gc.collect()"
SPI_CONFIG = "from machine import SPI,Pin;spi = SPI(1, baudrate=10000000, sck=Pin({}), mosi=Pin({}), miso=Pin({})); cs = Pin({}, Pin.OUT)"
SET_RTC_LT = "from machine import RTC;rtc = RTC();rtc.datetime(({}, {}, {}, {}, {}, {}, {}+1, {}));gc.collect()"
RTC = "from machine import RTC;rtc = RTC();"
NTPTIME = "from ntptime import settime;settime();"
RTC_CONFIG = "(year, month, mday, week_of_year, hour, minute, second, milisecond) = rtc.datetime();"
UTC_ZONE = "rtc.datetime((year, month, mday, week_of_year,hour+{}, minute, second, milisecond));gc.collect()"
SET_RTC_NT = RTC + NTPTIME + RTC_CONFIG + UTC_ZONE
DATETIME = "import time; tnow = time.localtime();tnow[:6];gc.collect()"
WLAN_INIT = "from wifiutils import WIFI_UTIL; u_wlan = WIFI_UTIL(silent=False);gc.collect()"
WLAN_CONFIG = "u_wlan.sta_config('{}', '{}');gc.collect()"
WLAN_AP_CONFIG = "u_wlan.ap_config('{}', '{}');gc.collect()"
WLAN_CONN = "u_wlan.STA_conn();gc.collect()"
WLAN_AP_CONN = "u_wlan.AP_conn();gc.collect()"
SD_ENABLE_CONF = "from machine import Pin;sd_enable=Pin({}, Pin.OUT);"
SD_ENABLE_TOGGLE = "sd_enable.value(not sd_enable.value());sd_enable.value();gc.collect()"
SD_ENABLE = SD_ENABLE_CONF + SD_ENABLE_TOGGLE
SD_SDINIT = "import sdcard,os;sd = sdcard.SDCard(spi, cs);time.sleep_ms(1000);"
SD_MOUNT = "os.mount(sd, '/sd');'sd' in os.listdir('/');gc.collect()"
SD_INIT = SD_SDINIT + SD_MOUNT
SD_DEINIT = "import os;os.umount('/sd');sd_enable.off();sd_enable.value();gc.collect()"
SD_AUTO = "import SD_AM;gc.collect()"
CHECK_UPYSH2 = "import os;'upysh2.py' in os.listdir('lib');gc.collect()"
CMDDICT_ = {'UID': UID, 'UPYSH': UPYSH, 'HELP': HELP, 'MOD': MODULES,
'MEM': MEM, 'OS_STAT': OS_STAT, 'FILE_STAT': FILE_STAT,
'CHECK_DIR': CHECK_DIR, 'STAT_FS': STAT_FS,
'NET_INFO': NET_INFO, 'NET_SCAN': NET_SCAN,
'NET_STAT_ON': NET_STAT_ON, 'NET_STAT_OFF': NET_STAT_OFF,
'NET_STAT_CONN': NET_STAT_CONN, 'NET_STAT': NET_STAT,
'AP_ON': AP_ON, 'AP_OFF': AP_OFF, 'APSTAT': APSTAT,
'AP_CONFIG': AP_CONFIG, 'AP_SCAN': AP_SCAN,
'I2C_CONFIG': I2C_CONFIG, 'I2C_SCAN': I2C_SCAN,
'SPI_CONFIG': SPI_CONFIG, 'SET_RTC_LT': SET_RTC_LT,
'SET_RTC_NT': SET_RTC_NT, 'DATETIME': DATETIME,
'I2C_CONFIG_PYB': I2C_CONFIG_PYB, 'WLAN_INIT': WLAN_INIT,
'WLAN_CONFIG': WLAN_CONFIG, 'WLAN_AP_CONFIG': WLAN_AP_CONFIG,
'WLAN_CONN': WLAN_CONN, 'WLAN_AP_CONN': WLAN_AP_CONN,
'SD_ENABLE': SD_ENABLE, 'SD_INIT': SD_INIT,
'SD_DEINIT': SD_DEINIT, 'SD_AUTO': SD_AUTO, 'CHECK_UPYSH2': CHECK_UPYSH2}
_CMDDICT_ = {k: 'import gc;' + v for k, v in CMDDICT_.items()}
| 40.740157 | 138 | 0.65056 | UID = "from machine import unique_id;\
from ubinascii import hexlify;hexlify(unique_id());gc.collect()"
UPYSH = "from upysh import *;gc.collect()"
HELP = "help();gc.collect()"
MODULES = "help('modules');gc.collect()"
MEM = "from micropython import mem_info;mem_info();gc.collect()"
OS_STAT = "import os;os.stat('{}');gc.collect()"
FILE_STAT = "import os;[(filename,os.stat('{0}'+str(filename))[6]) for filename in os.listdir('{0}')]"
CHECK_DIR = "import os;'{}' in os.listdir('/');gc.collect()"
STAT_FS = "import os;os.statvfs('{}');gc.collect()"
IFCONFIG = "network.WLAN(network.STA_IF).ifconfig()"
SSID = "network.WLAN(network.STA_IF).config('essid')"
BSSID = "network.WLAN(network.STA_IF).config('mac')"
RSSI = "network.WLAN(network.STA_IF).status('rssi')"
NET_INFO = "import network;[{},{},{},{}];gc.collect()".format(IFCONFIG,
SSID,
BSSID,
RSSI)
NET_SCAN = "import network;network.WLAN(network.STA_IF).scan();gc.collect()"
NET_STAT_ON = "import network;network.WLAN(network.STA_IF).active(True);gc.collect()"
NET_STAT_OFF = "import network;network.WLAN(network.STA_IF).active(False);gc.collect()"
NET_STAT_CONN = "import network;network.WLAN(network.STA_IF).connect('{}', '{}');gc.collect()"
NET_STAT = "import network;network.WLAN(network.STA_IF).active();gc.collect()"
AP_ON = "import network;network.WLAN(network.AP_IF).active(True);gc.collect()"
AP_OFF = "import network;network.WLAN(network.AP_IF).active(False);gc.collect()"
AP_STATE = "network.WLAN(network.AP_IF).active()"
AP_SSID = "network.WLAN(network.AP_IF).config('essid')"
AP_CHANNEL = "network.WLAN(network.AP_IF).config('channel')"
AP_AUTHMODE = "network.WLAN(network.AP_IF).config('authmode')"
AP_IFCONFIG = "network.WLAN(network.AP_IF).ifconfig()"
APSTAT = "import network;[{},{},{},{},{}];gc.collect()".format(
AP_STATE, AP_SSID, AP_CHANNEL, AP_AUTHMODE, AP_IFCONFIG)
AP_CONFIG = "network.WLAN(network.AP_IF).config(essid='{}',authmode=network.AUTH_WPA_WPA2_PSK, password='{}')"
AP_SCAN = "import network;network.WLAN(network.AP_IF).status('stations');gc.collect()"
I2C_CONFIG = "from machine import I2C,Pin;i2c = I2C(scl=Pin({}),sda=Pin({}));gc.collect()"
I2C_CONFIG_PYB = "from machine import I2C;i2c = I2C(scl='{}', sda='{}');gc.collect()"
I2C_SCAN = "i2c.scan();gc.collect()"
SPI_CONFIG = "from machine import SPI,Pin;spi = SPI(1, baudrate=10000000, sck=Pin({}), mosi=Pin({}), miso=Pin({})); cs = Pin({}, Pin.OUT)"
SET_RTC_LT = "from machine import RTC;rtc = RTC();rtc.datetime(({}, {}, {}, {}, {}, {}, {}+1, {}));gc.collect()"
RTC = "from machine import RTC;rtc = RTC();"
NTPTIME = "from ntptime import settime;settime();"
RTC_CONFIG = "(year, month, mday, week_of_year, hour, minute, second, milisecond) = rtc.datetime();"
UTC_ZONE = "rtc.datetime((year, month, mday, week_of_year,hour+{}, minute, second, milisecond));gc.collect()"
SET_RTC_NT = RTC + NTPTIME + RTC_CONFIG + UTC_ZONE
DATETIME = "import time; tnow = time.localtime();tnow[:6];gc.collect()"
WLAN_INIT = "from wifiutils import WIFI_UTIL; u_wlan = WIFI_UTIL(silent=False);gc.collect()"
WLAN_CONFIG = "u_wlan.sta_config('{}', '{}');gc.collect()"
WLAN_AP_CONFIG = "u_wlan.ap_config('{}', '{}');gc.collect()"
WLAN_CONN = "u_wlan.STA_conn();gc.collect()"
WLAN_AP_CONN = "u_wlan.AP_conn();gc.collect()"
SD_ENABLE_CONF = "from machine import Pin;sd_enable=Pin({}, Pin.OUT);"
SD_ENABLE_TOGGLE = "sd_enable.value(not sd_enable.value());sd_enable.value();gc.collect()"
SD_ENABLE = SD_ENABLE_CONF + SD_ENABLE_TOGGLE
SD_SDINIT = "import sdcard,os;sd = sdcard.SDCard(spi, cs);time.sleep_ms(1000);"
SD_MOUNT = "os.mount(sd, '/sd');'sd' in os.listdir('/');gc.collect()"
SD_INIT = SD_SDINIT + SD_MOUNT
SD_DEINIT = "import os;os.umount('/sd');sd_enable.off();sd_enable.value();gc.collect()"
SD_AUTO = "import SD_AM;gc.collect()"
CHECK_UPYSH2 = "import os;'upysh2.py' in os.listdir('lib');gc.collect()"
CMDDICT_ = {'UID': UID, 'UPYSH': UPYSH, 'HELP': HELP, 'MOD': MODULES,
'MEM': MEM, 'OS_STAT': OS_STAT, 'FILE_STAT': FILE_STAT,
'CHECK_DIR': CHECK_DIR, 'STAT_FS': STAT_FS,
'NET_INFO': NET_INFO, 'NET_SCAN': NET_SCAN,
'NET_STAT_ON': NET_STAT_ON, 'NET_STAT_OFF': NET_STAT_OFF,
'NET_STAT_CONN': NET_STAT_CONN, 'NET_STAT': NET_STAT,
'AP_ON': AP_ON, 'AP_OFF': AP_OFF, 'APSTAT': APSTAT,
'AP_CONFIG': AP_CONFIG, 'AP_SCAN': AP_SCAN,
'I2C_CONFIG': I2C_CONFIG, 'I2C_SCAN': I2C_SCAN,
'SPI_CONFIG': SPI_CONFIG, 'SET_RTC_LT': SET_RTC_LT,
'SET_RTC_NT': SET_RTC_NT, 'DATETIME': DATETIME,
'I2C_CONFIG_PYB': I2C_CONFIG_PYB, 'WLAN_INIT': WLAN_INIT,
'WLAN_CONFIG': WLAN_CONFIG, 'WLAN_AP_CONFIG': WLAN_AP_CONFIG,
'WLAN_CONN': WLAN_CONN, 'WLAN_AP_CONN': WLAN_AP_CONN,
'SD_ENABLE': SD_ENABLE, 'SD_INIT': SD_INIT,
'SD_DEINIT': SD_DEINIT, 'SD_AUTO': SD_AUTO, 'CHECK_UPYSH2': CHECK_UPYSH2}
_CMDDICT_ = {k: 'import gc;' + v for k, v in CMDDICT_.items()}
| true | true |
f7f58c05da9f21eb33d4f2039e6f14989f11ab40 | 268 | py | Python | faq/models.py | VenkataRavitejaGullapudi/StudentAnalysisSystem | 8ae853e620f84906a89e8db4b45300b73665a1b3 | [
"CC-BY-3.0"
] | null | null | null | faq/models.py | VenkataRavitejaGullapudi/StudentAnalysisSystem | 8ae853e620f84906a89e8db4b45300b73665a1b3 | [
"CC-BY-3.0"
] | null | null | null | faq/models.py | VenkataRavitejaGullapudi/StudentAnalysisSystem | 8ae853e620f84906a89e8db4b45300b73665a1b3 | [
"CC-BY-3.0"
] | null | null | null | from django.db import models
# Create your models here.
class Faqs(models.Model):
    """A submitted FAQ entry: the asker's email, the question, and its answer."""

    email=models.EmailField()
    query=models.CharField(max_length=1000)
    # Defaults to the literal placeholder " none " until an answer is recorded.
    answer=models.CharField(max_length=1000,default=" none ")

    def __str__(self):
        # Display the question text wherever the object is rendered as a string.
        return self.query
| 26.8 | 61 | 0.723881 | from django.db import models
class Faqs(models.Model):
    """A submitted FAQ entry: the asker's email, the question, and its answer."""

    email=models.EmailField()
    query=models.CharField(max_length=1000)
    # Defaults to the literal placeholder " none " until an answer is recorded.
    answer=models.CharField(max_length=1000,default=" none ")

    def __str__(self):
        # Display the question text wherever the object is rendered as a string.
        return self.query
| true | true |
f7f58c41a053fb20180cc4661a0af635c7527fcc | 4,414 | py | Python | resqs/sts/models.py | UMass-Rescue/moto | 3aa52aca28c622be9708da5fd31a8c8b92801634 | [
"Apache-2.0"
] | null | null | null | resqs/sts/models.py | UMass-Rescue/moto | 3aa52aca28c622be9708da5fd31a8c8b92801634 | [
"Apache-2.0"
] | null | null | null | resqs/sts/models.py | UMass-Rescue/moto | 3aa52aca28c622be9708da5fd31a8c8b92801634 | [
"Apache-2.0"
] | null | null | null | from __future__ import unicode_literals
from base64 import b64decode
import datetime
import xmltodict
from resqs.core import BaseBackend, BaseModel
from resqs.core.utils import iso_8601_datetime_with_milliseconds
from resqs.core import ACCOUNT_ID
from resqs.sts.utils import (
random_access_key_id,
random_secret_access_key,
random_session_token,
random_assumed_role_id,
DEFAULT_STS_SESSION_DURATION,
)
class Token(BaseModel):
    """Temporary STS credentials: an expiry plus optional federation name/policy.

    Used by both get_session_token() and get_federation_token().
    """

    def __init__(self, duration, name=None, policy=None):
        now = datetime.datetime.utcnow()
        # Absolute expiry computed from the requested duration in seconds.
        self.expiration = now + datetime.timedelta(seconds=duration)
        self.name = name
        # Bug fix: the passed-in policy was previously discarded
        # (``self.policy = None``), so get_federation_token() callers could
        # never read it back. Default behavior (policy=None) is unchanged.
        self.policy = policy

    @property
    def expiration_ISO8601(self):
        # ISO-8601 with milliseconds, the format the response layer emits.
        return iso_8601_datetime_with_milliseconds(self.expiration)
class AssumedRole(BaseModel):
    """Credentials and identity produced by an sts:AssumeRole call."""

    def __init__(self, role_session_name, role_arn, policy, duration, external_id):
        self.session_name = role_session_name
        self.role_arn = role_arn
        self.policy = policy
        self.external_id = external_id
        # Absolute expiry computed from the requested duration in seconds.
        self.expiration = datetime.datetime.utcnow() + datetime.timedelta(
            seconds=duration
        )
        # Freshly generated temporary credentials; "ASIA"/"AROA" prefixes
        # follow the AWS convention for temporary keys / assumed-role ids.
        self.access_key_id = "ASIA" + random_access_key_id()
        self.secret_access_key = random_secret_access_key()
        self.session_token = random_session_token()
        self.assumed_role_id = "AROA" + random_assumed_role_id()

    @property
    def expiration_ISO8601(self):
        """Expiration formatted as ISO-8601 with milliseconds."""
        return iso_8601_datetime_with_milliseconds(self.expiration)

    @property
    def user_id(self):
        """The ``<assumed-role-id>:<session-name>`` identifier."""
        return "{}:{}".format(self.assumed_role_id, self.session_name)

    @property
    def arn(self):
        """ARN of this assumed-role session."""
        role_name = self.role_arn.split("/")[-1]
        return "arn:aws:sts::{}:assumed-role/{}/{}".format(
            ACCOUNT_ID, role_name, self.session_name
        )
class STSBackend(BaseBackend):
    """In-memory STS backend: issues tokens and records assumed roles."""

    def __init__(self):
        self.assumed_roles = []

    def get_session_token(self, duration):
        """Return a session Token valid for ``duration`` seconds."""
        return Token(duration=duration)

    def get_federation_token(self, name, duration, policy):
        """Return a federation Token for ``name`` with an optional policy."""
        return Token(duration=duration, name=name, policy=policy)

    def assume_role(self, **kwargs):
        """Create an AssumedRole (kwargs forwarded to it), record and return it."""
        role = AssumedRole(**kwargs)
        self.assumed_roles.append(role)
        return role

    def get_assumed_role_from_access_key(self, access_key_id):
        """Return the recorded role matching this temporary key id, or None."""
        return next(
            (r for r in self.assumed_roles if r.access_key_id == access_key_id),
            None,
        )

    def assume_role_with_web_identity(self, **kwargs):
        # The web-identity token is not validated here; same flow as assume_role.
        return self.assume_role(**kwargs)

    def assume_role_with_saml(self, **kwargs):
        """Assume a role from a base64-encoded SAML assertion.

        Extracts RoleSessionName and (optionally) SessionDuration from the
        assertion's attribute statement, then delegates to assume_role().
        Raises KeyError if the assertion lacks the expected structure.
        """
        del kwargs["principal_arn"]
        saml_assertion_encoded = kwargs.pop("saml_assertion")
        saml_assertion_decoded = b64decode(saml_assertion_encoded)
        # Normalize the SAML namespaces so lookups below are prefix-stable.
        namespaces = {
            "urn:oasis:names:tc:SAML:2.0:protocol": "samlp",
            "urn:oasis:names:tc:SAML:2.0:assertion": "saml",
        }
        saml_assertion = xmltodict.parse(
            saml_assertion_decoded.decode("utf-8"),
            force_cdata=True,
            process_namespaces=True,
            namespaces=namespaces,
        )
        saml_assertion_attributes = saml_assertion["samlp:Response"]["saml:Assertion"][
            "saml:AttributeStatement"
        ]["saml:Attribute"]
        for attribute in saml_assertion_attributes:
            if (
                attribute["@Name"]
                == "https://aws.amazon.com/SAML/Attributes/RoleSessionName"
            ):
                kwargs["role_session_name"] = attribute["saml:AttributeValue"]["#text"]
            if (
                attribute["@Name"]
                == "https://aws.amazon.com/SAML/Attributes/SessionDuration"
            ):
                kwargs["duration"] = int(attribute["saml:AttributeValue"]["#text"])
        if "duration" not in kwargs:
            kwargs["duration"] = DEFAULT_STS_SESSION_DURATION
        kwargs["external_id"] = None
        kwargs["policy"] = None
        # Reuse assume_role() so the role is constructed and recorded exactly
        # the same way as every other assume-role flow (removes duplication).
        return self.assume_role(**kwargs)

    def get_caller_identity(self):
        # Logic resides in responses.py
        # Fake method here to make implementation coverage script aware that this method is implemented
        pass
# Module-level singleton instance of the backend.
sts_backend = STSBackend()
| 33.439394 | 103 | 0.656094 | from __future__ import unicode_literals
from base64 import b64decode
import datetime
import xmltodict
from resqs.core import BaseBackend, BaseModel
from resqs.core.utils import iso_8601_datetime_with_milliseconds
from resqs.core import ACCOUNT_ID
from resqs.sts.utils import (
random_access_key_id,
random_secret_access_key,
random_session_token,
random_assumed_role_id,
DEFAULT_STS_SESSION_DURATION,
)
class Token(BaseModel):
    """Temporary STS credentials: an expiry plus optional federation name/policy."""

    def __init__(self, duration, name=None, policy=None):
        now = datetime.datetime.utcnow()
        # Absolute expiry computed from the requested duration in seconds.
        self.expiration = now + datetime.timedelta(seconds=duration)
        self.name = name
        # Bug fix: the passed-in policy was previously discarded
        # (``self.policy = None``). Default behavior (policy=None) unchanged.
        self.policy = policy

    @property
    def expiration_ISO8601(self):
        # ISO-8601 with milliseconds, the format the response layer emits.
        return iso_8601_datetime_with_milliseconds(self.expiration)
class AssumedRole(BaseModel):
    """Credentials and identity produced by an sts:AssumeRole call."""

    def __init__(self, role_session_name, role_arn, policy, duration, external_id):
        self.session_name = role_session_name
        self.role_arn = role_arn
        self.policy = policy
        # Absolute expiry computed from the requested duration in seconds.
        now = datetime.datetime.utcnow()
        self.expiration = now + datetime.timedelta(seconds=duration)
        self.external_id = external_id
        # Freshly generated temporary credentials; "ASIA"/"AROA" prefixes
        # follow the AWS convention for temporary keys / assumed-role ids.
        self.access_key_id = "ASIA" + random_access_key_id()
        self.secret_access_key = random_secret_access_key()
        self.session_token = random_session_token()
        self.assumed_role_id = "AROA" + random_assumed_role_id()

    @property
    def expiration_ISO8601(self):
        # ISO-8601 with milliseconds.
        return iso_8601_datetime_with_milliseconds(self.expiration)

    @property
    def user_id(self):
        # "<assumed-role-id>:<session-name>".
        return self.assumed_role_id + ":" + self.session_name

    @property
    def arn(self):
        # ARN of this assumed-role session.
        return "arn:aws:sts::{account_id}:assumed-role/{role_name}/{session_name}".format(
            account_id=ACCOUNT_ID,
            role_name=self.role_arn.split("/")[-1],
            session_name=self.session_name,
        )
class STSBackend(BaseBackend):
    """In-memory STS backend: issues tokens and records assumed roles."""

    def __init__(self):
        self.assumed_roles = []

    def get_session_token(self, duration):
        """Return a session Token valid for ``duration`` seconds."""
        token = Token(duration=duration)
        return token

    def get_federation_token(self, name, duration, policy):
        """Return a federation Token for ``name`` with an optional policy."""
        token = Token(duration=duration, name=name, policy=policy)
        return token

    def assume_role(self, **kwargs):
        """Create an AssumedRole (kwargs forwarded to it), record and return it."""
        role = AssumedRole(**kwargs)
        self.assumed_roles.append(role)
        return role

    def get_assumed_role_from_access_key(self, access_key_id):
        """Return the recorded role matching this temporary key id, or None."""
        for assumed_role in self.assumed_roles:
            if assumed_role.access_key_id == access_key_id:
                return assumed_role
        return None

    def assume_role_with_web_identity(self, **kwargs):
        # The web-identity token is not validated here; same flow as assume_role.
        return self.assume_role(**kwargs)

    def assume_role_with_saml(self, **kwargs):
        """Assume a role from a base64-encoded SAML assertion.

        Extracts RoleSessionName and (optionally) SessionDuration from the
        assertion's attribute statement, builds an AssumedRole and records it.
        """
        del kwargs["principal_arn"]
        saml_assertion_encoded = kwargs.pop("saml_assertion")
        saml_assertion_decoded = b64decode(saml_assertion_encoded)
        # Normalize the SAML namespaces so lookups below are prefix-stable.
        namespaces = {
            "urn:oasis:names:tc:SAML:2.0:protocol": "samlp",
            "urn:oasis:names:tc:SAML:2.0:assertion": "saml",
        }
        saml_assertion = xmltodict.parse(
            saml_assertion_decoded.decode("utf-8"),
            force_cdata=True,
            process_namespaces=True,
            namespaces=namespaces,
        )
        saml_assertion_attributes = saml_assertion["samlp:Response"]["saml:Assertion"][
            "saml:AttributeStatement"
        ]["saml:Attribute"]
        for attribute in saml_assertion_attributes:
            if (
                attribute["@Name"]
                == "https://aws.amazon.com/SAML/Attributes/RoleSessionName"
            ):
                kwargs["role_session_name"] = attribute["saml:AttributeValue"]["#text"]
            if (
                attribute["@Name"]
                == "https://aws.amazon.com/SAML/Attributes/SessionDuration"
            ):
                kwargs["duration"] = int(attribute["saml:AttributeValue"]["#text"])
        if "duration" not in kwargs:
            kwargs["duration"] = DEFAULT_STS_SESSION_DURATION
        kwargs["external_id"] = None
        kwargs["policy"] = None
        role = AssumedRole(**kwargs)
        self.assumed_roles.append(role)
        return role

    def get_caller_identity(self):
        # Implemented in the responses layer; placeholder kept for coverage.
        pass
# Module-level singleton instance of the backend.
sts_backend = STSBackend()
| true | true |
f7f58c9bb4ac82658d6bd77c7a33a909b3368cd0 | 10,779 | py | Python | doc/large-def/update_formula.py | ajey091/neml | 23dd2cdb83057fdd17a37fa19f4592c54f821dbf | [
"MIT"
] | 6 | 2020-05-06T17:04:29.000Z | 2021-08-03T20:02:22.000Z | doc/large-def/update_formula.py | ajey091/neml | 23dd2cdb83057fdd17a37fa19f4592c54f821dbf | [
"MIT"
] | 66 | 2018-10-26T01:32:43.000Z | 2022-02-01T03:02:18.000Z | doc/large-def/update_formula.py | ajey091/neml | 23dd2cdb83057fdd17a37fa19f4592c54f821dbf | [
"MIT"
] | 14 | 2018-11-28T17:07:24.000Z | 2022-01-06T16:57:15.000Z | #!/usr/bin/env python
from __future__ import division
import numpy as np
import copy
import itertools
from sympy import *
mandel = ((0,0),(1,1),(2,2),(1,2),(0,2),(0,1))
mandel_mults = (1,1,1,sqrt(2),sqrt(2),sqrt(2))
skew_inds = ((1,2),(0,2),(0,1))
skew_mults = (-1,1,-1)
def object_einsum(string, *arrays):
    """Einsum that also works on object-dtype arrays (e.g. sympy expressions).

    Tries np.einsum first and falls back to a manual np.nditer loop when the
    dtype is unsupported. Does not support "..." or list input ("..." would
    be read as three separate axis labels).

    NOTE: This is untested, and not fast, but object type is
    never really fast anyway...
    """
    try:
        return np.einsum(string, *arrays)
    except TypeError:
        pass

    # Parse the "in1,in2->out" subscript spec.
    s = string.split('->')
    in_op = s[0].split(',')
    out_op = None if len(s) == 1 else s[1].replace(' ', '')
    in_op = [axes.replace(' ', '') for axes in in_op]
    all_axes = set()

    for axes in in_op:
        all_axes.update(axes)

    if out_op is None:
        # Implicit output: all labels, sorted (einsum's implicit-mode rule).
        out_op = sorted(all_axes)
    else:
        all_axes.update(out_op)

    # Map each axis label to a global axis position for nditer's op_axes.
    perm_dict = {_[1]: _[0] for _ in enumerate(all_axes)}
    dims = len(perm_dict)
    op_axes = []
    for axes in (in_op + list((out_op,))):
        op = [-1] * dims
        for i, ax in enumerate(axes):
            op[perm_dict[ax]] = i
        op_axes.append(op)

    op_flags = [('readonly',)] * len(in_op) + [('readwrite', 'allocate')]
    dtypes = [np.object_] * (len(in_op) + 1)  # cast all to object

    nditer = np.nditer(arrays + (None,), op_axes=op_axes, flags=['buffered', 'delay_bufalloc', 'reduce_ok', 'grow_inner', 'refs_ok'], op_dtypes=dtypes, op_flags=op_flags)

    # Zero the allocated output before accumulating products into it.
    nditer.operands[-1][...] = 0
    nditer.reset()

    for vals in nditer:
        out = vals[-1]
        # deepcopy so in-place *= does not mutate the first input's elements.
        prod = copy.deepcopy(vals[0])
        for value in vals[1:-1]:
            prod *= value
        out += prod

    return nditer.operands[-1]
def zero_tensor(shape):
    """Return a sympy MutableDenseNDimArray of the given shape, filled with zeros."""
    n_entries = int(np.prod(shape))
    return MutableDenseNDimArray([0] * n_entries, shape)
def piece_together_fourth(Dp, Wp):
    """
    Take the symmetric and skew parts of an algorithmic tangent (Mandel/skew
    matrix notation) and recombine them into one full 3x3x3x3 tensor by
    contracting each part with the matching symmetric / skew identity.
    """
    # Symmetric and antisymmetric fourth-order identity tensors.
    sym_id = (object_einsum('ik,jl', eye(3), eye(3)) + object_einsum('jk,il', eye(3), eye(3)))/2
    skew_id = (object_einsum('ik,jl', eye(3), eye(3)) - object_einsum('jk,il', eye(3), eye(3)))/2

    D = sym_tensor_part(Dp)
    W = skew_tensor_part(Wp)

    # res_ijmn = D_ijkl sym_id_klmn + W_ijkl skew_id_klmn, written out by hand.
    res = zero_tensor((3,3,3,3))
    for i in range(3):
        for j in range(3):
            for k in range(3):
                for l in range(3):
                    for m in range(3):
                        for n in range(3):
                            res[i,j,m,n] += D[i,j,k,l] * sym_id[k,l,m,n] + W[i,j,k,l] * skew_id[k,l,m,n]
    return res
def sym_tensor_part(C):
    """
    Take a Mandel stiffness in my notation and convert it back to a full tensor.

    Like ms2ts, but the l < k entries of the last index pair are zeroed
    afterwards (only the k <= l half is kept for later recombination).
    """
    Ct = zero_tensor((3,3,3,3))
    for a in range(6):
        for b in range(6):
            ind_a = itertools.permutations(mandel[a], r=2)
            ind_b = itertools.permutations(mandel[b], r=2)
            ma = mandel_mults[a]
            mb = mandel_mults[b]
            indexes = tuple(ai+bi for ai, bi in itertools.product(ind_a, ind_b))
            for ind in indexes:
                # Bug fix: was ``C[a,b] / ma*mb``, which parses as
                # ``(C[a,b] / ma) * mb`` and cancels the sqrt(2) shear
                # factors. Dividing by the product matches ms2ts.
                Ct[ind] = C[a,b] / (ma*mb)
    for i in range(3):
        for j in range(3):
            for k in range(3):
                for l in range(3):
                    if l < k:
                        Ct[i,j,k,l] = 0
    return Ct
def skew_tensor_part(C):
    """
    Take a skew stiffness in my notation and convert it back to a full tensor.
    """
    Ct = zero_tensor((3,3,3,3))
    for a in range(6):
        for b in range(3):
            inds_a = mandel[a]
            inds_b = skew_inds[b]
            mult_a = mandel_mults[a]
            mult_b = skew_mults[b]
            # Fill both orderings of each index pair; the second skew
            # ordering flips sign (f = -1) by antisymmetry.
            for ord_a in ((0,1),(1,0)):
                for ord_b, f in zip(((0,1),(1,0)), (1,-1)):
                    ind = tuple([inds_a[aa] for aa in ord_a] + [inds_b[bb] for bb in ord_b])
                    # NOTE(review): multiplies by the scale factors where
                    # ws2ts divides — presumably intentional for the
                    # recombination in piece_together_fourth; verify.
                    Ct[ind] = C[a,b] * mult_a*mult_b * f
    for i in range(3):
        for j in range(3):
            for k in range(3):
                for l in range(3):
                    if i != j:
                        # NOTE(review): off-diagonal (i,j) entries halved here;
                        # confirm against the intended recombination convention.
                        Ct[i,j,k,l] /= 2
                    if l < k:
                        # Keep only the k <= l half of the last index pair.
                        Ct[i,j,k,l] = 0
    return Ct
def unroll_fourth(T):
    """Flatten a 3x3x3x3 tensor into a 9x9 matrix (row = i*3+j, col = k*3+l)."""
    out = zeros(9, 9)
    for row in range(9):
        i, j = divmod(row, 3)
        for col in range(9):
            k, l = divmod(col, 3)
            out[row, col] = T[i, j, k, l]
    return out
def reroll_fourth(M):
    """
    Undo unroll_fourth: rebuild the 3x3x3x3 tensor from its 9x9 matrix form.
    """
    T = zero_tensor((3, 3, 3, 3))
    for row in range(9):
        i, j = divmod(row, 3)
        for col in range(9):
            k, l = divmod(col, 3)
            T[i, j, k, l] = M[row, col]
    return T
def unroll_second(T):
    """Flatten a 3x3 tensor into a 9-entry column vector, row-major."""
    entries = []
    for i in range(3):
        for j in range(3):
            entries.append(T[i, j])
    return Matrix(entries)
def ms2ts(C):
    """
    Convert a Mandel notation stiffness matrix to a full stiffness tensor.
    """
    Ct = zero_tensor((3,3,3,3))
    for a in range(6):
        for b in range(6):
            # All index orderings of each Mandel pair, so symmetry is filled in.
            ind_a = itertools.permutations(mandel[a], r=2)
            ind_b = itertools.permutations(mandel[b], r=2)
            ma = mandel_mults[a]
            mb = mandel_mults[b]
            indexes = tuple(ai+bi for ai, bi in itertools.product(ind_a, ind_b))
            for i,j,k,l in indexes:
                # Undo the sqrt(2) Mandel scaling on shear rows/columns.
                Ct[i,j,k,l] = C[a,b] / (ma*mb)
    return Ct
def ts2ms(C):
    """
    Convert a stiffness tensor into a 6x6 Mandel notation stiffness matrix.
    """
    Cv = zeros(6, 6)
    for i, (ind_i, mult_i) in enumerate(zip(mandel, mandel_mults)):
        for j, (ind_j, mult_j) in enumerate(zip(mandel, mandel_mults)):
            # Apply the sqrt(2) Mandel scaling on shear rows/columns.
            Cv[i, j] = C[ind_i + ind_j] * mult_i * mult_j
    return Cv
def ws2ts(C):
    """
    Convert a skew notation stiffness matrix to a full stiffness tensor.
    """
    Ct = zero_tensor((3,3,3,3))
    for a in range(6):
        for b in range(3):
            inds_a = mandel[a]
            inds_b = skew_inds[b]
            mult_a = mandel_mults[a]
            mult_b = skew_mults[b]
            # Fill both orderings of each index pair; the flipped skew
            # ordering changes sign (f = -1) by antisymmetry.
            for ord_a in ((0,1),(1,0)):
                for ord_b, f in zip(((0,1),(1,0)), (1,-1)):
                    ind = tuple([inds_a[aa] for aa in ord_a] + [inds_b[bb] for bb in ord_b])
                    Ct[ind] = C[a,b] / (mult_a*mult_b) * f
    return Ct
def ts2ws(C):
    """
    Convert a stiffness tensor into a 6x3 skew notation stiffness matrix.
    """
    Cv = zeros(6, 3)
    for i, (ind_i, mult_i) in enumerate(zip(mandel, mandel_mults)):
        for j, (ind_j, mult_j) in enumerate(zip(skew_inds, skew_mults)):
            Cv[i, j] = C[ind_i + ind_j] * mult_i * mult_j
    return Cv
def wws2ts(C):
    """
    Convert a (skew-row, Mandel-column) stiffness matrix to a full tensor.
    """
    Ct = zero_tensor((3,3,3,3))
    for a in range(3):
        for b in range(6):
            inds_a = skew_inds[a]
            inds_b = mandel[b]
            mult_a = skew_mults[a]
            mult_b = mandel_mults[b]
            # Here the FIRST index pair is the skew one, so its flipped
            # ordering carries the sign change (f = -1).
            for ord_a, f in zip(((0,1),(1,0)),(1,-1)):
                for ord_b in ((0,1),(1,0)):
                    ind = tuple([inds_a[aa] for aa in ord_a] + [inds_b[bb] for bb in ord_b])
                    Ct[ind] = C[a,b] / (mult_a * mult_b) * f
    return Ct
def ts2wws(C):
    """
    Convert a stiffness tensor into a 3x6 (skew-row, Mandel-column) matrix.
    """
    Cv = zeros(3,6)
    for i in range(3):
        for j in range(6):
            ma = skew_mults[i]
            mb = mandel_mults[j]
            Cv[i,j] = C[skew_inds[i] + mandel[j]] * ma * mb
    return Cv
def trace(X):
    """Sum of the diagonal entries of a 3x3 tensor-like object."""
    return sum(X[i, i] for i in range(3))
def sym(A):
    """Mandel 6-vector of a symmetric 3x3 tensor: diagonal, then sqrt(2)-scaled (1,2), (0,2), (0,1) shears."""
    diagonal = [A[i, i] for i in range(3)]
    shears = [sqrt(2) * A[i, j] for i, j in ((1, 2), (0, 2), (0, 1))]
    return diagonal + shears
def update():
    """Symbolically derive and print C-style assignments for a stress update:
    the right-hand side vector St[] and the 9x9 system matrix M[]."""
    # Symbolic spin (W), deformation-rate (D), stress (Sn) and rate (So) inputs.
    W0, W1, W2 = symbols("W[0] W[1] W[2]")
    D0, D1, D2, D3, D4, D5 = symbols("D[0] D[1] D[2] D[3] D[4] D[5]")
    S0, S1, S2, S3, S4, S5 = symbols("Sn[0] Sn[1] Sn[2] Sn[3] Sn[4] Sn[5]")
    O0, O1, O2, O3, O4, O5 = symbols("So[0] So[1] So[2] So[3] So[4] So[5]")
    G00, G01, G02, G10, G11, G12, G20, G21, G22 = symbols("G00 G01 G02 G10 G11 G12 G20 G21 G22")

    # Assemble full 3x3 matrices from the Mandel/skew components.
    W = Matrix([[0,-W2,W1],[W2,0,-W0],[-W1,W0,0]])
    D = Matrix([[D0, D5/sqrt(2), D4/sqrt(2)],[D5/sqrt(2),D1,D3/sqrt(2)],[D4/sqrt(2),D3/sqrt(2),D2]])
    L = D + W
    S = Matrix([[S0, S5/sqrt(2), S4/sqrt(2)],[S5/sqrt(2),S1,S3/sqrt(2)],[S4/sqrt(2),S3/sqrt(2),S2]])
    O = Matrix([[O0, O5/sqrt(2), O4/sqrt(2)],[O5/sqrt(2),O1,O3/sqrt(2)],[O4/sqrt(2),O3/sqrt(2),O2]])
    G = Matrix([[G00,G01,G02],[G10,G11,G12],[G20,G21,G22]])

    # Right-hand side, reduced to its Mandel 6-vector.
    V1 = simplify(O + S*(L.T) + L*S - (trace(L) * S))
    V11 = simplify(sym(V1))

    print("RHS")
    for i in range(6):
        print(("\tSt[%i] = " + str(V11[i]) + ";") % i)
    print("")

    # System matrix as a fourth-order tensor, then flattened to 9x9.
    J = object_einsum('im,jn', eye(3), eye(3)) * (1 + trace(L)) - object_einsum('im,jn', eye(3), L) - object_einsum('im,jn', L, eye(3))
    JM = simplify(unroll_fourth(J))

    print("Matrix")
    for i in range(9):
        for j in range(9):
            print(("\tM[%i] = " + str(JM[i,j]) + ";") % (i*9+j))
    print("")
def tangent():
    """Print C-style assignments converting tangents between the Mandel /
    skew / WWS matrix notations and the flattened 9x9 representation."""
    # Symbolic 6x6 Mandel matrix -> tensor -> 9x9.
    sym_mat = Matrix([[symbols("M[%i]" % (i*6+j)) for j in range(6)] for i in range(6)])
    sym_ten = ms2ts(sym_mat)
    sym_full = unroll_fourth(sym_ten)
    # Symbolic 6x3 skew matrix -> tensor -> 9x9.
    skew_mat = Matrix([[symbols("M[%i]" % (i*3+j)) for j in range(3)] for i in range(6)])
    skew_ten = ws2ts(skew_mat)
    skew_full = unroll_fourth(skew_ten)
    # Symbolic 3x6 WWS matrix -> tensor -> 9x9.
    wws_mat = Matrix([[symbols("M[%i]" % (i*6+j)) for j in range(6)] for i in range(3)])
    wws_ten = wws2ts(wws_mat)
    wws_full = unroll_fourth(wws_ten)
    # Symbolic 9x9 -> tensor -> each matrix notation (inverse direction).
    A_mat = Matrix([[symbols("A[%i]" % (i*9+j)) for j in range(9)] for i in range(9)])
    A_ten = reroll_fourth(A_mat)
    A_mandel = ts2ms(A_ten)
    A_skew = ts2ws(A_ten)
    A_wws = ts2wws(A_ten)
    print("Mandel->9x9")
    for i in range(9):
        for j in range(9):
            print(("\tA[%i] = " + str(sym_full[i,j]) + ";") % (i*9+j))
    print("")
    print("9x9->Mandel")
    for i in range(6):
        for j in range(6):
            print(("\tM[%i] = " + str(A_mandel[i,j]) + ";") % (i*6+j))
    print("")
    print("Skew->9x9")
    for i in range(9):
        for j in range(9):
            print(("\tA[%i] = " + str(skew_full[i,j]) + ";") % (i*9+j))
    print("")
    print("9x9->Skew")
    for i in range(6):
        for j in range(3):
            print(("\tM[%i] = " + str(A_skew[i,j]) + ";") % (i*3+j))
    print("")
    print("WWS->9x9")
    for i in range(9):
        for j in range(9):
            print(("\tA[%i] = " + str(wws_full[i,j]) + ";") % (i*9+j))
    print("")
    print("9x9->WWS")
    for i in range(3):
        for j in range(6):
            print(("\tM[%i] = " + str(A_wws[i,j]) + ";") % (i*6+j))
    print("")
    # Extra: the 9x9 matrix of S -> S L^T + L S - tr(L) S in symbolic form.
    S0, S1, S2, S3, S4, S5 = symbols("S[0] S[1] S[2] S[3] S[4] S[5]")
    S = Matrix([[S0, S5/sqrt(2), S4/sqrt(2)],[S5/sqrt(2),S1,S3/sqrt(2)],[S4/sqrt(2),S3/sqrt(2),S2]])
    tensor = simplify(object_einsum('in,jm', S, eye(3)) + object_einsum('im,nj', eye(3), S) - object_einsum('ij,mn', S, eye(3)))
    matrix = simplify(unroll_fourth(tensor))
    print("Matrix")
    for i in range(9):
        for j in range(9):
            print(("\tM[%i] = " + str(matrix[i,j]) + ";") % (i*9+j))
    print("")
def parts_to_whole():
    """Print the 9x9 matrix recombining symmetric (D) and skew (W) tangent
    parts via piece_together_fourth, as C-style assignments."""
    sym_mat = Matrix([[symbols("D[%i]" % (i*6+j)) for j in range(6)] for i in range(6)])
    skew_mat = Matrix([[symbols("W[%i]" % (i*3+j)) for j in range(3)] for i in range(6)])
    tensor = piece_together_fourth(sym_mat, skew_mat)
    matrix = simplify(unroll_fourth(tensor))
    print("Matrix")
    for i in range(9):
        for j in range(9):
            print(("\tM[%i] = " + str(matrix[i,j]) + ";") % (i*9+j))
    print("")
if __name__ == "__main__":
    init_printing(use_unicode=True)
    # Only the tangent-conversion generator is currently enabled; the other
    # generators can be re-enabled as needed.
    #update()
    tangent()
    #parts_to_whole()
| 27.288608 | 168 | 0.557287 |
from __future__ import division
import numpy as np
import copy
import itertools
from sympy import *
mandel = ((0,0),(1,1),(2,2),(1,2),(0,2),(0,1))
mandel_mults = (1,1,1,sqrt(2),sqrt(2),sqrt(2))
skew_inds = ((1,2),(0,2),(0,1))
skew_mults = (-1,1,-1)
def object_einsum(string, *arrays):
    """Einsum that also works on object-dtype arrays (e.g. sympy expressions);
    tries np.einsum first and falls back to a manual np.nditer loop.
    No support for "..." or list-style input."""
    try:
        return np.einsum(string, *arrays)
    except TypeError:
        pass
    # Parse the "in1,in2->out" subscript spec.
    s = string.split('->')
    in_op = s[0].split(',')
    out_op = None if len(s) == 1 else s[1].replace(' ', '')
    in_op = [axes.replace(' ', '') for axes in in_op]
    all_axes = set()
    for axes in in_op:
        all_axes.update(axes)
    if out_op is None:
        # Implicit output: all labels, sorted (einsum's implicit-mode rule).
        out_op = sorted(all_axes)
    else:
        all_axes.update(out_op)
    # Map each axis label to a global axis position for nditer's op_axes.
    perm_dict = {_[1]: _[0] for _ in enumerate(all_axes)}
    dims = len(perm_dict)
    op_axes = []
    for axes in (in_op + list((out_op,))):
        op = [-1] * dims
        for i, ax in enumerate(axes):
            op[perm_dict[ax]] = i
        op_axes.append(op)
    op_flags = [('readonly',)] * len(in_op) + [('readwrite', 'allocate')]
    # Cast everything to object dtype.
    dtypes = [np.object_] * (len(in_op) + 1)
    nditer = np.nditer(arrays + (None,), op_axes=op_axes, flags=['buffered', 'delay_bufalloc', 'reduce_ok', 'grow_inner', 'refs_ok'], op_dtypes=dtypes, op_flags=op_flags)
    # Zero the allocated output before accumulating products into it.
    nditer.operands[-1][...] = 0
    nditer.reset()
    for vals in nditer:
        out = vals[-1]
        # deepcopy so in-place *= does not mutate the first input's elements.
        prod = copy.deepcopy(vals[0])
        for value in vals[1:-1]:
            prod *= value
        out += prod
    return nditer.operands[-1]
def zero_tensor(shape):
    """Return a sympy MutableDenseNDimArray of the given shape, filled with zeros."""
    n_entries = int(np.prod(shape))
    return MutableDenseNDimArray([0] * n_entries, shape)
def piece_together_fourth(Dp, Wp):
    """Recombine the symmetric (Dp) and skew (Wp) tangent parts into one full
    3x3x3x3 tensor by contracting with the symmetric/skew identities."""
    sym_id = (object_einsum('ik,jl', eye(3), eye(3)) + object_einsum('jk,il', eye(3), eye(3)))/2
    skew_id = (object_einsum('ik,jl', eye(3), eye(3)) - object_einsum('jk,il', eye(3), eye(3)))/2
    D = sym_tensor_part(Dp)
    W = skew_tensor_part(Wp)
    # res_ijmn = D_ijkl sym_id_klmn + W_ijkl skew_id_klmn, written out by hand.
    res = zero_tensor((3,3,3,3))
    for i in range(3):
        for j in range(3):
            for k in range(3):
                for l in range(3):
                    for m in range(3):
                        for n in range(3):
                            res[i,j,m,n] += D[i,j,k,l] * sym_id[k,l,m,n] + W[i,j,k,l] * skew_id[k,l,m,n]
    return res
def sym_tensor_part(C):
    """Convert a Mandel stiffness matrix back to a full tensor, keeping only
    the k <= l half of the last index pair (rest zeroed for recombination)."""
    Ct = zero_tensor((3,3,3,3))
    for a in range(6):
        for b in range(6):
            ind_a = itertools.permutations(mandel[a], r=2)
            ind_b = itertools.permutations(mandel[b], r=2)
            ma = mandel_mults[a]
            mb = mandel_mults[b]
            indexes = tuple(ai+bi for ai, bi in itertools.product(ind_a, ind_b))
            for ind in indexes:
                # Bug fix: was ``C[a,b] / ma*mb`` == ``(C[a,b] / ma) * mb``,
                # which cancels the sqrt(2) shear factors. Matches ms2ts now.
                Ct[ind] = C[a,b] / (ma*mb)
    for i in range(3):
        for j in range(3):
            for k in range(3):
                for l in range(3):
                    if l < k:
                        Ct[i,j,k,l] = 0
    return Ct
def skew_tensor_part(C):
    """Convert a skew-notation stiffness matrix back to a full tensor,
    keeping only the k <= l half of the last index pair."""
    Ct = zero_tensor((3,3,3,3))
    for a in range(6):
        for b in range(3):
            inds_a = mandel[a]
            inds_b = skew_inds[b]
            mult_a = mandel_mults[a]
            mult_b = skew_mults[b]
            # Both orderings of each pair; the flipped skew ordering is negated.
            for ord_a in ((0,1),(1,0)):
                for ord_b, f in zip(((0,1),(1,0)), (1,-1)):
                    ind = tuple([inds_a[aa] for aa in ord_a] + [inds_b[bb] for bb in ord_b])
                    # NOTE(review): multiplies by the scale factors where ws2ts
                    # divides — verify against the recombination convention.
                    Ct[ind] = C[a,b] * mult_a*mult_b * f
    for i in range(3):
        for j in range(3):
            for k in range(3):
                for l in range(3):
                    if i != j:
                        # NOTE(review): off-diagonal (i,j) entries halved; confirm.
                        Ct[i,j,k,l] /= 2
                    if l < k:
                        Ct[i,j,k,l] = 0
    return Ct
def unroll_fourth(T):
    """Flatten a 3x3x3x3 tensor into a 9x9 matrix (row = i*3+j, col = k*3+l)."""
    out = zeros(9, 9)
    for row in range(9):
        i, j = divmod(row, 3)
        for col in range(9):
            k, l = divmod(col, 3)
            out[row, col] = T[i, j, k, l]
    return out
def reroll_fourth(M):
    """Undo unroll_fourth: rebuild the 3x3x3x3 tensor from a 9x9 matrix."""
    T = zero_tensor((3, 3, 3, 3))
    for row in range(9):
        i, j = divmod(row, 3)
        for col in range(9):
            k, l = divmod(col, 3)
            T[i, j, k, l] = M[row, col]
    return T
def unroll_second(T):
    """Flatten a 3x3 tensor into a 9-entry column vector, row-major."""
    entries = []
    for i in range(3):
        for j in range(3):
            entries.append(T[i, j])
    return Matrix(entries)
def ms2ts(C):
    """Convert a Mandel notation stiffness matrix to a full stiffness tensor."""
    Ct = zero_tensor((3,3,3,3))
    for a in range(6):
        for b in range(6):
            # All index orderings of each Mandel pair, so symmetry is filled in.
            ind_a = itertools.permutations(mandel[a], r=2)
            ind_b = itertools.permutations(mandel[b], r=2)
            ma = mandel_mults[a]
            mb = mandel_mults[b]
            indexes = tuple(ai+bi for ai, bi in itertools.product(ind_a, ind_b))
            for i,j,k,l in indexes:
                # Undo the sqrt(2) Mandel scaling on shear rows/columns.
                Ct[i,j,k,l] = C[a,b] / (ma*mb)
    return Ct
def ts2ms(C):
    """Convert a stiffness tensor into a 6x6 Mandel notation stiffness matrix."""
    Cv = zeros(6, 6)
    for i, (ind_i, mult_i) in enumerate(zip(mandel, mandel_mults)):
        for j, (ind_j, mult_j) in enumerate(zip(mandel, mandel_mults)):
            Cv[i, j] = C[ind_i + ind_j] * mult_i * mult_j
    return Cv
def ws2ts(C):
    """Convert a skew notation stiffness matrix to a full stiffness tensor."""
    Ct = zero_tensor((3,3,3,3))
    for a in range(6):
        for b in range(3):
            inds_a = mandel[a]
            inds_b = skew_inds[b]
            mult_a = mandel_mults[a]
            mult_b = skew_mults[b]
            # Both orderings of each pair; flipped skew ordering is negated.
            for ord_a in ((0,1),(1,0)):
                for ord_b, f in zip(((0,1),(1,0)), (1,-1)):
                    ind = tuple([inds_a[aa] for aa in ord_a] + [inds_b[bb] for bb in ord_b])
                    Ct[ind] = C[a,b] / (mult_a*mult_b) * f
    return Ct
def ts2ws(C):
    """Convert a stiffness tensor into a 6x3 skew notation stiffness matrix."""
    Cv = zeros(6, 3)
    for i, (ind_i, mult_i) in enumerate(zip(mandel, mandel_mults)):
        for j, (ind_j, mult_j) in enumerate(zip(skew_inds, skew_mults)):
            Cv[i, j] = C[ind_i + ind_j] * mult_i * mult_j
    return Cv
def wws2ts(C):
    """Convert a (skew-row, Mandel-column) stiffness matrix to a full tensor."""
    Ct = zero_tensor((3,3,3,3))
    for a in range(3):
        for b in range(6):
            inds_a = skew_inds[a]
            inds_b = mandel[b]
            mult_a = skew_mults[a]
            mult_b = mandel_mults[b]
            # The FIRST pair is the skew one; its flipped ordering is negated.
            for ord_a, f in zip(((0,1),(1,0)),(1,-1)):
                for ord_b in ((0,1),(1,0)):
                    ind = tuple([inds_a[aa] for aa in ord_a] + [inds_b[bb] for bb in ord_b])
                    Ct[ind] = C[a,b] / (mult_a * mult_b) * f
    return Ct
def ts2wws(C):
    """Convert a stiffness tensor into a 3x6 (skew-row, Mandel-column) matrix."""
    Cv = zeros(3,6)
    for i in range(3):
        for j in range(6):
            ma = skew_mults[i]
            mb = mandel_mults[j]
            Cv[i,j] = C[skew_inds[i] + mandel[j]] * ma * mb
    return Cv
def trace(X):
    """Sum of the diagonal entries of a 3x3 tensor-like object."""
    return sum(X[i, i] for i in range(3))
def sym(A):
    """Mandel 6-vector of a symmetric 3x3 tensor: diagonal, then sqrt(2)-scaled (1,2), (0,2), (0,1) shears."""
    diagonal = [A[i, i] for i in range(3)]
    shears = [sqrt(2) * A[i, j] for i, j in ((1, 2), (0, 2), (0, 1))]
    return diagonal + shears
def update():
    """Symbolically derive and print C-style assignments for a stress update:
    the right-hand side vector St[] and the 9x9 system matrix M[]."""
    W0, W1, W2 = symbols("W[0] W[1] W[2]")
    D0, D1, D2, D3, D4, D5 = symbols("D[0] D[1] D[2] D[3] D[4] D[5]")
    S0, S1, S2, S3, S4, S5 = symbols("Sn[0] Sn[1] Sn[2] Sn[3] Sn[4] Sn[5]")
    O0, O1, O2, O3, O4, O5 = symbols("So[0] So[1] So[2] So[3] So[4] So[5]")
    G00, G01, G02, G10, G11, G12, G20, G21, G22 = symbols("G00 G01 G02 G10 G11 G12 G20 G21 G22")
    # Assemble full 3x3 matrices from the Mandel/skew components.
    W = Matrix([[0,-W2,W1],[W2,0,-W0],[-W1,W0,0]])
    D = Matrix([[D0, D5/sqrt(2), D4/sqrt(2)],[D5/sqrt(2),D1,D3/sqrt(2)],[D4/sqrt(2),D3/sqrt(2),D2]])
    L = D + W
    S = Matrix([[S0, S5/sqrt(2), S4/sqrt(2)],[S5/sqrt(2),S1,S3/sqrt(2)],[S4/sqrt(2),S3/sqrt(2),S2]])
    O = Matrix([[O0, O5/sqrt(2), O4/sqrt(2)],[O5/sqrt(2),O1,O3/sqrt(2)],[O4/sqrt(2),O3/sqrt(2),O2]])
    G = Matrix([[G00,G01,G02],[G10,G11,G12],[G20,G21,G22]])
    # Right-hand side, reduced to its Mandel 6-vector.
    V1 = simplify(O + S*(L.T) + L*S - (trace(L) * S))
    V11 = simplify(sym(V1))
    print("RHS")
    for i in range(6):
        print(("\tSt[%i] = " + str(V11[i]) + ";") % i)
    print("")
    # System matrix as a fourth-order tensor, then flattened to 9x9.
    J = object_einsum('im,jn', eye(3), eye(3)) * (1 + trace(L)) - object_einsum('im,jn', eye(3), L) - object_einsum('im,jn', L, eye(3))
    JM = simplify(unroll_fourth(J))
    print("Matrix")
    for i in range(9):
        for j in range(9):
            print(("\tM[%i] = " + str(JM[i,j]) + ";") % (i*9+j))
    print("")
def tangent():
    """Print C-style assignments converting tangents between the Mandel /
    skew / WWS matrix notations and the flattened 9x9 representation."""
    # Symbolic 6x6 Mandel matrix -> tensor -> 9x9.
    sym_mat = Matrix([[symbols("M[%i]" % (i*6+j)) for j in range(6)] for i in range(6)])
    sym_ten = ms2ts(sym_mat)
    sym_full = unroll_fourth(sym_ten)
    # Symbolic 6x3 skew matrix -> tensor -> 9x9.
    skew_mat = Matrix([[symbols("M[%i]" % (i*3+j)) for j in range(3)] for i in range(6)])
    skew_ten = ws2ts(skew_mat)
    skew_full = unroll_fourth(skew_ten)
    # Symbolic 3x6 WWS matrix -> tensor -> 9x9.
    wws_mat = Matrix([[symbols("M[%i]" % (i*6+j)) for j in range(6)] for i in range(3)])
    wws_ten = wws2ts(wws_mat)
    wws_full = unroll_fourth(wws_ten)
    # Symbolic 9x9 -> tensor -> each matrix notation (inverse direction).
    A_mat = Matrix([[symbols("A[%i]" % (i*9+j)) for j in range(9)] for i in range(9)])
    A_ten = reroll_fourth(A_mat)
    A_mandel = ts2ms(A_ten)
    A_skew = ts2ws(A_ten)
    A_wws = ts2wws(A_ten)
    print("Mandel->9x9")
    for i in range(9):
        for j in range(9):
            print(("\tA[%i] = " + str(sym_full[i,j]) + ";") % (i*9+j))
    print("")
    print("9x9->Mandel")
    for i in range(6):
        for j in range(6):
            print(("\tM[%i] = " + str(A_mandel[i,j]) + ";") % (i*6+j))
    print("")
    print("Skew->9x9")
    for i in range(9):
        for j in range(9):
            print(("\tA[%i] = " + str(skew_full[i,j]) + ";") % (i*9+j))
    print("")
    print("9x9->Skew")
    for i in range(6):
        for j in range(3):
            print(("\tM[%i] = " + str(A_skew[i,j]) + ";") % (i*3+j))
    print("")
    print("WWS->9x9")
    for i in range(9):
        for j in range(9):
            print(("\tA[%i] = " + str(wws_full[i,j]) + ";") % (i*9+j))
    print("")
    print("9x9->WWS")
    for i in range(3):
        for j in range(6):
            print(("\tM[%i] = " + str(A_wws[i,j]) + ";") % (i*6+j))
    print("")
    # Extra: the 9x9 matrix of S -> S L^T + L S - tr(L) S in symbolic form.
    S0, S1, S2, S3, S4, S5 = symbols("S[0] S[1] S[2] S[3] S[4] S[5]")
    S = Matrix([[S0, S5/sqrt(2), S4/sqrt(2)],[S5/sqrt(2),S1,S3/sqrt(2)],[S4/sqrt(2),S3/sqrt(2),S2]])
    tensor = simplify(object_einsum('in,jm', S, eye(3)) + object_einsum('im,nj', eye(3), S) - object_einsum('ij,mn', S, eye(3)))
    matrix = simplify(unroll_fourth(tensor))
    print("Matrix")
    for i in range(9):
        for j in range(9):
            print(("\tM[%i] = " + str(matrix[i,j]) + ";") % (i*9+j))
    print("")
def parts_to_whole():
    """Print the 9x9 matrix recombining symmetric (D) and skew (W) tangent
    parts via piece_together_fourth, as C-style assignments."""
    sym_mat = Matrix([[symbols("D[%i]" % (i*6+j)) for j in range(6)] for i in range(6)])
    skew_mat = Matrix([[symbols("W[%i]" % (i*3+j)) for j in range(3)] for i in range(6)])
    tensor = piece_together_fourth(sym_mat, skew_mat)
    matrix = simplify(unroll_fourth(tensor))
    print("Matrix")
    for i in range(9):
        for j in range(9):
            print(("\tM[%i] = " + str(matrix[i,j]) + ";") % (i*9+j))
    print("")
if __name__ == "__main__":
    init_printing(use_unicode=True)
    # Only the tangent-conversion generator is enabled in this copy.
    tangent()
| true | true |
f7f58d3f28889bcfd896a6890a818feedfcd250c | 11,653 | py | Python | detectron2/evaluation/panoptic_evaluation.py | mvdelt/detectron2 | c320a6f0e6facb9c5d6dda8263c8e76834c04246 | [
"Apache-2.0"
] | null | null | null | detectron2/evaluation/panoptic_evaluation.py | mvdelt/detectron2 | c320a6f0e6facb9c5d6dda8263c8e76834c04246 | [
"Apache-2.0"
] | null | null | null | detectron2/evaluation/panoptic_evaluation.py | mvdelt/detectron2 | c320a6f0e6facb9c5d6dda8263c8e76834c04246 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) Facebook, Inc. and its affiliates.
import contextlib
import io
import itertools
import json
import logging
import numpy as np
import os
import tempfile
from collections import OrderedDict
from PIL import Image
from tabulate import tabulate
from detectron2.data import MetadataCatalog
from detectron2.utils import comm
from detectron2.utils.file_io import PathManager
from .evaluator import DatasetEvaluator
logger = logging.getLogger(__name__)
class COCOPanopticEvaluator(DatasetEvaluator):
"""
Evaluate Panoptic Quality metrics on COCO using PanopticAPI.
It saves panoptic segmentation prediction in `output_dir`
It contains a synchronize call and has to be called from all workers.
"""
    def __init__(self, dataset_name, output_dir):
        """
        Args:
            dataset_name (str): name of the dataset
            output_dir (str): output directory to save results for evaluation
        """
        self._metadata = MetadataCatalog.get(dataset_name)
        # Inverse maps: contiguous (training) category id -> original dataset
        # id, since PanopticAPI expects dataset ids in the prediction json.
        self._thing_contiguous_id_to_dataset_id = {
            v: k for k, v in self._metadata.thing_dataset_id_to_contiguous_id.items()
        }
        self._stuff_contiguous_id_to_dataset_id = {
            v: k for k, v in self._metadata.stuff_dataset_id_to_contiguous_id.items()
        }
        PathManager.mkdirs(output_dir)
        # NOTE(review): the output file was renamed from the upstream
        # "predictions.json" to "predictionsTestJ.json" — looks like a local
        # debugging change; confirm before relying on this filename.
        # self._predictions_json = os.path.join(output_dir, "predictions.json")
        self._predictions_json = os.path.join(output_dir, "predictionsTestJ.json")
def reset(self):
self._predictions = []
def _convert_category_id(self, segment_info):
isthing = segment_info.pop("isthing", None)
if isthing is None:
# the model produces panoptic category id directly. No more conversion needed
return segment_info
if isthing is True:
segment_info["category_id"] = self._thing_contiguous_id_to_dataset_id[
segment_info["category_id"]
]
else:
segment_info["category_id"] = self._stuff_contiguous_id_to_dataset_id[
segment_info["category_id"]
]
return segment_info
    def process(self, inputs, outputs):
        """Convert each predicted panoptic segmentation into PanopticAPI format.

        Reads output["panoptic_seg"] (id map + optional segments_info) — unlike
        the semantic/instance evaluators, which read "sem_seg"/"instances" —
        PNG-encodes the id map via panopticapi's id2rgb, and buffers one
        prediction dict per image for evaluate().
        """
        from panopticapi.utils import id2rgb

        for input, output in zip(inputs, outputs):
            panoptic_img, segments_info = output["panoptic_seg"]
            panoptic_img = panoptic_img.cpu().numpy()

            if segments_info is None:
                # If "segments_info" is None, we assume "panoptic_img" is a
                # H*W int32 image storing the panoptic_id in the format of
                # category_id * label_divisor + instance_id. We reserve -1 for
                # VOID label, and add 1 to panoptic_img since the official
                # evaluation script uses 0 for VOID label.
                label_divisor = self._metadata.label_divisor
                segments_info = []
                for panoptic_label in np.unique(panoptic_img):
                    if panoptic_label == -1:
                        # VOID region.
                        continue
                    pred_class = panoptic_label // label_divisor
                    isthing = (
                        pred_class in self._metadata.thing_dataset_id_to_contiguous_id.values()
                    )
                    segments_info.append(
                        {
                            # +1 keeps ids in sync with the shifted panoptic_img
                            # below. TODO(review, translated from the original
                            # Korean notes): the author questioned whether the
                            # +1 is needed for datasets where background is
                            # predicted as a regular foreground class — confirm.
                            "id": int(panoptic_label) + 1,
                            "category_id": int(pred_class),
                            # Consumed (popped) later by _convert_category_id.
                            "isthing": bool(isthing),
                        }
                    )
                # Official evaluation script uses 0 for VOID label.
                panoptic_img += 1

            file_name = os.path.basename(input["file_name"])
            file_name_png = os.path.splitext(file_name)[0] + ".png"
            with io.BytesIO() as out:
                Image.fromarray(id2rgb(panoptic_img)).save(out, format="PNG")
                segments_info = [self._convert_category_id(x) for x in segments_info]
                self._predictions.append(
                    {
                        "image_id": input["image_id"],
                        "file_name": file_name_png,
                        # Raw PNG bytes; popped and written to disk in
                        # evaluate() — bytes are not JSON-serializable.
                        "png_string": out.getvalue(),
                        "segments_info": segments_info,
                    }
                )
def evaluate(self):
    """Gather predictions from all workers and compute panoptic-quality metrics.

    Writes every prediction PNG to a temporary directory, builds a
    predictions JSON by replacing the ground-truth file's "annotations",
    then runs panopticapi's ``pq_compute``.

    Returns:
        OrderedDict({"panoptic_seg": {...}}) with PQ/SQ/RQ (overall, Things,
        Stuff) as percentages, on the main process; None on other workers.
    """
    comm.synchronize()

    self._predictions = comm.gather(self._predictions)
    self._predictions = list(itertools.chain(*self._predictions))
    if not comm.is_main_process():
        return

    # PanopticApi requires local files.
    gt_json = PathManager.get_local_path(self._metadata.panoptic_json)  # COCO-format annotation JSON
    gt_folder = PathManager.get_local_path(self._metadata.panoptic_root)  # dir of COCO-format annotation PNGs

    with tempfile.TemporaryDirectory(prefix="panoptic_eval") as pred_dir:
        logger.info("Writing all panoptic predictions to {} ...".format(pred_dir))
        for p in self._predictions:
            # pop() both writes the PNG bytes and removes the non-JSON-serializable
            # field so the remaining dict can be json.dumps()'d below.
            with open(os.path.join(pred_dir, p["file_name"]), "wb") as f:
                f.write(p.pop("png_string"))

        with open(gt_json, "r") as f:
            json_data = json.load(f)
        json_data["annotations"] = self._predictions
        with PathManager.open(self._predictions_json, "w") as f:
            f.write(json.dumps(json_data))

        from panopticapi.evaluation import pq_compute

        # Capture pq_compute's verbose stdout, then echo it once explicitly
        # (kept from the original; consider routing through `logger` instead).
        ioJ = io.StringIO()
        with contextlib.redirect_stdout(ioJ):
            pq_res = pq_compute(
                gt_json,  # ground-truth annotation JSON path
                PathManager.get_local_path(self._predictions_json),  # predictions JSON path
                gt_folder=gt_folder,  # ground-truth PNG directory
                pred_folder=pred_dir,  # predicted PNG directory (temporary)
            )
        print(f'j) got stdout: \n{ioJ.getvalue()}')

    res = {}
    res["PQ"] = 100 * pq_res["All"]["pq"]
    res["SQ"] = 100 * pq_res["All"]["sq"]
    res["RQ"] = 100 * pq_res["All"]["rq"]
    res["PQ_th"] = 100 * pq_res["Things"]["pq"]
    res["SQ_th"] = 100 * pq_res["Things"]["sq"]
    res["RQ_th"] = 100 * pq_res["Things"]["rq"]
    res["PQ_st"] = 100 * pq_res["Stuff"]["pq"]
    res["SQ_st"] = 100 * pq_res["Stuff"]["sq"]
    res["RQ_st"] = 100 * pq_res["Stuff"]["rq"]

    results = OrderedDict({"panoptic_seg": res})
    _print_panoptic_results(pq_res)

    return results
def _print_panoptic_results(pq_res):
    """Log a markdown-pipe table of PQ/SQ/RQ (as percentages) and the
    category count for the "All", "Things" and "Stuff" splits of a
    panopticapi ``pq_compute`` result dict.
    """
    headers = ["", "PQ", "SQ", "RQ", "#categories"]
    data = []
    for name in ["All", "Things", "Stuff"]:
        row = [name] + [pq_res[name][k] * 100 for k in ["pq", "sq", "rq"]] + [pq_res[name]["n"]]
        data.append(row)
    table = tabulate(
        data, headers=headers, tablefmt="pipe", floatfmt=".3f", stralign="center", numalign="center"
    )
    logger.info("Panoptic Evaluation Results:\n" + table)
if __name__ == "__main__":
    # Standalone mode: evaluate existing prediction files against ground truth
    # without running a model.
    from detectron2.utils.logger import setup_logger

    logger = setup_logger()
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument("--gt-json")
    parser.add_argument("--gt-dir")
    parser.add_argument("--pred-json")
    parser.add_argument("--pred-dir")
    args = parser.parse_args()

    from panopticapi.evaluation import pq_compute

    # Suppress pq_compute's raw stdout; results are logged as a table below.
    with contextlib.redirect_stdout(io.StringIO()):
        pq_res = pq_compute(
            args.gt_json, args.pred_json, gt_folder=args.gt_dir, pred_folder=args.pred_dir
        )
    _print_panoptic_results(pq_res)
| 47.563265 | 186 | 0.58766 |
import contextlib
import io
import itertools
import json
import logging
import numpy as np
import os
import tempfile
from collections import OrderedDict
from PIL import Image
from tabulate import tabulate
from detectron2.data import MetadataCatalog
from detectron2.utils import comm
from detectron2.utils.file_io import PathManager
from .evaluator import DatasetEvaluator
logger = logging.getLogger(__name__)
class COCOPanopticEvaluator(DatasetEvaluator):
def __init__(self, dataset_name, output_dir):
self._metadata = MetadataCatalog.get(dataset_name)
self._thing_contiguous_id_to_dataset_id = {
v: k for k, v in self._metadata.thing_dataset_id_to_contiguous_id.items()
}
self._stuff_contiguous_id_to_dataset_id = {
v: k for k, v in self._metadata.stuff_dataset_id_to_contiguous_id.items()
}
PathManager.mkdirs(output_dir)
self._predictions_json = os.path.join(output_dir, "predictionsTestJ.json")
def reset(self):
self._predictions = []
def _convert_category_id(self, segment_info):
isthing = segment_info.pop("isthing", None)
if isthing is None:
return segment_info
if isthing is True:
segment_info["category_id"] = self._thing_contiguous_id_to_dataset_id[
segment_info["category_id"]
]
else:
segment_info["category_id"] = self._stuff_contiguous_id_to_dataset_id[
segment_info["category_id"]
]
return segment_info
def process(self, inputs, outputs):
i.utils import id2rgb
for input, output in zip(inputs, outputs):
panoptic_img, segments_info = output["panoptic_seg"]
panoptic_img = panoptic_img.cpu().numpy()
if segments_info is None:
label_divisor = self._metadata.label_divisor
segments_info = []
for panoptic_label in np.unique(panoptic_img):
if panoptic_label == -1:
continue
pred_class = panoptic_label // label_divisor
isthing = (
pred_class in self._metadata.thing_dataset_id_to_contiguous_id.values()
)
segments_info.append(
{
"id": int(panoptic_label) + 1, "isthing": bool(isthing),
}
)
file_name = os.path.basename(input["file_name"])
file_name_png = os.path.splitext(file_name)[0] + ".png"
with io.BytesIO() as out:
Image.fromarray(id2rgb(panoptic_img)).save(out, format="PNG")
segments_info = [self._convert_category_id(x) for x in segments_info]
end(
{
"image_id": input["image_id"],
"file_name": file_name_png,
"png_string": out.getvalue(),
"segments_info": segments_info,
}
)
def evaluate(self):
comm.synchronize()
self._predictions = comm.gather(self._predictions)
self._predictions = list(itertools.chain(*self._predictions))
if not comm.is_main_process():
return
gt_json = PathManager.get_local_path(self._metadata.panoptic_json)
gt_folder = PathManager.get_local_path(self._metadata.panoptic_root)
with tempfile.TemporaryDirectory(prefix="panoptic_eval") as pred_dir:
logger.info("Writing all panoptic predictions to {} ...".format(pred_dir))
for p in self._predictions:
with open(os.path.join(pred_dir, p["file_name"]), "wb") as f:
f.write(p.pop("png_string"))
with open(gt_json, "r") as f:
json_data = json.load(f)
json_data["annotations"] = self._predictions
with PathManager.open(self._predictions_json, "w") as f:
f.write(json.dumps(json_data))
from panopticapi.evaluation import pq_compute
ioJ = io.StringIO()
with contextlib.redirect_stdout(ioJ):
pq_res = pq_compute(
gt_json,
PathManager.get_local_path(self._predictions_json),
gt_folder=gt_folder,
pred_folder=pred_dir,
)
print(f'j) got stdout: \n{ioJ.getvalue()}')
res = {}
res["PQ"] = 100 * pq_res["All"]["pq"]
res["SQ"] = 100 * pq_res["All"]["sq"]
res["RQ"] = 100 * pq_res["All"]["rq"]
res["PQ_th"] = 100 * pq_res["Things"]["pq"]
res["SQ_th"] = 100 * pq_res["Things"]["sq"]
res["RQ_th"] = 100 * pq_res["Things"]["rq"]
res["PQ_st"] = 100 * pq_res["Stuff"]["pq"]
res["SQ_st"] = 100 * pq_res["Stuff"]["sq"]
res["RQ_st"] = 100 * pq_res["Stuff"]["rq"]
results = OrderedDict({"panoptic_seg": res})
_print_panoptic_results(pq_res)
return results
def _print_panoptic_results(pq_res):
headers = ["", "PQ", "SQ", "RQ", "#categories"]
data = []
for name in ["All", "Things", "Stuff"]:
row = [name] + [pq_res[name][k] * 100 for k in ["pq", "sq", "rq"]] + [pq_res[name]["n"]]
data.append(row)
table = tabulate(
data, headers=headers, tablefmt="pipe", floatfmt=".3f", stralign="center", numalign="center"
)
logger.info("Panoptic Evaluation Results:\n" + table)
if __name__ == "__main__":
from detectron2.utils.logger import setup_logger
logger = setup_logger()
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--gt-json")
parser.add_argument("--gt-dir")
parser.add_argument("--pred-json")
parser.add_argument("--pred-dir")
args = parser.parse_args()
from panopticapi.evaluation import pq_compute
with contextlib.redirect_stdout(io.StringIO()):
pq_res = pq_compute(
args.gt_json, args.pred_json, gt_folder=args.gt_dir, pred_folder=args.pred_dir
)
_print_panoptic_results(pq_res)
| true | true |
f7f58e1019d4aea7b585a250b57f6708dbd91c3f | 4,464 | py | Python | ethicophysics/sim.py | advancedresearch/ethicophysics | 52806b53d6d3ee92e1bd2a8c00f7728cebc9e684 | [
"Apache-2.0"
] | 7 | 2018-04-26T17:10:58.000Z | 2021-06-19T01:56:36.000Z | ethicophysics/sim.py | advancedresearch/ethicophysics | 52806b53d6d3ee92e1bd2a8c00f7728cebc9e684 | [
"Apache-2.0"
] | 46 | 2018-04-26T16:25:59.000Z | 2018-12-15T10:13:05.000Z | ethicophysics/sim.py | advancedresearch/ethicophysics | 52806b53d6d3ee92e1bd2a8c00f7728cebc9e684 | [
"Apache-2.0"
] | 1 | 2018-05-13T17:37:48.000Z | 2018-05-13T17:37:48.000Z | import numpy as np
# Each dim is a (negative pole, positive pole) axis of an actor's soul vector.
dims = [('evil', 'good'), ('poor', 'rich'), ('unskilled', 'skilled'), ('tool', 'agent')]
ndim = len(dims)
# drives that the agents in the simulation are capable of having, as
# well as the effects that each drive has on the agent's soul
drives = [
('greed for possessions', ['evil', 'rich', 'skilled', 'agent']),
('hunger for power', ['evil', 'rich', 'skilled', 'agent']),
('desire to be of service', ['good', 'rich', 'skilled', 'agent']),
('desire to be lazy', ['poor', 'skilled']), # minimum action principle
('desire to be virtuous', ['good', 'agent']),
('desire to be employable', ['rich', 'tool']),
]
ndrives = len(drives)
# Total population: 2 named actors (Arthur, Mordred) + 100 randomly
# initialized "Cattle" (see the construction at the bottom of the file).
nactors = 102
class Actor:
    """An actor is an object that exists in reality. Note that (literal)
    tools can be actors, since a tool always changes the user of the
    tool, or at least, does if the user of the tool is not a
    superintelligence capable of resisting such nonsense.
    """

    def __init__(self, mutable=True):
        # Whether this actor's soul can change over the simulation.
        self.mutable = mutable
        # Random uniform is less realistic than random normal, but be a
        # little paranoid and use a distribution with heavy tails.
        # Soul components are in [-1, 1), one per axis in `dims`.
        self.soul = 2 * np.random.random(ndim) - 1
        # Random initialization of the mind: one weight per (drive, soul-dim)
        # pair. NOTE(review): the original line was truncated in the source
        # ("np.random.random((") — the (ndrives, ndim) shape is a
        # reconstruction; confirm the intended shape.
        self.mind = np.random.random((ndrives, ndim))

    def get_action(self, state):
        """Return a random scalar action in [-1, 1); `state` is ignored."""
        return 2 * np.random.random() - 1

    def suffer_effects_of_drives(self):
        """Apply each drive's soul effects to this actor.

        NOTE(review): the loop body was missing from the original source;
        this placeholder preserves the (no-op) interface until the intended
        effect application is restored.
        """
        for drive in drives:
            pass
class State:
    """A state is a perfect representation of all of reality, since that
    is what is potentially knowable. Should really extend this to the
    case of imperfect discernment (some agents believe stupid shit
    because they are akratic), since that is more relevant for
    meatspace. But let's prove that superintelligences can coexist
    with one another before we get around to proving that they can
    coexist with humanity. Always gotta walk before we crawl.
    """

    def __init__(self, actors):
        # The complete list of Actor instances present in this universe.
        self.actors = actors
# let's specify the two scariest superintelligences that we can, just
# to make things interesting. The whole point of this exercise is that
# Arthur will run circles around Mordred all fucking day long, or at
# least until Mordred unfucks his mind. But we assume here for the
# sake of argument that Mordred is an immutable evil
# superintelligence, since that's the case that worries people. If
# Mordred can be tamed, is he really Mordred? Also note that Arthur
# dying is not a loss condition, because the whole point of being good
# is that you can be replaced by your friends without any loss of
# honor. If Arthur dies on the first move, that is less
# good... fundamentally, Arthur relies on a sort of invisibility, that
# his true nature is unknown to the unwise. Since we are looking at
# the case of perfect discernment right now, let us just assume that
# Arthur cannot be killed until some number of rounds have passed,
# just because something like that is necessary for this naive
# model. Ultimately, once the real ethicophysics exists, it should be
# sufficient that Arthur *existed at some point in the past*. This is
# the lesson of Dumbledore in Harry Potter - he dies purely as a
# feint, and Harry is able to recreate all of his wondrous gifts
# simply by trying to live up to his example. I.e., if the shit is
# hooked up correctly, the cattle should be able to self-organize to
# defeat Mordred themselves, using Arthur's known-good
# reputation/honor as a coordination mechanism. Note that the deathly
# hallows are a cryptogram for Christianity and that it is
# Dumbledore's Christianity that drives his goodness. Christianity
# isn't the only true religion but it's a very good one for winning
# mind wars. And fundamentally every war is a mind war.
Arthur = Actor(mutable=False)
# soul axes are (evil/good, poor/rich, unskilled/skilled, tool/agent) per `dims`;
# all +1 => maximally good, rich, skilled, agentive.
Arthur.soul = np.array([1, 1, 1, 1])
Mordred = Actor(mutable=False)
# Same as Arthur except the first axis is -1 => maximally evil.
Mordred.soul = np.array([-1, 1, 1, 1])
# and here we specify everyone else, initialized randomly. sorry not
# sorry for calling y'all cattle
Cattle = [Actor() for i in range(nactors - 2)]
# here we specify the universe as it truly is. Will need to have one
# subjective universe per actor when we extend to the case of
# imperfect discernment
Reality = State([Arthur, Mordred] + Cattle)
| 45.090909 | 88 | 0.708333 | import numpy as np
dims = [('evil', 'good'), ('poor', 'rich'), ('unskilled', 'skilled'), ('tool', 'agent')]
ndim = len(dims)
drives = [
('greed for possessions', ['evil', 'rich', 'skilled', 'agent']),
('hunger for power', ['evil', 'rich', 'skilled', 'agent']),
('desire to be of service', ['good', 'rich', 'skilled', 'agent']),
('desire to be lazy', ['poor', 'skilled']), # minimum action principle
('desire to be virtuous', ['good', 'agent']),
('desire to be employable', ['rich', 'tool']),
]
ndrives = len(drives)
nactors = 102
class Actor:
"""An actor is an object that exists in reality. Note that (literal)
tools can be actors, since a tool always changes the user of the
tool, or at least, does if the user of the tool is not a
superintelligence capable of resisting such nonsense.
"""
def __init__(self, mutable=True):
self.mutable = mutable
# random uniform is less realistic than random normal, but
# let's be a little paranoid and use a distribution with heavy
self.soul = 2 * np.random.random(ndim) - 1
self.mind = np.random.random((
def get_action(self, state):
return 2 * np.random.random() - 1
def suffer_effects_of_drives(self):
for drive in drives:
class State:
"""A state is a perfect representation of all of reality, since that
is what is potentially knowable. Should really extend this to the
case of imperfect discernment (some agents believe stupid shit
because they are akratic), since that is more relevant for
meatspace. But let's prove that superintelligences can coexist
with one another before we get around to proving that they can
coexist with humanity. Always gotta walk before we crawl.
"""
def __init__(self, actors):
self.actors = actors
# to make things interesting. The whole point of this exercise is that
# Arthur will run circles around Mordred all fucking day long, or at
# least until Mordred unfucks his mind. But we assume here for the
# sake of argument that Mordred is an immutable evil
# superintelligence, since that's the case that worries people. If
# reputation/honor as a coordination mechanism. Note that the deathly
# hallows are a cryptogram for Christianity and that it is
# Dumbledore's Christianity that drives his goodness. Christianity
Arthur = Actor(mutable=False)
Arthur.soul = np.array([1, 1, 1, 1])
Mordred = Actor(mutable=False)
Mordred.soul = np.array([-1, 1, 1, 1])
Cattle = [Actor() for i in range(nactors - 2)]
# here we specify the universe as it truly is. Will need to have one
# subjective universe per actor when we extend to the case of
# imperfect discernment
Reality = State([Arthur, Mordred] + Cattle)
| false | true |
f7f5906a9982cc23d6cf123e100050c5490b92dd | 3,561 | py | Python | synapse/streams/config.py | Cadair/synapse | 466866a1d9dd1fcf82348a36c0532cb0c6614767 | [
"Apache-2.0"
] | 1 | 2019-09-14T03:24:03.000Z | 2019-09-14T03:24:03.000Z | synapse/streams/config.py | Cadair/synapse | 466866a1d9dd1fcf82348a36c0532cb0c6614767 | [
"Apache-2.0"
] | 4 | 2020-03-04T23:47:05.000Z | 2021-12-09T21:41:44.000Z | synapse/streams/config.py | Cadair/synapse | 466866a1d9dd1fcf82348a36c0532cb0c6614767 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from synapse.api.errors import SynapseError
from synapse.http.servlet import parse_integer, parse_string
from synapse.types import StreamToken
logger = logging.getLogger(__name__)

# Hard cap applied to any client-supplied pagination limit.
MAX_LIMIT = 1000


class SourcePaginationConfig(object):
    """A configuration object which stores pagination parameters for a
    specific event source."""

    def __init__(self, from_key=None, to_key=None, direction="f", limit=None):
        self.from_key = from_key
        self.to_key = to_key
        # Anything other than "f" (forwards) is coerced to "b" (backwards).
        self.direction = "f" if direction == "f" else "b"
        # Clamp client-supplied limits to MAX_LIMIT; None means "no limit given".
        self.limit = min(int(limit), MAX_LIMIT) if limit is not None else None

    def __repr__(self):
        return ("StreamConfig(from_key=%r, to_key=%r, direction=%r, limit=%r)") % (
            self.from_key,
            self.to_key,
            self.direction,
            self.limit,
        )
class PaginationConfig(object):
    """A configuration object which stores pagination parameters."""

    def __init__(self, from_token=None, to_token=None, direction="f", limit=None):
        self.from_token = from_token
        self.to_token = to_token
        # Anything other than "f" (forwards) is coerced to "b" (backwards).
        self.direction = "f" if direction == "f" else "b"
        # Clamp client-supplied limits to MAX_LIMIT; None means "no limit given".
        self.limit = min(int(limit), MAX_LIMIT) if limit is not None else None

    @classmethod
    def from_request(cls, request, raise_invalid_params=True, default_limit=None):
        """Build a PaginationConfig from a request's query parameters
        ("dir", "from", "to", "limit").

        Raises:
            SynapseError: (400) if a token or the limit is invalid.
        """
        direction = parse_string(request, "dir", default="f", allowed_values=["f", "b"])

        from_tok = parse_string(request, "from")
        to_tok = parse_string(request, "to")

        try:
            if from_tok == "END":
                from_tok = None  # For backwards compat.
            elif from_tok:
                from_tok = StreamToken.from_string(from_tok)
        except Exception:
            raise SynapseError(400, "'from' parameter is invalid")

        try:
            if to_tok:
                to_tok = StreamToken.from_string(to_tok)
        except Exception:
            raise SynapseError(400, "'to' parameter is invalid")

        limit = parse_integer(request, "limit", default=default_limit)

        if limit and limit < 0:
            raise SynapseError(400, "Limit must be 0 or above")

        try:
            return PaginationConfig(from_tok, to_tok, direction, limit)
        except Exception:
            logger.exception("Failed to create pagination config")
            raise SynapseError(400, "Invalid request.")

    def __repr__(self):
        return (
            "PaginationConfig(from_tok=%r, to_tok=%r," " direction=%r, limit=%r)"
        ) % (self.from_token, self.to_token, self.direction, self.limit)

    def get_source_config(self, source_name):
        """Project this config onto one event source by extracting the
        "<source_name>_key" attribute from the from/to stream tokens."""
        keyname = "%s_key" % source_name
        return SourcePaginationConfig(
            from_key=getattr(self.from_token, keyname),
            to_key=getattr(self.to_token, keyname) if self.to_token else None,
            direction=self.direction,
            limit=self.limit,
        )
| 34.240385 | 88 | 0.650941 |
import logging
from synapse.api.errors import SynapseError
from synapse.http.servlet import parse_integer, parse_string
from synapse.types import StreamToken
logger = logging.getLogger(__name__)
MAX_LIMIT = 1000
class SourcePaginationConfig(object):
def __init__(self, from_key=None, to_key=None, direction="f", limit=None):
self.from_key = from_key
self.to_key = to_key
self.direction = "f" if direction == "f" else "b"
self.limit = min(int(limit), MAX_LIMIT) if limit is not None else None
def __repr__(self):
return ("StreamConfig(from_key=%r, to_key=%r, direction=%r, limit=%r)") % (
self.from_key,
self.to_key,
self.direction,
self.limit,
)
class PaginationConfig(object):
def __init__(self, from_token=None, to_token=None, direction="f", limit=None):
self.from_token = from_token
self.to_token = to_token
self.direction = "f" if direction == "f" else "b"
self.limit = min(int(limit), MAX_LIMIT) if limit is not None else None
@classmethod
def from_request(cls, request, raise_invalid_params=True, default_limit=None):
direction = parse_string(request, "dir", default="f", allowed_values=["f", "b"])
from_tok = parse_string(request, "from")
to_tok = parse_string(request, "to")
try:
if from_tok == "END":
from_tok = None
elif from_tok:
from_tok = StreamToken.from_string(from_tok)
except Exception:
raise SynapseError(400, "'from' paramater is invalid")
try:
if to_tok:
to_tok = StreamToken.from_string(to_tok)
except Exception:
raise SynapseError(400, "'to' paramater is invalid")
limit = parse_integer(request, "limit", default=default_limit)
if limit and limit < 0:
raise SynapseError(400, "Limit must be 0 or above")
try:
return PaginationConfig(from_tok, to_tok, direction, limit)
except Exception:
logger.exception("Failed to create pagination config")
raise SynapseError(400, "Invalid request.")
def __repr__(self):
return (
"PaginationConfig(from_tok=%r, to_tok=%r," " direction=%r, limit=%r)"
) % (self.from_token, self.to_token, self.direction, self.limit)
def get_source_config(self, source_name):
keyname = "%s_key" % source_name
return SourcePaginationConfig(
from_key=getattr(self.from_token, keyname),
to_key=getattr(self.to_token, keyname) if self.to_token else None,
direction=self.direction,
limit=self.limit,
)
| true | true |
f7f590a990f276e6b4e36e1e66cbe1ece8c4b733 | 3,721 | py | Python | di/utils/inspection/abstract.py | dlski/python-di | 04dcdf58f3cf820e2d2ba5086e4e89822ae1f409 | [
"MIT"
] | 8 | 2021-02-05T16:17:31.000Z | 2022-03-03T00:01:33.000Z | di/utils/inspection/abstract.py | dlski/python-di | 04dcdf58f3cf820e2d2ba5086e4e89822ae1f409 | [
"MIT"
] | null | null | null | di/utils/inspection/abstract.py | dlski/python-di | 04dcdf58f3cf820e2d2ba5086e4e89822ae1f409 | [
"MIT"
] | null | null | null | import dis
import inspect
import re
from types import DynamicClassAttribute
from typing import Callable, Iterable, Set, Type
from di.utils.inspection.typing import BUILTIN_TYPES
class AbstractInspector:
    """Heuristically detects "abstract" objects: classes that are formally
    abstract (abc), plus functions/classes whose bodies merely raise
    NotImplementedError (duck-typed abstractness)."""

    @classmethod
    def is_abstract(cls, obj, duck_typing: bool = True) -> bool:
        """Return True if `obj` (a function or class) looks abstract.

        Non-function, non-class objects are never considered abstract.
        """
        if inspect.isfunction(obj):
            if duck_typing:
                return cls.is_abstract_function(obj)
            else:
                return False
        elif inspect.isclass(obj):
            return cls.is_abstract_class(obj, duck_typing=duck_typing)
        else:
            return False

    @classmethod
    def is_abstract_function(cls, obj: Callable):
        """Return True if the function body just raises NotImplementedError.

        Raises:
            TypeError: if `obj` is not a function.
        """
        if not inspect.isfunction(obj):
            raise TypeError(f"{obj!r} is not a function or method")
        return cls.is_abstract_routine(obj)

    @classmethod
    def is_abstract_class(cls, obj: Type, duck_typing: bool = True):
        """Return True if `obj` is formally abstract, or (with duck typing)
        if any of its own/inherited non-builtin methods is abstract.

        Raises:
            TypeError: if `obj` is not a class.
        """
        if not inspect.isclass(obj):
            raise TypeError(f"{obj!r} is not a class")
        if inspect.isabstract(obj):
            return True
        elif not duck_typing:
            return False
        for name, fn in cls._all_class_fns(obj):
            if cls.is_abstract_routine(fn):
                return True
        return False

    @classmethod
    def is_abstract_routine(cls, obj: Callable):
        """Return True if the routine raises NotImplementedError.

        Tries a source-text check first; falls back to bytecode inspection
        when source is unavailable.
        """
        if not inspect.isroutine(obj):
            raise TypeError(f"{obj!r} is not a routine")
        result = cls._is_abstract_code_check(obj)
        if result is not None:
            return result
        return cls._is_abstract_hard_check(obj)

    @classmethod
    def _is_abstract_code_check(cls, obj):
        """Tri-state source check: True/False when source is available and
        (doesn't) contain a `raise NotImplemented...` statement; None when
        source cannot be retrieved (caller falls back to bytecode check)."""
        try:
            src = inspect.getsource(obj)
            return (
                re.search(r"^\s*raise\s+NotImplemented", src, flags=re.MULTILINE)
                is not None
            )
        except (OSError, TypeError):
            return None

    @classmethod
    def _is_abstract_hard_check(cls, obj):
        """Bytecode fallback: True if the routine references NotImplemented(Error)
        in its builtins and contains a single-argument RAISE instruction."""
        try:
            closure_vars = inspect.getclosurevars(obj)
            for value in closure_vars.builtins.values():
                if value is NotImplementedError or value is NotImplemented:
                    break
            else:
                return False
        except AttributeError:
            # Cannot determine - probably native or builtin code.
            return False
        try:
            for instruction in dis.get_instructions(obj):
                if instruction.opname == "RAISE_VARARGS" and instruction.arg == 1:
                    return True
        except TypeError:
            pass
        return False

    @classmethod
    def _all_class_fns(cls, obj: Type):
        """Yield (name, function) for plain functions, class/static methods,
        and property accessors of `obj` and its non-builtin bases."""
        # NOTE: loop variable renamed from `obj` (original shadowed the class
        # parameter), with no behavior change.
        for name, member in cls._all_class_members(obj):
            if inspect.isfunction(member):
                yield name, member
            elif isinstance(member, (classmethod, staticmethod)):
                yield name, member.__func__
            elif isinstance(member, (property, DynamicClassAttribute)):
                if member.fget:
                    yield f"{name}.fget", member.fget
                if member.fset:
                    yield f"{name}.fset", member.fset
                if member.fdel:
                    yield f"{name}.fdel", member.fdel

    @classmethod
    def _all_class_members(cls, obj: Type):
        """Yield (name, member) pairs in MRO order, most-derived definition
        winning (later duplicates are skipped)."""
        visited: Set[str] = set()
        for clazz in cls._all_concrete_bases(obj):
            for name, member in clazz.__dict__.items():
                if name in visited:
                    continue
                visited.add(name)
                yield name, member

    @classmethod
    def _all_concrete_bases(cls, obj: Type) -> Iterable[Type]:
        """Yield the MRO of `obj` excluding builtin types (e.g. `object`)."""
        for clazz in obj.mro():
            if clazz in BUILTIN_TYPES:
                continue
            yield clazz
| 32.929204 | 82 | 0.572964 | import dis
import inspect
import re
from types import DynamicClassAttribute
from typing import Callable, Iterable, Set, Type
from di.utils.inspection.typing import BUILTIN_TYPES
class AbstractInspector:
@classmethod
def is_abstract(cls, obj, duck_typing: bool = True) -> bool:
if inspect.isfunction(obj):
if duck_typing:
return cls.is_abstract_function(obj)
else:
return False
elif inspect.isclass(obj):
return cls.is_abstract_class(obj, duck_typing=duck_typing)
else:
return False
@classmethod
def is_abstract_function(cls, obj: Callable):
if not inspect.isfunction(obj):
raise TypeError(f"{obj!r} is not a function or method")
return cls.is_abstract_routine(obj)
@classmethod
def is_abstract_class(cls, obj: Type, duck_typing: bool = True):
if not inspect.isclass(obj):
raise TypeError(f"{obj!r} is not a class")
if inspect.isabstract(obj):
return True
elif not duck_typing:
return False
for name, fn in cls._all_class_fns(obj):
if cls.is_abstract_routine(fn):
return True
return False
@classmethod
def is_abstract_routine(cls, obj: Callable):
if not inspect.isroutine(obj):
raise TypeError(f"{obj!r} is not a routine")
result = cls._is_abstract_code_check(obj)
if result is not None:
return result
return cls._is_abstract_hard_check(obj)
@classmethod
def _is_abstract_code_check(cls, obj):
try:
src = inspect.getsource(obj)
return (
re.search(r"^\s*raise\s+NotImplemented", src, flags=re.MULTILINE)
is not None
)
except (OSError, TypeError):
pass
@classmethod
def _is_abstract_hard_check(cls, obj):
try:
closure_vars = inspect.getclosurevars(obj)
for value in closure_vars.builtins.values():
if value is NotImplementedError or value is NotImplemented:
break
else:
return False
except AttributeError:
return False
try:
for instruction in dis.get_instructions(obj):
if instruction.opname == "RAISE_VARARGS" and instruction.arg == 1:
return True
except TypeError:
pass
return False
@classmethod
def _all_class_fns(cls, obj: Type):
for name, obj in cls._all_class_members(obj):
if inspect.isfunction(obj):
yield name, obj
elif isinstance(obj, (classmethod, staticmethod)):
yield name, obj.__func__
elif isinstance(obj, (property, DynamicClassAttribute)):
if obj.fget:
yield f"{name}.fget", obj.fget
if obj.fset:
yield f"{name}.fset", obj.fset
if obj.fdel:
yield f"{name}.fdel", obj.fdel
@classmethod
def _all_class_members(cls, obj: Type):
visited: Set[str] = set()
for clazz in cls._all_concrete_bases(obj):
for name, obj in clazz.__dict__.items():
if name in visited:
continue
visited.add(name)
yield name, obj
@classmethod
def _all_concrete_bases(cls, obj: Type) -> Iterable[Type]:
for clazz in obj.mro():
if clazz in BUILTIN_TYPES:
continue
yield clazz
| true | true |
f7f590bdf8530fbf1636509f70f94d23f501a76e | 4,323 | py | Python | src/onelogin/saml2/xml_utils.py | colinjeanne/python3-saml | c45c0f8ae029254642db036cf06058ec9e302098 | [
"MIT"
] | null | null | null | src/onelogin/saml2/xml_utils.py | colinjeanne/python3-saml | c45c0f8ae029254642db036cf06058ec9e302098 | [
"MIT"
] | 1 | 2021-06-10T23:08:35.000Z | 2021-06-10T23:08:35.000Z | src/onelogin/saml2/xml_utils.py | colinjeanne/python3-saml | c45c0f8ae029254642db036cf06058ec9e302098 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
""" OneLogin_Saml2_XML class
Copyright (c) 2015, OneLogin, Inc.
All rights reserved.
Auxiliary class of OneLogin's Python Toolkit.
"""
from os.path import join, dirname
from lxml import etree
from onelogin.saml2 import compat
from onelogin.saml2.constants import OneLogin_Saml2_Constants
for prefix, url in OneLogin_Saml2_Constants.NSMAP.items():
etree.register_namespace(prefix, url)
class OneLogin_Saml2_XML(object):
    """Auxiliary XML helpers for OneLogin's SAML toolkit: parsing,
    serialisation, schema validation and XPath querying via lxml.etree.
    """

    _element_class = type(etree.Element('root'))
    _parse_etree = staticmethod(etree.fromstring)
    _schema_class = etree.XMLSchema
    _text_class = compat.text_types
    _unparse_etree = staticmethod(etree.tostring)

    dump = staticmethod(etree.dump)
    make_root = staticmethod(etree.Element)
    make_child = staticmethod(etree.SubElement)
    cleanup_namespaces = staticmethod(etree.cleanup_namespaces)

    @staticmethod
    def to_string(xml, **kwargs):
        """
        Serialize an element to an encoded string representation of its XML tree.

        :param xml: The root node
        :type xml: str|bytes|xml.dom.minidom.Document|etree.Element
        :returns: string representation of xml
        :rtype: string
        """
        if isinstance(xml, OneLogin_Saml2_XML._text_class):
            return xml
        if isinstance(xml, OneLogin_Saml2_XML._element_class):
            OneLogin_Saml2_XML.cleanup_namespaces(xml)
            return OneLogin_Saml2_XML._unparse_etree(xml, **kwargs)
        raise ValueError("unsupported type %r" % type(xml))

    @staticmethod
    def to_etree(xml):
        """
        Parses an XML document or fragment from a string.

        :param xml: the string to parse
        :type xml: str|bytes|xml.dom.minidom.Document|etree.Element
        :returns: the root node
        :rtype: OneLogin_Saml2_XML._element_class
        """
        if isinstance(xml, OneLogin_Saml2_XML._element_class):
            return xml
        if isinstance(xml, OneLogin_Saml2_XML._text_class):
            return OneLogin_Saml2_XML._parse_etree(xml)
        raise ValueError('unsupported type %r' % type(xml))

    @staticmethod
    def validate_xml(xml, schema, debug=False):
        """
        Validates a xml against a schema.

        :param xml: The xml that will be validated
        :type xml: str|bytes|xml.dom.minidom.Document|etree.Element
        :param schema: The schema file name (under this package's schemas/ dir)
        :type schema: string
        :param debug: If debug is active, the parse errors will be shown
        :type debug: bool
        :returns: 'unloaded_xml'/'invalid_xml' error code, or the parsed root
        :rtype: str|OneLogin_Saml2_XML._element_class
        """
        assert isinstance(schema, compat.str_type)
        try:
            xml = OneLogin_Saml2_XML.to_etree(xml)
        except Exception as e:
            if debug:
                print(e)
            return 'unloaded_xml'

        schema_file = join(dirname(__file__), 'schemas', schema)
        with open(schema_file, 'r') as f_schema:
            xmlschema = OneLogin_Saml2_XML._schema_class(etree.parse(f_schema))

        if not xmlschema.validate(xml):
            if debug:
                print('Errors validating the metadata: ')
                for error in xmlschema.error_log:
                    print(error.message)
            return 'invalid_xml'

        return xml

    @staticmethod
    def query(dom, query, context=None):
        """
        Extracts nodes that match the query from the Element.

        :param dom: The root of the lxml object
        :type: Element
        :param query: Xpath Expression
        :type: string
        :param context: Context Node
        :type: DOMElement
        :returns: The queried nodes
        :rtype: list
        """
        if context is None:
            return dom.xpath(query, namespaces=OneLogin_Saml2_Constants.NSMAP)
        else:
            return context.xpath(query, namespaces=OneLogin_Saml2_Constants.NSMAP)

    @staticmethod
    def extract_tag_text(xml, tagname):
        """Return the raw text of the first <tagname>...</tagname> span of
        the serialized document, including the tags themselves."""
        open_tag = compat.to_bytes("<%s" % tagname)
        close_tag = compat.to_bytes("</%s>" % tagname)
        xml = OneLogin_Saml2_XML.to_string(xml)

        start = xml.find(open_tag)
        assert start != -1
        # Bug fix: locate the close tag *before* adding its length. The
        # original computed `end = xml.find(close_tag, start) + len(close_tag)`
        # first, so the subsequent `assert end != -1` could never fire when
        # the close tag was missing (find() returns -1, plus a positive length).
        end = xml.find(close_tag, start)
        assert end != -1
        end += len(close_tag)

        return compat.to_string(xml[start:end])
| 31.554745 | 82 | 0.643072 |
from os.path import join, dirname
from lxml import etree
from onelogin.saml2 import compat
from onelogin.saml2.constants import OneLogin_Saml2_Constants
for prefix, url in OneLogin_Saml2_Constants.NSMAP.items():
etree.register_namespace(prefix, url)
class OneLogin_Saml2_XML(object):
    """Thin wrapper around ``lxml.etree`` used by the OneLogin SAML2 toolkit."""
    # Runtime class of a parsed element; used for isinstance checks below.
    _element_class = type(etree.Element('root'))
    _parse_etree = staticmethod(etree.fromstring)
    _schema_class = etree.XMLSchema
    # Text types accepted as "already serialized" XML (str/bytes per compat).
    _text_class = compat.text_types
    _unparse_etree = staticmethod(etree.tostring)
    dump = staticmethod(etree.dump)
    make_root = staticmethod(etree.Element)
    make_child = staticmethod(etree.SubElement)
    cleanup_namespaces = staticmethod(etree.cleanup_namespaces)
    @staticmethod
    def to_string(xml, **kwargs):
        """Serialize *xml* to text; pass it through if it is already text.

        :raises ValueError: for unsupported input types.
        """
        if isinstance(xml, OneLogin_Saml2_XML._text_class):
            return xml
        if isinstance(xml, OneLogin_Saml2_XML._element_class):
            OneLogin_Saml2_XML.cleanup_namespaces(xml)
            return OneLogin_Saml2_XML._unparse_etree(xml, **kwargs)
        raise ValueError("unsupported type %r" % type(xml))
    @staticmethod
    def to_etree(xml):
        """Parse *xml* into an etree element; pass it through if already parsed.

        :raises ValueError: for unsupported input types.
        """
        if isinstance(xml, OneLogin_Saml2_XML._element_class):
            return xml
        if isinstance(xml, OneLogin_Saml2_XML._text_class):
            return OneLogin_Saml2_XML._parse_etree(xml)
        raise ValueError('unsupported type %r' % type(xml))
    @staticmethod
    def validate_xml(xml, schema, debug=False):
        """Validate *xml* against the named XSD file under ``schemas/``.

        Returns the parsed element on success, or the string
        'unloaded_xml' / 'invalid_xml' on failure.
        """
        assert isinstance(schema, compat.str_type)
        try:
            xml = OneLogin_Saml2_XML.to_etree(xml)
        except Exception as e:
            if debug:
                print(e)
            return 'unloaded_xml'
        schema_file = join(dirname(__file__), 'schemas', schema)
        with open(schema_file, 'r') as f_schema:
            xmlschema = OneLogin_Saml2_XML._schema_class(etree.parse(f_schema))
        if not xmlschema.validate(xml):
            if debug:
                print('Errors validating the metadata: ')
                for error in xmlschema.error_log:
                    print(error.message)
            return 'invalid_xml'
        return xml
    @staticmethod
    def query(dom, query, context=None):
        """Run XPath *query* (with the SAML namespace map) on *context* or *dom*."""
        if context is None:
            return dom.xpath(query, namespaces=OneLogin_Saml2_Constants.NSMAP)
        else:
            return context.xpath(query, namespaces=OneLogin_Saml2_Constants.NSMAP)
    @staticmethod
    def extract_tag_text(xml, tagname):
        """Return the raw text of the first <tagname>...</tagname> span in *xml*."""
        open_tag = compat.to_bytes("<%s" % tagname)
        close_tag = compat.to_bytes("</%s>" % tagname)
        xml = OneLogin_Saml2_XML.to_string(xml)
        start = xml.find(open_tag)
        assert start != -1
        # NOTE(review): if close_tag is absent, find() yields -1 and end becomes
        # len(close_tag) - 1, so this assert cannot fire — confirm intended.
        end = xml.find(close_tag, start) + len(close_tag)
        assert end != -1
        return compat.to_string(xml[start:end])
| true | true |
f7f590bdfafbd830ae8b6bd39eb4fd1d69c52e35 | 1,174 | py | Python | spInDP/PushBehavior.py | henkmollema/spInDP | a12f4bc6674565009c21179bbcd371f0982a13d7 | [
"MIT"
] | null | null | null | spInDP/PushBehavior.py | henkmollema/spInDP | a12f4bc6674565009c21179bbcd371f0982a13d7 | [
"MIT"
] | null | null | null | spInDP/PushBehavior.py | henkmollema/spInDP | a12f4bc6674565009c21179bbcd371f0982a13d7 | [
"MIT"
] | null | null | null | from spInDP.Behavior import Behavior
import time
from spInDP.AnimationController import AnimationController
class PushBehavior(Behavior):
    """Behavior that pushes the spider across a gap.

    Plays the 'push' animation frame by frame; playback only runs while
    the remote joystick deflection exceeds a dead zone, and its speed
    scales with the deflection magnitude.
    """
    frameNr = 1               # next animation frame to play
    lastZ = 0
    animationController = None
    remoteContext = None

    def __init__(self, spider):
        """Attach to *spider* and move the legs to a safe start pose."""
        print("Initializing push (spider gap) behavior.")
        super(PushBehavior, self).__init__(spider)
        self.safeTransition()

    def update(self):
        """Advance one push-animation frame when the joystick is pushed.

        Sleeps for the duration returned by the animation controller so
        frames are not skipped.
        """
        jMagnitude = self.spider.remoteController.context.jMagnitude
        # Dead zone: ignore joystick deflections of 0.4 or less.
        if jMagnitude > 0.4:
            speedModifier = jMagnitude * 2
            # Note: the original code branched on jX > 0 here, but both
            # branches were byte-identical, so the direction check and the
            # unused 'angleModifier' local were removed.
            time.sleep(self.spider.animationController.push(
                frameNr=self.frameNr, speedMod=speedModifier))
            self.frameNr += 1

    def safeTransition(self):
        """Move two groups of three servos to +/-10 degrees as a safe start pose."""
        for x in [2, 14, 17]:
            self.spider.servoController.move(servo=x, angle=-10, speed=100)
        for x in [5, 8, 11]:
            self.spider.servoController.move(servo=x, angle=10, speed=100)
import time
from spInDP.AnimationController import AnimationController
class PushBehavior(Behavior):
    """Behavior that pushes the spider across a gap.

    Plays the 'push' animation frame by frame; playback only runs while
    the remote joystick deflection exceeds a dead zone, and its speed
    scales with the deflection magnitude.
    """
    frameNr = 1               # next animation frame to play
    lastZ = 0
    animationController = None
    remoteContext = None

    def __init__(self, spider):
        """Attach to *spider* and move the legs to a safe start pose."""
        print("Initializing push (spider gap) behavior.")
        super(PushBehavior, self).__init__(spider)
        self.safeTransition()

    def update(self):
        """Advance one push-animation frame when the joystick is pushed.

        Sleeps for the duration returned by the animation controller so
        frames are not skipped.
        """
        jMagnitude = self.spider.remoteController.context.jMagnitude
        # Dead zone: ignore joystick deflections of 0.4 or less.
        if jMagnitude > 0.4:
            speedModifier = jMagnitude * 2
            # Note: the original code branched on jX > 0 here, but both
            # branches were byte-identical, so the direction check and the
            # unused 'angleModifier' local were removed.  Trailing dataset
            # metadata residue on the last line was also stripped.
            time.sleep(self.spider.animationController.push(
                frameNr=self.frameNr, speedMod=speedModifier))
            self.frameNr += 1

    def safeTransition(self):
        """Move two groups of three servos to +/-10 degrees as a safe start pose."""
        for x in [2, 14, 17]:
            self.spider.servoController.move(servo=x, angle=-10, speed=100)
        for x in [5, 8, 11]:
            self.spider.servoController.move(servo=x, angle=10, speed=100)
f7f5914fbc095c47e45b16badfd3c89dd70d6ac4 | 35,189 | py | Python | rplugin/python3/denite/ui/default.py | skt041959/denite.nvim | 73f6567b683aaf43457d49bea7a5ed19816baec1 | [
"MIT"
] | null | null | null | rplugin/python3/denite/ui/default.py | skt041959/denite.nvim | 73f6567b683aaf43457d49bea7a5ed19816baec1 | [
"MIT"
] | null | null | null | rplugin/python3/denite/ui/default.py | skt041959/denite.nvim | 73f6567b683aaf43457d49bea7a5ed19816baec1 | [
"MIT"
] | null | null | null | # ============================================================================
# FILE: default.py
# AUTHOR: Shougo Matsushita <Shougo.Matsu at gmail.com>
# License: MIT license
# ============================================================================
import re
import typing
from denite.util import echo, error, clearmatch, regex_convert_py_vim
from denite.util import Nvim, UserContext, Candidates, Candidate
from denite.parent import SyncParent
class Default(object):
    @property
    def is_async(self) -> bool:
        """Whether any source is still producing candidates asynchronously."""
        return self._is_async
    def __init__(self, vim: Nvim) -> None:
        """Initialize UI state; nothing is drawn until start() is called."""
        self._vim = vim
        self._denite: typing.Optional[SyncParent] = None
        self._selected_candidates: typing.List[int] = []
        self._candidates: Candidates = []
        # 1-based line of the cursor candidate; 0 means "not placed yet".
        self._cursor = 0
        self._entire_len = 0
        self._result: typing.List[typing.Any] = []
        self._context: UserContext = {}
        # -1 marks "no denite buffer/window created yet".
        self._bufnr = -1
        self._winid = -1
        self._winrestcmd = ''
        self._initialized = False
        self._winheight = 0
        self._winwidth = 0
        self._winminheight = -1
        self._is_multi = False
        self._is_async = False
        self._matched_pattern = ''
        self._displayed_texts: typing.List[str] = []
        self._statusline_sources = ''
        self._titlestring = ''
        self._ruler = False
        self._prev_action = ''
        self._prev_status: typing.Dict[str, typing.Any] = {}
        self._prev_curpos: typing.List[typing.Any] = []
        self._save_window_options: typing.Dict[str, typing.Any] = {}
        self._sources_history: typing.List[typing.Any] = []
        self._previous_text = ''
        self._floating = False
        self._filter_floating = False
        self._updated = False
        # Vim timer ids keyed by 'update_candidates' / 'update_buffer'.
        self._timers: typing.Dict[str, int] = {}
        self._matched_range_id = -1
        self._matched_char_id = -1
        self._check_matchdelete = bool(self._vim.call(
            'denite#util#check_matchdelete'))
    def start(self, sources: typing.List[typing.Any],
              context: UserContext) -> typing.List[typing.Any]:
        """Run denite over *sources* and return the acted-on candidates."""
        if not self._denite:
            # if hasattr(self._vim, 'run_coroutine'):
            #     self._denite = ASyncParent(self._vim)
            # else:
            self._denite = SyncParent(self._vim)
        self._result = []
        context['sources_queue'] = [sources]
        self._start_sources_queue(context)
        return self._result
    def do_action(self, action_name: str,
                  command: str = '', is_manual: bool = False) -> None:
        """Execute *action_name* on the selected (or cursor) candidates.

        *command* is an extra Vim command run after the action; *is_manual*
        marks a user-triggered action (uses the multi-selection and may
        advance the sources queue afterwards).
        """
        if is_manual:
            candidates = self._get_selected_candidates()
        elif self._get_cursor_candidate():
            candidates = [self._get_cursor_candidate()]
        else:
            candidates = []
        if not self._denite or not candidates or not action_name:
            return
        self._prev_action = action_name
        action = self._denite.get_action(
            self._context, action_name, candidates)
        if not action:
            return
        post_action = self._context['post_action']
        is_quit = action['is_quit'] or post_action == 'quit'
        if is_quit:
            self.quit()
        self._denite.do_action(self._context, action_name, candidates)
        self._result = candidates
        if command != '':
            self._vim.command(command)
        if is_quit and post_action == 'open':
            # Re-open denite buffer
            prev_cursor = self._cursor
            cursor_candidate = self._get_cursor_candidate()
            self._init_buffer()
            self.redraw(False)
            if cursor_candidate == self._get_candidate(prev_cursor):
                # Restore the cursor
                self._move_to_pos(prev_cursor)
            # Disable quit flag
            is_quit = False
        if not is_quit and is_manual:
            self._selected_candidates = []
            self.redraw(action['is_redraw'])
        if is_manual and self._context['sources_queue']:
            self._context['input'] = ''
            self._context['quick_move'] = ''
            self._start_sources_queue(self._context)
        return
def redraw(self, is_force: bool = True) -> None:
self._context['is_redraw'] = is_force
if is_force:
self._gather_candidates()
if self._update_candidates():
self._update_buffer()
else:
self._update_status()
self._context['is_redraw'] = False
def quit(self) -> None:
if self._denite:
self._denite.on_close(self._context)
self._quit_buffer()
self._result = []
return
    def _restart(self) -> None:
        """Tear down and re-run the current session with an empty input."""
        self._context['input'] = ''
        self._quit_buffer()
        self._init_denite()
        self._gather_candidates()
        self._init_buffer()
        self._update_candidates()
        self._update_buffer()
    def _start_sources_queue(self, context: UserContext) -> None:
        """Run the next batch of sources from the queue, recording history."""
        if not context['sources_queue']:
            return
        self._sources_history.append({
            'sources': context['sources_queue'][0],
            'path': context['path'],
        })
        self._start(context['sources_queue'][0], context)
        # _start may have consumed the queue (e.g. via quit); re-check.
        if context['sources_queue']:
            context['sources_queue'].pop(0)
            context['path'] = self._context['path']
    def _start(self, sources: typing.List[typing.Any],
               context: UserContext) -> None:
        """Open (or resume) the denite buffer for *sources*.

        On resume only a subset of context keys is refreshed; otherwise the
        backend is (re)initialized and candidates gathered from scratch.
        """
        from denite.ui.map import do_map
        self._vim.command('silent! autocmd! denite')
        if re.search(r'\[Command Line\]$', self._vim.current.buffer.name):
            # Ignore command line window.
            return
        resume = self._initialized and context['resume']
        if resume:
            # Skip the initialization
            update = ('immediately', 'immediately_1',
                      'cursor_pos', 'prev_winid',
                      'start_filter', 'quick_move')
            for key in update:
                self._context[key] = context[key]
            self._check_move_option()
            if self._check_do_option():
                return
            self._init_buffer()
            if context['refresh']:
                self.redraw()
            self._move_to_pos(self._cursor)
        else:
            if self._context != context:
                self._context.clear()
                self._context.update(context)
            self._context['sources'] = sources
            self._context['is_redraw'] = False
            self._is_multi = len(sources) > 1
            if not sources:
                # Ignore empty sources.
                error(self._vim, 'Empty sources')
                return
            self._init_denite()
            self._gather_candidates()
            self._update_candidates()
            self._init_cursor()
            self._check_move_option()
            if self._check_do_option():
                return
            self._init_buffer()
            self._update_displayed_texts()
            self._update_buffer()
            self._move_to_pos(self._cursor)
        if self._context['quick_move'] and do_map(self, 'quick_move', []):
            return
        if self._context['start_filter']:
            do_map(self, 'open_filter_buffer', [])
    def _init_buffer(self) -> None:
        """Create/configure the denite buffer and save restore state.

        Records the previous buffer, cursor, window layout and local window
        options so _quit_buffer() can restore them later.
        """
        self._prev_status = dict()
        self._displayed_texts = []
        self._prev_bufnr = self._vim.current.buffer.number
        self._prev_curpos = self._vim.call('getcurpos')
        self._prev_wininfo = self._get_wininfo()
        self._prev_winid = int(self._context['prev_winid'])
        self._winrestcmd = self._vim.call('winrestcmd')
        self._ruler = self._vim.options['ruler']
        self._switch_buffer()
        self._bufnr = self._vim.current.buffer.number
        self._winid = self._vim.call('win_getid')
        self._resize_buffer(True)
        self._winheight = self._vim.current.window.height
        self._winwidth = self._vim.current.window.width
        self._bufvars = self._vim.current.buffer.vars
        self._bufvars['denite'] = {
            'buffer_name': self._context['buffer_name'],
        }
        self._bufvars['denite_statusline'] = {}
        self._vim.vars['denite#_previewed_buffers'] = {}
        self._save_window_options = {}
        window_options = {
            'colorcolumn',
            'concealcursor',
            'conceallevel',
            'cursorcolumn',
            'cursorline',
            'foldcolumn',
            'foldenable',
            'list',
            'number',
            'relativenumber',
            'signcolumn',
            'spell',
            'winfixheight',
            'wrap',
        }
        for k in window_options:
            self._save_window_options[k] = self._vim.current.window.options[k]
        # Note: Have to use setlocal instead of "current.window.options"
        # "current.window.options" changes global value instead of local in
        # neovim.
        self._vim.command('setlocal colorcolumn=')
        self._vim.command('setlocal conceallevel=3')
        self._vim.command('setlocal concealcursor=inv')
        self._vim.command('setlocal nocursorcolumn')
        self._vim.command('setlocal nofoldenable')
        self._vim.command('setlocal foldcolumn=0')
        self._vim.command('setlocal nolist')
        self._vim.command('setlocal nonumber')
        self._vim.command('setlocal norelativenumber')
        self._vim.command('setlocal nospell')
        self._vim.command('setlocal winfixheight')
        self._vim.command('setlocal nowrap')
        if self._context['prompt']:
            self._vim.command('setlocal signcolumn=yes')
        else:
            self._vim.command('setlocal signcolumn=no')
        if self._context['cursorline']:
            self._vim.command('setlocal cursorline')
        options = self._vim.current.buffer.options
        if self._floating:
            # Disable ruler
            self._vim.options['ruler'] = False
        options['buftype'] = 'nofile'
        options['bufhidden'] = 'delete'
        options['swapfile'] = False
        options['buflisted'] = False
        options['modeline'] = False
        options['modifiable'] = False
        options['filetype'] = 'denite'
        if self._vim.call('exists', '#WinEnter'):
            self._vim.command('doautocmd WinEnter')
        if self._vim.call('exists', '#BufWinEnter'):
            self._vim.command('doautocmd BufWinEnter')
        if not self._vim.call('has', 'nvim'):
            # In Vim8, FileType autocmd is not fired after set filetype option.
            self._vim.command('silent doautocmd FileType denite')
        if self._context['auto_action']:
            self._vim.command('autocmd denite '
                              'CursorMoved <buffer> '
                              'call denite#call_map("auto_action")')
        self._init_syntax()
    def _switch_buffer(self) -> None:
        """Move into (creating if needed) the denite window per 'split'.

        Supports reuse of an existing window, tabs, normal splits, and the
        Neovim floating-window modes 'floating' / 'floating_relative'.
        """
        split = self._context['split']
        if (split != 'no' and self._winid > 0 and
                self._vim.call('win_gotoid', self._winid)):
            if split != 'vertical' and not self._floating:
                # Move the window to bottom
                self._vim.command('wincmd J')
            self._winrestcmd = ''
            return
        self._floating = split in ['floating', 'floating_relative']
        self._filter_floating = False
        command = 'edit'
        if split == 'tab':
            self._vim.command('tabnew')
        elif self._floating:
            # Use floating window
            if self._vim.current.buffer.options['filetype'] != 'denite':
                self._titlestring = self._vim.options['titlestring']
            if split == 'floating':
                self._vim.call(
                    'nvim_open_win',
                    self._vim.call('bufnr', '%'), True, {
                        'relative': 'editor',
                        'row': int(self._context['winrow']),
                        'col': int(self._context['wincol']),
                        'width': int(self._context['winwidth']),
                        'height': int(self._context['winheight']),
                    })
            elif split == 'floating_relative':
                opened_pos = (self._vim.call('nvim_win_get_position', 0)[0] +
                              self._vim.call('winline') - 1)
                if self._context['auto_resize']:
                    height = max(self._winheight, 1)
                    width = max(self._winwidth, 1)
                else:
                    width = int(self._context['winwidth'])
                    height = int(self._context['winheight'])
                # Anchor above the cursor when the window would not fit below.
                if opened_pos + height + 3 > self._vim.eval('&lines'):
                    anchor = 'SW'
                    row = 0
                    self._context['filter_winrow'] = row + opened_pos
                else:
                    anchor = 'NW'
                    row = 1
                    self._context['filter_winrow'] = row + height + opened_pos
                self._vim.call(
                    'nvim_open_win',
                    self._vim.call('bufnr', '%'), True, {
                        'relative': 'cursor',
                        'row': row,
                        'col': 0,
                        'width': width,
                        'height': height,
                        'anchor': anchor,
                    })
        elif self._context['filter_split_direction'] == 'floating':
            self._titlestring = self._vim.options['titlestring']
            self._filter_floating = True
        elif split != 'no':
            command = self._get_direction()
            command += ' vsplit' if split == 'vertical' else ' split'
        bufname = '[denite]-' + self._context['buffer_name']
        if self._vim.call('exists', '*bufadd'):
            bufnr = self._vim.call('bufadd', bufname)
            vertical = 'vertical' if split == 'vertical' else ''
            command = (
                'buffer' if split
                in ['no', 'tab', 'floating', 'floating_relative'] else 'sbuffer')
            self._vim.command(
                'silent keepalt %s %s %s %s' % (
                    self._get_direction(),
                    vertical,
                    command,
                    bufnr,
                )
            )
        else:
            self._vim.call(
                'denite#util#execute_path',
                f'silent keepalt {command}', bufname)
def _get_direction(self) -> str:
direction = str(self._context['direction'])
if direction == 'dynamictop' or direction == 'dynamicbottom':
self._update_displayed_texts()
winwidth = self._vim.call('winwidth', 0)
is_fit = not [x for x in self._displayed_texts
if self._vim.call('strwidth', x) > winwidth]
if direction == 'dynamictop':
direction = 'aboveleft' if is_fit else 'topleft'
else:
direction = 'belowright' if is_fit else 'botright'
return direction
def _get_wininfo(self) -> typing.List[typing.Any]:
return [
self._vim.options['columns'], self._vim.options['lines'],
self._vim.call('win_getid'), self._vim.call('tabpagebuflist')
]
def _switch_prev_buffer(self) -> None:
if (self._prev_bufnr == self._bufnr or
self._vim.buffers[self._prev_bufnr].name == ''):
self._vim.command('enew')
else:
self._vim.command('buffer ' + str(self._prev_bufnr))
    def _init_syntax(self) -> None:
        """Define denite highlight groups and syntax in the current buffer."""
        self._vim.command('syntax case ignore')
        self._vim.command('highlight default link deniteInput ModeMsg')
        self._vim.command('highlight link deniteMatchedRange ' +
                          self._context['highlight_matched_range'])
        self._vim.command('highlight link deniteMatchedChar ' +
                          self._context['highlight_matched_char'])
        self._vim.command('highlight default link ' +
                          'deniteStatusLinePath Comment')
        self._vim.command('highlight default link ' +
                          'deniteStatusLineNumber LineNR')
        self._vim.command('highlight default link ' +
                          'deniteSelectedLine Statement')
        if self._floating:
            self._vim.current.window.options['winhighlight'] = (
                'Normal:' + self._context['highlight_window_background']
            )
        # Selected lines start with the selected_icon; the icon column itself
        # is concealed.
        self._vim.command(('syntax match deniteSelectedLine /^[%s].*/' +
                           ' contains=deniteConcealedMark') % (
                               self._context['selected_icon']))
        self._vim.command(('syntax match deniteConcealedMark /^[ %s]/' +
                           ' conceal contained') % (
                               self._context['selected_icon']))
        if self._denite:
            self._denite.init_syntax(self._context, self._is_multi)
    def _update_candidates(self) -> bool:
        """Re-filter candidates; return True when the display must change."""
        if not self._denite:
            return False
        [self._is_async, pattern, statuses, self._entire_len,
         self._candidates] = self._denite.filter_candidates(self._context)
        prev_displayed_texts = self._displayed_texts
        self._update_displayed_texts()
        prev_matched_pattern = self._matched_pattern
        self._matched_pattern = pattern
        prev_statusline_sources = self._statusline_sources
        self._statusline_sources = ' '.join(statuses)
        # Keep polling while any source is still producing candidates.
        if self._is_async:
            self._start_timer('update_candidates')
        else:
            self._stop_timer('update_candidates')
        updated = (self._displayed_texts != prev_displayed_texts or
                   self._matched_pattern != prev_matched_pattern or
                   self._statusline_sources != prev_statusline_sources)
        if updated:
            self._updated = True
            self._start_timer('update_buffer')
        if self._context['search'] and self._context['input']:
            self._vim.call('setreg', '/', self._context['input'])
        return self._updated
    def _update_displayed_texts(self) -> None:
        """Recompute the display lines and (optionally) the window height."""
        candidates_len = len(self._candidates)
        if not self._is_async and self._context['auto_resize']:
            winminheight = int(self._context['winminheight'])
            max_height = min(int(self._context['winheight']),
                             self._get_max_height())
            if (winminheight != -1 and candidates_len < winminheight):
                self._winheight = winminheight
            elif candidates_len > max_height:
                self._winheight = max_height
            elif candidates_len != self._winheight:
                self._winheight = candidates_len
        # Width of the widest source-name label, used to align columns.
        max_source_name_len = 0
        if self._candidates:
            max_source_name_len = max([
                len(self._get_display_source_name(x['source_name']))
                for x in self._candidates])
        self._context['max_source_name_len'] = max_source_name_len
        self._context['max_source_name_format'] = (
            '{:<' + str(self._context['max_source_name_len']) + '}')
        self._displayed_texts = [
            self._get_candidate_display_text(i)
            for i in range(0, candidates_len)
        ]
    def _update_buffer(self) -> None:
        """Repaint the denite buffer: match highlights, lines and cursor."""
        is_current_buffer = self._bufnr == self._vim.current.buffer.number
        self._update_status()
        if self._check_matchdelete and self._context['match_highlight']:
            # Drop stale match highlights before adding fresh ones.
            matches = [x['id'] for x in
                       self._vim.call('getmatches', self._winid)]
            if self._matched_range_id in matches:
                self._vim.call('matchdelete',
                               self._matched_range_id, self._winid)
                self._matched_range_id = -1
            if self._matched_char_id in matches:
                self._vim.call('matchdelete',
                               self._matched_char_id, self._winid)
                self._matched_char_id = -1
            if self._matched_pattern != '':
                self._matched_range_id = self._vim.call(
                    'matchadd', 'deniteMatchedRange',
                    r'\c' + regex_convert_py_vim(self._matched_pattern),
                    10, -1, {'window': self._winid})
                # Escape characters special inside a Vim collection [...].
                matched_char_pattern = '[{}]'.format(re.sub(
                    r'([\[\]\\^-])',
                    r'\\\1',
                    self._context['input'].replace(' ', '')
                ))
                self._matched_char_id = self._vim.call(
                    'matchadd', 'deniteMatchedChar',
                    matched_char_pattern,
                    10, -1, {'window': self._winid})
        prev_linenr = self._vim.call('line', '.')
        prev_candidate = self._get_cursor_candidate()
        buffer = self._vim.buffers[self._bufnr]
        buffer.options['modifiable'] = True
        self._vim.vars['denite#_candidates'] = [
            x['word'] for x in self._candidates]
        buffer[:] = self._displayed_texts
        buffer.options['modifiable'] = False
        # NOTE(review): _previous_text is assigned *before* the comparison
        # below, so the inequality in is_changed is always False — this looks
        # like the assignment should happen after the comparison; confirm.
        self._previous_text = self._context['input']
        self._resize_buffer(is_current_buffer)
        is_changed = (self._context['reversed'] or
                      (is_current_buffer and
                       self._previous_text != self._context['input']))
        if self._updated and is_changed:
            if not is_current_buffer:
                save_winid = self._vim.call('win_getid')
                self._vim.call('win_gotoid', self._winid)
            self._init_cursor()
            self._move_to_pos(self._cursor)
            if not is_current_buffer:
                self._vim.call('win_gotoid', save_winid)
        elif is_current_buffer:
            self._vim.call('cursor', [prev_linenr, 0])
        if is_current_buffer:
            if (self._context['auto_action'] and
                    prev_candidate != self._get_cursor_candidate()):
                self.do_action(self._context['auto_action'])
        self._updated = False
        self._stop_timer('update_buffer')
    def _update_status(self) -> None:
        """Publish statusline/titlestring data; skipped when unchanged."""
        inpt = ''
        if self._context['input']:
            inpt = self._context['input'] + ' '
        if self._context['error_messages']:
            inpt = '[ERROR] ' + inpt
        path = '[' + self._context['path'] + ']'
        status = {
            'input': inpt,
            'sources': self._statusline_sources,
            'path': path,
            # Extra
            'buffer_name': self._context['buffer_name'],
            'line_total': len(self._candidates),
        }
        if status == self._prev_status:
            return
        self._bufvars['denite_statusline'] = status
        self._prev_status = status
        # Vim expression that renders "  line/total" right-padded.
        linenr = "printf('%'.(len(line('$'))+2).'d/%d',line('.'),line('$'))"
        if self._context['statusline']:
            if self._floating or self._filter_floating:
                self._vim.options['titlestring'] = (
                    "%{denite#get_status('input')}%* " +
                    "%{denite#get_status('sources')} " +
                    " %{denite#get_status('path')}%*" +
                    "%{" + linenr + "}%*")
            else:
                winnr = self._vim.call('win_id2win', self._winid)
                self._vim.call('setwinvar', winnr, '&statusline', (
                    "%#deniteInput#%{denite#get_status('input')}%* " +
                    "%{denite#get_status('sources')} %=" +
                    "%#deniteStatusLinePath# %{denite#get_status('path')}%*" +
                    "%#deniteStatusLineNumber#%{" + linenr + "}%*"))
def _get_display_source_name(self, name: str) -> str:
source_names = self._context['source_names']
if not self._is_multi or source_names == 'hide':
source_name = ''
else:
short_name = (re.sub(r'([a-zA-Z])[a-zA-Z]+', r'\1', name)
if re.search(r'[^a-zA-Z]', name) else name[:2])
source_name = short_name if source_names == 'short' else name
return source_name
    def _get_candidate_display_text(self, index: int) -> str:
        """Render one candidate line: selection mark, source label, abbr."""
        source_names = self._context['source_names']
        candidate = self._candidates[index]
        terms = []
        if self._is_multi and source_names != 'hide':
            terms.append(self._context['max_source_name_format'].format(
                self._get_display_source_name(candidate['source_name'])))
        # Round-trip through the configured encoding to replace undisplayable
        # characters.
        encoding = self._context['encoding']
        abbr = candidate.get('abbr', candidate['word']).encode(
            encoding, errors='replace').decode(encoding, errors='replace')
        terms.append(abbr[:int(self._context['max_candidate_width'])])
        return (self._context['selected_icon'] # type: ignore
                if index in self._selected_candidates
                else ' ') + ' '.join(terms).replace('\n', '')
def _get_max_height(self) -> int:
return int(self._vim.options['lines']) if not self._floating else (
int(self._vim.options['lines']) -
int(self._context['winrow']) -
int(self._vim.options['cmdheight']))
    def _resize_buffer(self, is_current_buffer: bool) -> None:
        """Resize the denite window (and reposition its filter float)."""
        split = self._context['split']
        if (split == 'no' or split == 'tab' or
                self._vim.call('winnr', '$') == 1):
            return
        winheight = max(self._winheight, 1)
        winwidth = max(self._winwidth, 1)
        is_vertical = split == 'vertical'
        if not is_current_buffer:
            restore = self._vim.call('win_getid')
            self._vim.call('win_gotoid', self._winid)
        if not is_vertical and self._vim.current.window.height != winheight:
            if self._floating:
                # NOTE(review): 'wincol' is assigned from the 'winrow' option
                # here — looks like a misnamed local; confirm intended.
                wincol = int(self._context['winrow'])
                row = wincol
                if split == 'floating':
                    if self._context['auto_resize'] and row > 1:
                        row += int(self._context['winheight'])
                        row -= self._winheight
                    self._vim.call('nvim_win_set_config', self._winid, {
                        'relative': 'editor',
                        'row': row,
                        'col': int(self._context['wincol']),
                        'width': winwidth,
                        'height': winheight,
                    })
                    filter_row = 0 if wincol == 1 else row + winheight
                    filter_col = int(self._context['wincol'])
                elif split == 'floating_relative':
                    init_pos = self._vim.call('nvim_win_get_config',
                                              self._winid)
                    self._vim.call('nvim_win_set_config', self._winid, {
                        'relative': 'win',
                        'win': init_pos['win'],
                        'row': init_pos['row'],
                        'col': init_pos['col'],
                        'width': winwidth,
                        'height': winheight,
                    })
                    filter_col = init_pos['col']
                    if init_pos['anchor'] == 'NW':
                        winpos = self._vim.call('nvim_win_get_position',
                                                self._winid)
                        filter_row = winpos[0] + winheight
                filter_winid = self._vim.vars['denite#_filter_winid']
                self._context['filter_winrow'] = row
                if self._vim.call('win_id2win', filter_winid) > 0:
                    self._vim.call('nvim_win_set_config', filter_winid, {
                        'relative': 'editor',
                        'row': filter_row,
                        'col': filter_col,
                    })
            self._vim.command('resize ' + str(winheight))
            if self._context['reversed']:
                self._vim.command('normal! zb')
        elif is_vertical and self._vim.current.window.width != winwidth:
            self._vim.command('vertical resize ' + str(winwidth))
        if not is_current_buffer:
            self._vim.call('win_gotoid', restore)
    def _check_do_option(self) -> bool:
        """Handle 'do'/'immediately*' options; True when denite should not open."""
        if self._context['do'] != '':
            self._do_command(self._context['do'])
            return True
        elif (self._candidates and self._context['immediately'] or
                len(self._candidates) == 1 and self._context['immediately_1']):
            self._do_immediately()
            return True
        # Also abort when there is nothing to show and 'empty' is disabled.
        return not (self._context['empty'] or
                    self._is_async or self._candidates)
    def _check_move_option(self) -> None:
        """Apply the 'cursor_pos' option: absolute N, relative +N/-N, or '$'."""
        if self._context['cursor_pos'].isnumeric():
            self._cursor = int(self._context['cursor_pos']) + 1
        elif re.match(r'\+\d+', self._context['cursor_pos']):
            for _ in range(int(self._context['cursor_pos'][1:])):
                self._move_to_next_line()
        elif re.match(r'-\d+', self._context['cursor_pos']):
            for _ in range(int(self._context['cursor_pos'][1:])):
                self._move_to_prev_line()
        elif self._context['cursor_pos'] == '$':
            self._move_to_last_line()
    def _do_immediately(self) -> None:
        """Run the default action on the cursor candidate without opening UI."""
        goto = self._winid > 0 and self._vim.call(
            'win_gotoid', self._winid)
        if goto:
            # Jump to denite window
            self._init_buffer()
        self.do_action('default')
        candidate = self._get_cursor_candidate()
        if not candidate:
            return
        echo(self._vim, 'Normal', '[{}/{}] {}'.format(
            self._cursor, len(self._candidates),
            candidate.get('abbr', candidate['word'])))
        if goto:
            # Move to the previous window
            self._vim.command('wincmd p')
def _do_command(self, command: str) -> None:
self._init_cursor()
cursor = 1
while cursor < len(self._candidates):
self.do_action('default', command)
self._move_to_next_line()
self._quit_buffer()
    def _cleanup(self) -> None:
        """Stop timers, close filter/preview windows and restore global options."""
        self._stop_timer('update_candidates')
        self._stop_timer('update_buffer')
        if self._vim.current.buffer.number == self._bufnr:
            self._cursor = self._vim.call('line', '.')
        # Note: Close filter window before preview window
        self._vim.call('denite#filter#_close_filter_window')
        if not self._context['has_preview_window']:
            self._vim.command('pclose!')
        # Clear previewed buffers
        for bufnr in self._vim.vars['denite#_previewed_buffers'].keys():
            if not self._vim.call('win_findbuf', bufnr):
                self._vim.command('silent bdelete ' + str(bufnr))
        self._vim.vars['denite#_previewed_buffers'] = {}
        self._vim.command('highlight! link CursorLine CursorLine')
        if self._floating or self._filter_floating:
            self._vim.options['titlestring'] = self._titlestring
            self._vim.options['ruler'] = self._ruler
def _close_current_window(self) -> None:
if self._vim.call('winnr', '$') == 1:
self._vim.command('buffer #')
else:
self._vim.command('close!')
    def _quit_buffer(self) -> None:
        """Close the denite (and filter) windows and restore the prior layout."""
        self._cleanup()
        if self._vim.call('bufwinnr', self._bufnr) < 0:
            # Denite buffer is already closed
            return
        winids = self._vim.call('win_findbuf',
                                self._vim.vars['denite#_filter_bufnr'])
        if winids:
            # Quit filter buffer
            self._vim.call('win_gotoid', winids[0])
            self._close_current_window()
            # Move to denite window
            self._vim.call('win_gotoid', self._winid)
        # Restore the window
        if self._context['split'] == 'no':
            self._switch_prev_buffer()
            for k, v in self._save_window_options.items():
                self._vim.current.window.options[k] = v
        else:
            if self._context['split'] == 'tab':
                self._vim.command('tabclose!')
            if self._context['split'] != 'tab':
                self._close_current_window()
            self._vim.call('win_gotoid', self._prev_winid)
        # Restore the position
        self._vim.call('setpos', '.', self._prev_curpos)
        if self._get_wininfo() and self._get_wininfo() == self._prev_wininfo:
            # Note: execute restcmd twice to restore layout properly
            self._vim.command(self._winrestcmd)
            self._vim.command(self._winrestcmd)
        clearmatch(self._vim)
    def _get_cursor_candidate(self) -> Candidate:
        """Candidate under the cursor, or {} when there is none."""
        return self._get_candidate(self._cursor)
def _get_candidate(self, pos: int) -> Candidate:
if not self._candidates or pos > len(self._candidates):
return {}
return self._candidates[pos - 1]
def _get_selected_candidates(self) -> Candidates:
if not self._selected_candidates:
return [self._get_cursor_candidate()
] if self._get_cursor_candidate() else []
return [self._candidates[x] for x in self._selected_candidates]
    def _init_denite(self) -> None:
        """Start the backend and cache the configured window geometry."""
        if self._denite:
            self._denite.start(self._context)
            self._denite.on_init(self._context)
        self._initialized = True
        self._winheight = int(self._context['winheight'])
        self._winwidth = int(self._context['winwidth'])
    def _gather_candidates(self) -> None:
        """Clear the selection and ask all sources for fresh candidates."""
        self._selected_candidates = []
        if self._denite:
            self._denite.gather_candidates(self._context)
    def _init_cursor(self) -> None:
        """Place the cursor on the first candidate (last when 'reversed')."""
        if self._context['reversed']:
            self._move_to_last_line()
            # Scroll so the cursor line sits at the bottom of the window.
            self._vim.command('normal! zb')
        else:
            self._move_to_first_line()
    def _move_to_pos(self, pos: int) -> None:
        """Move both the Vim cursor and the internal cursor to line *pos*."""
        self._vim.call('cursor', pos, 0)
        self._cursor = pos
    def _move_to_next_line(self) -> None:
        """Advance the internal cursor, clamped to the last candidate."""
        if self._cursor < len(self._candidates):
            self._cursor += 1
    def _move_to_prev_line(self) -> None:
        """Move the internal cursor up by one, clamped at the top."""
        if self._cursor >= 1:
            self._cursor -= 1
    def _move_to_first_line(self) -> None:
        """Jump the internal cursor to the first candidate."""
        self._cursor = 1
    def _move_to_last_line(self) -> None:
        """Jump the internal cursor to the last candidate."""
        self._cursor = len(self._candidates)
    def _start_timer(self, key: str) -> None:
        """Start the named Vim polling timer once; no-op if already running."""
        if key in self._timers:
            return
        if key == 'update_candidates':
            self._timers[key] = self._vim.call(
                'denite#helper#_start_update_candidates_timer', self._bufnr)
        elif key == 'update_buffer':
            self._timers[key] = self._vim.call(
                'denite#helper#_start_update_buffer_timer', self._bufnr)
    def _stop_timer(self, key: str) -> None:
        """Stop and forget the named Vim timer if it is running."""
        if key not in self._timers:
            return
        self._vim.call('timer_stop', self._timers[key])
        # Note: After timer_stop is called, self._timers may be removed
        if key in self._timers:
            self._timers.pop(key)
| 38.207383 | 79 | 0.548495 |
import re
import typing
from denite.util import echo, error, clearmatch, regex_convert_py_vim
from denite.util import Nvim, UserContext, Candidates, Candidate
from denite.parent import SyncParent
class Default(object):
    @property
    def is_async(self) -> bool:
        """Whether any source is still producing candidates asynchronously."""
        return self._is_async
    def __init__(self, vim: Nvim) -> None:
        """Initialize UI state; nothing is drawn until start() is called."""
        self._vim = vim
        self._denite: typing.Optional[SyncParent] = None
        self._selected_candidates: typing.List[int] = []
        self._candidates: Candidates = []
        # 1-based line of the cursor candidate; 0 means "not placed yet".
        self._cursor = 0
        self._entire_len = 0
        self._result: typing.List[typing.Any] = []
        self._context: UserContext = {}
        # -1 marks "no denite buffer/window created yet".
        self._bufnr = -1
        self._winid = -1
        self._winrestcmd = ''
        self._initialized = False
        self._winheight = 0
        self._winwidth = 0
        self._winminheight = -1
        self._is_multi = False
        self._is_async = False
        self._matched_pattern = ''
        self._displayed_texts: typing.List[str] = []
        self._statusline_sources = ''
        self._titlestring = ''
        self._ruler = False
        self._prev_action = ''
        self._prev_status: typing.Dict[str, typing.Any] = {}
        self._prev_curpos: typing.List[typing.Any] = []
        self._save_window_options: typing.Dict[str, typing.Any] = {}
        self._sources_history: typing.List[typing.Any] = []
        self._previous_text = ''
        self._floating = False
        self._filter_floating = False
        self._updated = False
        # Vim timer ids keyed by 'update_candidates' / 'update_buffer'.
        self._timers: typing.Dict[str, int] = {}
        self._matched_range_id = -1
        self._matched_char_id = -1
        self._check_matchdelete = bool(self._vim.call(
            'denite#util#check_matchdelete'))
    def start(self, sources: typing.List[typing.Any],
              context: UserContext) -> typing.List[typing.Any]:
        """Run denite over *sources* and return the acted-on candidates."""
        if not self._denite:
            self._denite = SyncParent(self._vim)
        self._result = []
        context['sources_queue'] = [sources]
        self._start_sources_queue(context)
        return self._result
def do_action(self, action_name: str,
command: str = '', is_manual: bool = False) -> None:
if is_manual:
candidates = self._get_selected_candidates()
elif self._get_cursor_candidate():
candidates = [self._get_cursor_candidate()]
else:
candidates = []
if not self._denite or not candidates or not action_name:
return
self._prev_action = action_name
action = self._denite.get_action(
self._context, action_name, candidates)
if not action:
return
post_action = self._context['post_action']
is_quit = action['is_quit'] or post_action == 'quit'
if is_quit:
self.quit()
self._denite.do_action(self._context, action_name, candidates)
self._result = candidates
if command != '':
self._vim.command(command)
if is_quit and post_action == 'open':
prev_cursor = self._cursor
cursor_candidate = self._get_cursor_candidate()
self._init_buffer()
self.redraw(False)
if cursor_candidate == self._get_candidate(prev_cursor):
self._move_to_pos(prev_cursor)
is_quit = False
if not is_quit and is_manual:
self._selected_candidates = []
self.redraw(action['is_redraw'])
if is_manual and self._context['sources_queue']:
self._context['input'] = ''
self._context['quick_move'] = ''
self._start_sources_queue(self._context)
return
def redraw(self, is_force: bool = True) -> None:
self._context['is_redraw'] = is_force
if is_force:
self._gather_candidates()
if self._update_candidates():
self._update_buffer()
else:
self._update_status()
self._context['is_redraw'] = False
def quit(self) -> None:
if self._denite:
self._denite.on_close(self._context)
self._quit_buffer()
self._result = []
return
def _restart(self) -> None:
self._context['input'] = ''
self._quit_buffer()
self._init_denite()
self._gather_candidates()
self._init_buffer()
self._update_candidates()
self._update_buffer()
def _start_sources_queue(self, context: UserContext) -> None:
if not context['sources_queue']:
return
self._sources_history.append({
'sources': context['sources_queue'][0],
'path': context['path'],
})
self._start(context['sources_queue'][0], context)
if context['sources_queue']:
context['sources_queue'].pop(0)
context['path'] = self._context['path']
def _start(self, sources: typing.List[typing.Any],
context: UserContext) -> None:
from denite.ui.map import do_map
self._vim.command('silent! autocmd! denite')
if re.search(r'\[Command Line\]$', self._vim.current.buffer.name):
return
resume = self._initialized and context['resume']
if resume:
update = ('immediately', 'immediately_1',
'cursor_pos', 'prev_winid',
'start_filter', 'quick_move')
for key in update:
self._context[key] = context[key]
self._check_move_option()
if self._check_do_option():
return
self._init_buffer()
if context['refresh']:
self.redraw()
self._move_to_pos(self._cursor)
else:
if self._context != context:
self._context.clear()
self._context.update(context)
self._context['sources'] = sources
self._context['is_redraw'] = False
self._is_multi = len(sources) > 1
if not sources:
error(self._vim, 'Empty sources')
return
self._init_denite()
self._gather_candidates()
self._update_candidates()
self._init_cursor()
self._check_move_option()
if self._check_do_option():
return
self._init_buffer()
self._update_displayed_texts()
self._update_buffer()
self._move_to_pos(self._cursor)
if self._context['quick_move'] and do_map(self, 'quick_move', []):
return
if self._context['start_filter']:
do_map(self, 'open_filter_buffer', [])
def _init_buffer(self) -> None:
self._prev_status = dict()
self._displayed_texts = []
self._prev_bufnr = self._vim.current.buffer.number
self._prev_curpos = self._vim.call('getcurpos')
self._prev_wininfo = self._get_wininfo()
self._prev_winid = int(self._context['prev_winid'])
self._winrestcmd = self._vim.call('winrestcmd')
self._ruler = self._vim.options['ruler']
self._switch_buffer()
self._bufnr = self._vim.current.buffer.number
self._winid = self._vim.call('win_getid')
self._resize_buffer(True)
self._winheight = self._vim.current.window.height
self._winwidth = self._vim.current.window.width
self._bufvars = self._vim.current.buffer.vars
self._bufvars['denite'] = {
'buffer_name': self._context['buffer_name'],
}
self._bufvars['denite_statusline'] = {}
self._vim.vars['denite#_previewed_buffers'] = {}
self._save_window_options = {}
window_options = {
'colorcolumn',
'concealcursor',
'conceallevel',
'cursorcolumn',
'cursorline',
'foldcolumn',
'foldenable',
'list',
'number',
'relativenumber',
'signcolumn',
'spell',
'winfixheight',
'wrap',
}
for k in window_options:
self._save_window_options[k] = self._vim.current.window.options[k]
self._vim.command('setlocal colorcolumn=')
self._vim.command('setlocal conceallevel=3')
self._vim.command('setlocal concealcursor=inv')
self._vim.command('setlocal nocursorcolumn')
self._vim.command('setlocal nofoldenable')
self._vim.command('setlocal foldcolumn=0')
self._vim.command('setlocal nolist')
self._vim.command('setlocal nonumber')
self._vim.command('setlocal norelativenumber')
self._vim.command('setlocal nospell')
self._vim.command('setlocal winfixheight')
self._vim.command('setlocal nowrap')
if self._context['prompt']:
self._vim.command('setlocal signcolumn=yes')
else:
self._vim.command('setlocal signcolumn=no')
if self._context['cursorline']:
self._vim.command('setlocal cursorline')
options = self._vim.current.buffer.options
if self._floating:
self._vim.options['ruler'] = False
options['buftype'] = 'nofile'
options['bufhidden'] = 'delete'
options['swapfile'] = False
options['buflisted'] = False
options['modeline'] = False
options['modifiable'] = False
options['filetype'] = 'denite'
if self._vim.call('exists', '#WinEnter'):
self._vim.command('doautocmd WinEnter')
if self._vim.call('exists', '#BufWinEnter'):
self._vim.command('doautocmd BufWinEnter')
if not self._vim.call('has', 'nvim'):
self._vim.command('silent doautocmd FileType denite')
if self._context['auto_action']:
self._vim.command('autocmd denite '
'CursorMoved <buffer> '
'call denite#call_map("auto_action")')
self._init_syntax()
def _switch_buffer(self) -> None:
split = self._context['split']
if (split != 'no' and self._winid > 0 and
self._vim.call('win_gotoid', self._winid)):
if split != 'vertical' and not self._floating:
self._vim.command('wincmd J')
self._winrestcmd = ''
return
self._floating = split in ['floating', 'floating_relative']
self._filter_floating = False
command = 'edit'
if split == 'tab':
self._vim.command('tabnew')
elif self._floating:
if self._vim.current.buffer.options['filetype'] != 'denite':
self._titlestring = self._vim.options['titlestring']
if split == 'floating':
self._vim.call(
'nvim_open_win',
self._vim.call('bufnr', '%'), True, {
'relative': 'editor',
'row': int(self._context['winrow']),
'col': int(self._context['wincol']),
'width': int(self._context['winwidth']),
'height': int(self._context['winheight']),
})
elif split == 'floating_relative':
opened_pos = (self._vim.call('nvim_win_get_position', 0)[0] +
self._vim.call('winline') - 1)
if self._context['auto_resize']:
height = max(self._winheight, 1)
width = max(self._winwidth, 1)
else:
width = int(self._context['winwidth'])
height = int(self._context['winheight'])
if opened_pos + height + 3 > self._vim.eval('&lines'):
anchor = 'SW'
row = 0
self._context['filter_winrow'] = row + opened_pos
else:
anchor = 'NW'
row = 1
self._context['filter_winrow'] = row + height + opened_pos
self._vim.call(
'nvim_open_win',
self._vim.call('bufnr', '%'), True, {
'relative': 'cursor',
'row': row,
'col': 0,
'width': width,
'height': height,
'anchor': anchor,
})
elif self._context['filter_split_direction'] == 'floating':
self._titlestring = self._vim.options['titlestring']
self._filter_floating = True
elif split != 'no':
command = self._get_direction()
command += ' vsplit' if split == 'vertical' else ' split'
bufname = '[denite]-' + self._context['buffer_name']
if self._vim.call('exists', '*bufadd'):
bufnr = self._vim.call('bufadd', bufname)
vertical = 'vertical' if split == 'vertical' else ''
command = (
'buffer' if split
in ['no', 'tab', 'floating', 'floating_relative'] else 'sbuffer')
self._vim.command(
'silent keepalt %s %s %s %s' % (
self._get_direction(),
vertical,
command,
bufnr,
)
)
else:
self._vim.call(
'denite#util#execute_path',
f'silent keepalt {command}', bufname)
def _get_direction(self) -> str:
direction = str(self._context['direction'])
if direction == 'dynamictop' or direction == 'dynamicbottom':
self._update_displayed_texts()
winwidth = self._vim.call('winwidth', 0)
is_fit = not [x for x in self._displayed_texts
if self._vim.call('strwidth', x) > winwidth]
if direction == 'dynamictop':
direction = 'aboveleft' if is_fit else 'topleft'
else:
direction = 'belowright' if is_fit else 'botright'
return direction
def _get_wininfo(self) -> typing.List[typing.Any]:
return [
self._vim.options['columns'], self._vim.options['lines'],
self._vim.call('win_getid'), self._vim.call('tabpagebuflist')
]
def _switch_prev_buffer(self) -> None:
if (self._prev_bufnr == self._bufnr or
self._vim.buffers[self._prev_bufnr].name == ''):
self._vim.command('enew')
else:
self._vim.command('buffer ' + str(self._prev_bufnr))
def _init_syntax(self) -> None:
self._vim.command('syntax case ignore')
self._vim.command('highlight default link deniteInput ModeMsg')
self._vim.command('highlight link deniteMatchedRange ' +
self._context['highlight_matched_range'])
self._vim.command('highlight link deniteMatchedChar ' +
self._context['highlight_matched_char'])
self._vim.command('highlight default link ' +
'deniteStatusLinePath Comment')
self._vim.command('highlight default link ' +
'deniteStatusLineNumber LineNR')
self._vim.command('highlight default link ' +
'deniteSelectedLine Statement')
if self._floating:
self._vim.current.window.options['winhighlight'] = (
'Normal:' + self._context['highlight_window_background']
)
self._vim.command(('syntax match deniteSelectedLine /^[%s].*/' +
' contains=deniteConcealedMark') % (
self._context['selected_icon']))
self._vim.command(('syntax match deniteConcealedMark /^[ %s]/' +
' conceal contained') % (
self._context['selected_icon']))
if self._denite:
self._denite.init_syntax(self._context, self._is_multi)
def _update_candidates(self) -> bool:
if not self._denite:
return False
[self._is_async, pattern, statuses, self._entire_len,
self._candidates] = self._denite.filter_candidates(self._context)
prev_displayed_texts = self._displayed_texts
self._update_displayed_texts()
prev_matched_pattern = self._matched_pattern
self._matched_pattern = pattern
prev_statusline_sources = self._statusline_sources
self._statusline_sources = ' '.join(statuses)
if self._is_async:
self._start_timer('update_candidates')
else:
self._stop_timer('update_candidates')
updated = (self._displayed_texts != prev_displayed_texts or
self._matched_pattern != prev_matched_pattern or
self._statusline_sources != prev_statusline_sources)
if updated:
self._updated = True
self._start_timer('update_buffer')
if self._context['search'] and self._context['input']:
self._vim.call('setreg', '/', self._context['input'])
return self._updated
def _update_displayed_texts(self) -> None:
candidates_len = len(self._candidates)
if not self._is_async and self._context['auto_resize']:
winminheight = int(self._context['winminheight'])
max_height = min(int(self._context['winheight']),
self._get_max_height())
if (winminheight != -1 and candidates_len < winminheight):
self._winheight = winminheight
elif candidates_len > max_height:
self._winheight = max_height
elif candidates_len != self._winheight:
self._winheight = candidates_len
max_source_name_len = 0
if self._candidates:
max_source_name_len = max([
len(self._get_display_source_name(x['source_name']))
for x in self._candidates])
self._context['max_source_name_len'] = max_source_name_len
self._context['max_source_name_format'] = (
'{:<' + str(self._context['max_source_name_len']) + '}')
self._displayed_texts = [
self._get_candidate_display_text(i)
for i in range(0, candidates_len)
]
def _update_buffer(self) -> None:
is_current_buffer = self._bufnr == self._vim.current.buffer.number
self._update_status()
if self._check_matchdelete and self._context['match_highlight']:
matches = [x['id'] for x in
self._vim.call('getmatches', self._winid)]
if self._matched_range_id in matches:
self._vim.call('matchdelete',
self._matched_range_id, self._winid)
self._matched_range_id = -1
if self._matched_char_id in matches:
self._vim.call('matchdelete',
self._matched_char_id, self._winid)
self._matched_char_id = -1
if self._matched_pattern != '':
self._matched_range_id = self._vim.call(
'matchadd', 'deniteMatchedRange',
r'\c' + regex_convert_py_vim(self._matched_pattern),
10, -1, {'window': self._winid})
matched_char_pattern = '[{}]'.format(re.sub(
r'([\[\]\\^-])',
r'\\\1',
self._context['input'].replace(' ', '')
))
self._matched_char_id = self._vim.call(
'matchadd', 'deniteMatchedChar',
matched_char_pattern,
10, -1, {'window': self._winid})
prev_linenr = self._vim.call('line', '.')
prev_candidate = self._get_cursor_candidate()
buffer = self._vim.buffers[self._bufnr]
buffer.options['modifiable'] = True
self._vim.vars['denite#_candidates'] = [
x['word'] for x in self._candidates]
buffer[:] = self._displayed_texts
buffer.options['modifiable'] = False
self._previous_text = self._context['input']
self._resize_buffer(is_current_buffer)
is_changed = (self._context['reversed'] or
(is_current_buffer and
self._previous_text != self._context['input']))
if self._updated and is_changed:
if not is_current_buffer:
save_winid = self._vim.call('win_getid')
self._vim.call('win_gotoid', self._winid)
self._init_cursor()
self._move_to_pos(self._cursor)
if not is_current_buffer:
self._vim.call('win_gotoid', save_winid)
elif is_current_buffer:
self._vim.call('cursor', [prev_linenr, 0])
if is_current_buffer:
if (self._context['auto_action'] and
prev_candidate != self._get_cursor_candidate()):
self.do_action(self._context['auto_action'])
self._updated = False
self._stop_timer('update_buffer')
def _update_status(self) -> None:
inpt = ''
if self._context['input']:
inpt = self._context['input'] + ' '
if self._context['error_messages']:
inpt = '[ERROR] ' + inpt
path = '[' + self._context['path'] + ']'
status = {
'input': inpt,
'sources': self._statusline_sources,
'path': path,
'buffer_name': self._context['buffer_name'],
'line_total': len(self._candidates),
}
if status == self._prev_status:
return
self._bufvars['denite_statusline'] = status
self._prev_status = status
linenr = "printf('%'.(len(line('$'))+2).'d/%d',line('.'),line('$'))"
if self._context['statusline']:
if self._floating or self._filter_floating:
self._vim.options['titlestring'] = (
"%{denite#get_status('input')}%* " +
"%{denite#get_status('sources')} " +
" %{denite#get_status('path')}%*" +
"%{" + linenr + "}%*")
else:
winnr = self._vim.call('win_id2win', self._winid)
self._vim.call('setwinvar', winnr, '&statusline', (
"%#deniteInput#%{denite#get_status('input')}%* " +
"%{denite#get_status('sources')} %=" +
"%#deniteStatusLinePath# %{denite#get_status('path')}%*" +
"%#deniteStatusLineNumber#%{" + linenr + "}%*"))
def _get_display_source_name(self, name: str) -> str:
source_names = self._context['source_names']
if not self._is_multi or source_names == 'hide':
source_name = ''
else:
short_name = (re.sub(r'([a-zA-Z])[a-zA-Z]+', r'\1', name)
if re.search(r'[^a-zA-Z]', name) else name[:2])
source_name = short_name if source_names == 'short' else name
return source_name
def _get_candidate_display_text(self, index: int) -> str:
source_names = self._context['source_names']
candidate = self._candidates[index]
terms = []
if self._is_multi and source_names != 'hide':
terms.append(self._context['max_source_name_format'].format(
self._get_display_source_name(candidate['source_name'])))
encoding = self._context['encoding']
abbr = candidate.get('abbr', candidate['word']).encode(
encoding, errors='replace').decode(encoding, errors='replace')
terms.append(abbr[:int(self._context['max_candidate_width'])])
return (self._context['selected_icon']
if index in self._selected_candidates
else ' ') + ' '.join(terms).replace('\n', '')
def _get_max_height(self) -> int:
return int(self._vim.options['lines']) if not self._floating else (
int(self._vim.options['lines']) -
int(self._context['winrow']) -
int(self._vim.options['cmdheight']))
def _resize_buffer(self, is_current_buffer: bool) -> None:
split = self._context['split']
if (split == 'no' or split == 'tab' or
self._vim.call('winnr', '$') == 1):
return
winheight = max(self._winheight, 1)
winwidth = max(self._winwidth, 1)
is_vertical = split == 'vertical'
if not is_current_buffer:
restore = self._vim.call('win_getid')
self._vim.call('win_gotoid', self._winid)
if not is_vertical and self._vim.current.window.height != winheight:
if self._floating:
wincol = int(self._context['winrow'])
row = wincol
if split == 'floating':
if self._context['auto_resize'] and row > 1:
row += int(self._context['winheight'])
row -= self._winheight
self._vim.call('nvim_win_set_config', self._winid, {
'relative': 'editor',
'row': row,
'col': int(self._context['wincol']),
'width': winwidth,
'height': winheight,
})
filter_row = 0 if wincol == 1 else row + winheight
filter_col = int(self._context['wincol'])
elif split == 'floating_relative':
init_pos = self._vim.call('nvim_win_get_config',
self._winid)
self._vim.call('nvim_win_set_config', self._winid, {
'relative': 'win',
'win': init_pos['win'],
'row': init_pos['row'],
'col': init_pos['col'],
'width': winwidth,
'height': winheight,
})
filter_col = init_pos['col']
if init_pos['anchor'] == 'NW':
winpos = self._vim.call('nvim_win_get_position',
self._winid)
filter_row = winpos[0] + winheight
filter_winid = self._vim.vars['denite#_filter_winid']
self._context['filter_winrow'] = row
if self._vim.call('win_id2win', filter_winid) > 0:
self._vim.call('nvim_win_set_config', filter_winid, {
'relative': 'editor',
'row': filter_row,
'col': filter_col,
})
self._vim.command('resize ' + str(winheight))
if self._context['reversed']:
self._vim.command('normal! zb')
elif is_vertical and self._vim.current.window.width != winwidth:
self._vim.command('vertical resize ' + str(winwidth))
if not is_current_buffer:
self._vim.call('win_gotoid', restore)
def _check_do_option(self) -> bool:
if self._context['do'] != '':
self._do_command(self._context['do'])
return True
elif (self._candidates and self._context['immediately'] or
len(self._candidates) == 1 and self._context['immediately_1']):
self._do_immediately()
return True
return not (self._context['empty'] or
self._is_async or self._candidates)
def _check_move_option(self) -> None:
if self._context['cursor_pos'].isnumeric():
self._cursor = int(self._context['cursor_pos']) + 1
elif re.match(r'\+\d+', self._context['cursor_pos']):
for _ in range(int(self._context['cursor_pos'][1:])):
self._move_to_next_line()
elif re.match(r'-\d+', self._context['cursor_pos']):
for _ in range(int(self._context['cursor_pos'][1:])):
self._move_to_prev_line()
elif self._context['cursor_pos'] == '$':
self._move_to_last_line()
def _do_immediately(self) -> None:
goto = self._winid > 0 and self._vim.call(
'win_gotoid', self._winid)
if goto:
self._init_buffer()
self.do_action('default')
candidate = self._get_cursor_candidate()
if not candidate:
return
echo(self._vim, 'Normal', '[{}/{}] {}'.format(
self._cursor, len(self._candidates),
candidate.get('abbr', candidate['word'])))
if goto:
self._vim.command('wincmd p')
def _do_command(self, command: str) -> None:
self._init_cursor()
cursor = 1
while cursor < len(self._candidates):
self.do_action('default', command)
self._move_to_next_line()
self._quit_buffer()
def _cleanup(self) -> None:
self._stop_timer('update_candidates')
self._stop_timer('update_buffer')
if self._vim.current.buffer.number == self._bufnr:
self._cursor = self._vim.call('line', '.')
self._vim.call('denite#filter#_close_filter_window')
if not self._context['has_preview_window']:
self._vim.command('pclose!')
for bufnr in self._vim.vars['denite#_previewed_buffers'].keys():
if not self._vim.call('win_findbuf', bufnr):
self._vim.command('silent bdelete ' + str(bufnr))
self._vim.vars['denite#_previewed_buffers'] = {}
self._vim.command('highlight! link CursorLine CursorLine')
if self._floating or self._filter_floating:
self._vim.options['titlestring'] = self._titlestring
self._vim.options['ruler'] = self._ruler
def _close_current_window(self) -> None:
if self._vim.call('winnr', '$') == 1:
self._vim.command('buffer #')
else:
self._vim.command('close!')
def _quit_buffer(self) -> None:
self._cleanup()
if self._vim.call('bufwinnr', self._bufnr) < 0:
return
winids = self._vim.call('win_findbuf',
self._vim.vars['denite#_filter_bufnr'])
if winids:
self._vim.call('win_gotoid', winids[0])
self._close_current_window()
self._vim.call('win_gotoid', self._winid)
if self._context['split'] == 'no':
self._switch_prev_buffer()
for k, v in self._save_window_options.items():
self._vim.current.window.options[k] = v
else:
if self._context['split'] == 'tab':
self._vim.command('tabclose!')
if self._context['split'] != 'tab':
self._close_current_window()
self._vim.call('win_gotoid', self._prev_winid)
self._vim.call('setpos', '.', self._prev_curpos)
if self._get_wininfo() and self._get_wininfo() == self._prev_wininfo:
self._vim.command(self._winrestcmd)
self._vim.command(self._winrestcmd)
clearmatch(self._vim)
def _get_cursor_candidate(self) -> Candidate:
return self._get_candidate(self._cursor)
def _get_candidate(self, pos: int) -> Candidate:
if not self._candidates or pos > len(self._candidates):
return {}
return self._candidates[pos - 1]
def _get_selected_candidates(self) -> Candidates:
if not self._selected_candidates:
return [self._get_cursor_candidate()
] if self._get_cursor_candidate() else []
return [self._candidates[x] for x in self._selected_candidates]
def _init_denite(self) -> None:
if self._denite:
self._denite.start(self._context)
self._denite.on_init(self._context)
self._initialized = True
self._winheight = int(self._context['winheight'])
self._winwidth = int(self._context['winwidth'])
def _gather_candidates(self) -> None:
self._selected_candidates = []
if self._denite:
self._denite.gather_candidates(self._context)
def _init_cursor(self) -> None:
if self._context['reversed']:
self._move_to_last_line()
self._vim.command('normal! zb')
else:
self._move_to_first_line()
def _move_to_pos(self, pos: int) -> None:
self._vim.call('cursor', pos, 0)
self._cursor = pos
def _move_to_next_line(self) -> None:
if self._cursor < len(self._candidates):
self._cursor += 1
def _move_to_prev_line(self) -> None:
if self._cursor >= 1:
self._cursor -= 1
def _move_to_first_line(self) -> None:
self._cursor = 1
def _move_to_last_line(self) -> None:
self._cursor = len(self._candidates)
def _start_timer(self, key: str) -> None:
if key in self._timers:
return
if key == 'update_candidates':
self._timers[key] = self._vim.call(
'denite#helper#_start_update_candidates_timer', self._bufnr)
elif key == 'update_buffer':
self._timers[key] = self._vim.call(
'denite#helper#_start_update_buffer_timer', self._bufnr)
def _stop_timer(self, key: str) -> None:
if key not in self._timers:
return
self._vim.call('timer_stop', self._timers[key])
if key in self._timers:
self._timers.pop(key)
| true | true |
f7f592f03076ad9090c98d42b193285284984cb3 | 18,667 | py | Python | cogs/general.py | SohamGhugare/coding-bot-v4 | 2ffe614d4747e0a47725d4c155326a73a2f80f80 | [
"MIT"
] | null | null | null | cogs/general.py | SohamGhugare/coding-bot-v4 | 2ffe614d4747e0a47725d4c155326a73a2f80f80 | [
"MIT"
] | null | null | null | cogs/general.py | SohamGhugare/coding-bot-v4 | 2ffe614d4747e0a47725d4c155326a73a2f80f80 | [
"MIT"
] | null | null | null | """
Coding Bot v4
~~~~~~~~~~~~~~~~~~
This file contains elements that are under the following licenses:
Copyright (c) 2015 Rapptz
license MIT, see
https://github.com/Rapptz/RoboDanny/blob/e1c3c28fe20eb192463f7fc224a399141f0d915d/LICENSE.txt
for more details.
"""
import discord
import time
import asyncio
import datetime
import re
import aiohttp
import asyncpg
import os
import sys
import traceback
import url_parser
import humanize
import inspect
from jishaku.codeblocks import codeblock_converter
from discord.ext import commands, menus
class ClientSession(aiohttp.ClientSession):
def __init__(self, *args, **kwargs):
try:
default = {
# 'response_class': ClientResponse,
'rickroll_queries': ["rickroll","rick roll","rick astley","never gonna give you up"],
'block': [],
'timeout': aiohttp.ClientTimeout(total=300, sock_read=10) # to prevent attacks relating to sending massive payload and lagging the client
}
default.update(kwargs)
self.rickroll_regex = re.compile('|'.join(default['rickroll_queries']), re.IGNORECASE)
self.block_list = default['block']
del default['rickroll_queries']
del default['block']
super().__init__(*args, **default)
except:
raise
super().__init__(*args, **kwargs)
async def _request(self, *args, **kwargs):
req = await super()._request(*args, **kwargs)
regex = self.rickroll_regex
content = str(await req.content.read())
req.rickroll = bool(regex.search(content))
blocked_urls = self.block_list
urls = [str(redirect.url_obj) for redirect in req.history]
req.blocked = bool(await check_links(urls, blocked_urls))
return req
class RedirectMenu(menus.ListPageSource):
def __init__(self, data, ctx, rickroll=False):
grouped = [' \n'.join(data[i:i + 5]) for i in range(0, len(data), 5)]
super().__init__(grouped, per_page=1)
self.ctx = ctx
self.rickroll = rickroll
async def format_page(self, menu, entry):
embed = self.ctx.embed(title='Redirect Checker', description=entry)
embed.set_footer(text=f'Page {menu.current_page + 1}/{menu._source.get_max_pages()} | ' + embed.footer.text, icon_url=embed.footer.icon_url)
if self.rickroll:
embed.set_thumbnail(url='https://cdn.discordapp.com/attachments/814195797380825088/844955986674712646/rick.gif')
return embed
async def check_link_base(url, block_list):
url = url_parser.get_url(url)._asdict()
for blocked in block_list:
parsed_blocked = url_parser.get_url(
blocked.replace('*', '-'))._asdict()
delete = True
for k in ['sub_domain', 'domain', 'top_domain', 'path']:
rep = parsed_blocked[k]
if k == 'path':
rep = rep[1:]
if url[k] != rep and rep.replace('.','') != '-':
delete = False
break
if delete:
return True
async def check_links(urls, block_list):
for url in urls:
if await check_link_base(url, block_list):
return True
def convert_link(content):
base_regex = r'(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*(),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+$'
if re.match(r'^http[s]?://' + base_regex, content):
return content
elif re.match(r'^' + base_regex, content):
return 'https://' + content
else:
raise ValueError('Not a link')
async def check_link(url):
return await check_link_base(url, [ # "*" means any
# [http[s]://][sub.]<name>.<tld>[/path] # Reason
################################################################
'*.grabify.link/*', # Ip Grabber
'*.pornhub.com/*', # Porn
'*.guilded.gg/*', # Advertising
'*.tornadus.net/orange', # Discord Crasher
'giant.gfycat.com/SizzlingScrawnyBudgie.mp4', # Discord Crasher
])
async def find_links(cog, content, channel=None):
regex = (r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*(),]|'
r'(?:%[0-9a-fA-F][0-9a-fA-F]))+')
matches = re.findall(regex, content, re.MULTILINE)
urls = []
rickroll = False
for link in matches:
location = link
try:
for i in range(10):
if await check_link(location) or await check_invite(cog.bot, location, channel):
return 1
async with cog.session.get(location, allow_redirects=False) as resp:
location = resp.headers.get('Location')
if resp.rickroll:
rickroll = True
if location == resp.real_url or location is None:
break
except Exception as error:
print('Ignoring exception in url filter {}:'.format(content), file=sys.stderr)
traceback.print_exception(type(error), error, error.__traceback__, file=sys.stderr)
if rickroll:
return 2
async def filter_links(cog, message):
if ((not isinstance(message.author, discord.Member)) or
message.author.permissions_in(message.channel).manage_messages):
return
checked = await find_links(cog, message.content, message.channel)
if checked == 1:
try:
await message.delete()
except discord.errors.NotFound:
pass
await message.channel.send((
f':warning: {message.author.mention} That link is not '
'allowed :warning:'), delete_after=15)
elif checked == 2:
await message.add_reaction(cog.bot.get_emoji(844957433511542794))
return
async def check_invite(bot, content, channel=None):
content = discord.utils.remove_markdown(content)
pattern = (
r'discord(?:(?:(?:app)?\.com)\/invite|\.gg)/([a-zA-z0-9\-]{2,})\b')
matches = re.findall(pattern, content, re.MULTILINE)
if channel.id in [
754992725480439809,
801641781028454420,
727029474767667322
]:
return False
if len(matches) > 5:
return True
for code in matches:
try:
invite = await bot.fetch_invite(code)
except discord.errors.NotFound:
invite = None # invite is fine
if invite:
if invite.guild.id not in [
channel.guild.id if channel else None,
681882711945641997, # TCA
782903894468198450, # Swasville
336642139381301249, # Discord.py
267624335836053506, # Python
412754940885467146, # Blurple
613425648685547541, # Discord Developers
]:
return True
return False
async def filter_invite(bot, message=None, content=None):
if ((not isinstance(message.author, discord.Member)) or
message.author.permissions_in(message.channel).manage_messages):
return
matched = await check_invite(bot, message.content, message.channel)
if matched:
await message.delete()
await message.channel.send((
f':warning: {message.author.mention} Invite links are not allowed '
':warning:'), delete_after=15)
return True
def gcd(a, b):
"""
calculate the greatest common divisor of a and b.
"""
while b:
a, b = b, a % b
return a
class General(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.session = ClientSession()
@commands.Cog.listener()
async def on_message_edit(self, before, after):
if before.content != after.content: # invoke the command again on edit
if not after.author.bot:
ctx = await self.bot.get_context(
after, cls=self.bot.helpers.Context)
await self.bot.invoke(ctx)
if after.guild:
if after.guild.id == 681882711945641997:
invite = await filter_invite(self.bot, after)
if not invite:
await filter_links(self, after)
@commands.Cog.listener()
async def on_message(self, message):
if message.guild:
if message.guild.id == 681882711945641997:
invite = await filter_invite(self.bot, message)
if not invite:
await filter_links(self, message)
@commands.command(name="source", aliases=["github", "code"])
@commands.cooldown(1, 1, commands.BucketType.channel)
async def _source(self, ctx, *, command: str = None):
"""Displays my full source code or for a specific command.
To display the source code of a subcommand you can separate it by
periods or spaces.
"""
github = '<:githubwhite:804344724621230091>'
embed = ctx.embed(title=f'{github} GitHub (Click Here) {github}')
source_url = 'https://github.com/The-Coding-Academy/coding-bot-v4'
branch = 'main'
if command is None:
embed.url = source_url
return await ctx.send(embed=embed)
if command == 'help':
src = type(self.bot.help_command)
module = src.__module__
filename = inspect.getsourcefile(src)
else:
obj = self.bot.get_command(command.replace('.', ' '))
if obj is None:
return await ctx.send(embed=ctx.error('Could not find command.'))
src = obj.callback.__code__
module = obj.callback.__module__
filename = src.co_filename
lines, firstlineno = inspect.getsourcelines(src)
if not module.startswith('discord'):
# not a built-in command
location = os.path.relpath(filename).replace('\\', '/')
else:
location = module.replace('.', '/') + '.py'
source_url = 'https://github.com/Rapptz/discord.py'
branch = 'master'
final_url = (f'{source_url}/blob/{branch}/{location}#L{firstlineno}-L'
f'{firstlineno + len(lines) - 1}')
embed.url = final_url
await ctx.send(embed=embed)
@commands.command(name="mystbin", aliases=["mb"])
@commands.cooldown(1, 1, commands.BucketType.channel)
async def _mystbin(self, ctx, *, code: codeblock_converter = None):
"""Send your code to Mystb.in. You may use codeblocks if you want,
or use code from inside a file."""
code = code.content if code else None
attachments = None
if len(ctx.message.attachments) != 0:
attachments = ctx.message.attachments
elif ctx.message.reference:
message = await ctx.channel.fetch_message(
ctx.message.reference.message_id)
attachments = message.attachments
if attachments:
for attachment in attachments:
code = await attachment.read()
if not code:
return await ctx.send(embed=ctx.error((
'Please either provide code in the command, attach a file, or '
'react to a message that contains a file.')))
async with self.bot.http._HTTPClient__session.post(
'https://mystb.in/documents', data=code) as r:
res = await r.json()
key = res["key"]
embed = ctx.embed(title="Mystb.in Link", description=(
'I pasted your code into a bin, click on the title access it!'),
url=f'https://mystb.in/{key}')
embed.set_thumbnail(url=(
'https://cdn.discordapp.com/avatars/569566608817782824/'
'14f120e096fb515d770eea38f9cddd88.png'))
await ctx.send(embed=embed)
@commands.command(name='ping')
async def _ping(self, ctx):
loading = '<a:DiscordSpin:795546311319355393>'
ws_ping = f'{(self.bot.latency * 1000):.2f}ms ' \
f'({humanize.precisedelta(datetime.timedelta(seconds=self.bot.latency))})'
embed = ctx.embed(title='PONG! :ping_pong:', description=(
f'**{loading} Websocket:** {ws_ping}\n**'
':repeat: Round-Trip:** Calculating...\n**:elephant: Database:** '
'Calculating...'))
start = time.perf_counter()
message = await ctx.send(embed=embed)
end = time.perf_counter()
await asyncio.sleep(0.5)
trip = end - start
rt_ping = f'{(trip * 1000):.2f}ms ({humanize.precisedelta(datetime.timedelta(seconds=trip))})'
embed.description = (
f'**{loading} Websocket:** {ws_ping}\n**'
f':repeat: Round-Trip:** {rt_ping}\n**:elephant: '
'Database:** Calculating...')
await message.edit(embed=embed)
await asyncio.sleep(0.5)
start = time.perf_counter()
try:
async with self.bot.pools.config.acquire() as connection:
await connection.fetchval(
'SELECT prefixes FROM serverconf WHERE id = 0')
end = time.perf_counter()
database = end - start
db_ping = f'{(database * 1000):.2f}ms ({humanize.precisedelta(datetime.timedelta(seconds=database))})'
embed.description = (
f'**{loading} Websocket:** {ws_ping}\n'
f'**:repeat: Round-Trip:** {rt_ping}\n**:elephant:'
f' Database:** {db_ping}')
except asyncpg.exceptions._base.InterfaceError:
embed.description = (
f'**{loading} Websocket:** {ws_ping}'
f'\n**:repeat: Round-Trip:** {rt_ping}\n**'
':elephant: Database:** *Did not respond!*')
await message.edit(embed=embed)
@commands.command(name='revive', aliases=['revivechat', 'chatrevive',
'revchat', 'chatrev'])
@commands.guild_only()
@commands.cooldown(1, 1800, commands.BucketType.guild)
@commands.has_any_role(729530191109554237, 795136568805294097,
725899526350831616) # Senior Mod +
async def _revive(self, ctx):
mention = ctx.guild.get_role(759219083639783448).mention
embed = ctx.embed(
title='Revive Chat Ping!',
description='Come back to chat and make it alive again!')
await ctx.send(content=mention, embed=embed)
@commands.command(name='reinvoke', aliases=['re'])
async def _reinvoke(self, ctx):
"""
Reinvoke a command, running it again. This does NOT bypass any permissions checks
"""
try:
message = await ctx.channel.fetch_message(ctx.message.reference.message_id)
except discord.errors.NotFound:
return await ctx.send(embed=ctx.error('I couldn\'t find that message'))
if message.author == ctx.author:
await ctx.message.add_reaction('\U00002705')
context = await self.bot.get_context(
message, cls=self.bot.helpers.Context)
await self.bot.invoke(context)
else:
await ctx.send(embed=ctx.error('That isn\'t your message'))
@commands.command(name="joined")
async def _joined(self, ctx, position: int):
async with ctx.typing():
if position > ctx.guild.member_count:
return await ctx.send(embed=ctx.error('There are not that many members here'))
all_members = list(ctx.guild.members)
all_members.sort(key=lambda m: m.joined_at)
def ord(n):
return str(n)+("th" if 4<=n%100<=20 else {1:"st",2:"nd",3:"rd"}.get(n%10, "th"))
embed = ctx.embed(title = f"The {ord(position)} person to join is: ", description=all_members[position - 1].mention)
await ctx.send(embed=embed)
@commands.command(name="joinposition", aliases=['joinpos'])
async def _join_position(self, ctx, member: discord.Member):
async with ctx.typing():
all_members = list(ctx.guild.members)
all_members.sort(key=lambda m: m.joined_at)
def ord(n):
return str(n)+("th" if 4<=n%100<=20 else {1:"st",2:"nd",3:"rd"}.get(n%10, "th"))
embed = ctx.embed(title = "Member info", description = f'{member.mention} was the {ord(all_members.index(member) + 1)} person to join')
await ctx.send(embed=embed)
@commands.group(invoke_without_command=True)
async def math(self, ctx):
await ctx.send_help('math')
@math.command(name='simplify')
async def _math_simplify(self, ctx, fraction):
try:
numerator, denominator = (int(x) for x in fraction.split('/'))
except:
return await ctx.send_error('Not a fraction')
if denominator == 0:
return await ctx.send_error("Division by 0")
common_divisor = gcd(numerator, denominator)
(reduced_numerator, reduced_denominator) = (numerator / common_divisor, denominator / common_divisor)
if reduced_denominator == 1:
final = int(reduced_numerator)
elif common_divisor == 1:
final = f'{int(numerator)}/{int(denominator)}'
else:
final = f'{int(reduced_numerator)}/{int(reduced_denominator)}'
await ctx.send(embed=ctx.embed(title='Reduced Fraction', description=final))
@commands.command(name='redirects', aliases=['checklink'])
async def _redirects(self, ctx, url: convert_link):
hl = []
status_map = {
1: '\U0001f504',
2: '\U00002705',
3: '\U000027a1',
4: '\U0000274c',
5: '\U000026a0'
}
def build_string(res):
return f'{status_map[int(res.status / 100)]} [{(res.url_obj.host + res.url_obj.path).strip("/")}]({res.url_obj}) ({res.status} {res.reason})'
rickroll = False
try:
async with ctx.typing():
r = await self.session.get(url)
for res in r.history:
hl.append(build_string(res))
hl.append(build_string(r))
rickroll = r.rickroll
except:
return await ctx.send_error(f'Could not reach "{url}"')
pages = menus.MenuPages(source=RedirectMenu(hl, ctx, rickroll=rickroll), delete_message_after=True)
await pages.start(ctx)
def setup(bot):
bot.add_cog(General(bot))
| 40.492408 | 154 | 0.577597 |
import discord
import time
import asyncio
import datetime
import re
import aiohttp
import asyncpg
import os
import sys
import traceback
import url_parser
import humanize
import inspect
from jishaku.codeblocks import codeblock_converter
from discord.ext import commands, menus
class ClientSession(aiohttp.ClientSession):
def __init__(self, *args, **kwargs):
try:
default = {
'rickroll_queries': ["rickroll","rick roll","rick astley","never gonna give you up"],
'block': [],
'timeout': aiohttp.ClientTimeout(total=300, sock_read=10)
}
default.update(kwargs)
self.rickroll_regex = re.compile('|'.join(default['rickroll_queries']), re.IGNORECASE)
self.block_list = default['block']
del default['rickroll_queries']
del default['block']
super().__init__(*args, **default)
except:
raise
super().__init__(*args, **kwargs)
async def _request(self, *args, **kwargs):
req = await super()._request(*args, **kwargs)
regex = self.rickroll_regex
content = str(await req.content.read())
req.rickroll = bool(regex.search(content))
blocked_urls = self.block_list
urls = [str(redirect.url_obj) for redirect in req.history]
req.blocked = bool(await check_links(urls, blocked_urls))
return req
class RedirectMenu(menus.ListPageSource):
def __init__(self, data, ctx, rickroll=False):
grouped = [' \n'.join(data[i:i + 5]) for i in range(0, len(data), 5)]
super().__init__(grouped, per_page=1)
self.ctx = ctx
self.rickroll = rickroll
async def format_page(self, menu, entry):
embed = self.ctx.embed(title='Redirect Checker', description=entry)
embed.set_footer(text=f'Page {menu.current_page + 1}/{menu._source.get_max_pages()} | ' + embed.footer.text, icon_url=embed.footer.icon_url)
if self.rickroll:
embed.set_thumbnail(url='https://cdn.discordapp.com/attachments/814195797380825088/844955986674712646/rick.gif')
return embed
async def check_link_base(url, block_list):
url = url_parser.get_url(url)._asdict()
for blocked in block_list:
parsed_blocked = url_parser.get_url(
blocked.replace('*', '-'))._asdict()
delete = True
for k in ['sub_domain', 'domain', 'top_domain', 'path']:
rep = parsed_blocked[k]
if k == 'path':
rep = rep[1:]
if url[k] != rep and rep.replace('.','') != '-':
delete = False
break
if delete:
return True
async def check_links(urls, block_list):
for url in urls:
if await check_link_base(url, block_list):
return True
def convert_link(content):
base_regex = r'(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*(),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+$'
if re.match(r'^http[s]?://' + base_regex, content):
return content
elif re.match(r'^' + base_regex, content):
return 'https://' + content
else:
raise ValueError('Not a link')
async def check_link(url):
return await check_link_base(url, [
ync def check_invite(bot, content, channel=None):
content = discord.utils.remove_markdown(content)
pattern = (
r'discord(?:(?:(?:app)?\.com)\/invite|\.gg)/([a-zA-z0-9\-]{2,})\b')
matches = re.findall(pattern, content, re.MULTILINE)
if channel.id in [
754992725480439809,
801641781028454420,
727029474767667322
]:
return False
if len(matches) > 5:
return True
for code in matches:
try:
invite = await bot.fetch_invite(code)
except discord.errors.NotFound:
invite = None
if invite:
if invite.guild.id not in [
channel.guild.id if channel else None,
681882711945641997,
782903894468198450,
336642139381301249,
267624335836053506,
412754940885467146,
613425648685547541,
]:
return True
return False
async def filter_invite(bot, message=None, content=None):
if ((not isinstance(message.author, discord.Member)) or
message.author.permissions_in(message.channel).manage_messages):
return
matched = await check_invite(bot, message.content, message.channel)
if matched:
await message.delete()
await message.channel.send((
f':warning: {message.author.mention} Invite links are not allowed '
':warning:'), delete_after=15)
return True
def gcd(a, b):
while b:
a, b = b, a % b
return a
class General(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.session = ClientSession()
@commands.Cog.listener()
async def on_message_edit(self, before, after):
if before.content != after.content:
if not after.author.bot:
ctx = await self.bot.get_context(
after, cls=self.bot.helpers.Context)
await self.bot.invoke(ctx)
if after.guild:
if after.guild.id == 681882711945641997:
invite = await filter_invite(self.bot, after)
if not invite:
await filter_links(self, after)
@commands.Cog.listener()
async def on_message(self, message):
if message.guild:
if message.guild.id == 681882711945641997:
invite = await filter_invite(self.bot, message)
if not invite:
await filter_links(self, message)
@commands.command(name="source", aliases=["github", "code"])
@commands.cooldown(1, 1, commands.BucketType.channel)
async def _source(self, ctx, *, command: str = None):
github = '<:githubwhite:804344724621230091>'
embed = ctx.embed(title=f'{github} GitHub (Click Here) {github}')
source_url = 'https://github.com/The-Coding-Academy/coding-bot-v4'
branch = 'main'
if command is None:
embed.url = source_url
return await ctx.send(embed=embed)
if command == 'help':
src = type(self.bot.help_command)
module = src.__module__
filename = inspect.getsourcefile(src)
else:
obj = self.bot.get_command(command.replace('.', ' '))
if obj is None:
return await ctx.send(embed=ctx.error('Could not find command.'))
src = obj.callback.__code__
module = obj.callback.__module__
filename = src.co_filename
lines, firstlineno = inspect.getsourcelines(src)
if not module.startswith('discord'):
location = os.path.relpath(filename).replace('\\', '/')
else:
location = module.replace('.', '/') + '.py'
source_url = 'https://github.com/Rapptz/discord.py'
branch = 'master'
final_url = (f'{source_url}/blob/{branch}/{location}#L{firstlineno}-L'
f'{firstlineno + len(lines) - 1}')
embed.url = final_url
await ctx.send(embed=embed)
@commands.command(name="mystbin", aliases=["mb"])
@commands.cooldown(1, 1, commands.BucketType.channel)
async def _mystbin(self, ctx, *, code: codeblock_converter = None):
code = code.content if code else None
attachments = None
if len(ctx.message.attachments) != 0:
attachments = ctx.message.attachments
elif ctx.message.reference:
message = await ctx.channel.fetch_message(
ctx.message.reference.message_id)
attachments = message.attachments
if attachments:
for attachment in attachments:
code = await attachment.read()
if not code:
return await ctx.send(embed=ctx.error((
'Please either provide code in the command, attach a file, or '
'react to a message that contains a file.')))
async with self.bot.http._HTTPClient__session.post(
'https://mystb.in/documents', data=code) as r:
res = await r.json()
key = res["key"]
embed = ctx.embed(title="Mystb.in Link", description=(
'I pasted your code into a bin, click on the title access it!'),
url=f'https://mystb.in/{key}')
embed.set_thumbnail(url=(
'https://cdn.discordapp.com/avatars/569566608817782824/'
'14f120e096fb515d770eea38f9cddd88.png'))
await ctx.send(embed=embed)
@commands.command(name='ping')
async def _ping(self, ctx):
loading = '<a:DiscordSpin:795546311319355393>'
ws_ping = f'{(self.bot.latency * 1000):.2f}ms ' \
f'({humanize.precisedelta(datetime.timedelta(seconds=self.bot.latency))})'
embed = ctx.embed(title='PONG! :ping_pong:', description=(
f'**{loading} Websocket:** {ws_ping}\n**'
':repeat: Round-Trip:** Calculating...\n**:elephant: Database:** '
'Calculating...'))
start = time.perf_counter()
message = await ctx.send(embed=embed)
end = time.perf_counter()
await asyncio.sleep(0.5)
trip = end - start
rt_ping = f'{(trip * 1000):.2f}ms ({humanize.precisedelta(datetime.timedelta(seconds=trip))})'
embed.description = (
f'**{loading} Websocket:** {ws_ping}\n**'
f':repeat: Round-Trip:** {rt_ping}\n**:elephant: '
'Database:** Calculating...')
await message.edit(embed=embed)
await asyncio.sleep(0.5)
start = time.perf_counter()
try:
async with self.bot.pools.config.acquire() as connection:
await connection.fetchval(
'SELECT prefixes FROM serverconf WHERE id = 0')
end = time.perf_counter()
database = end - start
db_ping = f'{(database * 1000):.2f}ms ({humanize.precisedelta(datetime.timedelta(seconds=database))})'
embed.description = (
f'**{loading} Websocket:** {ws_ping}\n'
f'**:repeat: Round-Trip:** {rt_ping}\n**:elephant:'
f' Database:** {db_ping}')
except asyncpg.exceptions._base.InterfaceError:
embed.description = (
f'**{loading} Websocket:** {ws_ping}'
f'\n**:repeat: Round-Trip:** {rt_ping}\n**'
':elephant: Database:** *Did not respond!*')
await message.edit(embed=embed)
@commands.command(name='revive', aliases=['revivechat', 'chatrevive',
'revchat', 'chatrev'])
@commands.guild_only()
@commands.cooldown(1, 1800, commands.BucketType.guild)
@commands.has_any_role(729530191109554237, 795136568805294097,
725899526350831616)
async def _revive(self, ctx):
mention = ctx.guild.get_role(759219083639783448).mention
embed = ctx.embed(
title='Revive Chat Ping!',
description='Come back to chat and make it alive again!')
await ctx.send(content=mention, embed=embed)
@commands.command(name='reinvoke', aliases=['re'])
async def _reinvoke(self, ctx):
try:
message = await ctx.channel.fetch_message(ctx.message.reference.message_id)
except discord.errors.NotFound:
return await ctx.send(embed=ctx.error('I couldn\'t find that message'))
if message.author == ctx.author:
await ctx.message.add_reaction('\U00002705')
context = await self.bot.get_context(
message, cls=self.bot.helpers.Context)
await self.bot.invoke(context)
else:
await ctx.send(embed=ctx.error('That isn\'t your message'))
@commands.command(name="joined")
async def _joined(self, ctx, position: int):
async with ctx.typing():
if position > ctx.guild.member_count:
return await ctx.send(embed=ctx.error('There are not that many members here'))
all_members = list(ctx.guild.members)
all_members.sort(key=lambda m: m.joined_at)
def ord(n):
return str(n)+("th" if 4<=n%100<=20 else {1:"st",2:"nd",3:"rd"}.get(n%10, "th"))
embed = ctx.embed(title = f"The {ord(position)} person to join is: ", description=all_members[position - 1].mention)
await ctx.send(embed=embed)
@commands.command(name="joinposition", aliases=['joinpos'])
async def _join_position(self, ctx, member: discord.Member):
async with ctx.typing():
all_members = list(ctx.guild.members)
all_members.sort(key=lambda m: m.joined_at)
def ord(n):
return str(n)+("th" if 4<=n%100<=20 else {1:"st",2:"nd",3:"rd"}.get(n%10, "th"))
embed = ctx.embed(title = "Member info", description = f'{member.mention} was the {ord(all_members.index(member) + 1)} person to join')
await ctx.send(embed=embed)
@commands.group(invoke_without_command=True)
async def math(self, ctx):
await ctx.send_help('math')
@math.command(name='simplify')
async def _math_simplify(self, ctx, fraction):
try:
numerator, denominator = (int(x) for x in fraction.split('/'))
except:
return await ctx.send_error('Not a fraction')
if denominator == 0:
return await ctx.send_error("Division by 0")
common_divisor = gcd(numerator, denominator)
(reduced_numerator, reduced_denominator) = (numerator / common_divisor, denominator / common_divisor)
if reduced_denominator == 1:
final = int(reduced_numerator)
elif common_divisor == 1:
final = f'{int(numerator)}/{int(denominator)}'
else:
final = f'{int(reduced_numerator)}/{int(reduced_denominator)}'
await ctx.send(embed=ctx.embed(title='Reduced Fraction', description=final))
@commands.command(name='redirects', aliases=['checklink'])
async def _redirects(self, ctx, url: convert_link):
hl = []
status_map = {
1: '\U0001f504',
2: '\U00002705',
3: '\U000027a1',
4: '\U0000274c',
5: '\U000026a0'
}
def build_string(res):
return f'{status_map[int(res.status / 100)]} [{(res.url_obj.host + res.url_obj.path).strip("/")}]({res.url_obj}) ({res.status} {res.reason})'
rickroll = False
try:
async with ctx.typing():
r = await self.session.get(url)
for res in r.history:
hl.append(build_string(res))
hl.append(build_string(r))
rickroll = r.rickroll
except:
return await ctx.send_error(f'Could not reach "{url}"')
pages = menus.MenuPages(source=RedirectMenu(hl, ctx, rickroll=rickroll), delete_message_after=True)
await pages.start(ctx)
def setup(bot):
bot.add_cog(General(bot))
| true | true |
f7f5942ba6d287222de417153ba8a3d8eb0a0369 | 123 | py | Python | mcts_player/removeSpace.py | MPARASHA/Gomoku-Players | f26f5aac6e719b0e5f758a34a275feaf885ae162 | [
"MIT"
] | null | null | null | mcts_player/removeSpace.py | MPARASHA/Gomoku-Players | f26f5aac6e719b0e5f758a34a275feaf885ae162 | [
"MIT"
] | null | null | null | mcts_player/removeSpace.py | MPARASHA/Gomoku-Players | f26f5aac6e719b0e5f758a34a275feaf885ae162 | [
"MIT"
] | null | null | null | import sys
for line in sys.stdin:
if line=='\n' or line=='= \n':
continue
else:
print(line,end="") | 17.571429 | 34 | 0.520325 | import sys
for line in sys.stdin:
if line=='\n' or line=='= \n':
continue
else:
print(line,end="") | true | true |
f7f5947403b33ae9b46baebb225ea75b212f9297 | 3,199 | py | Python | xos/synchronizer/test_voltha_client.py | etrirepo/olt_synchronizer | 493d20a2e404fe7aeca73eaa6d4bb18275faf4ec | [
"Apache-2.0"
] | null | null | null | xos/synchronizer/test_voltha_client.py | etrirepo/olt_synchronizer | 493d20a2e404fe7aeca73eaa6d4bb18275faf4ec | [
"Apache-2.0"
] | null | null | null | xos/synchronizer/test_voltha_client.py | etrirepo/olt_synchronizer | 493d20a2e404fe7aeca73eaa6d4bb18275faf4ec | [
"Apache-2.0"
] | null | null | null | # Copyright 2020-present Open Networking Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from time import sleep
import grpc
import os
from concurrent.futures import ThreadPoolExecutor
from voltha_client import VolthaClient, ConnectionError
from mock_voltha_server import VolthaServerMock, MOCK_VOLTHA_SERVER_ADDRESS, MOCK_VOLTHA_SERVER_PORT
from google.protobuf import json_format
test_path = os.path.abspath(os.path.dirname(os.path.realpath(__file__)))
class TestVolthaClient(unittest.TestCase):
def setUp(self):
self.server = grpc.server(ThreadPoolExecutor(max_workers=5))
self.voltha_server_mock, _, _ = VolthaServerMock.start_voltha_server(self.server)
self.voltha_client = VolthaClient(url=MOCK_VOLTHA_SERVER_ADDRESS, port=MOCK_VOLTHA_SERVER_PORT, timeout=1)
self.devices = {"items": []}
self.olts = [
{
"id": "test_id",
"type": "simulated_olt",
},
{
'id': 'tibit_id',
'type': 'tibit_olt',
}
]
self.onus = [
{
"id": "1",
"type": "onu",
},
{
"id": "2",
"type": "broadcom_onu",
}
]
self.devices["items"].extend(self.olts)
self.devices["items"].extend(self.onus)
# Need to wait an be sure the gRPC voltha mock server is up and running
# before running the actual test.
for i in range(5):
try:
self.voltha_client.list_devices()
return
except ConnectionError:
sleep(1)
self.fail("VOLTHA gRPC server failed to start!")
def tearDown(self):
self.voltha_client.close_channel()
self.server.stop(None)
def test_list_olt_devices(self):
self.voltha_server_mock.set_devices(self.devices)
print(self.voltha_server_mock.devices)
olts = self.voltha_client.list_olt_devices()
self.assertEqual(self.olts, [json_format.MessageToDict(o) for o in olts])
def test_list_olt_devices_nodevices(self):
olts = self.voltha_client.list_olt_devices()
self.assertEqual(len(olts), 0)
def test_list_onu_devices(self):
self.voltha_server_mock.set_devices(self.devices)
onus = self.voltha_client.list_onu_devices()
self.assertEqual(self.onus, [json_format.MessageToDict(o) for o in onus])
def test_list_onu_devices_nodevices(self):
onus = self.voltha_client.list_olt_devices()
self.assertEqual(len(onus), 0)
if __name__ == "__main__":
unittest.main()
| 32.642857 | 114 | 0.648328 |
import unittest
from time import sleep
import grpc
import os
from concurrent.futures import ThreadPoolExecutor
from voltha_client import VolthaClient, ConnectionError
from mock_voltha_server import VolthaServerMock, MOCK_VOLTHA_SERVER_ADDRESS, MOCK_VOLTHA_SERVER_PORT
from google.protobuf import json_format
test_path = os.path.abspath(os.path.dirname(os.path.realpath(__file__)))
class TestVolthaClient(unittest.TestCase):
def setUp(self):
self.server = grpc.server(ThreadPoolExecutor(max_workers=5))
self.voltha_server_mock, _, _ = VolthaServerMock.start_voltha_server(self.server)
self.voltha_client = VolthaClient(url=MOCK_VOLTHA_SERVER_ADDRESS, port=MOCK_VOLTHA_SERVER_PORT, timeout=1)
self.devices = {"items": []}
self.olts = [
{
"id": "test_id",
"type": "simulated_olt",
},
{
'id': 'tibit_id',
'type': 'tibit_olt',
}
]
self.onus = [
{
"id": "1",
"type": "onu",
},
{
"id": "2",
"type": "broadcom_onu",
}
]
self.devices["items"].extend(self.olts)
self.devices["items"].extend(self.onus)
for i in range(5):
try:
self.voltha_client.list_devices()
return
except ConnectionError:
sleep(1)
self.fail("VOLTHA gRPC server failed to start!")
def tearDown(self):
self.voltha_client.close_channel()
self.server.stop(None)
def test_list_olt_devices(self):
self.voltha_server_mock.set_devices(self.devices)
print(self.voltha_server_mock.devices)
olts = self.voltha_client.list_olt_devices()
self.assertEqual(self.olts, [json_format.MessageToDict(o) for o in olts])
def test_list_olt_devices_nodevices(self):
olts = self.voltha_client.list_olt_devices()
self.assertEqual(len(olts), 0)
def test_list_onu_devices(self):
self.voltha_server_mock.set_devices(self.devices)
onus = self.voltha_client.list_onu_devices()
self.assertEqual(self.onus, [json_format.MessageToDict(o) for o in onus])
def test_list_onu_devices_nodevices(self):
onus = self.voltha_client.list_olt_devices()
self.assertEqual(len(onus), 0)
if __name__ == "__main__":
unittest.main()
| true | true |
f7f5953df70a457cdcfa1364dcff1a4ea0d3973b | 4,490 | py | Python | tests/features/test_build_features.py | catalyst-cooperative/epacems_ramp_rates | 5a5ea6f9571823f7ef3f9c66abb4d9acb79820be | [
"MIT"
] | 1 | 2021-07-02T15:31:22.000Z | 2021-07-02T15:31:22.000Z | tests/features/test_build_features.py | catalyst-cooperative/epacems_ramp_rates | 5a5ea6f9571823f7ef3f9c66abb4d9acb79820be | [
"MIT"
] | 4 | 2021-07-30T19:42:10.000Z | 2021-08-16T19:12:21.000Z | tests/features/test_build_features.py | catalyst-cooperative/epacems_ramp_rates | 5a5ea6f9571823f7ef3f9c66abb4d9acb79820be | [
"MIT"
] | null | null | null | import pytest
import pandas as pd
import numpy as np
from ramprate.build_features import _find_uptime
def test__find_uptime_start_and_end_nonzero():
dt_idx = pd.date_range(start="2020-01-01 00:00", periods=6, freq="h", tz="UTC")
data = [2, 2, 0, 0, 0, 2]
# downtime=True
# first zero after non-zero
shutdown = pd.to_datetime(["2020-01-01 02:00"], utc=True)
# last zero before non-zero
startup = pd.to_datetime(["2020-01-01 04:00"], utc=True)
expected = pd.DataFrame({"shutdown": shutdown, "startup": startup})
actual = _find_uptime(pd.Series(data, index=dt_idx), downtime=True)
pd.testing.assert_frame_equal(actual, expected)
# end points ('startup') are after start points ('shutdown')
assert actual.diff(axis=1)["startup"].dt.total_seconds().fillna(1).ge(0).all()
# downtime=False
# last zero before non-zero
startup = pd.to_datetime([pd.NaT, "2020-01-01 04:00"], utc=True)
# first zero after non-zero
shutdown = pd.to_datetime(["2020-01-01 02:00", pd.NaT], utc=True)
expected = pd.DataFrame({"startup": startup, "shutdown": shutdown})
actual = _find_uptime(pd.Series(data, index=dt_idx))
pd.testing.assert_frame_equal(actual, expected)
# end points ('shutdown') are after start points ('startup')
assert actual.diff(axis=1)["shutdown"].dt.total_seconds().fillna(1).ge(0).all()
def test__find_uptime_all_zeros():
dt_idx = pd.date_range(start="2020-01-01 00:00", periods=6, freq="h", tz="UTC")
data = [0, 0, 0, 0, 0, 0]
# downtime=True
# first zero after non-zero
shutdown = pd.to_datetime([pd.NaT], utc=True)
# last zero before non-zero
startup = pd.to_datetime([pd.NaT], utc=True)
expected = pd.DataFrame({"shutdown": shutdown, "startup": startup})
actual = _find_uptime(pd.Series(data, index=dt_idx), downtime=True)
pd.testing.assert_frame_equal(actual, expected)
# downtime=False
# first zero after non-zero
shutdown = pd.to_datetime([], utc=True)
# last zero before non-zero
startup = pd.to_datetime([], utc=True)
expected = pd.DataFrame({"startup": startup, "shutdown": shutdown})
actual = _find_uptime(pd.Series(data, index=dt_idx))
pd.testing.assert_frame_equal(actual, expected)
def test__find_uptime_no_zeros():
dt_idx = pd.date_range(start="2020-01-01 00:00", periods=6, freq="h", tz="UTC")
data = [5, 5, 5, 5, 5, 5]
# downtime=True
# first zero after non-zero
shutdown = pd.to_datetime([], utc=True)
# last zero before non-zero
startup = pd.to_datetime([], utc=True)
expected = pd.DataFrame({"shutdown": shutdown, "startup": startup})
actual = _find_uptime(pd.Series(data, index=dt_idx), downtime=True)
pd.testing.assert_frame_equal(actual, expected)
# downtime=False
# first zero after non-zero
shutdown = pd.to_datetime([pd.NaT], utc=True)
# last zero before non-zero
startup = pd.to_datetime([pd.NaT], utc=True)
expected = pd.DataFrame({"startup": startup, "shutdown": shutdown})
actual = _find_uptime(pd.Series(data, index=dt_idx))
pd.testing.assert_frame_equal(actual, expected)
def test__find_uptime_start_zero_end_zero():
dt_idx = pd.date_range(start="2020-01-01 00:00", periods=6, freq="h", tz="UTC")
data = [0, 2, 2, 0, 2, 0]
# downtime=True
# first zero after non-zero
shutdown = pd.to_datetime([pd.NaT, "2020-01-01 03:00", "2020-01-01 05:00"], utc=True)
# last zero before non-zero
startup = pd.to_datetime(["2020-01-01 00:00", "2020-01-01 03:00", pd.NaT], utc=True)
expected = pd.DataFrame({"shutdown": shutdown, "startup": startup})
actual = _find_uptime(pd.Series(data, index=dt_idx), downtime=True)
pd.testing.assert_frame_equal(actual, expected)
# end points ('startup') are after start points ('shutdown')
assert actual.diff(axis=1)["startup"].dt.total_seconds().fillna(1).ge(0).all()
# downtime=False
# last zero before non-zero
startup = pd.to_datetime(["2020-01-01 00:00", "2020-01-01 03:00"], utc=True)
# first zero after non-zero
shutdown = pd.to_datetime(["2020-01-01 03:00", "2020-01-01 05:00"], utc=True)
expected = pd.DataFrame({"startup": startup, "shutdown": shutdown})
actual = _find_uptime(pd.Series(data, index=dt_idx))
pd.testing.assert_frame_equal(actual, expected)
# end points ('shutdown') are after start points ('startup')
assert actual.diff(axis=1)["shutdown"].dt.total_seconds().fillna(1).ge(0).all()
| 42.358491 | 89 | 0.675724 | import pytest
import pandas as pd
import numpy as np
from ramprate.build_features import _find_uptime
def test__find_uptime_start_and_end_nonzero():
dt_idx = pd.date_range(start="2020-01-01 00:00", periods=6, freq="h", tz="UTC")
data = [2, 2, 0, 0, 0, 2]
shutdown = pd.to_datetime(["2020-01-01 02:00"], utc=True)
startup = pd.to_datetime(["2020-01-01 04:00"], utc=True)
expected = pd.DataFrame({"shutdown": shutdown, "startup": startup})
actual = _find_uptime(pd.Series(data, index=dt_idx), downtime=True)
pd.testing.assert_frame_equal(actual, expected)
assert actual.diff(axis=1)["startup"].dt.total_seconds().fillna(1).ge(0).all()
startup = pd.to_datetime([pd.NaT, "2020-01-01 04:00"], utc=True)
shutdown = pd.to_datetime(["2020-01-01 02:00", pd.NaT], utc=True)
expected = pd.DataFrame({"startup": startup, "shutdown": shutdown})
actual = _find_uptime(pd.Series(data, index=dt_idx))
pd.testing.assert_frame_equal(actual, expected)
assert actual.diff(axis=1)["shutdown"].dt.total_seconds().fillna(1).ge(0).all()
def test__find_uptime_all_zeros():
dt_idx = pd.date_range(start="2020-01-01 00:00", periods=6, freq="h", tz="UTC")
data = [0, 0, 0, 0, 0, 0]
shutdown = pd.to_datetime([pd.NaT], utc=True)
startup = pd.to_datetime([pd.NaT], utc=True)
expected = pd.DataFrame({"shutdown": shutdown, "startup": startup})
actual = _find_uptime(pd.Series(data, index=dt_idx), downtime=True)
pd.testing.assert_frame_equal(actual, expected)
shutdown = pd.to_datetime([], utc=True)
startup = pd.to_datetime([], utc=True)
expected = pd.DataFrame({"startup": startup, "shutdown": shutdown})
actual = _find_uptime(pd.Series(data, index=dt_idx))
pd.testing.assert_frame_equal(actual, expected)
def test__find_uptime_no_zeros():
dt_idx = pd.date_range(start="2020-01-01 00:00", periods=6, freq="h", tz="UTC")
data = [5, 5, 5, 5, 5, 5]
shutdown = pd.to_datetime([], utc=True)
startup = pd.to_datetime([], utc=True)
expected = pd.DataFrame({"shutdown": shutdown, "startup": startup})
actual = _find_uptime(pd.Series(data, index=dt_idx), downtime=True)
pd.testing.assert_frame_equal(actual, expected)
shutdown = pd.to_datetime([pd.NaT], utc=True)
startup = pd.to_datetime([pd.NaT], utc=True)
expected = pd.DataFrame({"startup": startup, "shutdown": shutdown})
actual = _find_uptime(pd.Series(data, index=dt_idx))
pd.testing.assert_frame_equal(actual, expected)
def test__find_uptime_start_zero_end_zero():
dt_idx = pd.date_range(start="2020-01-01 00:00", periods=6, freq="h", tz="UTC")
data = [0, 2, 2, 0, 2, 0]
shutdown = pd.to_datetime([pd.NaT, "2020-01-01 03:00", "2020-01-01 05:00"], utc=True)
startup = pd.to_datetime(["2020-01-01 00:00", "2020-01-01 03:00", pd.NaT], utc=True)
expected = pd.DataFrame({"shutdown": shutdown, "startup": startup})
actual = _find_uptime(pd.Series(data, index=dt_idx), downtime=True)
pd.testing.assert_frame_equal(actual, expected)
assert actual.diff(axis=1)["startup"].dt.total_seconds().fillna(1).ge(0).all()
startup = pd.to_datetime(["2020-01-01 00:00", "2020-01-01 03:00"], utc=True)
shutdown = pd.to_datetime(["2020-01-01 03:00", "2020-01-01 05:00"], utc=True)
expected = pd.DataFrame({"startup": startup, "shutdown": shutdown})
actual = _find_uptime(pd.Series(data, index=dt_idx))
pd.testing.assert_frame_equal(actual, expected)
assert actual.diff(axis=1)["shutdown"].dt.total_seconds().fillna(1).ge(0).all()
| true | true |
f7f59690fe12952f92183a35d3046fabb355c286 | 35 | py | Python | stereovis/spiking/algorithms/__init__.py | gdikov/hybrid-stereo-matching | 6e9b6e499098d884618ecf3c1503891abdee3ce6 | [
"MIT"
] | 16 | 2017-07-05T07:20:53.000Z | 2020-12-29T19:33:38.000Z | stereovis/spiking/algorithms/__init__.py | gdikov/hybrid-stereo-matching | 6e9b6e499098d884618ecf3c1503891abdee3ce6 | [
"MIT"
] | 1 | 2020-06-02T13:57:45.000Z | 2020-06-02T13:59:21.000Z | stereovis/spiking/algorithms/__init__.py | gdikov/hybrid-stereo-matching | 6e9b6e499098d884618ecf3c1503891abdee3ce6 | [
"MIT"
] | 2 | 2019-02-18T20:25:54.000Z | 2020-12-07T07:28:33.000Z | from vvf import VelocityVectorField | 35 | 35 | 0.914286 | from vvf import VelocityVectorField | true | true |
f7f596e079123ca2234da0e9420a47383c9d9c09 | 4,007 | py | Python | cubicsplines.py | nguyenvu2589/Numerical | 23eee7d31a8871d5d53871ebc9950866cf11ad23 | [
"MIT"
] | null | null | null | cubicsplines.py | nguyenvu2589/Numerical | 23eee7d31a8871d5d53871ebc9950866cf11ad23 | [
"MIT"
] | null | null | null | cubicsplines.py | nguyenvu2589/Numerical | 23eee7d31a8871d5d53871ebc9950866cf11ad23 | [
"MIT"
] | null | null | null | import numpy as np
import re
import matplotlib.pyplot as plt
debug = False
# Takes a string of points in the string form: '(-1,3), (0,5), (3,1), (4,1), (5,1)'
# and optionally, the graph resolution.
# Prints the cubic spline functions to stdout and displays an interpolated line plot
# Example usage: cubicSpline('(-1,3), (0,5), (3,1), (4,1), (5,1)')
# or cubicSpline('(-1,3), (0,5), (3,1), (4,1), (5,1)', resolution=2) for a low resolution graph.
def cubicSpline(points, resolution=100):
if not points:
raise Exception('You must provide in data points.')
return
# Parse the coordinate inputs
if not points.count('(') == points.count(')'):
raise Exception('Input coordinates were malformed.')
return
coordinates = re.findall(r'\(.*?\)', points)
coordinates = [item.replace('(', '').replace(')', '') for item in coordinates]
# Split and convert values
x = []
y = []
for coordinate in coordinates:
try:
x_val, y_val = [float(num) for num in coordinate.split(',')]
x.append(x_val)
y.append(y_val)
except ValueError:
raise Exception('Input coordinates were malformed.')
if not len(x) == len(y):
raise Exception('Length of X and Y arrays must be equal.')
exit(1)
# Sort the input, just in case
y = [y for (x, y) in sorted(zip(x, y))]
x.sort()
# Get the number of inputs
n = len(x)
# Solve for little delta
delta = np.zeros((n - 1, 1))
for i in range(0, n - 1):
delta[i] = x[i + 1] - x[i]
# Solve for big delta
Delta = np.zeros((n - 1, 1))
for i in range(0, n - 1):
Delta[i] = y[i + 1] - y[i]
if debug:
print(delta)
print(Delta)
A = np.zeros((n, n))
A[0, 0] = 1
A[n - 1, n - 1] = 1
for i in range(1, n - 1):
A[i, i - 1] = delta[i - 1]
A[i, i] = 2 * (delta[i - 1] + delta[i])
A[i, i + 1] = delta[i]
b = np.zeros((n, 1))
for i in range(1, n - 1):
b[i] = (3 * ((Delta[i] / delta[i]) - (Delta[i - 1] / delta[i - 1])))
# Solve for c coefficients
ci = np.linalg.solve(A, b)
if debug:
print(ci)
# Solve for d coefficients
di = np.zeros((n - 1, 1))
for i in range(0, n - 1):
di[i] = (ci[i + 1] - ci[i]) / (3 * delta[i])
if debug:
print(di)
# Solve for b coefficients
bi = np.zeros((n - 1, 1))
for i in range(0, n - 1):
bi[i] = (Delta[i] / delta[i]) - (delta[i] / 3) * (2 * ci[i] + ci[i + 1])
# And finally, let's get our formulas!
Si = []
formulas = []
for i in range(0, n - 1):
tempFormula = "{0} + {1}* (x_val - {2}) + {3}* (x_val - {4})**2 + {5}* (x_val - {6})**3"
tempFormula = tempFormula.format(str(y[i]), str(bi[i]), str(x[i]), str(ci[i]), str(x[i]), str(di[i]), str(x[i]))
# ugly but formats the formula nice
tempFormula = re.sub(' +', ' ', tempFormula.replace('[', ' ').replace(']', ' '))
tempString = (("S{0}(x) = " + tempFormula).format(str(i + 1)).replace('**', '^')
.replace('x_val', 'x').replace('- -', '+ ').replace('x - 0', 'x'))
formulas.append(tempFormula)
Si.append(tempString)
for i in range(0, len(Si)):
print(Si[i])
x_vals = []
y_vals = []
# Set up the plot
for i in range(0, n - 1):
xf = np.linspace(x[i], x[i + 1], resolution)
yf = []
for j in range(0, resolution):
# Due to Python's variable declarations we have x_val references in the formulas,
# but PEP 8 will complain the value is unused. It's not.
x_val = xf[j]
yf.append(eval(formulas[i]))
x_vals.extend(xf)
y_vals.extend(yf)
plt.plot(x, y, 'o', x_vals, y_vals, '-')
plt.legend(['Input X Values', 'Cubic Spline curve'], loc='best')
plt.title('Cubic Spline Interpolation')
# plt.show()
#cubicSpline('(0,1),(2,2),(3,4)')
| 28.827338 | 120 | 0.519591 | import numpy as np
import re
import matplotlib.pyplot as plt
debug = False
def cubicSpline(points, resolution=100):
if not points:
raise Exception('You must provide in data points.')
return
if not points.count('(') == points.count(')'):
raise Exception('Input coordinates were malformed.')
return
coordinates = re.findall(r'\(.*?\)', points)
coordinates = [item.replace('(', '').replace(')', '') for item in coordinates]
x = []
y = []
for coordinate in coordinates:
try:
x_val, y_val = [float(num) for num in coordinate.split(',')]
x.append(x_val)
y.append(y_val)
except ValueError:
raise Exception('Input coordinates were malformed.')
if not len(x) == len(y):
raise Exception('Length of X and Y arrays must be equal.')
exit(1)
y = [y for (x, y) in sorted(zip(x, y))]
x.sort()
n = len(x)
delta = np.zeros((n - 1, 1))
for i in range(0, n - 1):
delta[i] = x[i + 1] - x[i]
Delta = np.zeros((n - 1, 1))
for i in range(0, n - 1):
Delta[i] = y[i + 1] - y[i]
if debug:
print(delta)
print(Delta)
A = np.zeros((n, n))
A[0, 0] = 1
A[n - 1, n - 1] = 1
for i in range(1, n - 1):
A[i, i - 1] = delta[i - 1]
A[i, i] = 2 * (delta[i - 1] + delta[i])
A[i, i + 1] = delta[i]
b = np.zeros((n, 1))
for i in range(1, n - 1):
b[i] = (3 * ((Delta[i] / delta[i]) - (Delta[i - 1] / delta[i - 1])))
ci = np.linalg.solve(A, b)
if debug:
print(ci)
di = np.zeros((n - 1, 1))
for i in range(0, n - 1):
di[i] = (ci[i + 1] - ci[i]) / (3 * delta[i])
if debug:
print(di)
bi = np.zeros((n - 1, 1))
for i in range(0, n - 1):
bi[i] = (Delta[i] / delta[i]) - (delta[i] / 3) * (2 * ci[i] + ci[i + 1])
Si = []
formulas = []
for i in range(0, n - 1):
tempFormula = "{0} + {1}* (x_val - {2}) + {3}* (x_val - {4})**2 + {5}* (x_val - {6})**3"
tempFormula = tempFormula.format(str(y[i]), str(bi[i]), str(x[i]), str(ci[i]), str(x[i]), str(di[i]), str(x[i]))
# ugly but formats the formula nice
tempFormula = re.sub(' +', ' ', tempFormula.replace('[', ' ').replace(']', ' '))
tempString = (("S{0}(x) = " + tempFormula).format(str(i + 1)).replace('**', '^')
.replace('x_val', 'x').replace('- -', '+ ').replace('x - 0', 'x'))
formulas.append(tempFormula)
Si.append(tempString)
for i in range(0, len(Si)):
print(Si[i])
x_vals = []
y_vals = []
# Set up the plot
for i in range(0, n - 1):
xf = np.linspace(x[i], x[i + 1], resolution)
yf = []
for j in range(0, resolution):
# Due to Python's variable declarations we have x_val references in the formulas,
x_val = xf[j]
yf.append(eval(formulas[i]))
x_vals.extend(xf)
y_vals.extend(yf)
plt.plot(x, y, 'o', x_vals, y_vals, '-')
plt.legend(['Input X Values', 'Cubic Spline curve'], loc='best')
plt.title('Cubic Spline Interpolation')
# plt.show()
#cubicSpline('(0,1),(2,2),(3,4)')
| true | true |
f7f59816b91874032a4a92dde9df9453888f9420 | 482 | py | Python | databutler/pat/analysis/clock.py | rbavishi/databutler | 222263672dae8b519d0592a6bbe68a01dc4ce95d | [
"BSD-2-Clause"
] | null | null | null | databutler/pat/analysis/clock.py | rbavishi/databutler | 222263672dae8b519d0592a6bbe68a01dc4ce95d | [
"BSD-2-Clause"
] | 1 | 2022-02-11T06:19:45.000Z | 2022-02-11T06:19:45.000Z | databutler/pat/analysis/clock.py | rbavishi/databutler | 222263672dae8b519d0592a6bbe68a01dc4ce95d | [
"BSD-2-Clause"
] | null | null | null | import attr
@attr.s(cmp=False, repr=False)
class LogicalClock:
"""
A basic clock implementation supporting getters and increment methods.
"""
_time: int = attr.ib(init=False)
def __attrs_post_init__(self):
self.reset()
def reset(self, init_time: int = 0):
self._time = init_time
def get_time(self) -> int:
return self._time
def increment(self, step: int = 1) -> int:
self._time += step
return self._time
| 20.956522 | 74 | 0.620332 | import attr
@attr.s(cmp=False, repr=False)
class LogicalClock:
_time: int = attr.ib(init=False)
def __attrs_post_init__(self):
self.reset()
def reset(self, init_time: int = 0):
self._time = init_time
def get_time(self) -> int:
return self._time
def increment(self, step: int = 1) -> int:
self._time += step
return self._time
| true | true |
f7f5981c7727d4ad30c6795c8d8b0f605691a60c | 1,862 | py | Python | vsts/vsts/gallery/v4_1/models/qn_aItem.py | kenkuo/azure-devops-python-api | 9e920bd25e938fa89ff7f60153e5b9e113ca839d | [
"MIT"
] | null | null | null | vsts/vsts/gallery/v4_1/models/qn_aItem.py | kenkuo/azure-devops-python-api | 9e920bd25e938fa89ff7f60153e5b9e113ca839d | [
"MIT"
] | null | null | null | vsts/vsts/gallery/v4_1/models/qn_aItem.py | kenkuo/azure-devops-python-api | 9e920bd25e938fa89ff7f60153e5b9e113ca839d | [
"MIT"
] | null | null | null | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from msrest.serialization import Model
class QnAItem(Model):
"""QnAItem.
:param created_date: Time when the review was first created
:type created_date: datetime
:param id: Unique identifier of a QnA item
:type id: long
:param status: Get status of item
:type status: object
:param text: Text description of the QnA item
:type text: str
:param updated_date: Time when the review was edited/updated
:type updated_date: datetime
:param user: User details for the item.
:type user: :class:`UserIdentityRef <gallery.v4_1.models.UserIdentityRef>`
"""
_attribute_map = {
'created_date': {'key': 'createdDate', 'type': 'iso-8601'},
'id': {'key': 'id', 'type': 'long'},
'status': {'key': 'status', 'type': 'object'},
'text': {'key': 'text', 'type': 'str'},
'updated_date': {'key': 'updatedDate', 'type': 'iso-8601'},
'user': {'key': 'user', 'type': 'UserIdentityRef'}
}
def __init__(self, created_date=None, id=None, status=None, text=None, updated_date=None, user=None):
super(QnAItem, self).__init__()
self.created_date = created_date
self.id = id
self.status = status
self.text = text
self.updated_date = updated_date
self.user = user
| 40.478261 | 105 | 0.548872 |
from msrest.serialization import Model
class QnAItem(Model):
_attribute_map = {
'created_date': {'key': 'createdDate', 'type': 'iso-8601'},
'id': {'key': 'id', 'type': 'long'},
'status': {'key': 'status', 'type': 'object'},
'text': {'key': 'text', 'type': 'str'},
'updated_date': {'key': 'updatedDate', 'type': 'iso-8601'},
'user': {'key': 'user', 'type': 'UserIdentityRef'}
}
def __init__(self, created_date=None, id=None, status=None, text=None, updated_date=None, user=None):
super(QnAItem, self).__init__()
self.created_date = created_date
self.id = id
self.status = status
self.text = text
self.updated_date = updated_date
self.user = user
| true | true |
f7f5987e700ca936a3caf26863d5683d4f4b8224 | 3,230 | py | Python | tests/integration/test_tostac.py | csiro-easi/eo-datasets | 7805c569763f828cb0ace84c93932bddb882a6a3 | [
"Apache-2.0"
] | null | null | null | tests/integration/test_tostac.py | csiro-easi/eo-datasets | 7805c569763f828cb0ace84c93932bddb882a6a3 | [
"Apache-2.0"
] | null | null | null | tests/integration/test_tostac.py | csiro-easi/eo-datasets | 7805c569763f828cb0ace84c93932bddb882a6a3 | [
"Apache-2.0"
] | null | null | null | import json
import shutil
from functools import partial
from pathlib import Path
from pprint import pformat
import pytest
from deepdiff import DeepDiff
from eodatasets3 import serialise
from eodatasets3.scripts import tostac
from tests.integration.common import run_prepare_cli
TO_STAC_DATA: Path = Path(__file__).parent.joinpath("data/tostac")
ODC_METADATA_FILE: str = "ga_ls8c_ard_3-1-0_088080_2020-05-25_final.odc-metadata.yaml"
STAC_TEMPLATE_FILE: str = "ga_ls_ard_3_stac_item.json"
STAC_EXPECTED_FILE: str = "ga_ls8c_ard_3-1-0_088080_2020-05-25_final.stac-item_expected.json"
deep_diff = partial(DeepDiff, significant_digits=6)
def test_tostac(input_doc_folder: Path):
input_metadata_path = input_doc_folder.joinpath(ODC_METADATA_FILE)
assert input_metadata_path.exists()
run_tostac(input_metadata_path)
name = input_metadata_path.stem.replace(".odc-metadata", "")
actual_stac_path = input_metadata_path.with_name(f"{name}.stac-item.json")
assert actual_stac_path.exists()
expected_stac_path = input_doc_folder.joinpath(STAC_EXPECTED_FILE)
assert expected_stac_path.exists()
actual_doc = json.load(actual_stac_path.open())
expected_doc = json.load(expected_stac_path.open())
doc_diff = deep_diff(expected_doc, actual_doc)
assert doc_diff == {}, pformat(doc_diff)
def test_add_property(input_doc_folder: Path):
input_metadata_path = input_doc_folder.joinpath(ODC_METADATA_FILE)
assert input_metadata_path.exists()
input_doc = serialise.load_yaml(input_metadata_path)
input_doc["properties"]["test"] = "testvalue"
serialise.dump_yaml(input_metadata_path, input_doc)
assert input_metadata_path.exists()
run_tostac(input_metadata_path)
name = input_metadata_path.stem.replace(".odc-metadata", "")
actual_stac_path = input_metadata_path.with_name(f"{name}.stac-item.json")
assert actual_stac_path.exists()
actual_doc = json.load(actual_stac_path.open())
assert actual_doc["properties"]["test"] == input_doc["properties"]["test"]
def test_invalid_crs(input_doc_folder: Path):
input_metadata_path = input_doc_folder.joinpath(ODC_METADATA_FILE)
assert input_metadata_path.exists()
input_doc = serialise.load_yaml(input_metadata_path)
del input_doc["crs"]
serialise.dump_yaml(input_metadata_path, input_doc)
assert input_metadata_path.exists()
with pytest.raises(RuntimeError) as exp:
run_tostac(input_metadata_path)
assert (
str(exp.value) == "Expect string or any object with "
"`.to_epsg()` or `.to_wkt()` method"
)
def run_tostac(input_metadata_path: Path):
run_prepare_cli(
tostac.run,
"-u",
"http://dea-public-data-dev.s3-ap-southeast-2.amazonaws.com/"
"analysis-ready-data/ga_ls8c_ard_3/088/080/2020/05/25/",
"-e",
"https://explorer.dev.dea.ga.gov.au/",
"--validate",
input_metadata_path,
)
@pytest.fixture
def input_doc_folder(tmp_path: Path) -> Path:
tmp_input_path = tmp_path / TO_STAC_DATA.name
if TO_STAC_DATA.is_file():
shutil.copy(TO_STAC_DATA, tmp_input_path)
else:
shutil.copytree(TO_STAC_DATA, tmp_input_path)
return tmp_input_path
| 32.626263 | 93 | 0.744892 | import json
import shutil
from functools import partial
from pathlib import Path
from pprint import pformat
import pytest
from deepdiff import DeepDiff
from eodatasets3 import serialise
from eodatasets3.scripts import tostac
from tests.integration.common import run_prepare_cli
TO_STAC_DATA: Path = Path(__file__).parent.joinpath("data/tostac")
ODC_METADATA_FILE: str = "ga_ls8c_ard_3-1-0_088080_2020-05-25_final.odc-metadata.yaml"
STAC_TEMPLATE_FILE: str = "ga_ls_ard_3_stac_item.json"
STAC_EXPECTED_FILE: str = "ga_ls8c_ard_3-1-0_088080_2020-05-25_final.stac-item_expected.json"
deep_diff = partial(DeepDiff, significant_digits=6)
def test_tostac(input_doc_folder: Path):
input_metadata_path = input_doc_folder.joinpath(ODC_METADATA_FILE)
assert input_metadata_path.exists()
run_tostac(input_metadata_path)
name = input_metadata_path.stem.replace(".odc-metadata", "")
actual_stac_path = input_metadata_path.with_name(f"{name}.stac-item.json")
assert actual_stac_path.exists()
expected_stac_path = input_doc_folder.joinpath(STAC_EXPECTED_FILE)
assert expected_stac_path.exists()
actual_doc = json.load(actual_stac_path.open())
expected_doc = json.load(expected_stac_path.open())
doc_diff = deep_diff(expected_doc, actual_doc)
assert doc_diff == {}, pformat(doc_diff)
def test_add_property(input_doc_folder: Path):
input_metadata_path = input_doc_folder.joinpath(ODC_METADATA_FILE)
assert input_metadata_path.exists()
input_doc = serialise.load_yaml(input_metadata_path)
input_doc["properties"]["test"] = "testvalue"
serialise.dump_yaml(input_metadata_path, input_doc)
assert input_metadata_path.exists()
run_tostac(input_metadata_path)
name = input_metadata_path.stem.replace(".odc-metadata", "")
actual_stac_path = input_metadata_path.with_name(f"{name}.stac-item.json")
assert actual_stac_path.exists()
actual_doc = json.load(actual_stac_path.open())
assert actual_doc["properties"]["test"] == input_doc["properties"]["test"]
def test_invalid_crs(input_doc_folder: Path):
input_metadata_path = input_doc_folder.joinpath(ODC_METADATA_FILE)
assert input_metadata_path.exists()
input_doc = serialise.load_yaml(input_metadata_path)
del input_doc["crs"]
serialise.dump_yaml(input_metadata_path, input_doc)
assert input_metadata_path.exists()
with pytest.raises(RuntimeError) as exp:
run_tostac(input_metadata_path)
assert (
str(exp.value) == "Expect string or any object with "
"`.to_epsg()` or `.to_wkt()` method"
)
def run_tostac(input_metadata_path: Path):
run_prepare_cli(
tostac.run,
"-u",
"http://dea-public-data-dev.s3-ap-southeast-2.amazonaws.com/"
"analysis-ready-data/ga_ls8c_ard_3/088/080/2020/05/25/",
"-e",
"https://explorer.dev.dea.ga.gov.au/",
"--validate",
input_metadata_path,
)
@pytest.fixture
def input_doc_folder(tmp_path: Path) -> Path:
tmp_input_path = tmp_path / TO_STAC_DATA.name
if TO_STAC_DATA.is_file():
shutil.copy(TO_STAC_DATA, tmp_input_path)
else:
shutil.copytree(TO_STAC_DATA, tmp_input_path)
return tmp_input_path
| true | true |
f7f598b3a2d44bdedeae8e590488565af415da3a | 2,656 | py | Python | aliyun-python-sdk-core/aliyunsdkcore/auth/signers/ecs_ram_role_singer.py | DataDog/aliyun-openapi-python-sdk | 5cbee29bce6416dd62f61f0c3786b1af6ea0d84f | [
"Apache-2.0"
] | 1 | 2019-12-23T12:36:43.000Z | 2019-12-23T12:36:43.000Z | aliyun-python-sdk-core/aliyunsdkcore/auth/signers/ecs_ram_role_singer.py | liusc27/aliyun-openapi-python-sdk | 5e3db3535dd21de987dc5981e71151327d5a884f | [
"Apache-2.0"
] | null | null | null | aliyun-python-sdk-core/aliyunsdkcore/auth/signers/ecs_ram_role_singer.py | liusc27/aliyun-openapi-python-sdk | 5e3db3535dd21de987dc5981e71151327d5a884f | [
"Apache-2.0"
] | 1 | 2021-02-23T11:27:54.000Z | 2021-02-23T11:27:54.000Z | # coding:utf-8
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import time
import json
import urllib2
import logging
from aliyunsdkcore.auth.signers.signer import Signer
class EcsRamRoleSigner(Signer):
_REFRESH_SCALE = 0.8
def __init__(self, ecs_ram_role_credential):
self._credential = ecs_ram_role_credential
self._last_update_time = 0
self._expiration = 0
def sign(self, region_id, request):
self._check_session_credential()
session_ak, session_sk, token = self._session_credential
if request.get_style() == 'RPC':
request.add_query_param("SecurityToken", token)
else:
request.add_header("x-acs-security-token", token)
header = request.get_signed_header(region_id, session_ak, session_sk)
url = request.get_url(region_id, session_ak, session_sk)
return header, url
def _check_session_credential(self):
now = int(time.time())
if now - self._last_update_time > (self._expiration * self._REFRESH_SCALE):
self._refresh_session_ak_and_sk()
def _refresh_session_ak_and_sk(self):
try:
request_url = "http://100.100.100.200/latest/meta-data/ram/security-credentials/" + self._credential.role_name
content = urllib2.urlopen(request_url).read()
response = json.loads(content)
if response.get("Code") != "Success":
logging.error('refresh Ecs sts token err, code is ' + response.get("Code"))
return
session_ak = response.get("AccessKeyId")
session_sk = response.get("AccessKeySecret")
token = response.get("SecurityToken")
self._session_credential = session_ak, session_sk, token
self._expiration = response.get("Expiration")
except IOError as e:
logging.error('refresh Ecs sts token err', e)
| 38.492754 | 122 | 0.687877 |
import time
import json
import urllib2
import logging
from aliyunsdkcore.auth.signers.signer import Signer
class EcsRamRoleSigner(Signer):
_REFRESH_SCALE = 0.8
def __init__(self, ecs_ram_role_credential):
self._credential = ecs_ram_role_credential
self._last_update_time = 0
self._expiration = 0
def sign(self, region_id, request):
self._check_session_credential()
session_ak, session_sk, token = self._session_credential
if request.get_style() == 'RPC':
request.add_query_param("SecurityToken", token)
else:
request.add_header("x-acs-security-token", token)
header = request.get_signed_header(region_id, session_ak, session_sk)
url = request.get_url(region_id, session_ak, session_sk)
return header, url
def _check_session_credential(self):
now = int(time.time())
if now - self._last_update_time > (self._expiration * self._REFRESH_SCALE):
self._refresh_session_ak_and_sk()
def _refresh_session_ak_and_sk(self):
try:
request_url = "http://100.100.100.200/latest/meta-data/ram/security-credentials/" + self._credential.role_name
content = urllib2.urlopen(request_url).read()
response = json.loads(content)
if response.get("Code") != "Success":
logging.error('refresh Ecs sts token err, code is ' + response.get("Code"))
return
session_ak = response.get("AccessKeyId")
session_sk = response.get("AccessKeySecret")
token = response.get("SecurityToken")
self._session_credential = session_ak, session_sk, token
self._expiration = response.get("Expiration")
except IOError as e:
logging.error('refresh Ecs sts token err', e)
| true | true |
f7f599066e774b28747c46edc011e399bff0fcc6 | 3,686 | py | Python | splearn/data/sample_ssvep.py | jinglescode/python-signal-processing | c3de02b12905f14a2350377d7f4a868bd7a40bc7 | [
"BSD-3-Clause"
] | 10 | 2020-12-24T09:27:06.000Z | 2021-12-09T14:48:10.000Z | splearn/data/sample_ssvep.py | jinglescode/python-signal-processing | c3de02b12905f14a2350377d7f4a868bd7a40bc7 | [
"BSD-3-Clause"
] | null | null | null | splearn/data/sample_ssvep.py | jinglescode/python-signal-processing | c3de02b12905f14a2350377d7f4a868bd7a40bc7 | [
"BSD-3-Clause"
] | 3 | 2021-11-09T21:06:15.000Z | 2022-03-28T09:08:41.000Z | # -*- coding: utf-8 -*-
"""A 40-target SSVEP dataset recorded from a single subject.
"""
import numpy as np
from scipy.io import loadmat
import os
class SampleSSVEPData():
r"""
A 40-target SSVEP dataset recorded from a single subject.
Data description:
Original Data shape : (40, 9, 1250, 6) [# of targets, # of channels, # of sampling points, # of blocks]
Stimulus frequencies : 8.0 - 15.8 Hz with an interval of 0.2 Hz
Stimulus phases : 0pi, 0.5pi, 1.0pi, and 1.5pi
Number of channels : 9 (1: Pz, 2: PO5,3: PO3, 4: POz, 5: PO4, 6: PO6, 7: O1, 8: Oz, and 9: O2)
Number of recording blocks : 6
Length of an epoch : 5 seconds
Sampling rate : 250 Hz
Args:
path: str, default: None
Path to ssvepdata.mat file
Usage:
>>> from splearn.cross_decomposition.trca import TRCA
>>> from splearn.data.sample_ssvep import SampleSSVEPData
>>>
>>> data = SampleSSVEPData()
>>> eeg = data.get_data()
>>> labels = data.get_targets()
>>> print("eeg.shape:", eeg.shape)
>>> print("labels.shape:", labels.shape)
Reference:
https://www.pnas.org/content/early/2015/10/14/1508080112.abstract
"""
def __init__(self, path=None):
if path is None:
path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "sample")
# Get EEG data
data = loadmat(os.path.join(path,"ssvep.mat"))
data = data["eeg"]
data = data.transpose([3,0,1,2])
self.data = data
# Prepare targets
n_blocks, n_targets, n_channels, n_samples = self.data.shape
targets = np.tile(np.arange(0, n_targets+0), (1, n_blocks))
targets = targets.reshape((n_blocks, n_targets))
self.targets = targets
# Prepare targets frequencies
self.stimulus_frequencies = np.array([8.0,9.0,10.0,11.0,12.0,13.0,14.0,15.0,8.2,9.2,10.2,11.2,12.2,13.2,14.2,15.2,8.4,9.4,10.4,11.4,12.4,13.4,14.4,15.4,8.6,9.6,10.6,11.6,12.6,13.6,14.6,15.6,8.8,9.8,10.8,11.8,12.8,13.8,14.8,15.8])
targets_frequencies = np.tile(self.stimulus_frequencies, (1, n_blocks))
targets_frequencies = targets_frequencies.reshape((n_blocks, n_targets))
self.targets_frequencies = targets_frequencies
self.sampling_rate = 250
self.channels = ["Pz", "PO5","PO3", "POz", "PO4", "PO6", "O1", "Oz", "O2"]
def get_data(self):
r"""
Data shape: (6, 40, 9, 1250) [# of blocks, # of targets, # of channels, # of sampling points]
"""
return self.data
def get_targets(self):
r"""
Targets index from 0 to 39. Shape: (6, 40) [# of blocks, # of targets]
"""
return self.targets
def get_stimulus_frequencies(self):
r"""
A list of frequencies of each stimulus:
[8.0,9.0,10.0,11.0,12.0,13.0,14.0,15.0,8.2,9.2,10.2,11.2,12.2,13.2,14.2,15.2,8.4,9.4,10.4,11.4,12.4,13.4,14.4,15.4,8.6,9.6,10.6,11.6,12.6,13.6,14.6,15.6,8.8,9.8,10.8,11.8,12.8,13.8,14.8,15.8]
"""
return self.stimulus_frequencies
def get_targets_frequencies(self):
r"""
Targets by frequencies, range between 8.0 Hz to 15.8 Hz.
Shape: (6, 40) [# of blocks, # of targets]
"""
return self.targets_frequencies
if __name__ == "__main__":
from splearn.data.sample_ssvep import SampleSSVEPData
data = SampleSSVEPData()
eeg = data.get_data()
labels = data.get_targets()
print("eeg.shape:", eeg.shape)
print("labels.shape:", labels.shape)
| 38 | 237 | 0.582474 |
import numpy as np
from scipy.io import loadmat
import os
class SampleSSVEPData():
def __init__(self, path=None):
if path is None:
path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "sample")
data = loadmat(os.path.join(path,"ssvep.mat"))
data = data["eeg"]
data = data.transpose([3,0,1,2])
self.data = data
n_blocks, n_targets, n_channels, n_samples = self.data.shape
targets = np.tile(np.arange(0, n_targets+0), (1, n_blocks))
targets = targets.reshape((n_blocks, n_targets))
self.targets = targets
self.stimulus_frequencies = np.array([8.0,9.0,10.0,11.0,12.0,13.0,14.0,15.0,8.2,9.2,10.2,11.2,12.2,13.2,14.2,15.2,8.4,9.4,10.4,11.4,12.4,13.4,14.4,15.4,8.6,9.6,10.6,11.6,12.6,13.6,14.6,15.6,8.8,9.8,10.8,11.8,12.8,13.8,14.8,15.8])
targets_frequencies = np.tile(self.stimulus_frequencies, (1, n_blocks))
targets_frequencies = targets_frequencies.reshape((n_blocks, n_targets))
self.targets_frequencies = targets_frequencies
self.sampling_rate = 250
self.channels = ["Pz", "PO5","PO3", "POz", "PO4", "PO6", "O1", "Oz", "O2"]
def get_data(self):
return self.data
def get_targets(self):
return self.targets
def get_stimulus_frequencies(self):
return self.stimulus_frequencies
def get_targets_frequencies(self):
return self.targets_frequencies
if __name__ == "__main__":
from splearn.data.sample_ssvep import SampleSSVEPData
data = SampleSSVEPData()
eeg = data.get_data()
labels = data.get_targets()
print("eeg.shape:", eeg.shape)
print("labels.shape:", labels.shape)
| true | true |
f7f5991ffbc6f4cc8961f561fa79454bd4c1b374 | 1,769 | py | Python | PyQt6/example.py | avazevfx/pyqt-colorpicker-widget | 07447a8f63582bef9a413cfd7b03eb8bbe27a3a6 | [
"MIT"
] | 4 | 2021-05-21T09:13:39.000Z | 2021-12-08T05:13:32.000Z | PyQt6/example.py | avazevfx/pyqt-colorpicker-widget | 07447a8f63582bef9a413cfd7b03eb8bbe27a3a6 | [
"MIT"
] | 1 | 2021-05-15T08:48:24.000Z | 2021-05-15T10:17:31.000Z | PyQt6/example.py | avazevfx/pyqt-colorpicker-widget | 07447a8f63582bef9a413cfd7b03eb8bbe27a3a6 | [
"MIT"
] | 1 | 2021-05-21T07:48:24.000Z | 2021-05-21T07:48:24.000Z | from PyQt6.QtWidgets import *
from example_window import Ui_MainWindow
from colorpicker import ColorPicker
# Basic Window class using QtDesigner & pyuic5
class My_Window(QMainWindow):
def __init__(self, *args, **kwargs):
super(My_Window, self).__init__(*args, **kwargs)
# set up your custom UI
self.ui = Ui_MainWindow()
self.ui.setupUi(self)
# Create the colorpicker widget inside the colorpicker_frame in your ui
# Use rgb, hsv and hex arguments to set initial color
self.colorpicker = ColorPicker(self.ui.colorpicker_frame, hsv=(50,50,50))
# the colorpicker handle is a bit dark and blends in with the bg, let's change it:
self.colorpicker.ui.hue_selector.setStyleSheet("background-color: #aaa")
# connect custom Button to display currently selected color
self.ui.pushButton.clicked.connect(self.selectColor)
# using ColorPicker's colorChanged signal:
self.colorpicker.colorChanged.connect(self.onColorChange)
def selectColor(self):
# get current color wit getColor() method
r,g,b = self.colorpicker.getRGB()
h,s,v = self.colorpicker.getHSV()
hsv = self.colorpicker.getHSV(360, 1) # hue in degrees, saturation & value from 0 to 1
rgb = self.colorpicker.getRGB(100) # rgb with white = (100,100,100)
hex = self.colorpicker.getHex(True) # output with hashtag in string
self.ui.selected_color_frame.setStyleSheet(f"background-color: rgb({r},{g},{b})")
def onColorChange(self):
hex = self.colorpicker.getHex(True)
self.ui.hex_label.setText(hex)
if __name__=="__main__":
app = QApplication([])
window = My_Window()
window.show()
app.exec()
| 31.035088 | 95 | 0.673827 | from PyQt6.QtWidgets import *
from example_window import Ui_MainWindow
from colorpicker import ColorPicker
class My_Window(QMainWindow):
def __init__(self, *args, **kwargs):
super(My_Window, self).__init__(*args, **kwargs)
self.ui = Ui_MainWindow()
self.ui.setupUi(self)
self.colorpicker = ColorPicker(self.ui.colorpicker_frame, hsv=(50,50,50))
self.colorpicker.ui.hue_selector.setStyleSheet("background-color: #aaa")
# connect custom Button to display currently selected color
self.ui.pushButton.clicked.connect(self.selectColor)
# using ColorPicker's colorChanged signal:
self.colorpicker.colorChanged.connect(self.onColorChange)
def selectColor(self):
r,g,b = self.colorpicker.getRGB()
h,s,v = self.colorpicker.getHSV()
hsv = self.colorpicker.getHSV(360, 1)
rgb = self.colorpicker.getRGB(100)
hex = self.colorpicker.getHex(True)
self.ui.selected_color_frame.setStyleSheet(f"background-color: rgb({r},{g},{b})")
def onColorChange(self):
hex = self.colorpicker.getHex(True)
self.ui.hex_label.setText(hex)
if __name__=="__main__":
app = QApplication([])
window = My_Window()
window.show()
app.exec()
| true | true |
f7f59955b810d4bc1e989801468b0a2efd4a63c0 | 922 | py | Python | main.py | ronanhansel/juvenile | d9e79cc21ebfac7127e22f059a298601a7906348 | [
"MIT"
] | 1 | 2021-08-18T16:26:24.000Z | 2021-08-18T16:26:24.000Z | main.py | ronanhansel/juvenile | d9e79cc21ebfac7127e22f059a298601a7906348 | [
"MIT"
] | null | null | null | main.py | ronanhansel/juvenile | d9e79cc21ebfac7127e22f059a298601a7906348 | [
"MIT"
] | null | null | null | from discord.ext import commands
import os
# Import and load all files
client = commands.Bot(command_prefix="-")
client.remove_command('help')
@client.command(hidden=True)
@commands.is_owner()
async def load(ctx, extension):
client.load_extension(f'cogs.{extension}')
await ctx.send(f'Loaded: {extension}')
@client.command(hidden=True)
@commands.is_owner()
async def unload(ctx, extension):
client.unload_extension(f'cogs.{extension}')
await ctx.send(f'Unloaded: {extension}')
@client.command(hidden=True)
@commands.is_owner()
async def reload(ctx, extension):
client.unload_extension(f'cogs.{extension}')
client.load_extension(f'cogs.{extension}')
await ctx.send(f'Reloaded: {extension}')
for filename in os.listdir('./cogs'):
if filename.endswith('py'):
client.load_extension(f'cogs.{filename[:-3]}')
if __name__ == "__main__":
client.run(os.getenv('discord_token'))
| 25.611111 | 54 | 0.71692 | from discord.ext import commands
import os
client = commands.Bot(command_prefix="-")
client.remove_command('help')
@client.command(hidden=True)
@commands.is_owner()
async def load(ctx, extension):
client.load_extension(f'cogs.{extension}')
await ctx.send(f'Loaded: {extension}')
@client.command(hidden=True)
@commands.is_owner()
async def unload(ctx, extension):
client.unload_extension(f'cogs.{extension}')
await ctx.send(f'Unloaded: {extension}')
@client.command(hidden=True)
@commands.is_owner()
async def reload(ctx, extension):
client.unload_extension(f'cogs.{extension}')
client.load_extension(f'cogs.{extension}')
await ctx.send(f'Reloaded: {extension}')
for filename in os.listdir('./cogs'):
if filename.endswith('py'):
client.load_extension(f'cogs.{filename[:-3]}')
if __name__ == "__main__":
client.run(os.getenv('discord_token'))
| true | true |
f7f599c0f7464cc5f91e7f3bda1d649d81b875b2 | 877,808 | py | Python | treeno/grammar/gen/SqlBaseParser.py | OneRaynyDay/treeno | ce11b8447f471c0b5ea596a211b3855625ec43eb | [
"MIT"
] | 1 | 2021-12-28T19:00:01.000Z | 2021-12-28T19:00:01.000Z | treeno/grammar/gen/SqlBaseParser.py | OneRaynyDay/treeno | ce11b8447f471c0b5ea596a211b3855625ec43eb | [
"MIT"
] | null | null | null | treeno/grammar/gen/SqlBaseParser.py | OneRaynyDay/treeno | ce11b8447f471c0b5ea596a211b3855625ec43eb | [
"MIT"
] | null | null | null | # Generated from SqlBase.g4 by ANTLR 4.9.2
# encoding: utf-8
import sys
from io import StringIO
from antlr4 import *
if sys.version_info[1] > 5:
from typing import TextIO
else:
from typing.io import TextIO
def serializedATN():
with StringIO() as buf:
buf.write("\3\u608b\ua72a\u8133\ub9ed\u417c\u3be7\u7786\u5964\3\u011c")
buf.write("\u09bd\4\2\t\2\4\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6\4\7\t\7")
buf.write("\4\b\t\b\4\t\t\t\4\n\t\n\4\13\t\13\4\f\t\f\4\r\t\r\4\16")
buf.write("\t\16\4\17\t\17\4\20\t\20\4\21\t\21\4\22\t\22\4\23\t\23")
buf.write("\4\24\t\24\4\25\t\25\4\26\t\26\4\27\t\27\4\30\t\30\4\31")
buf.write("\t\31\4\32\t\32\4\33\t\33\4\34\t\34\4\35\t\35\4\36\t\36")
buf.write("\4\37\t\37\4 \t \4!\t!\4\"\t\"\4#\t#\4$\t$\4%\t%\4&\t")
buf.write("&\4\'\t\'\4(\t(\4)\t)\4*\t*\4+\t+\4,\t,\4-\t-\4.\t.\4")
buf.write("/\t/\4\60\t\60\4\61\t\61\4\62\t\62\4\63\t\63\4\64\t\64")
buf.write("\4\65\t\65\4\66\t\66\4\67\t\67\48\t8\49\t9\4:\t:\4;\t")
buf.write(";\4<\t<\4=\t=\4>\t>\4?\t?\4@\t@\4A\tA\4B\tB\4C\tC\4D\t")
buf.write("D\4E\tE\4F\tF\4G\tG\4H\tH\4I\tI\4J\tJ\4K\tK\4L\tL\4M\t")
buf.write("M\4N\tN\4O\tO\4P\tP\4Q\tQ\4R\tR\4S\tS\4T\tT\4U\tU\4V\t")
buf.write("V\4W\tW\4X\tX\4Y\tY\4Z\tZ\4[\t[\4\\\t\\\3\2\3\2\3\2\3")
buf.write("\3\3\3\3\3\3\4\3\4\3\4\3\5\3\5\3\5\3\6\3\6\3\6\3\7\3\7")
buf.write("\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\5\7\u00d5")
buf.write("\n\7\3\7\3\7\3\7\5\7\u00da\n\7\3\7\3\7\5\7\u00de\n\7\3")
buf.write("\7\3\7\3\7\3\7\5\7\u00e4\n\7\3\7\3\7\5\7\u00e8\n\7\3\7")
buf.write("\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3")
buf.write("\7\3\7\3\7\3\7\3\7\5\7\u00fd\n\7\3\7\3\7\5\7\u0101\n\7")
buf.write("\3\7\3\7\5\7\u0105\n\7\3\7\3\7\5\7\u0109\n\7\3\7\3\7\3")
buf.write("\7\3\7\3\7\3\7\5\7\u0111\n\7\3\7\3\7\5\7\u0115\n\7\3\7")
buf.write("\5\7\u0118\n\7\3\7\3\7\3\7\3\7\3\7\5\7\u011f\n\7\3\7\3")
buf.write("\7\3\7\3\7\3\7\7\7\u0126\n\7\f\7\16\7\u0129\13\7\3\7\3")
buf.write("\7\3\7\5\7\u012e\n\7\3\7\3\7\5\7\u0132\n\7\3\7\3\7\3\7")
buf.write("\3\7\5\7\u0138\n\7\3\7\3\7\3\7\3\7\3\7\5\7\u013f\n\7\3")
buf.write("\7\3\7\3\7\3\7\3\7\3\7\3\7\5\7\u0148\n\7\3\7\3\7\3\7\3")
buf.write("\7\3\7\3\7\3\7\3\7\3\7\3\7\5\7\u0154\n\7\3\7\3\7\3\7\3")
buf.write("\7\3\7\3\7\3\7\5\7\u015d\n\7\3\7\3\7\3\7\3\7\5\7\u0163")
buf.write("\n\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\5\7\u016e\n\7")
buf.write("\3\7\3\7\3\7\3\7\3\7\3\7\5\7\u0176\n\7\3\7\3\7\3\7\3\7")
buf.write("\3\7\3\7\5\7\u017e\n\7\3\7\3\7\3\7\3\7\3\7\5\7\u0185\n")
buf.write("\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\5\7\u018f\n\7\3\7\3")
buf.write("\7\3\7\3\7\3\7\5\7\u0196\n\7\3\7\3\7\3\7\3\7\3\7\3\7\3")
buf.write("\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7")
buf.write("\3\7\3\7\3\7\3\7\3\7\7\7\u01b1\n\7\f\7\16\7\u01b4\13\7")
buf.write("\5\7\u01b6\n\7\3\7\5\7\u01b9\n\7\3\7\3\7\5\7\u01bd\n\7")
buf.write("\3\7\3\7\3\7\3\7\5\7\u01c3\n\7\3\7\3\7\3\7\5\7\u01c8\n")
buf.write("\7\3\7\3\7\3\7\3\7\3\7\5\7\u01cf\n\7\3\7\3\7\3\7\5\7\u01d4")
buf.write("\n\7\3\7\3\7\5\7\u01d8\n\7\3\7\3\7\3\7\3\7\3\7\3\7\5\7")
buf.write("\u01e0\n\7\3\7\3\7\3\7\3\7\5\7\u01e6\n\7\3\7\3\7\5\7\u01ea")
buf.write("\n\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\5")
buf.write("\7\u01f8\n\7\3\7\3\7\3\7\3\7\3\7\3\7\5\7\u0200\n\7\3\7")
buf.write("\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\5\7\u020b\n\7\3\7\3\7")
buf.write("\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3")
buf.write("\7\3\7\3\7\3\7\3\7\3\7\7\7\u0222\n\7\f\7\16\7\u0225\13")
buf.write("\7\5\7\u0227\n\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\5\7\u0231")
buf.write("\n\7\3\7\3\7\5\7\u0235\n\7\3\7\3\7\3\7\3\7\3\7\5\7\u023c")
buf.write("\n\7\3\7\3\7\3\7\3\7\3\7\3\7\7\7\u0244\n\7\f\7\16\7\u0247")
buf.write("\13\7\3\7\3\7\3\7\5\7\u024c\n\7\3\7\3\7\3\7\5\7\u0251")
buf.write("\n\7\3\7\3\7\5\7\u0255\n\7\3\7\3\7\3\7\3\7\5\7\u025b\n")
buf.write("\7\3\7\3\7\3\7\3\7\3\7\7\7\u0262\n\7\f\7\16\7\u0265\13")
buf.write("\7\3\7\3\7\3\7\5\7\u026a\n\7\3\7\3\7\5\7\u026e\n\7\3\7")
buf.write("\3\7\3\7\3\7\3\7\5\7\u0275\n\7\3\7\3\7\5\7\u0279\n\7\3")
buf.write("\7\3\7\3\7\3\7\7\7\u027f\n\7\f\7\16\7\u0282\13\7\3\7\3")
buf.write("\7\5\7\u0286\n\7\3\7\3\7\5\7\u028a\n\7\3\7\3\7\3\7\3\7")
buf.write("\3\7\3\7\5\7\u0292\n\7\3\7\3\7\3\7\3\7\7\7\u0298\n\7\f")
buf.write("\7\16\7\u029b\13\7\3\7\3\7\5\7\u029f\n\7\3\7\3\7\5\7\u02a3")
buf.write("\n\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\5\7\u02ad\n\7\3\7")
buf.write("\3\7\3\7\7\7\u02b2\n\7\f\7\16\7\u02b5\13\7\3\7\3\7\5\7")
buf.write("\u02b9\n\7\3\7\3\7\5\7\u02bd\n\7\3\7\3\7\3\7\3\7\3\7\3")
buf.write("\7\3\7\3\7\5\7\u02c7\n\7\3\7\5\7\u02ca\n\7\3\7\3\7\3\7")
buf.write("\3\7\3\7\7\7\u02d1\n\7\f\7\16\7\u02d4\13\7\3\7\3\7\5\7")
buf.write("\u02d8\n\7\3\7\3\7\3\7\3\7\5\7\u02de\n\7\3\7\3\7\3\7\3")
buf.write("\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7")
buf.write("\3\7\3\7\3\7\3\7\3\7\5\7\u02f6\n\7\3\7\3\7\3\7\3\7\5\7")
buf.write("\u02fc\n\7\5\7\u02fe\n\7\3\7\3\7\3\7\3\7\5\7\u0304\n\7")
buf.write("\3\7\3\7\3\7\3\7\5\7\u030a\n\7\5\7\u030c\n\7\3\7\3\7\3")
buf.write("\7\3\7\3\7\3\7\5\7\u0314\n\7\5\7\u0316\n\7\3\7\3\7\3\7")
buf.write("\3\7\5\7\u031c\n\7\3\7\3\7\3\7\3\7\5\7\u0322\n\7\5\7\u0324")
buf.write("\n\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3")
buf.write("\7\5\7\u0333\n\7\3\7\3\7\3\7\5\7\u0338\n\7\3\7\3\7\3\7")
buf.write("\3\7\3\7\5\7\u033f\n\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7")
buf.write("\3\7\3\7\5\7\u034b\n\7\5\7\u034d\n\7\3\7\3\7\3\7\3\7\3")
buf.write("\7\3\7\5\7\u0355\n\7\5\7\u0357\n\7\3\7\3\7\3\7\3\7\3\7")
buf.write("\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\7\7\u0367\n\7\f\7")
buf.write("\16\7\u036a\13\7\5\7\u036c\n\7\3\7\3\7\5\7\u0370\n\7\3")
buf.write("\7\3\7\5\7\u0374\n\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3")
buf.write("\7\3\7\3\7\3\7\3\7\3\7\7\7\u0384\n\7\f\7\16\7\u0387\13")
buf.write("\7\5\7\u0389\n\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3")
buf.write("\7\3\7\3\7\3\7\3\7\5\7\u0399\n\7\3\7\3\7\3\7\3\7\3\7\3")
buf.write("\7\7\7\u03a1\n\7\f\7\16\7\u03a4\13\7\3\7\3\7\5\7\u03a8")
buf.write("\n\7\3\7\3\7\3\7\3\7\5\7\u03ae\n\7\3\7\5\7\u03b1\n\7\3")
buf.write("\7\3\7\3\7\3\7\3\7\6\7\u03b8\n\7\r\7\16\7\u03b9\5\7\u03bc")
buf.write("\n\7\3\b\5\b\u03bf\n\b\3\b\3\b\3\t\3\t\5\t\u03c5\n\t\3")
buf.write("\t\3\t\3\t\7\t\u03ca\n\t\f\t\16\t\u03cd\13\t\3\n\3\n\5")
buf.write("\n\u03d1\n\n\3\13\3\13\3\13\3\13\5\13\u03d7\n\13\3\13")
buf.write("\3\13\5\13\u03db\n\13\3\13\3\13\5\13\u03df\n\13\3\f\3")
buf.write("\f\3\f\3\f\5\f\u03e5\n\f\3\r\3\r\3\r\3\r\3\16\3\16\3\16")
buf.write("\7\16\u03ee\n\16\f\16\16\16\u03f1\13\16\3\17\3\17\3\17")
buf.write("\3\17\3\20\3\20\3\20\3\20\3\20\3\20\7\20\u03fd\n\20\f")
buf.write("\20\16\20\u0400\13\20\5\20\u0402\n\20\3\20\3\20\3\20\5")
buf.write("\20\u0407\n\20\5\20\u0409\n\20\3\20\3\20\3\20\3\20\3\20")
buf.write("\5\20\u0410\n\20\3\20\3\20\3\20\3\20\5\20\u0416\n\20\5")
buf.write("\20\u0418\n\20\3\21\3\21\5\21\u041c\n\21\3\22\3\22\3\23")
buf.write("\3\23\3\23\3\23\3\23\3\23\5\23\u0426\n\23\3\23\3\23\3")
buf.write("\23\3\23\5\23\u042c\n\23\3\23\7\23\u042f\n\23\f\23\16")
buf.write("\23\u0432\13\23\3\24\3\24\3\24\3\24\3\24\3\24\3\24\7\24")
buf.write("\u043b\n\24\f\24\16\24\u043e\13\24\3\24\3\24\3\24\3\24")
buf.write("\5\24\u0444\n\24\3\25\3\25\5\25\u0448\n\25\3\25\3\25\5")
buf.write("\25\u044c\n\25\3\26\3\26\5\26\u0450\n\26\3\26\3\26\3\26")
buf.write("\7\26\u0455\n\26\f\26\16\26\u0458\13\26\3\26\3\26\3\26")
buf.write("\3\26\7\26\u045e\n\26\f\26\16\26\u0461\13\26\5\26\u0463")
buf.write("\n\26\3\26\3\26\5\26\u0467\n\26\3\26\3\26\3\26\5\26\u046c")
buf.write("\n\26\3\26\3\26\5\26\u0470\n\26\3\26\3\26\3\26\3\26\7")
buf.write("\26\u0476\n\26\f\26\16\26\u0479\13\26\5\26\u047b\n\26")
buf.write("\3\27\5\27\u047e\n\27\3\27\3\27\3\27\7\27\u0483\n\27\f")
buf.write("\27\16\27\u0486\13\27\3\30\3\30\3\30\3\30\3\30\3\30\7")
buf.write("\30\u048e\n\30\f\30\16\30\u0491\13\30\5\30\u0493\n\30")
buf.write("\3\30\3\30\3\30\3\30\3\30\3\30\7\30\u049b\n\30\f\30\16")
buf.write("\30\u049e\13\30\5\30\u04a0\n\30\3\30\3\30\3\30\3\30\3")
buf.write("\30\3\30\3\30\7\30\u04a9\n\30\f\30\16\30\u04ac\13\30\3")
buf.write("\30\3\30\5\30\u04b0\n\30\3\31\3\31\3\31\3\31\7\31\u04b6")
buf.write("\n\31\f\31\16\31\u04b9\13\31\5\31\u04bb\n\31\3\31\3\31")
buf.write("\5\31\u04bf\n\31\3\32\3\32\3\32\3\32\3\32\3\32\3\33\5")
buf.write("\33\u04c8\n\33\3\33\3\33\3\33\3\33\3\33\7\33\u04cf\n\33")
buf.write("\f\33\16\33\u04d2\13\33\5\33\u04d4\n\33\3\33\3\33\3\33")
buf.write("\3\33\3\33\7\33\u04db\n\33\f\33\16\33\u04de\13\33\5\33")
buf.write("\u04e0\n\33\3\33\5\33\u04e3\n\33\3\34\3\34\5\34\u04e7")
buf.write("\n\34\3\34\3\34\3\34\3\34\3\34\3\35\3\35\3\36\3\36\5\36")
buf.write("\u04f2\n\36\3\36\5\36\u04f5\n\36\3\36\3\36\3\36\3\36\3")
buf.write("\36\5\36\u04fc\n\36\3\36\5\36\u04ff\n\36\3\37\3\37\3\37")
buf.write("\3\37\3\37\3\37\3\37\3\37\3\37\3\37\3\37\3\37\3\37\3\37")
buf.write("\3\37\3\37\3\37\5\37\u0512\n\37\7\37\u0514\n\37\f\37\16")
buf.write("\37\u0517\13\37\3 \5 \u051a\n \3 \3 \5 \u051e\n \3 \3")
buf.write(" \5 \u0522\n \3 \3 \5 \u0526\n \5 \u0528\n \3!\3!\3!\3")
buf.write("!\3!\3!\3!\7!\u0531\n!\f!\16!\u0534\13!\3!\3!\5!\u0538")
buf.write("\n!\3\"\3\"\3\"\3\"\3\"\3\"\3\"\5\"\u0541\n\"\3#\3#\3")
buf.write("$\3$\3$\5$\u0548\n$\3$\5$\u054b\n$\3%\3%\3%\3%\5%\u0551")
buf.write("\n%\3&\3&\3&\3&\3&\3&\3&\3&\7&\u055b\n&\f&\16&\u055e\13")
buf.write("&\5&\u0560\n&\3&\3&\3&\3&\3&\7&\u0567\n&\f&\16&\u056a")
buf.write("\13&\5&\u056c\n&\3&\3&\3&\3&\7&\u0572\n&\f&\16&\u0575")
buf.write("\13&\5&\u0577\n&\3&\5&\u057a\n&\3&\3&\3&\5&\u057f\n&\3")
buf.write("&\5&\u0582\n&\3&\3&\3&\3&\3&\3&\3&\3&\7&\u058c\n&\f&\16")
buf.write("&\u058f\13&\5&\u0591\n&\3&\3&\3&\3&\7&\u0597\n&\f&\16")
buf.write("&\u059a\13&\3&\3&\5&\u059e\n&\3&\3&\5&\u05a2\n&\5&\u05a4")
buf.write("\n&\5&\u05a6\n&\3\'\3\'\3\'\3\'\3(\3(\3(\3(\3(\3(\3(\3")
buf.write("(\3(\5(\u05b5\n(\5(\u05b7\n(\3)\3)\3)\3)\3)\3)\3)\3)\3")
buf.write(")\5)\u05c2\n)\3*\3*\3*\3*\3*\3*\3*\3*\3*\3*\3*\3*\3*\3")
buf.write("*\3*\3*\3*\3*\3*\5*\u05d7\n*\3+\3+\3+\3+\3+\3+\7+\u05df")
buf.write("\n+\f+\16+\u05e2\13+\3+\3+\3,\3,\3,\3,\3-\3-\5-\u05ec")
buf.write("\n-\3-\3-\5-\u05f0\n-\5-\u05f2\n-\3.\3.\3.\3.\7.\u05f8")
buf.write("\n.\f.\16.\u05fb\13.\3.\3.\3/\3/\5/\u0601\n/\3/\3/\3/")
buf.write("\3/\3/\3/\3/\3/\3/\7/\u060c\n/\f/\16/\u060f\13/\3/\3/")
buf.write("\3/\5/\u0614\n/\3/\3/\3/\3/\3/\3/\3/\3/\3/\5/\u061f\n")
buf.write("/\3\60\3\60\3\61\3\61\3\61\5\61\u0626\n\61\3\61\3\61\5")
buf.write("\61\u062a\n\61\3\61\3\61\3\61\3\61\3\61\3\61\7\61\u0632")
buf.write("\n\61\f\61\16\61\u0635\13\61\3\62\3\62\3\62\3\62\3\62")
buf.write("\3\62\3\62\3\62\3\62\3\62\5\62\u0641\n\62\3\62\3\62\3")
buf.write("\62\3\62\3\62\3\62\5\62\u0649\n\62\3\62\3\62\3\62\3\62")
buf.write("\3\62\7\62\u0650\n\62\f\62\16\62\u0653\13\62\3\62\3\62")
buf.write("\3\62\5\62\u0658\n\62\3\62\3\62\3\62\3\62\3\62\3\62\5")
buf.write("\62\u0660\n\62\3\62\3\62\3\62\3\62\5\62\u0666\n\62\3\62")
buf.write("\3\62\5\62\u066a\n\62\3\62\3\62\3\62\5\62\u066f\n\62\3")
buf.write("\62\3\62\3\62\5\62\u0674\n\62\3\63\3\63\3\63\3\63\5\63")
buf.write("\u067a\n\63\3\63\3\63\3\63\3\63\3\63\3\63\3\63\3\63\3")
buf.write("\63\3\63\3\63\3\63\7\63\u0688\n\63\f\63\16\63\u068b\13")
buf.write("\63\3\64\3\64\3\64\3\64\3\64\3\64\3\64\3\64\3\64\3\64")
buf.write("\3\64\3\64\3\64\3\64\3\64\3\64\3\64\3\64\3\64\3\64\3\64")
buf.write("\3\64\3\64\3\64\3\64\6\64\u06a6\n\64\r\64\16\64\u06a7")
buf.write("\3\64\3\64\3\64\3\64\3\64\3\64\3\64\7\64\u06b1\n\64\f")
buf.write("\64\16\64\u06b4\13\64\3\64\3\64\3\64\3\64\3\64\5\64\u06bb")
buf.write("\n\64\3\64\3\64\3\64\5\64\u06c0\n\64\3\64\3\64\3\64\5")
buf.write("\64\u06c5\n\64\3\64\3\64\3\64\3\64\3\64\3\64\3\64\3\64")
buf.write("\3\64\7\64\u06d0\n\64\f\64\16\64\u06d3\13\64\3\64\3\64")
buf.write("\3\64\5\64\u06d8\n\64\3\64\3\64\3\64\3\64\3\64\5\64\u06df")
buf.write("\n\64\3\64\3\64\3\64\5\64\u06e4\n\64\3\64\5\64\u06e7\n")
buf.write("\64\3\64\5\64\u06ea\n\64\3\64\3\64\3\64\5\64\u06ef\n\64")
buf.write("\3\64\3\64\3\64\7\64\u06f4\n\64\f\64\16\64\u06f7\13\64")
buf.write("\5\64\u06f9\n\64\3\64\3\64\3\64\3\64\3\64\7\64\u0700\n")
buf.write("\64\f\64\16\64\u0703\13\64\5\64\u0705\n\64\3\64\3\64\5")
buf.write("\64\u0709\n\64\3\64\5\64\u070c\n\64\3\64\5\64\u070f\n")
buf.write("\64\3\64\3\64\3\64\3\64\3\64\3\64\3\64\3\64\3\64\3\64")
buf.write("\3\64\7\64\u071c\n\64\f\64\16\64\u071f\13\64\5\64\u0721")
buf.write("\n\64\3\64\3\64\3\64\3\64\3\64\3\64\3\64\3\64\3\64\3\64")
buf.write("\3\64\3\64\3\64\3\64\3\64\6\64\u0732\n\64\r\64\16\64\u0733")
buf.write("\3\64\3\64\5\64\u0738\n\64\3\64\3\64\3\64\3\64\6\64\u073e")
buf.write("\n\64\r\64\16\64\u073f\3\64\3\64\5\64\u0744\n\64\3\64")
buf.write("\3\64\3\64\3\64\3\64\3\64\3\64\3\64\3\64\3\64\3\64\3\64")
buf.write("\3\64\3\64\3\64\3\64\3\64\3\64\3\64\3\64\3\64\7\64\u075b")
buf.write("\n\64\f\64\16\64\u075e\13\64\5\64\u0760\n\64\3\64\3\64")
buf.write("\3\64\3\64\3\64\3\64\3\64\5\64\u0769\n\64\3\64\3\64\3")
buf.write("\64\3\64\5\64\u076f\n\64\3\64\3\64\3\64\3\64\5\64\u0775")
buf.write("\n\64\3\64\3\64\3\64\3\64\5\64\u077b\n\64\3\64\3\64\3")
buf.write("\64\3\64\3\64\3\64\3\64\3\64\3\64\3\64\3\64\5\64\u0788")
buf.write("\n\64\3\64\3\64\3\64\3\64\3\64\3\64\3\64\5\64\u0791\n")
buf.write("\64\3\64\3\64\3\64\3\64\3\64\3\64\3\64\3\64\3\64\3\64")
buf.write("\3\64\3\64\3\64\3\64\3\64\3\64\3\64\3\64\7\64\u07a5\n")
buf.write("\64\f\64\16\64\u07a8\13\64\5\64\u07aa\n\64\3\64\5\64\u07ad")
buf.write("\n\64\3\64\3\64\3\64\3\64\3\64\3\64\3\64\3\64\7\64\u07b7")
buf.write("\n\64\f\64\16\64\u07ba\13\64\3\65\3\65\3\66\3\66\3\66")
buf.write("\3\66\5\66\u07c2\n\66\3\67\3\67\3\67\3\67\5\67\u07c8\n")
buf.write("\67\5\67\u07ca\n\67\38\38\38\38\38\38\58\u07d2\n8\39\3")
buf.write("9\3:\3:\3;\3;\3<\3<\5<\u07dc\n<\3<\3<\3<\3<\5<\u07e2\n")
buf.write("<\3=\3=\3>\3>\3?\3?\3?\3?\3?\3?\7?\u07ee\n?\f?\16?\u07f1")
buf.write("\13?\3?\3?\3?\3?\3?\3?\5?\u07f9\n?\3?\3?\3?\3?\3?\5?\u0800")
buf.write("\n?\3?\3?\3?\5?\u0805\n?\3?\3?\3?\3?\3?\5?\u080c\n?\3")
buf.write("?\3?\3?\3?\3?\3?\3?\3?\5?\u0816\n?\3?\3?\3?\5?\u081b\n")
buf.write("?\3?\3?\3?\3?\3?\5?\u0822\n?\3?\3?\3?\3?\3?\3?\3?\3?\3")
buf.write("?\3?\3?\3?\3?\3?\3?\3?\3?\3?\3?\3?\3?\3?\7?\u083a\n?\f")
buf.write("?\16?\u083d\13?\3?\3?\5?\u0841\n?\5?\u0843\n?\3?\3?\3")
buf.write("?\3?\3?\5?\u084a\n?\7?\u084c\n?\f?\16?\u084f\13?\3@\3")
buf.write("@\3@\3@\5@\u0855\n@\3A\3A\5A\u0859\nA\3B\3B\3B\3B\3B\3")
buf.write("C\3C\3C\3C\3C\3C\3D\3D\3D\3D\5D\u086a\nD\3D\3D\3D\3D\3")
buf.write("D\3D\3D\3D\3D\3D\3D\7D\u0877\nD\fD\16D\u087a\13D\3D\3")
buf.write("D\3D\3D\5D\u0880\nD\3D\3D\3D\3D\3D\3D\3D\5D\u0889\nD\3")
buf.write("D\3D\3D\3D\3D\3D\7D\u0891\nD\fD\16D\u0894\13D\3D\3D\5")
buf.write("D\u0898\nD\3D\3D\3D\3D\3D\7D\u089f\nD\fD\16D\u08a2\13")
buf.write("D\3D\3D\5D\u08a6\nD\3E\3E\3E\3E\3E\3E\5E\u08ae\nE\3F\3")
buf.write("F\3F\3F\7F\u08b4\nF\fF\16F\u08b7\13F\5F\u08b9\nF\3F\3")
buf.write("F\3F\3F\5F\u08bf\nF\3F\5F\u08c2\nF\3F\3F\3F\3F\3F\5F\u08c9")
buf.write("\nF\3F\3F\3F\3F\7F\u08cf\nF\fF\16F\u08d2\13F\5F\u08d4")
buf.write("\nF\3F\3F\3F\3F\7F\u08da\nF\fF\16F\u08dd\13F\5F\u08df")
buf.write("\nF\3G\3G\3G\3G\3G\3G\3G\3G\3G\3G\3G\3G\3G\3G\3G\3G\3")
buf.write("G\3G\3G\3G\3G\3G\3G\3G\5G\u08f9\nG\3H\3H\3H\3H\3H\3H\3")
buf.write("H\3H\3H\5H\u0904\nH\3I\3I\3I\5I\u0909\nI\3I\3I\3I\3I\3")
buf.write("I\7I\u0910\nI\fI\16I\u0913\13I\3J\3J\3J\3J\3J\3J\3J\3")
buf.write("J\7J\u091d\nJ\fJ\16J\u0920\13J\3J\3J\3J\3J\3J\3J\3J\3")
buf.write("J\3J\3J\3J\3J\5J\u092e\nJ\3K\3K\5K\u0932\nK\3K\3K\5K\u0936")
buf.write("\nK\3K\3K\5K\u093a\nK\3K\3K\3K\3K\5K\u0940\nK\3K\3K\5")
buf.write("K\u0944\nK\3K\3K\5K\u0948\nK\3K\3K\5K\u094c\nK\5K\u094e")
buf.write("\nK\3L\3L\3L\3L\3M\3M\3M\3M\5M\u0958\nM\3N\3N\3N\3N\3")
buf.write("N\5N\u095f\nN\3O\3O\3O\3O\3O\3O\3O\5O\u0968\nO\3P\3P\3")
buf.write("P\3P\3P\5P\u096f\nP\3Q\3Q\3Q\3Q\3Q\5Q\u0976\nQ\3R\3R\3")
buf.write("R\7R\u097b\nR\fR\16R\u097e\13R\3S\3S\3T\3T\3T\7T\u0985")
buf.write("\nT\fT\16T\u0988\13T\3U\3U\3U\3U\3U\3U\3V\3V\3W\3W\3W")
buf.write("\5W\u0995\nW\3X\3X\3X\3X\3X\5X\u099c\nX\3Y\3Y\3Y\7Y\u09a1")
buf.write("\nY\fY\16Y\u09a4\13Y\3Z\3Z\3Z\3Z\3Z\5Z\u09ab\nZ\3[\5[")
buf.write("\u09ae\n[\3[\3[\5[\u09b2\n[\3[\3[\5[\u09b6\n[\3[\5[\u09b9")
buf.write("\n[\3\\\3\\\3\\\2\t$<`df|\u0090]\2\4\6\b\n\f\16\20\22")
buf.write("\24\26\30\32\34\36 \"$&(*,.\60\62\64\668:<>@BDFHJLNPR")
buf.write("TVXZ\\^`bdfhjlnprtvxz|~\u0080\u0082\u0084\u0086\u0088")
buf.write("\u008a\u008c\u008e\u0090\u0092\u0094\u0096\u0098\u009a")
buf.write("\u009c\u009e\u00a0\u00a2\u00a4\u00a6\u00a8\u00aa\u00ac")
buf.write("\u00ae\u00b0\u00b2\u00b4\u00b6\2\37\4\2$$\u00bf\u00bf")
buf.write("\4\2??ss\4\2\u00c9\u00c9\u00da\u00da\4\2\\\\jj\4\2OOk")
buf.write("k\3\2\u00c6\u00c7\4\2XX\u008f\u008f\4\2\u010d\u010d\u0111")
buf.write("\u0111\4\2NN\u00eb\u00eb\4\2\35\35BB\4\2XXyy\4\2\26\26")
buf.write("EE\4\2 \u00d9\u00d9\4\2ll\u00cd\u00cd\3\2\u0107\u0108")
buf.write("\3\2\u0109\u010b\4\2WW\u00c8\u00c8\3\2\u0101\u0106\5\2")
buf.write("\26\26\32\32\u00d4\u00d4\4\2TT\u00e4\u00e4\7\2==gg\u008c")
buf.write("\u008d\u00cb\u00cb\u00ff\u00ff\3\2\u0090\u0093\4\2YY\u00b1")
buf.write("\u00b1\5\2bbxx\u00dd\u00dd\6\2FFtt\u0083\u0083\u00f2\u00f2")
buf.write("\4\2\u00a0\u00a0\u00fe\u00fe\7\2//@@oo\u00ce\u00ce\u00ee")
buf.write("\u00ee\4\2\u00e1\u00e1\u00f5\u00f5\66\2\23\26\30\30\32")
buf.write("\33\35 #$\',..\62\62;=??ABDDFGJJLLOORRUY[[^beegiklnnq")
buf.write("qstvvxz||~~\u0080\u0080\u0083\u008d\u008f\u0095\u0099")
buf.write("\u009d\u009f\u00a1\u00a4\u00a4\u00a6\u00b2\u00b4\u00b7")
buf.write("\u00b9\u00c0\u00c2\u00c4\u00c6\u00cd\u00cf\u00d9\u00db")
buf.write("\u00dd\u00df\u00e3\u00e5\u00e7\u00e9\u00ea\u00ec\u00ec")
buf.write("\u00ee\u00f0\u00f2\u00f2\u00f4\u00f6\u00f9\u00f9\u00fb")
buf.write("\u0100\2\u0b4c\2\u00b8\3\2\2\2\4\u00bb\3\2\2\2\6\u00be")
buf.write("\3\2\2\2\b\u00c1\3\2\2\2\n\u00c4\3\2\2\2\f\u03bb\3\2\2")
buf.write("\2\16\u03be\3\2\2\2\20\u03c2\3\2\2\2\22\u03d0\3\2\2\2")
buf.write("\24\u03d2\3\2\2\2\26\u03e0\3\2\2\2\30\u03e6\3\2\2\2\32")
buf.write("\u03ea\3\2\2\2\34\u03f2\3\2\2\2\36\u03f6\3\2\2\2 \u041b")
buf.write("\3\2\2\2\"\u041d\3\2\2\2$\u041f\3\2\2\2&\u0443\3\2\2\2")
buf.write("(\u0445\3\2\2\2*\u044d\3\2\2\2,\u047d\3\2\2\2.\u04af\3")
buf.write("\2\2\2\60\u04be\3\2\2\2\62\u04c0\3\2\2\2\64\u04c7\3\2")
buf.write("\2\2\66\u04e4\3\2\2\28\u04ed\3\2\2\2:\u04fe\3\2\2\2<\u0500")
buf.write("\3\2\2\2>\u0527\3\2\2\2@\u0537\3\2\2\2B\u0539\3\2\2\2")
buf.write("D\u0542\3\2\2\2F\u054a\3\2\2\2H\u0550\3\2\2\2J\u0552\3")
buf.write("\2\2\2L\u05a7\3\2\2\2N\u05b6\3\2\2\2P\u05c1\3\2\2\2R\u05d6")
buf.write("\3\2\2\2T\u05d8\3\2\2\2V\u05e5\3\2\2\2X\u05e9\3\2\2\2")
buf.write("Z\u05f3\3\2\2\2\\\u061e\3\2\2\2^\u0620\3\2\2\2`\u0629")
buf.write("\3\2\2\2b\u0673\3\2\2\2d\u0679\3\2\2\2f\u07ac\3\2\2\2")
buf.write("h\u07bb\3\2\2\2j\u07c1\3\2\2\2l\u07c9\3\2\2\2n\u07d1\3")
buf.write("\2\2\2p\u07d3\3\2\2\2r\u07d5\3\2\2\2t\u07d7\3\2\2\2v\u07d9")
buf.write("\3\2\2\2x\u07e3\3\2\2\2z\u07e5\3\2\2\2|\u0842\3\2\2\2")
buf.write("~\u0854\3\2\2\2\u0080\u0858\3\2\2\2\u0082\u085a\3\2\2")
buf.write("\2\u0084\u085f\3\2\2\2\u0086\u08a5\3\2\2\2\u0088\u08a7")
buf.write("\3\2\2\2\u008a\u08b8\3\2\2\2\u008c\u08f8\3\2\2\2\u008e")
buf.write("\u0903\3\2\2\2\u0090\u0905\3\2\2\2\u0092\u092d\3\2\2\2")
buf.write("\u0094\u094d\3\2\2\2\u0096\u094f\3\2\2\2\u0098\u0957\3")
buf.write("\2\2\2\u009a\u095e\3\2\2\2\u009c\u0967\3\2\2\2\u009e\u096e")
buf.write("\3\2\2\2\u00a0\u0975\3\2\2\2\u00a2\u0977\3\2\2\2\u00a4")
buf.write("\u097f\3\2\2\2\u00a6\u0981\3\2\2\2\u00a8\u0989\3\2\2\2")
buf.write("\u00aa\u098f\3\2\2\2\u00ac\u0994\3\2\2\2\u00ae\u099b\3")
buf.write("\2\2\2\u00b0\u099d\3\2\2\2\u00b2\u09aa\3\2\2\2\u00b4\u09b8")
buf.write("\3\2\2\2\u00b6\u09ba\3\2\2\2\u00b8\u00b9\5\f\7\2\u00b9")
buf.write("\u00ba\7\2\2\3\u00ba\3\3\2\2\2\u00bb\u00bc\5^\60\2\u00bc")
buf.write("\u00bd\7\2\2\3\u00bd\5\3\2\2\2\u00be\u00bf\5\u00a2R\2")
buf.write("\u00bf\u00c0\7\2\2\3\u00c0\7\3\2\2\2\u00c1\u00c2\5|?\2")
buf.write("\u00c2\u00c3\7\2\2\3\u00c3\t\3\2\2\2\u00c4\u00c5\5\u0090")
buf.write("I\2\u00c5\u00c6\7\2\2\3\u00c6\13\3\2\2\2\u00c7\u03bc\5")
buf.write("\16\b\2\u00c8\u00c9\7\u00ef\2\2\u00c9\u03bc\5\u00b2Z\2")
buf.write("\u00ca\u00cb\7\u00ef\2\2\u00cb\u00cc\5\u00b2Z\2\u00cc")
buf.write("\u00cd\7\3\2\2\u00cd\u00ce\5\u00b2Z\2\u00ce\u03bc\3\2")
buf.write("\2\2\u00cf\u00d0\7/\2\2\u00d0\u00d4\7\u00c9\2\2\u00d1")
buf.write("\u00d2\7h\2\2\u00d2\u00d3\7\u0097\2\2\u00d3\u00d5\7Q\2")
buf.write("\2\u00d4\u00d1\3\2\2\2\u00d4\u00d5\3\2\2\2\u00d5\u00d6")
buf.write("\3\2\2\2\u00d6\u00d9\5\u00a6T\2\u00d7\u00d8\7\37\2\2\u00d8")
buf.write("\u00da\5\u00aeX\2\u00d9\u00d7\3\2\2\2\u00d9\u00da\3\2")
buf.write("\2\2\u00da\u00dd\3\2\2\2\u00db\u00dc\7\u00fa\2\2\u00dc")
buf.write("\u00de\5\30\r\2\u00dd\u00db\3\2\2\2\u00dd\u00de\3\2\2")
buf.write("\2\u00de\u03bc\3\2\2\2\u00df\u00e0\7H\2\2\u00e0\u00e3")
buf.write("\7\u00c9\2\2\u00e1\u00e2\7h\2\2\u00e2\u00e4\7Q\2\2\u00e3")
buf.write("\u00e1\3\2\2\2\u00e3\u00e4\3\2\2\2\u00e4\u00e5\3\2\2\2")
buf.write("\u00e5\u00e7\5\u00a6T\2\u00e6\u00e8\t\2\2\2\u00e7\u00e6")
buf.write("\3\2\2\2\u00e7\u00e8\3\2\2\2\u00e8\u03bc\3\2\2\2\u00e9")
buf.write("\u00ea\7\27\2\2\u00ea\u00eb\7\u00c9\2\2\u00eb\u00ec\5")
buf.write("\u00a6T\2\u00ec\u00ed\7\u00ba\2\2\u00ed\u00ee\7\u00e2")
buf.write("\2\2\u00ee\u00ef\5\u00b2Z\2\u00ef\u03bc\3\2\2\2\u00f0")
buf.write("\u00f1\7\27\2\2\u00f1\u00f2\7\u00c9\2\2\u00f2\u00f3\5")
buf.write("\u00a6T\2\u00f3\u00f4\7\u00d1\2\2\u00f4\u00f5\7\37\2\2")
buf.write("\u00f5\u00f6\5\u00aeX\2\u00f6\u03bc\3\2\2\2\u00f7\u00f8")
buf.write("\7/\2\2\u00f8\u00fc\7\u00da\2\2\u00f9\u00fa\7h\2\2\u00fa")
buf.write("\u00fb\7\u0097\2\2\u00fb\u00fd\7Q\2\2\u00fc\u00f9\3\2")
buf.write("\2\2\u00fc\u00fd\3\2\2\2\u00fd\u00fe\3\2\2\2\u00fe\u0100")
buf.write("\5\u00a6T\2\u00ff\u0101\5Z.\2\u0100\u00ff\3\2\2\2\u0100")
buf.write("\u0101\3\2\2\2\u0101\u0104\3\2\2\2\u0102\u0103\7*\2\2")
buf.write("\u0103\u0105\5l\67\2\u0104\u0102\3\2\2\2\u0104\u0105\3")
buf.write("\2\2\2\u0105\u0108\3\2\2\2\u0106\u0107\7\u00fa\2\2\u0107")
buf.write("\u0109\5\30\r\2\u0108\u0106\3\2\2\2\u0108\u0109\3\2\2")
buf.write("\2\u0109\u010a\3\2\2\2\u010a\u0110\7\34\2\2\u010b\u0111")
buf.write("\5\16\b\2\u010c\u010d\7\4\2\2\u010d\u010e\5\16\b\2\u010e")
buf.write("\u010f\7\5\2\2\u010f\u0111\3\2\2\2\u0110\u010b\3\2\2\2")
buf.write("\u0110\u010c\3\2\2\2\u0111\u0117\3\2\2\2\u0112\u0114\7")
buf.write("\u00fa\2\2\u0113\u0115\7\u0094\2\2\u0114\u0113\3\2\2\2")
buf.write("\u0114\u0115\3\2\2\2\u0115\u0116\3\2\2\2\u0116\u0118\7")
buf.write(";\2\2\u0117\u0112\3\2\2\2\u0117\u0118\3\2\2\2\u0118\u03bc")
buf.write("\3\2\2\2\u0119\u011a\7/\2\2\u011a\u011e\7\u00da\2\2\u011b")
buf.write("\u011c\7h\2\2\u011c\u011d\7\u0097\2\2\u011d\u011f\7Q\2")
buf.write("\2\u011e\u011b\3\2\2\2\u011e\u011f\3\2\2\2\u011f\u0120")
buf.write("\3\2\2\2\u0120\u0121\5\u00a6T\2\u0121\u0122\7\4\2\2\u0122")
buf.write("\u0127\5\22\n\2\u0123\u0124\7\6\2\2\u0124\u0126\5\22\n")
buf.write("\2\u0125\u0123\3\2\2\2\u0126\u0129\3\2\2\2\u0127\u0125")
buf.write("\3\2\2\2\u0127\u0128\3\2\2\2\u0128\u012a\3\2\2\2\u0129")
buf.write("\u0127\3\2\2\2\u012a\u012d\7\5\2\2\u012b\u012c\7*\2\2")
buf.write("\u012c\u012e\5l\67\2\u012d\u012b\3\2\2\2\u012d\u012e\3")
buf.write("\2\2\2\u012e\u0131\3\2\2\2\u012f\u0130\7\u00fa\2\2\u0130")
buf.write("\u0132\5\30\r\2\u0131\u012f\3\2\2\2\u0131\u0132\3\2\2")
buf.write("\2\u0132\u03bc\3\2\2\2\u0133\u0134\7H\2\2\u0134\u0137")
buf.write("\7\u00da\2\2\u0135\u0136\7h\2\2\u0136\u0138\7Q\2\2\u0137")
buf.write("\u0135\3\2\2\2\u0137\u0138\3\2\2\2\u0138\u0139\3\2\2\2")
buf.write("\u0139\u03bc\5\u00a6T\2\u013a\u013b\7o\2\2\u013b\u013c")
buf.write("\7r\2\2\u013c\u013e\5\u00a6T\2\u013d\u013f\5Z.\2\u013e")
buf.write("\u013d\3\2\2\2\u013e\u013f\3\2\2\2\u013f\u0140\3\2\2\2")
buf.write("\u0140\u0141\5\16\b\2\u0141\u03bc\3\2\2\2\u0142\u0143")
buf.write("\7@\2\2\u0143\u0144\7\\\2\2\u0144\u0147\5\u00a6T\2\u0145")
buf.write("\u0146\7\u00f8\2\2\u0146\u0148\5`\61\2\u0147\u0145\3\2")
buf.write("\2\2\u0147\u0148\3\2\2\2\u0148\u03bc\3\2\2\2\u0149\u014a")
buf.write("\7\u00e5\2\2\u014a\u014b\7\u00da\2\2\u014b\u03bc\5\u00a6")
buf.write("T\2\u014c\u014d\7*\2\2\u014d\u014e\7\u009e\2\2\u014e\u014f")
buf.write("\7\u00da\2\2\u014f\u0150\5\u00a6T\2\u0150\u0153\7u\2\2")
buf.write("\u0151\u0154\5l\67\2\u0152\u0154\7\u0098\2\2\u0153\u0151")
buf.write("\3\2\2\2\u0153\u0152\3\2\2\2\u0154\u03bc\3\2\2\2\u0155")
buf.write("\u0156\7*\2\2\u0156\u0157\7\u009e\2\2\u0157\u0158\7(\2")
buf.write("\2\u0158\u0159\5\u00a6T\2\u0159\u015c\7u\2\2\u015a\u015d")
buf.write("\5l\67\2\u015b\u015d\7\u0098\2\2\u015c\u015a\3\2\2\2\u015c")
buf.write("\u015b\3\2\2\2\u015d\u03bc\3\2\2\2\u015e\u015f\7\27\2")
buf.write("\2\u015f\u0162\7\u00da\2\2\u0160\u0161\7h\2\2\u0161\u0163")
buf.write("\7Q\2\2\u0162\u0160\3\2\2\2\u0162\u0163\3\2\2\2\u0163")
buf.write("\u0164\3\2\2\2\u0164\u0165\5\u00a6T\2\u0165\u0166\7\u00ba")
buf.write("\2\2\u0166\u0167\7\u00e2\2\2\u0167\u0168\5\u00a6T\2\u0168")
buf.write("\u03bc\3\2\2\2\u0169\u016a\7\27\2\2\u016a\u016d\7\u00da")
buf.write("\2\2\u016b\u016c\7h\2\2\u016c\u016e\7Q\2\2\u016d\u016b")
buf.write("\3\2\2\2\u016d\u016e\3\2\2\2\u016e\u016f\3\2\2\2\u016f")
buf.write("\u0170\5\u00a6T\2\u0170\u0171\7\23\2\2\u0171\u0175\7(")
buf.write("\2\2\u0172\u0173\7h\2\2\u0173\u0174\7\u0097\2\2\u0174")
buf.write("\u0176\7Q\2\2\u0175\u0172\3\2\2\2\u0175\u0176\3\2\2\2")
buf.write("\u0176\u0177\3\2\2\2\u0177\u0178\5\24\13\2\u0178\u03bc")
buf.write("\3\2\2\2\u0179\u017a\7\27\2\2\u017a\u017d\7\u00da\2\2")
buf.write("\u017b\u017c\7h\2\2\u017c\u017e\7Q\2\2\u017d\u017b\3\2")
buf.write("\2\2\u017d\u017e\3\2\2\2\u017e\u017f\3\2\2\2\u017f\u0180")
buf.write("\5\u00a6T\2\u0180\u0181\7\u00ba\2\2\u0181\u0184\7(\2\2")
buf.write("\u0182\u0183\7h\2\2\u0183\u0185\7Q\2\2\u0184\u0182\3\2")
buf.write("\2\2\u0184\u0185\3\2\2\2\u0185\u0186\3\2\2\2\u0186\u0187")
buf.write("\5\u00b2Z\2\u0187\u0188\7\u00e2\2\2\u0188\u0189\5\u00b2")
buf.write("Z\2\u0189\u03bc\3\2\2\2\u018a\u018b\7\27\2\2\u018b\u018e")
buf.write("\7\u00da\2\2\u018c\u018d\7h\2\2\u018d\u018f\7Q\2\2\u018e")
buf.write("\u018c\3\2\2\2\u018e\u018f\3\2\2\2\u018f\u0190\3\2\2\2")
buf.write("\u0190\u0191\5\u00a6T\2\u0191\u0192\7H\2\2\u0192\u0195")
buf.write("\7(\2\2\u0193\u0194\7h\2\2\u0194\u0196\7Q\2\2\u0195\u0193")
buf.write("\3\2\2\2\u0195\u0196\3\2\2\2\u0196\u0197\3\2\2\2\u0197")
buf.write("\u0198\5\u00a6T\2\u0198\u03bc\3\2\2\2\u0199\u019a\7\27")
buf.write("\2\2\u019a\u019b\7\u00da\2\2\u019b\u019c\5\u00a6T\2\u019c")
buf.write("\u019d\7\u00d1\2\2\u019d\u019e\7\37\2\2\u019e\u019f\5")
buf.write("\u00aeX\2\u019f\u03bc\3\2\2\2\u01a0\u01a1\7\27\2\2\u01a1")
buf.write("\u01a2\7\u00da\2\2\u01a2\u01a3\5\u00a6T\2\u01a3\u01a4")
buf.write("\7\u00d1\2\2\u01a4\u01a5\7\u00b5\2\2\u01a5\u01a6\5\32")
buf.write("\16\2\u01a6\u03bc\3\2\2\2\u01a7\u01a8\7\27\2\2\u01a8\u01a9")
buf.write("\7\u00da\2\2\u01a9\u01aa\5\u00a6T\2\u01aa\u01ab\7P\2\2")
buf.write("\u01ab\u01b8\5\u00b2Z\2\u01ac\u01b5\7\4\2\2\u01ad\u01b2")
buf.write("\5\u009eP\2\u01ae\u01af\7\6\2\2\u01af\u01b1\5\u009eP\2")
buf.write("\u01b0\u01ae\3\2\2\2\u01b1\u01b4\3\2\2\2\u01b2\u01b0\3")
buf.write("\2\2\2\u01b2\u01b3\3\2\2\2\u01b3\u01b6\3\2\2\2\u01b4\u01b2")
buf.write("\3\2\2\2\u01b5\u01ad\3\2\2\2\u01b5\u01b6\3\2\2\2\u01b6")
buf.write("\u01b7\3\2\2\2\u01b7\u01b9\7\5\2\2\u01b8\u01ac\3\2\2\2")
buf.write("\u01b8\u01b9\3\2\2\2\u01b9\u01bc\3\2\2\2\u01ba\u01bb\7")
buf.write("\u00f8\2\2\u01bb\u01bd\5`\61\2\u01bc\u01ba\3\2\2\2\u01bc")
buf.write("\u01bd\3\2\2\2\u01bd\u03bc\3\2\2\2\u01be\u01bf\7\30\2")
buf.write("\2\u01bf\u01c2\5\u00a6T\2\u01c0\u01c1\7\u00fa\2\2\u01c1")
buf.write("\u01c3\5\30\r\2\u01c2\u01c0\3\2\2\2\u01c2\u01c3\3\2\2")
buf.write("\2\u01c3\u03bc\3\2\2\2\u01c4\u01c7\7/\2\2\u01c5\u01c6")
buf.write("\7\u00a2\2\2\u01c6\u01c8\7\u00bc\2\2\u01c7\u01c5\3\2\2")
buf.write("\2\u01c7\u01c8\3\2\2\2\u01c8\u01c9\3\2\2\2\u01c9\u01ca")
buf.write("\7\u0089\2\2\u01ca\u01ce\7\u00f6\2\2\u01cb\u01cc\7h\2")
buf.write("\2\u01cc\u01cd\7\u0097\2\2\u01cd\u01cf\7Q\2\2\u01ce\u01cb")
buf.write("\3\2\2\2\u01ce\u01cf\3\2\2\2\u01cf\u01d0\3\2\2\2\u01d0")
buf.write("\u01d3\5\u00a6T\2\u01d1\u01d2\7*\2\2\u01d2\u01d4\5l\67")
buf.write("\2\u01d3\u01d1\3\2\2\2\u01d3\u01d4\3\2\2\2\u01d4\u01d7")
buf.write("\3\2\2\2\u01d5\u01d6\7\u00fa\2\2\u01d6\u01d8\5\30\r\2")
buf.write("\u01d7\u01d5\3\2\2\2\u01d7\u01d8\3\2\2\2\u01d8\u01d9\3")
buf.write("\2\2\2\u01d9\u01da\7\34\2\2\u01da\u01db\5\16\b\2\u01db")
buf.write("\u03bc\3\2\2\2\u01dc\u01df\7/\2\2\u01dd\u01de\7\u00a2")
buf.write("\2\2\u01de\u01e0\7\u00bc\2\2\u01df\u01dd\3\2\2\2\u01df")
buf.write("\u01e0\3\2\2\2\u01e0\u01e1\3\2\2\2\u01e1\u01e2\7\u00f6")
buf.write("\2\2\u01e2\u01e5\5\u00a6T\2\u01e3\u01e4\7*\2\2\u01e4\u01e6")
buf.write("\5l\67\2\u01e5\u01e3\3\2\2\2\u01e5\u01e6\3\2\2\2\u01e6")
buf.write("\u01e9\3\2\2\2\u01e7\u01e8\7\u00cc\2\2\u01e8\u01ea\t\3")
buf.write("\2\2\u01e9\u01e7\3\2\2\2\u01e9\u01ea\3\2\2\2\u01ea\u01eb")
buf.write("\3\2\2\2\u01eb\u01ec\7\34\2\2\u01ec\u01ed\5\16\b\2\u01ed")
buf.write("\u03bc\3\2\2\2\u01ee\u01ef\7\u00b9\2\2\u01ef\u01f0\7\u0089")
buf.write("\2\2\u01f0\u01f1\7\u00f6\2\2\u01f1\u03bc\5\u00a6T\2\u01f2")
buf.write("\u01f3\7H\2\2\u01f3\u01f4\7\u0089\2\2\u01f4\u01f7\7\u00f6")
buf.write("\2\2\u01f5\u01f6\7h\2\2\u01f6\u01f8\7Q\2\2\u01f7\u01f5")
buf.write("\3\2\2\2\u01f7\u01f8\3\2\2\2\u01f8\u01f9\3\2\2\2\u01f9")
buf.write("\u03bc\5\u00a6T\2\u01fa\u01fb\7\27\2\2\u01fb\u01fc\7\u0089")
buf.write("\2\2\u01fc\u01ff\7\u00f6\2\2\u01fd\u01fe\7h\2\2\u01fe")
buf.write("\u0200\7Q\2\2\u01ff\u01fd\3\2\2\2\u01ff\u0200\3\2\2\2")
buf.write("\u0200\u0201\3\2\2\2\u0201\u0202\5\u00a6T\2\u0202\u0203")
buf.write("\7\u00ba\2\2\u0203\u0204\7\u00e2\2\2\u0204\u0205\5\u00a6")
buf.write("T\2\u0205\u03bc\3\2\2\2\u0206\u0207\7H\2\2\u0207\u020a")
buf.write("\7\u00f6\2\2\u0208\u0209\7h\2\2\u0209\u020b\7Q\2\2\u020a")
buf.write("\u0208\3\2\2\2\u020a\u020b\3\2\2\2\u020b\u020c\3\2\2\2")
buf.write("\u020c\u03bc\5\u00a6T\2\u020d\u020e\7\27\2\2\u020e\u020f")
buf.write("\7\u00f6\2\2\u020f\u0210\5\u00a6T\2\u0210\u0211\7\u00ba")
buf.write("\2\2\u0211\u0212\7\u00e2\2\2\u0212\u0213\5\u00a6T\2\u0213")
buf.write("\u03bc\3\2\2\2\u0214\u0215\7\27\2\2\u0215\u0216\7\u00f6")
buf.write("\2\2\u0216\u0217\5\u00a6T\2\u0217\u0218\7\u00d1\2\2\u0218")
buf.write("\u0219\7\37\2\2\u0219\u021a\5\u00aeX\2\u021a\u03bc\3\2")
buf.write("\2\2\u021b\u021c\7#\2\2\u021c\u021d\5\u00a6T\2\u021d\u0226")
buf.write("\7\4\2\2\u021e\u0223\5\u009eP\2\u021f\u0220\7\6\2\2\u0220")
buf.write("\u0222\5\u009eP\2\u0221\u021f\3\2\2\2\u0222\u0225\3\2")
buf.write("\2\2\u0223\u0221\3\2\2\2\u0223\u0224\3\2\2\2\u0224\u0227")
buf.write("\3\2\2\2\u0225\u0223\3\2\2\2\u0226\u021e\3\2\2\2\u0226")
buf.write("\u0227\3\2\2\2\u0227\u0228\3\2\2\2\u0228\u0229\7\5\2\2")
buf.write("\u0229\u03bc\3\2\2\2\u022a\u022b\7/\2\2\u022b\u022c\7")
buf.write("\u00c2\2\2\u022c\u0230\5\u00b2Z\2\u022d\u022e\7\u00fa")
buf.write("\2\2\u022e\u022f\7\24\2\2\u022f\u0231\5\u00acW\2\u0230")
buf.write("\u022d\3\2\2\2\u0230\u0231\3\2\2\2\u0231\u0234\3\2\2\2")
buf.write("\u0232\u0233\7j\2\2\u0233\u0235\5\u00b2Z\2\u0234\u0232")
buf.write("\3\2\2\2\u0234\u0235\3\2\2\2\u0235\u03bc\3\2\2\2\u0236")
buf.write("\u0237\7H\2\2\u0237\u0238\7\u00c2\2\2\u0238\u023b\5\u00b2")
buf.write("Z\2\u0239\u023a\7j\2\2\u023a\u023c\5\u00b2Z\2\u023b\u0239")
buf.write("\3\2\2\2\u023b\u023c\3\2\2\2\u023c\u03bc\3\2\2\2\u023d")
buf.write("\u023e\7_\2\2\u023e\u023f\5\u00b0Y\2\u023f\u0240\7\u00e2")
buf.write("\2\2\u0240\u0245\5\u00aeX\2\u0241\u0242\7\6\2\2\u0242")
buf.write("\u0244\5\u00aeX\2\u0243\u0241\3\2\2\2\u0244\u0247\3\2")
buf.write("\2\2\u0245\u0243\3\2\2\2\u0245\u0246\3\2\2\2\u0246\u024b")
buf.write("\3\2\2\2\u0247\u0245\3\2\2\2\u0248\u0249\7\u00fa\2\2\u0249")
buf.write("\u024a\7\24\2\2\u024a\u024c\7\u00a1\2\2\u024b\u0248\3")
buf.write("\2\2\2\u024b\u024c\3\2\2\2\u024c\u0250\3\2\2\2\u024d\u024e")
buf.write("\7`\2\2\u024e\u024f\7\"\2\2\u024f\u0251\5\u00acW\2\u0250")
buf.write("\u024d\3\2\2\2\u0250\u0251\3\2\2\2\u0251\u0254\3\2\2\2")
buf.write("\u0252\u0253\7j\2\2\u0253\u0255\5\u00b2Z\2\u0254\u0252")
buf.write("\3\2\2\2\u0254\u0255\3\2\2\2\u0255\u03bc\3\2\2\2\u0256")
buf.write("\u025a\7\u00c0\2\2\u0257\u0258\7\24\2\2\u0258\u0259\7")
buf.write("\u00a1\2\2\u0259\u025b\7Z\2\2\u025a\u0257\3\2\2\2\u025a")
buf.write("\u025b\3\2\2\2\u025b\u025c\3\2\2\2\u025c\u025d\5\u00b0")
buf.write("Y\2\u025d\u025e\7\\\2\2\u025e\u0263\5\u00aeX\2\u025f\u0260")
buf.write("\7\6\2\2\u0260\u0262\5\u00aeX\2\u0261\u025f\3\2\2\2\u0262")
buf.write("\u0265\3\2\2\2\u0263\u0261\3\2\2\2\u0263\u0264\3\2\2\2")
buf.write("\u0264\u0269\3\2\2\2\u0265\u0263\3\2\2\2\u0266\u0267\7")
buf.write("`\2\2\u0267\u0268\7\"\2\2\u0268\u026a\5\u00acW\2\u0269")
buf.write("\u0266\3\2\2\2\u0269\u026a\3\2\2\2\u026a\u026d\3\2\2\2")
buf.write("\u026b\u026c\7j\2\2\u026c\u026e\5\u00b2Z\2\u026d\u026b")
buf.write("\3\2\2\2\u026d\u026e\3\2\2\2\u026e\u03bc\3\2\2\2\u026f")
buf.write("\u0270\7\u00d1\2\2\u0270\u0274\7\u00c2\2\2\u0271\u0275")
buf.write("\7\26\2\2\u0272\u0275\7\u0095\2\2\u0273\u0275\5\u00b2")
buf.write("Z\2\u0274\u0271\3\2\2\2\u0274\u0272\3\2\2\2\u0274\u0273")
buf.write("\3\2\2\2\u0275\u0278\3\2\2\2\u0276\u0277\7j\2\2\u0277")
buf.write("\u0279\5\u00b2Z\2\u0278\u0276\3\2\2\2\u0278\u0279\3\2")
buf.write("\2\2\u0279\u03bc\3\2\2\2\u027a\u0285\7_\2\2\u027b\u0280")
buf.write("\5\u00a4S\2\u027c\u027d\7\6\2\2\u027d\u027f\5\u00a4S\2")
buf.write("\u027e\u027c\3\2\2\2\u027f\u0282\3\2\2\2\u0280\u027e\3")
buf.write("\2\2\2\u0280\u0281\3\2\2\2\u0281\u0286\3\2\2\2\u0282\u0280")
buf.write("\3\2\2\2\u0283\u0284\7\26\2\2\u0284\u0286\7\u00b4\2\2")
buf.write("\u0285\u027b\3\2\2\2\u0285\u0283\3\2\2\2\u0286\u0287\3")
buf.write("\2\2\2\u0287\u0289\7\u009e\2\2\u0288\u028a\t\4\2\2\u0289")
buf.write("\u0288\3\2\2\2\u0289\u028a\3\2\2\2\u028a\u028b\3\2\2\2")
buf.write("\u028b\u028c\5\u00a6T\2\u028c\u028d\7\u00e2\2\2\u028d")
buf.write("\u0291\5\u00aeX\2\u028e\u028f\7\u00fa\2\2\u028f\u0290")
buf.write("\7_\2\2\u0290\u0292\7\u00a1\2\2\u0291\u028e\3\2\2\2\u0291")
buf.write("\u0292\3\2\2\2\u0292\u03bc\3\2\2\2\u0293\u029e\7A\2\2")
buf.write("\u0294\u0299\5\u00a4S\2\u0295\u0296\7\6\2\2\u0296\u0298")
buf.write("\5\u00a4S\2\u0297\u0295\3\2\2\2\u0298\u029b\3\2\2\2\u0299")
buf.write("\u0297\3\2\2\2\u0299\u029a\3\2\2\2\u029a\u029f\3\2\2\2")
buf.write("\u029b\u0299\3\2\2\2\u029c\u029d\7\26\2\2\u029d\u029f")
buf.write("\7\u00b4\2\2\u029e\u0294\3\2\2\2\u029e\u029c\3\2\2\2\u029f")
buf.write("\u02a0\3\2\2\2\u02a0\u02a2\7\u009e\2\2\u02a1\u02a3\t\4")
buf.write("\2\2\u02a2\u02a1\3\2\2\2\u02a2\u02a3\3\2\2\2\u02a3\u02a4")
buf.write("\3\2\2\2\u02a4\u02a5\5\u00a6T\2\u02a5\u02a6\7\u00e2\2")
buf.write("\2\u02a6\u02a7\5\u00aeX\2\u02a7\u03bc\3\2\2\2\u02a8\u02ac")
buf.write("\7\u00c0\2\2\u02a9\u02aa\7_\2\2\u02aa\u02ab\7\u00a1\2")
buf.write("\2\u02ab\u02ad\7Z\2\2\u02ac\u02a9\3\2\2\2\u02ac\u02ad")
buf.write("\3\2\2\2\u02ad\u02b8\3\2\2\2\u02ae\u02b3\5\u00a4S\2\u02af")
buf.write("\u02b0\7\6\2\2\u02b0\u02b2\5\u00a4S\2\u02b1\u02af\3\2")
buf.write("\2\2\u02b2\u02b5\3\2\2\2\u02b3\u02b1\3\2\2\2\u02b3\u02b4")
buf.write("\3\2\2\2\u02b4\u02b9\3\2\2\2\u02b5\u02b3\3\2\2\2\u02b6")
buf.write("\u02b7\7\26\2\2\u02b7\u02b9\7\u00b4\2\2\u02b8\u02ae\3")
buf.write("\2\2\2\u02b8\u02b6\3\2\2\2\u02b9\u02ba\3\2\2\2\u02ba\u02bc")
buf.write("\7\u009e\2\2\u02bb\u02bd\t\4\2\2\u02bc\u02bb\3\2\2\2\u02bc")
buf.write("\u02bd\3\2\2\2\u02bd\u02be\3\2\2\2\u02be\u02bf\5\u00a6")
buf.write("T\2\u02bf\u02c0\7\\\2\2\u02c0\u02c1\5\u00aeX\2\u02c1\u03bc")
buf.write("\3\2\2\2\u02c2\u02c3\7\u00d3\2\2\u02c3\u02c9\7a\2\2\u02c4")
buf.write("\u02c6\7\u009e\2\2\u02c5\u02c7\7\u00da\2\2\u02c6\u02c5")
buf.write("\3\2\2\2\u02c6\u02c7\3\2\2\2\u02c7\u02c8\3\2\2\2\u02c8")
buf.write("\u02ca\5\u00a6T\2\u02c9\u02c4\3\2\2\2\u02c9\u02ca\3\2")
buf.write("\2\2\u02ca\u03bc\3\2\2\2\u02cb\u02d7\7R\2\2\u02cc\u02cd")
buf.write("\7\4\2\2\u02cd\u02d2\5\u0098M\2\u02ce\u02cf\7\6\2\2\u02cf")
buf.write("\u02d1\5\u0098M\2\u02d0\u02ce\3\2\2\2\u02d1\u02d4\3\2")
buf.write("\2\2\u02d2\u02d0\3\2\2\2\u02d2\u02d3\3\2\2\2\u02d3\u02d5")
buf.write("\3\2\2\2\u02d4\u02d2\3\2\2\2\u02d5\u02d6\7\5\2\2\u02d6")
buf.write("\u02d8\3\2\2\2\u02d7\u02cc\3\2\2\2\u02d7\u02d8\3\2\2\2")
buf.write("\u02d8\u02d9\3\2\2\2\u02d9\u03bc\5\f\7\2\u02da\u02db\7")
buf.write("R\2\2\u02db\u02dd\7\30\2\2\u02dc\u02de\7\u00f4\2\2\u02dd")
buf.write("\u02dc\3\2\2\2\u02dd\u02de\3\2\2\2\u02de\u02df\3\2\2\2")
buf.write("\u02df\u03bc\5\f\7\2\u02e0\u02e1\7\u00d3\2\2\u02e1\u02e2")
buf.write("\7/\2\2\u02e2\u02e3\7\u00da\2\2\u02e3\u03bc\5\u00a6T\2")
buf.write("\u02e4\u02e5\7\u00d3\2\2\u02e5\u02e6\7/\2\2\u02e6\u02e7")
buf.write("\7\u00c9\2\2\u02e7\u03bc\5\u00a6T\2\u02e8\u02e9\7\u00d3")
buf.write("\2\2\u02e9\u02ea\7/\2\2\u02ea\u02eb\7\u00f6\2\2\u02eb")
buf.write("\u03bc\5\u00a6T\2\u02ec\u02ed\7\u00d3\2\2\u02ed\u02ee")
buf.write("\7/\2\2\u02ee\u02ef\7\u0089\2\2\u02ef\u02f0\7\u00f6\2")
buf.write("\2\u02f0\u03bc\5\u00a6T\2\u02f1\u02f2\7\u00d3\2\2\u02f2")
buf.write("\u02f5\7\u00db\2\2\u02f3\u02f4\t\5\2\2\u02f4\u02f6\5\u00a6")
buf.write("T\2\u02f5\u02f3\3\2\2\2\u02f5\u02f6\3\2\2\2\u02f6\u02fd")
buf.write("\3\2\2\2\u02f7\u02f8\7}\2\2\u02f8\u02fb\5l\67\2\u02f9")
buf.write("\u02fa\7M\2\2\u02fa\u02fc\5l\67\2\u02fb\u02f9\3\2\2\2")
buf.write("\u02fb\u02fc\3\2\2\2\u02fc\u02fe\3\2\2\2\u02fd\u02f7\3")
buf.write("\2\2\2\u02fd\u02fe\3\2\2\2\u02fe\u03bc\3\2\2\2\u02ff\u0300")
buf.write("\7\u00d3\2\2\u0300\u0303\7\u00ca\2\2\u0301\u0302\t\5\2")
buf.write("\2\u0302\u0304\5\u00b2Z\2\u0303\u0301\3\2\2\2\u0303\u0304")
buf.write("\3\2\2\2\u0304\u030b\3\2\2\2\u0305\u0306\7}\2\2\u0306")
buf.write("\u0309\5l\67\2\u0307\u0308\7M\2\2\u0308\u030a\5l\67\2")
buf.write("\u0309\u0307\3\2\2\2\u0309\u030a\3\2\2\2\u030a\u030c\3")
buf.write("\2\2\2\u030b\u0305\3\2\2\2\u030b\u030c\3\2\2\2\u030c\u03bc")
buf.write("\3\2\2\2\u030d\u030e\7\u00d3\2\2\u030e\u0315\7\'\2\2\u030f")
buf.write("\u0310\7}\2\2\u0310\u0313\5l\67\2\u0311\u0312\7M\2\2\u0312")
buf.write("\u0314\5l\67\2\u0313\u0311\3\2\2\2\u0313\u0314\3\2\2\2")
buf.write("\u0314\u0316\3\2\2\2\u0315\u030f\3\2\2\2\u0315\u0316\3")
buf.write("\2\2\2\u0316\u03bc\3\2\2\2\u0317\u0318\7\u00d3\2\2\u0318")
buf.write("\u0319\7)\2\2\u0319\u031b\t\5\2\2\u031a\u031c\5\u00a6")
buf.write("T\2\u031b\u031a\3\2\2\2\u031b\u031c\3\2\2\2\u031c\u0323")
buf.write("\3\2\2\2\u031d\u031e\7}\2\2\u031e\u0321\5l\67\2\u031f")
buf.write("\u0320\7M\2\2\u0320\u0322\5l\67\2\u0321\u031f\3\2\2\2")
buf.write("\u0321\u0322\3\2\2\2\u0322\u0324\3\2\2\2\u0323\u031d\3")
buf.write("\2\2\2\u0323\u0324\3\2\2\2\u0324\u03bc\3\2\2\2\u0325\u0326")
buf.write("\7\u00d3\2\2\u0326\u0327\7\u00d6\2\2\u0327\u0328\7Z\2")
buf.write("\2\u0328\u03bc\5\u00a6T\2\u0329\u032a\7\u00d3\2\2\u032a")
buf.write("\u032b\7\u00d6\2\2\u032b\u032c\7Z\2\2\u032c\u032d\7\4")
buf.write("\2\2\u032d\u032e\5\16\b\2\u032e\u032f\7\5\2\2\u032f\u03bc")
buf.write("\3\2\2\2\u0330\u0332\7\u00d3\2\2\u0331\u0333\7\62\2\2")
buf.write("\u0332\u0331\3\2\2\2\u0332\u0333\3\2\2\2\u0333\u0334\3")
buf.write("\2\2\2\u0334\u0337\7\u00c3\2\2\u0335\u0336\t\5\2\2\u0336")
buf.write("\u0338\5\u00b2Z\2\u0337\u0335\3\2\2\2\u0337\u0338\3\2")
buf.write("\2\2\u0338\u03bc\3\2\2\2\u0339\u033a\7\u00d3\2\2\u033a")
buf.write("\u033b\7\u00c2\2\2\u033b\u033e\7a\2\2\u033c\u033d\t\5")
buf.write("\2\2\u033d\u033f\5\u00b2Z\2\u033e\u033c\3\2\2\2\u033e")
buf.write("\u033f\3\2\2\2\u033f\u03bc\3\2\2\2\u0340\u0341\7C\2\2")
buf.write("\u0341\u03bc\5\u00a6T\2\u0342\u0343\7B\2\2\u0343\u03bc")
buf.write("\5\u00a6T\2\u0344\u0345\7\u00d3\2\2\u0345\u034c\7^\2\2")
buf.write("\u0346\u0347\7}\2\2\u0347\u034a\5l\67\2\u0348\u0349\7")
buf.write("M\2\2\u0349\u034b\5l\67\2\u034a\u0348\3\2\2\2\u034a\u034b")
buf.write("\3\2\2\2\u034b\u034d\3\2\2\2\u034c\u0346\3\2\2\2\u034c")
buf.write("\u034d\3\2\2\2\u034d\u03bc\3\2\2\2\u034e\u034f\7\u00d3")
buf.write("\2\2\u034f\u0356\7\u00d0\2\2\u0350\u0351\7}\2\2\u0351")
buf.write("\u0354\5l\67\2\u0352\u0353\7M\2\2\u0353\u0355\5l\67\2")
buf.write("\u0354\u0352\3\2\2\2\u0354\u0355\3\2\2\2\u0355\u0357\3")
buf.write("\2\2\2\u0356\u0350\3\2\2\2\u0356\u0357\3\2\2\2\u0357\u03bc")
buf.write("\3\2\2\2\u0358\u0359\7\u00d1\2\2\u0359\u035a\7\u00d0\2")
buf.write("\2\u035a\u035b\5\u00a6T\2\u035b\u035c\7\u0101\2\2\u035c")
buf.write("\u035d\5^\60\2\u035d\u03bc\3\2\2\2\u035e\u035f\7\u00bd")
buf.write("\2\2\u035f\u0360\7\u00d0\2\2\u0360\u03bc\5\u00a6T\2\u0361")
buf.write("\u0362\7\u00d5\2\2\u0362\u036b\7\u00e3\2\2\u0363\u0368")
buf.write("\5\u009aN\2\u0364\u0365\7\6\2\2\u0365\u0367\5\u009aN\2")
buf.write("\u0366\u0364\3\2\2\2\u0367\u036a\3\2\2\2\u0368\u0366\3")
buf.write("\2\2\2\u0368\u0369\3\2\2\2\u0369\u036c\3\2\2\2\u036a\u0368")
buf.write("\3\2\2\2\u036b\u0363\3\2\2\2\u036b\u036c\3\2\2\2\u036c")
buf.write("\u03bc\3\2\2\2\u036d\u036f\7+\2\2\u036e\u0370\7\u00fd")
buf.write("\2\2\u036f\u036e\3\2\2\2\u036f\u0370\3\2\2\2\u0370\u03bc")
buf.write("\3\2\2\2\u0371\u0373\7\u00c4\2\2\u0372\u0374\7\u00fd\2")
buf.write("\2\u0373\u0372\3\2\2\2\u0373\u0374\3\2\2\2\u0374\u03bc")
buf.write("\3\2\2\2\u0375\u0376\7\u00b3\2\2\u0376\u0377\5\u00b2Z")
buf.write("\2\u0377\u0378\7\\\2\2\u0378\u0379\5\f\7\2\u0379\u03bc")
buf.write("\3\2\2\2\u037a\u037b\7>\2\2\u037b\u037c\7\u00b3\2\2\u037c")
buf.write("\u03bc\5\u00b2Z\2\u037d\u037e\7P\2\2\u037e\u0388\5\u00b2")
buf.write("Z\2\u037f\u0380\7\u00f1\2\2\u0380\u0385\5^\60\2\u0381")
buf.write("\u0382\7\6\2\2\u0382\u0384\5^\60\2\u0383\u0381\3\2\2\2")
buf.write("\u0384\u0387\3\2\2\2\u0385\u0383\3\2\2\2\u0385\u0386\3")
buf.write("\2\2\2\u0386\u0389\3\2\2\2\u0387\u0385\3\2\2\2\u0388\u037f")
buf.write("\3\2\2\2\u0388\u0389\3\2\2\2\u0389\u03bc\3\2\2\2\u038a")
buf.write("\u038b\7C\2\2\u038b\u038c\7n\2\2\u038c\u03bc\5\u00b2Z")
buf.write("\2\u038d\u038e\7C\2\2\u038e\u038f\7\u00a6\2\2\u038f\u03bc")
buf.write("\5\u00b2Z\2\u0390\u0391\7\u00d1\2\2\u0391\u0392\7\u00ac")
buf.write("\2\2\u0392\u03bc\5\u00a2R\2\u0393\u0394\7\u00d1\2\2\u0394")
buf.write("\u0395\7\u00e0\2\2\u0395\u0398\7\u0100\2\2\u0396\u0399")
buf.write("\7\u0080\2\2\u0397\u0399\5^\60\2\u0398\u0396\3\2\2\2\u0398")
buf.write("\u0397\3\2\2\2\u0399\u03bc\3\2\2\2\u039a\u039b\7\u00ee")
buf.write("\2\2\u039b\u039c\5\u00a6T\2\u039c\u039d\7\u00d1\2\2\u039d")
buf.write("\u03a2\5\u0096L\2\u039e\u039f\7\6\2\2\u039f\u03a1\5\u0096")
buf.write("L\2\u03a0\u039e\3\2\2\2\u03a1\u03a4\3\2\2\2\u03a2\u03a0")
buf.write("\3\2\2\2\u03a2\u03a3\3\2\2\2\u03a3\u03a7\3\2\2\2\u03a4")
buf.write("\u03a2\3\2\2\2\u03a5\u03a6\7\u00f8\2\2\u03a6\u03a8\5`")
buf.write("\61\2\u03a7\u03a5\3\2\2\2\u03a7\u03a8\3\2\2\2\u03a8\u03bc")
buf.write("\3\2\2\2\u03a9\u03aa\7\u008b\2\2\u03aa\u03ab\7r\2\2\u03ab")
buf.write("\u03b0\5\u00a6T\2\u03ac\u03ae\7\34\2\2\u03ad\u03ac\3\2")
buf.write("\2\2\u03ad\u03ae\3\2\2\2\u03ae\u03af\3\2\2\2\u03af\u03b1")
buf.write("\5\u00b2Z\2\u03b0\u03ad\3\2\2\2\u03b0\u03b1\3\2\2\2\u03b1")
buf.write("\u03b2\3\2\2\2\u03b2\u03b3\7\u00f1\2\2\u03b3\u03b4\5<")
buf.write("\37\2\u03b4\u03b5\7\u009e\2\2\u03b5\u03b7\5^\60\2\u03b6")
buf.write("\u03b8\5\u0086D\2\u03b7\u03b6\3\2\2\2\u03b8\u03b9\3\2")
buf.write("\2\2\u03b9\u03b7\3\2\2\2\u03b9\u03ba\3\2\2\2\u03ba\u03bc")
buf.write("\3\2\2\2\u03bb\u00c7\3\2\2\2\u03bb\u00c8\3\2\2\2\u03bb")
buf.write("\u00ca\3\2\2\2\u03bb\u00cf\3\2\2\2\u03bb\u00df\3\2\2\2")
buf.write("\u03bb\u00e9\3\2\2\2\u03bb\u00f0\3\2\2\2\u03bb\u00f7\3")
buf.write("\2\2\2\u03bb\u0119\3\2\2\2\u03bb\u0133\3\2\2\2\u03bb\u013a")
buf.write("\3\2\2\2\u03bb\u0142\3\2\2\2\u03bb\u0149\3\2\2\2\u03bb")
buf.write("\u014c\3\2\2\2\u03bb\u0155\3\2\2\2\u03bb\u015e\3\2\2\2")
buf.write("\u03bb\u0169\3\2\2\2\u03bb\u0179\3\2\2\2\u03bb\u018a\3")
buf.write("\2\2\2\u03bb\u0199\3\2\2\2\u03bb\u01a0\3\2\2\2\u03bb\u01a7")
buf.write("\3\2\2\2\u03bb\u01be\3\2\2\2\u03bb\u01c4\3\2\2\2\u03bb")
buf.write("\u01dc\3\2\2\2\u03bb\u01ee\3\2\2\2\u03bb\u01f2\3\2\2\2")
buf.write("\u03bb\u01fa\3\2\2\2\u03bb\u0206\3\2\2\2\u03bb\u020d\3")
buf.write("\2\2\2\u03bb\u0214\3\2\2\2\u03bb\u021b\3\2\2\2\u03bb\u022a")
buf.write("\3\2\2\2\u03bb\u0236\3\2\2\2\u03bb\u023d\3\2\2\2\u03bb")
buf.write("\u0256\3\2\2\2\u03bb\u026f\3\2\2\2\u03bb\u027a\3\2\2\2")
buf.write("\u03bb\u0293\3\2\2\2\u03bb\u02a8\3\2\2\2\u03bb\u02c2\3")
buf.write("\2\2\2\u03bb\u02cb\3\2\2\2\u03bb\u02da\3\2\2\2\u03bb\u02e0")
buf.write("\3\2\2\2\u03bb\u02e4\3\2\2\2\u03bb\u02e8\3\2\2\2\u03bb")
buf.write("\u02ec\3\2\2\2\u03bb\u02f1\3\2\2\2\u03bb\u02ff\3\2\2\2")
buf.write("\u03bb\u030d\3\2\2\2\u03bb\u0317\3\2\2\2\u03bb\u0325\3")
buf.write("\2\2\2\u03bb\u0329\3\2\2\2\u03bb\u0330\3\2\2\2\u03bb\u0339")
buf.write("\3\2\2\2\u03bb\u0340\3\2\2\2\u03bb\u0342\3\2\2\2\u03bb")
buf.write("\u0344\3\2\2\2\u03bb\u034e\3\2\2\2\u03bb\u0358\3\2\2\2")
buf.write("\u03bb\u035e\3\2\2\2\u03bb\u0361\3\2\2\2\u03bb\u036d\3")
buf.write("\2\2\2\u03bb\u0371\3\2\2\2\u03bb\u0375\3\2\2\2\u03bb\u037a")
buf.write("\3\2\2\2\u03bb\u037d\3\2\2\2\u03bb\u038a\3\2\2\2\u03bb")
buf.write("\u038d\3\2\2\2\u03bb\u0390\3\2\2\2\u03bb\u0393\3\2\2\2")
buf.write("\u03bb\u039a\3\2\2\2\u03bb\u03a9\3\2\2\2\u03bc\r\3\2\2")
buf.write("\2\u03bd\u03bf\5\20\t\2\u03be\u03bd\3\2\2\2\u03be\u03bf")
buf.write("\3\2\2\2\u03bf\u03c0\3\2\2\2\u03c0\u03c1\5\36\20\2\u03c1")
buf.write("\17\3\2\2\2\u03c2\u03c4\7\u00fa\2\2\u03c3\u03c5\7\u00b8")
buf.write("\2\2\u03c4\u03c3\3\2\2\2\u03c4\u03c5\3\2\2\2\u03c5\u03c6")
buf.write("\3\2\2\2\u03c6\u03cb\5\66\34\2\u03c7\u03c8\7\6\2\2\u03c8")
buf.write("\u03ca\5\66\34\2\u03c9\u03c7\3\2\2\2\u03ca\u03cd\3\2\2")
buf.write("\2\u03cb\u03c9\3\2\2\2\u03cb\u03cc\3\2\2\2\u03cc\21\3")
buf.write("\2\2\2\u03cd\u03cb\3\2\2\2\u03ce\u03d1\5\24\13\2\u03cf")
buf.write("\u03d1\5\26\f\2\u03d0\u03ce\3\2\2\2\u03d0\u03cf\3\2\2")
buf.write("\2\u03d1\23\3\2\2\2\u03d2\u03d3\5\u00b2Z\2\u03d3\u03d6")
buf.write("\5|?\2\u03d4\u03d5\7\u0097\2\2\u03d5\u03d7\7\u0098\2\2")
buf.write("\u03d6\u03d4\3\2\2\2\u03d6\u03d7\3\2\2\2\u03d7\u03da\3")
buf.write("\2\2\2\u03d8\u03d9\7*\2\2\u03d9\u03db\5l\67\2\u03da\u03d8")
buf.write("\3\2\2\2\u03da\u03db\3\2\2\2\u03db\u03de\3\2\2\2\u03dc")
buf.write("\u03dd\7\u00fa\2\2\u03dd\u03df\5\30\r\2\u03de\u03dc\3")
buf.write("\2\2\2\u03de\u03df\3\2\2\2\u03df\25\3\2\2\2\u03e0\u03e1")
buf.write("\7}\2\2\u03e1\u03e4\5\u00a6T\2\u03e2\u03e3\t\6\2\2\u03e3")
buf.write("\u03e5\7\u00b5\2\2\u03e4\u03e2\3\2\2\2\u03e4\u03e5\3\2")
buf.write("\2\2\u03e5\27\3\2\2\2\u03e6\u03e7\7\4\2\2\u03e7\u03e8")
buf.write("\5\32\16\2\u03e8\u03e9\7\5\2\2\u03e9\31\3\2\2\2\u03ea")
buf.write("\u03ef\5\34\17\2\u03eb\u03ec\7\6\2\2\u03ec\u03ee\5\34")
buf.write("\17\2\u03ed\u03eb\3\2\2\2\u03ee\u03f1\3\2\2\2\u03ef\u03ed")
buf.write("\3\2\2\2\u03ef\u03f0\3\2\2\2\u03f0\33\3\2\2\2\u03f1\u03ef")
buf.write("\3\2\2\2\u03f2\u03f3\5\u00b2Z\2\u03f3\u03f4\7\u0101\2")
buf.write("\2\u03f4\u03f5\5^\60\2\u03f5\35\3\2\2\2\u03f6\u0401\5")
buf.write("$\23\2\u03f7\u03f8\7\u00a3\2\2\u03f8\u03f9\7\"\2\2\u03f9")
buf.write("\u03fe\5(\25\2\u03fa\u03fb\7\6\2\2\u03fb\u03fd\5(\25\2")
buf.write("\u03fc\u03fa\3\2\2\2\u03fd\u0400\3\2\2\2\u03fe\u03fc\3")
buf.write("\2\2\2\u03fe\u03ff\3\2\2\2\u03ff\u0402\3\2\2\2\u0400\u03fe")
buf.write("\3\2\2\2\u0401\u03f7\3\2\2\2\u0401\u0402\3\2\2\2\u0402")
buf.write("\u0408\3\2\2\2\u0403\u0404\7\u009b\2\2\u0404\u0406\5\"")
buf.write("\22\2\u0405\u0407\t\7\2\2\u0406\u0405\3\2\2\2\u0406\u0407")
buf.write("\3\2\2\2\u0407\u0409\3\2\2\2\u0408\u0403\3\2\2\2\u0408")
buf.write("\u0409\3\2\2\2\u0409\u0417\3\2\2\2\u040a\u040b\7~\2\2")
buf.write("\u040b\u0418\5 \21\2\u040c\u040d\7U\2\2\u040d\u040f\t")
buf.write("\b\2\2\u040e\u0410\5\"\22\2\u040f\u040e\3\2\2\2\u040f")
buf.write("\u0410\3\2\2\2\u0410\u0411\3\2\2\2\u0411\u0415\t\7\2\2")
buf.write("\u0412\u0416\7\u00a0\2\2\u0413\u0414\7\u00fa\2\2\u0414")
buf.write("\u0416\7\u00df\2\2\u0415\u0412\3\2\2\2\u0415\u0413\3\2")
buf.write("\2\2\u0416\u0418\3\2\2\2\u0417\u040a\3\2\2\2\u0417\u040c")
buf.write("\3\2\2\2\u0417\u0418\3\2\2\2\u0418\37\3\2\2\2\u0419\u041c")
buf.write("\7\26\2\2\u041a\u041c\5\"\22\2\u041b\u0419\3\2\2\2\u041b")
buf.write("\u041a\3\2\2\2\u041c!\3\2\2\2\u041d\u041e\t\t\2\2\u041e")
buf.write("#\3\2\2\2\u041f\u0420\b\23\1\2\u0420\u0421\5&\24\2\u0421")
buf.write("\u0430\3\2\2\2\u0422\u0423\f\4\2\2\u0423\u0425\7p\2\2")
buf.write("\u0424\u0426\58\35\2\u0425\u0424\3\2\2\2\u0425\u0426\3")
buf.write("\2\2\2\u0426\u0427\3\2\2\2\u0427\u042f\5$\23\5\u0428\u0429")
buf.write("\f\3\2\2\u0429\u042b\t\n\2\2\u042a\u042c\58\35\2\u042b")
buf.write("\u042a\3\2\2\2\u042b\u042c\3\2\2\2\u042c\u042d\3\2\2\2")
buf.write("\u042d\u042f\5$\23\4\u042e\u0422\3\2\2\2\u042e\u0428\3")
buf.write("\2\2\2\u042f\u0432\3\2\2\2\u0430\u042e\3\2\2\2\u0430\u0431")
buf.write("\3\2\2\2\u0431%\3\2\2\2\u0432\u0430\3\2\2\2\u0433\u0444")
buf.write("\5*\26\2\u0434\u0435\7\u00da\2\2\u0435\u0444\5\u00a6T")
buf.write("\2\u0436\u0437\7\u00f3\2\2\u0437\u043c\5^\60\2\u0438\u0439")
buf.write("\7\6\2\2\u0439\u043b\5^\60\2\u043a\u0438\3\2\2\2\u043b")
buf.write("\u043e\3\2\2\2\u043c\u043a\3\2\2\2\u043c\u043d\3\2\2\2")
buf.write("\u043d\u0444\3\2\2\2\u043e\u043c\3\2\2\2\u043f\u0440\7")
buf.write("\4\2\2\u0440\u0441\5\36\20\2\u0441\u0442\7\5\2\2\u0442")
buf.write("\u0444\3\2\2\2\u0443\u0433\3\2\2\2\u0443\u0434\3\2\2\2")
buf.write("\u0443\u0436\3\2\2\2\u0443\u043f\3\2\2\2\u0444\'\3\2\2")
buf.write("\2\u0445\u0447\5^\60\2\u0446\u0448\t\13\2\2\u0447\u0446")
buf.write("\3\2\2\2\u0447\u0448\3\2\2\2\u0448\u044b\3\2\2\2\u0449")
buf.write("\u044a\7\u009a\2\2\u044a\u044c\t\f\2\2\u044b\u0449\3\2")
buf.write("\2\2\u044b\u044c\3\2\2\2\u044c)\3\2\2\2\u044d\u044f\7")
buf.write("\u00ce\2\2\u044e\u0450\58\35\2\u044f\u044e\3\2\2\2\u044f")
buf.write("\u0450\3\2\2\2\u0450\u0451\3\2\2\2\u0451\u0456\5:\36\2")
buf.write("\u0452\u0453\7\6\2\2\u0453\u0455\5:\36\2\u0454\u0452\3")
buf.write("\2\2\2\u0455\u0458\3\2\2\2\u0456\u0454\3\2\2\2\u0456\u0457")
buf.write("\3\2\2\2\u0457\u0462\3\2\2\2\u0458\u0456\3\2\2\2\u0459")
buf.write("\u045a\7\\\2\2\u045a\u045f\5<\37\2\u045b\u045c\7\6\2\2")
buf.write("\u045c\u045e\5<\37\2\u045d\u045b\3\2\2\2\u045e\u0461\3")
buf.write("\2\2\2\u045f\u045d\3\2\2\2\u045f\u0460\3\2\2\2\u0460\u0463")
buf.write("\3\2\2\2\u0461\u045f\3\2\2\2\u0462\u0459\3\2\2\2\u0462")
buf.write("\u0463\3\2\2\2\u0463\u0466\3\2\2\2\u0464\u0465\7\u00f8")
buf.write("\2\2\u0465\u0467\5`\61\2\u0466\u0464\3\2\2\2\u0466\u0467")
buf.write("\3\2\2\2\u0467\u046b\3\2\2\2\u0468\u0469\7c\2\2\u0469")
buf.write("\u046a\7\"\2\2\u046a\u046c\5,\27\2\u046b\u0468\3\2\2\2")
buf.write("\u046b\u046c\3\2\2\2\u046c\u046f\3\2\2\2\u046d\u046e\7")
buf.write("f\2\2\u046e\u0470\5`\61\2\u046f\u046d\3\2\2\2\u046f\u0470")
buf.write("\3\2\2\2\u0470\u047a\3\2\2\2\u0471\u0472\7\u00f9\2\2\u0472")
buf.write("\u0477\5\62\32\2\u0473\u0474\7\6\2\2\u0474\u0476\5\62")
buf.write("\32\2\u0475\u0473\3\2\2\2\u0476\u0479\3\2\2\2\u0477\u0475")
buf.write("\3\2\2\2\u0477\u0478\3\2\2\2\u0478\u047b\3\2\2\2\u0479")
buf.write("\u0477\3\2\2\2\u047a\u0471\3\2\2\2\u047a\u047b\3\2\2\2")
buf.write("\u047b+\3\2\2\2\u047c\u047e\58\35\2\u047d\u047c\3\2\2")
buf.write("\2\u047d\u047e\3\2\2\2\u047e\u047f\3\2\2\2\u047f\u0484")
buf.write("\5.\30\2\u0480\u0481\7\6\2\2\u0481\u0483\5.\30\2\u0482")
buf.write("\u0480\3\2\2\2\u0483\u0486\3\2\2\2\u0484\u0482\3\2\2\2")
buf.write("\u0484\u0485\3\2\2\2\u0485-\3\2\2\2\u0486\u0484\3\2\2")
buf.write("\2\u0487\u04b0\5\60\31\2\u0488\u0489\7\u00c5\2\2\u0489")
buf.write("\u0492\7\4\2\2\u048a\u048f\5^\60\2\u048b\u048c\7\6\2\2")
buf.write("\u048c\u048e\5^\60\2\u048d\u048b\3\2\2\2\u048e\u0491\3")
buf.write("\2\2\2\u048f\u048d\3\2\2\2\u048f\u0490\3\2\2\2\u0490\u0493")
buf.write("\3\2\2\2\u0491\u048f\3\2\2\2\u0492\u048a\3\2\2\2\u0492")
buf.write("\u0493\3\2\2\2\u0493\u0494\3\2\2\2\u0494\u04b0\7\5\2\2")
buf.write("\u0495\u0496\7\61\2\2\u0496\u049f\7\4\2\2\u0497\u049c")
buf.write("\5^\60\2\u0498\u0499\7\6\2\2\u0499\u049b\5^\60\2\u049a")
buf.write("\u0498\3\2\2\2\u049b\u049e\3\2\2\2\u049c\u049a\3\2\2\2")
buf.write("\u049c\u049d\3\2\2\2\u049d\u04a0\3\2\2\2\u049e\u049c\3")
buf.write("\2\2\2\u049f\u0497\3\2\2\2\u049f\u04a0\3\2\2\2\u04a0\u04a1")
buf.write("\3\2\2\2\u04a1\u04b0\7\5\2\2\u04a2\u04a3\7d\2\2\u04a3")
buf.write("\u04a4\7\u00d2\2\2\u04a4\u04a5\7\4\2\2\u04a5\u04aa\5\60")
buf.write("\31\2\u04a6\u04a7\7\6\2\2\u04a7\u04a9\5\60\31\2\u04a8")
buf.write("\u04a6\3\2\2\2\u04a9\u04ac\3\2\2\2\u04aa\u04a8\3\2\2\2")
buf.write("\u04aa\u04ab\3\2\2\2\u04ab\u04ad\3\2\2\2\u04ac\u04aa\3")
buf.write("\2\2\2\u04ad\u04ae\7\5\2\2\u04ae\u04b0\3\2\2\2\u04af\u0487")
buf.write("\3\2\2\2\u04af\u0488\3\2\2\2\u04af\u0495\3\2\2\2\u04af")
buf.write("\u04a2\3\2\2\2\u04b0/\3\2\2\2\u04b1\u04ba\7\4\2\2\u04b2")
buf.write("\u04b7\5^\60\2\u04b3\u04b4\7\6\2\2\u04b4\u04b6\5^\60\2")
buf.write("\u04b5\u04b3\3\2\2\2\u04b6\u04b9\3\2\2\2\u04b7\u04b5\3")
buf.write("\2\2\2\u04b7\u04b8\3\2\2\2\u04b8\u04bb\3\2\2\2\u04b9\u04b7")
buf.write("\3\2\2\2\u04ba\u04b2\3\2\2\2\u04ba\u04bb\3\2\2\2\u04bb")
buf.write("\u04bc\3\2\2\2\u04bc\u04bf\7\5\2\2\u04bd\u04bf\5^\60\2")
buf.write("\u04be\u04b1\3\2\2\2\u04be\u04bd\3\2\2\2\u04bf\61\3\2")
buf.write("\2\2\u04c0\u04c1\5\u00b2Z\2\u04c1\u04c2\7\34\2\2\u04c2")
buf.write("\u04c3\7\4\2\2\u04c3\u04c4\5\64\33\2\u04c4\u04c5\7\5\2")
buf.write("\2\u04c5\63\3\2\2\2\u04c6\u04c8\5\u00b2Z\2\u04c7\u04c6")
buf.write("\3\2\2\2\u04c7\u04c8\3\2\2\2\u04c8\u04d3\3\2\2\2\u04c9")
buf.write("\u04ca\7\u00a9\2\2\u04ca\u04cb\7\"\2\2\u04cb\u04d0\5^")
buf.write("\60\2\u04cc\u04cd\7\6\2\2\u04cd\u04cf\5^\60\2\u04ce\u04cc")
buf.write("\3\2\2\2\u04cf\u04d2\3\2\2\2\u04d0\u04ce\3\2\2\2\u04d0")
buf.write("\u04d1\3\2\2\2\u04d1\u04d4\3\2\2\2\u04d2\u04d0\3\2\2\2")
buf.write("\u04d3\u04c9\3\2\2\2\u04d3\u04d4\3\2\2\2\u04d4\u04df\3")
buf.write("\2\2\2\u04d5\u04d6\7\u00a3\2\2\u04d6\u04d7\7\"\2\2\u04d7")
buf.write("\u04dc\5(\25\2\u04d8\u04d9\7\6\2\2\u04d9\u04db\5(\25\2")
buf.write("\u04da\u04d8\3\2\2\2\u04db\u04de\3\2\2\2\u04dc\u04da\3")
buf.write("\2\2\2\u04dc\u04dd\3\2\2\2\u04dd\u04e0\3\2\2\2\u04de\u04dc")
buf.write("\3\2\2\2\u04df\u04d5\3\2\2\2\u04df\u04e0\3\2\2\2\u04e0")
buf.write("\u04e2\3\2\2\2\u04e1\u04e3\5\u008aF\2\u04e2\u04e1\3\2")
buf.write("\2\2\u04e2\u04e3\3\2\2\2\u04e3\65\3\2\2\2\u04e4\u04e6")
buf.write("\5\u00b2Z\2\u04e5\u04e7\5Z.\2\u04e6\u04e5\3\2\2\2\u04e6")
buf.write("\u04e7\3\2\2\2\u04e7\u04e8\3\2\2\2\u04e8\u04e9\7\34\2")
buf.write("\2\u04e9\u04ea\7\4\2\2\u04ea\u04eb\5\16\b\2\u04eb\u04ec")
buf.write("\7\5\2\2\u04ec\67\3\2\2\2\u04ed\u04ee\t\r\2\2\u04ee9\3")
buf.write("\2\2\2\u04ef\u04f4\5^\60\2\u04f0\u04f2\7\34\2\2\u04f1")
buf.write("\u04f0\3\2\2\2\u04f1\u04f2\3\2\2\2\u04f2\u04f3\3\2\2\2")
buf.write("\u04f3\u04f5\5\u00b2Z\2\u04f4\u04f1\3\2\2\2\u04f4\u04f5")
buf.write("\3\2\2\2\u04f5\u04ff\3\2\2\2\u04f6\u04f7\5f\64\2\u04f7")
buf.write("\u04f8\7\3\2\2\u04f8\u04fb\7\u0109\2\2\u04f9\u04fa\7\34")
buf.write("\2\2\u04fa\u04fc\5Z.\2\u04fb\u04f9\3\2\2\2\u04fb\u04fc")
buf.write("\3\2\2\2\u04fc\u04ff\3\2\2\2\u04fd\u04ff\7\u0109\2\2\u04fe")
buf.write("\u04ef\3\2\2\2\u04fe\u04f6\3\2\2\2\u04fe\u04fd\3\2\2\2")
buf.write("\u04ff;\3\2\2\2\u0500\u0501\b\37\1\2\u0501\u0502\5B\"")
buf.write("\2\u0502\u0515\3\2\2\2\u0503\u0511\f\4\2\2\u0504\u0505")
buf.write("\7\60\2\2\u0505\u0506\7w\2\2\u0506\u0512\5B\"\2\u0507")
buf.write("\u0508\5> \2\u0508\u0509\7w\2\2\u0509\u050a\5<\37\2\u050a")
buf.write("\u050b\5@!\2\u050b\u0512\3\2\2\2\u050c\u050d\7\u008e\2")
buf.write("\2\u050d\u050e\5> \2\u050e\u050f\7w\2\2\u050f\u0510\5")
buf.write("B\"\2\u0510\u0512\3\2\2\2\u0511\u0504\3\2\2\2\u0511\u0507")
buf.write("\3\2\2\2\u0511\u050c\3\2\2\2\u0512\u0514\3\2\2\2\u0513")
buf.write("\u0503\3\2\2\2\u0514\u0517\3\2\2\2\u0515\u0513\3\2\2\2")
buf.write("\u0515\u0516\3\2\2\2\u0516=\3\2\2\2\u0517\u0515\3\2\2")
buf.write("\2\u0518\u051a\7m\2\2\u0519\u0518\3\2\2\2\u0519\u051a")
buf.write("\3\2\2\2\u051a\u0528\3\2\2\2\u051b\u051d\7{\2\2\u051c")
buf.write("\u051e\7\u00a5\2\2\u051d\u051c\3\2\2\2\u051d\u051e\3\2")
buf.write("\2\2\u051e\u0528\3\2\2\2\u051f\u0521\7\u00c1\2\2\u0520")
buf.write("\u0522\7\u00a5\2\2\u0521\u0520\3\2\2\2\u0521\u0522\3\2")
buf.write("\2\2\u0522\u0528\3\2\2\2\u0523\u0525\7]\2\2\u0524\u0526")
buf.write("\7\u00a5\2\2\u0525\u0524\3\2\2\2\u0525\u0526\3\2\2\2\u0526")
buf.write("\u0528\3\2\2\2\u0527\u0519\3\2\2\2\u0527\u051b\3\2\2\2")
buf.write("\u0527\u051f\3\2\2\2\u0527\u0523\3\2\2\2\u0528?\3\2\2")
buf.write("\2\u0529\u052a\7\u009e\2\2\u052a\u0538\5`\61\2\u052b\u052c")
buf.write("\7\u00f1\2\2\u052c\u052d\7\4\2\2\u052d\u0532\5\u00b2Z")
buf.write("\2\u052e\u052f\7\6\2\2\u052f\u0531\5\u00b2Z\2\u0530\u052e")
buf.write("\3\2\2\2\u0531\u0534\3\2\2\2\u0532\u0530\3\2\2\2\u0532")
buf.write("\u0533\3\2\2\2\u0533\u0535\3\2\2\2\u0534\u0532\3\2\2\2")
buf.write("\u0535\u0536\7\5\2\2\u0536\u0538\3\2\2\2\u0537\u0529\3")
buf.write("\2\2\2\u0537\u052b\3\2\2\2\u0538A\3\2\2\2\u0539\u0540")
buf.write("\5J&\2\u053a\u053b\7\u00dc\2\2\u053b\u053c\5D#\2\u053c")
buf.write("\u053d\7\4\2\2\u053d\u053e\5^\60\2\u053e\u053f\7\5\2\2")
buf.write("\u053f\u0541\3\2\2\2\u0540\u053a\3\2\2\2\u0540\u0541\3")
buf.write("\2\2\2\u0541C\3\2\2\2\u0542\u0543\t\16\2\2\u0543E\3\2")
buf.write("\2\2\u0544\u054b\7L\2\2\u0545\u0547\7\u00e5\2\2\u0546")
buf.write("\u0548\5l\67\2\u0547\u0546\3\2\2\2\u0547\u0548\3\2\2\2")
buf.write("\u0548\u0549\3\2\2\2\u0549\u054b\5H%\2\u054a\u0544\3\2")
buf.write("\2\2\u054a\u0545\3\2\2\2\u054bG\3\2\2\2\u054c\u054d\7")
buf.write("\u00fa\2\2\u054d\u0551\7.\2\2\u054e\u054f\7\u00fc\2\2")
buf.write("\u054f\u0551\7.\2\2\u0550\u054c\3\2\2\2\u0550\u054e\3")
buf.write("\2\2\2\u0551I\3\2\2\2\u0552\u05a5\5X-\2\u0553\u0554\7")
buf.write("\u0088\2\2\u0554\u055f\7\4\2\2\u0555\u0556\7\u00a9\2\2")
buf.write("\u0556\u0557\7\"\2\2\u0557\u055c\5^\60\2\u0558\u0559\7")
buf.write("\6\2\2\u0559\u055b\5^\60\2\u055a\u0558\3\2\2\2\u055b\u055e")
buf.write("\3\2\2\2\u055c\u055a\3\2\2\2\u055c\u055d\3\2\2\2\u055d")
buf.write("\u0560\3\2\2\2\u055e\u055c\3\2\2\2\u055f\u0555\3\2\2\2")
buf.write("\u055f\u0560\3\2\2\2\u0560\u056b\3\2\2\2\u0561\u0562\7")
buf.write("\u00a3\2\2\u0562\u0563\7\"\2\2\u0563\u0568\5(\25\2\u0564")
buf.write("\u0565\7\6\2\2\u0565\u0567\5(\25\2\u0566\u0564\3\2\2\2")
buf.write("\u0567\u056a\3\2\2\2\u0568\u0566\3\2\2\2\u0568\u0569\3")
buf.write("\2\2\2\u0569\u056c\3\2\2\2\u056a\u0568\3\2\2\2\u056b\u0561")
buf.write("\3\2\2\2\u056b\u056c\3\2\2\2\u056c\u0576\3\2\2\2\u056d")
buf.write("\u056e\7\u008a\2\2\u056e\u0573\5L\'\2\u056f\u0570\7\6")
buf.write("\2\2\u0570\u0572\5L\'\2\u0571\u056f\3\2\2\2\u0572\u0575")
buf.write("\3\2\2\2\u0573\u0571\3\2\2\2\u0573\u0574\3\2\2\2\u0574")
buf.write("\u0577\3\2\2\2\u0575\u0573\3\2\2\2\u0576\u056d\3\2\2\2")
buf.write("\u0576\u0577\3\2\2\2\u0577\u0579\3\2\2\2\u0578\u057a\5")
buf.write("N(\2\u0579\u0578\3\2\2\2\u0579\u057a\3\2\2\2\u057a\u057e")
buf.write("\3\2\2\2\u057b\u057c\7\25\2\2\u057c\u057d\7\u0085\2\2")
buf.write("\u057d\u057f\5R*\2\u057e\u057b\3\2\2\2\u057e\u057f\3\2")
buf.write("\2\2\u057f\u0581\3\2\2\2\u0580\u0582\t\17\2\2\u0581\u0580")
buf.write("\3\2\2\2\u0581\u0582\3\2\2\2\u0582\u0583\3\2\2\2\u0583")
buf.write("\u0584\7\u00ad\2\2\u0584\u0585\7\4\2\2\u0585\u0586\5\u0090")
buf.write("I\2\u0586\u0590\7\5\2\2\u0587\u0588\7\u00d7\2\2\u0588")
buf.write("\u058d\5T+\2\u0589\u058a\7\6\2\2\u058a\u058c\5T+\2\u058b")
buf.write("\u0589\3\2\2\2\u058c\u058f\3\2\2\2\u058d\u058b\3\2\2\2")
buf.write("\u058d\u058e\3\2\2\2\u058e\u0591\3\2\2\2\u058f\u058d\3")
buf.write("\2\2\2\u0590\u0587\3\2\2\2\u0590\u0591\3\2\2\2\u0591\u0592")
buf.write("\3\2\2\2\u0592\u0593\7D\2\2\u0593\u0598\5V,\2\u0594\u0595")
buf.write("\7\6\2\2\u0595\u0597\5V,\2\u0596\u0594\3\2\2\2\u0597\u059a")
buf.write("\3\2\2\2\u0598\u0596\3\2\2\2\u0598\u0599\3\2\2\2\u0599")
buf.write("\u059b\3\2\2\2\u059a\u0598\3\2\2\2\u059b\u05a3\7\5\2\2")
buf.write("\u059c\u059e\7\34\2\2\u059d\u059c\3\2\2\2\u059d\u059e")
buf.write("\3\2\2\2\u059e\u059f\3\2\2\2\u059f\u05a1\5\u00b2Z\2\u05a0")
buf.write("\u05a2\5Z.\2\u05a1\u05a0\3\2\2\2\u05a1\u05a2\3\2\2\2\u05a2")
buf.write("\u05a4\3\2\2\2\u05a3\u059d\3\2\2\2\u05a3\u05a4\3\2\2\2")
buf.write("\u05a4\u05a6\3\2\2\2\u05a5\u0553\3\2\2\2\u05a5\u05a6\3")
buf.write("\2\2\2\u05a6K\3\2\2\2\u05a7\u05a8\5^\60\2\u05a8\u05a9")
buf.write("\7\34\2\2\u05a9\u05aa\5\u00b2Z\2\u05aaM\3\2\2\2\u05ab")
buf.write("\u05ac\7\u009f\2\2\u05ac\u05ad\7\u00c6\2\2\u05ad\u05ae")
buf.write("\7\u00ae\2\2\u05ae\u05b7\7\u0085\2\2\u05af\u05b0\7\26")
buf.write("\2\2\u05b0\u05b1\7\u00c7\2\2\u05b1\u05b2\7\u00ae\2\2\u05b2")
buf.write("\u05b4\7\u0085\2\2\u05b3\u05b5\5P)\2\u05b4\u05b3\3\2\2")
buf.write("\2\u05b4\u05b5\3\2\2\2\u05b5\u05b7\3\2\2\2\u05b6\u05ab")
buf.write("\3\2\2\2\u05b6\u05af\3\2\2\2\u05b7O\3\2\2\2\u05b8\u05b9")
buf.write("\7\u00d3\2\2\u05b9\u05ba\7J\2\2\u05ba\u05c2\7\u0087\2")
buf.write("\2\u05bb\u05bc\7\u009c\2\2\u05bc\u05bd\7J\2\2\u05bd\u05c2")
buf.write("\7\u0087\2\2\u05be\u05bf\7\u00fa\2\2\u05bf\u05c0\7\u00ec")
buf.write("\2\2\u05c0\u05c2\7\u00c7\2\2\u05c1\u05b8\3\2\2\2\u05c1")
buf.write("\u05bb\3\2\2\2\u05c1\u05be\3\2\2\2\u05c2Q\3\2\2\2\u05c3")
buf.write("\u05c4\7\7\2\2\u05c4\u05c5\7\u00e2\2\2\u05c5\u05c6\7\u008f")
buf.write("\2\2\u05c6\u05d7\7\u00c6\2\2\u05c7\u05c8\7\7\2\2\u05c8")
buf.write("\u05c9\7\u00ab\2\2\u05c9\u05ca\7y\2\2\u05ca\u05d7\7\u00c6")
buf.write("\2\2\u05cb\u05cc\7\7\2\2\u05cc\u05cd\7\u00e2\2\2\u05cd")
buf.write("\u05ce\7X\2\2\u05ce\u05d7\5\u00b2Z\2\u05cf\u05d0\7\7\2")
buf.write("\2\u05d0\u05d1\7\u00e2\2\2\u05d1\u05d2\7y\2\2\u05d2\u05d7")
buf.write("\5\u00b2Z\2\u05d3\u05d4\7\7\2\2\u05d4\u05d5\7\u00e2\2")
buf.write("\2\u05d5\u05d7\5\u00b2Z\2\u05d6\u05c3\3\2\2\2\u05d6\u05c7")
buf.write("\3\2\2\2\u05d6\u05cb\3\2\2\2\u05d6\u05cf\3\2\2\2\u05d6")
buf.write("\u05d3\3\2\2\2\u05d7S\3\2\2\2\u05d8\u05d9\5\u00b2Z\2\u05d9")
buf.write("\u05da\7\u0101\2\2\u05da\u05db\7\4\2\2\u05db\u05e0\5\u00b2")
buf.write("Z\2\u05dc\u05dd\7\6\2\2\u05dd\u05df\5\u00b2Z\2\u05de\u05dc")
buf.write("\3\2\2\2\u05df\u05e2\3\2\2\2\u05e0\u05de\3\2\2\2\u05e0")
buf.write("\u05e1\3\2\2\2\u05e1\u05e3\3\2\2\2\u05e2\u05e0\3\2\2\2")
buf.write("\u05e3\u05e4\7\5\2\2\u05e4U\3\2\2\2\u05e5\u05e6\5\u00b2")
buf.write("Z\2\u05e6\u05e7\7\34\2\2\u05e7\u05e8\5^\60\2\u05e8W\3")
buf.write("\2\2\2\u05e9\u05f1\5\\/\2\u05ea\u05ec\7\34\2\2\u05eb\u05ea")
buf.write("\3\2\2\2\u05eb\u05ec\3\2\2\2\u05ec\u05ed\3\2\2\2\u05ed")
buf.write("\u05ef\5\u00b2Z\2\u05ee\u05f0\5Z.\2\u05ef\u05ee\3\2\2")
buf.write("\2\u05ef\u05f0\3\2\2\2\u05f0\u05f2\3\2\2\2\u05f1\u05eb")
buf.write("\3\2\2\2\u05f1\u05f2\3\2\2\2\u05f2Y\3\2\2\2\u05f3\u05f4")
buf.write("\7\4\2\2\u05f4\u05f9\5\u00b2Z\2\u05f5\u05f6\7\6\2\2\u05f6")
buf.write("\u05f8\5\u00b2Z\2\u05f7\u05f5\3\2\2\2\u05f8\u05fb\3\2")
buf.write("\2\2\u05f9\u05f7\3\2\2\2\u05f9\u05fa\3\2\2\2\u05fa\u05fc")
buf.write("\3\2\2\2\u05fb\u05f9\3\2\2\2\u05fc\u05fd\7\5\2\2\u05fd")
buf.write("[\3\2\2\2\u05fe\u0600\5\u00a6T\2\u05ff\u0601\5\u00a8U")
buf.write("\2\u0600\u05ff\3\2\2\2\u0600\u0601\3\2\2\2\u0601\u061f")
buf.write("\3\2\2\2\u0602\u0603\7\4\2\2\u0603\u0604\5\16\b\2\u0604")
buf.write("\u0605\7\5\2\2\u0605\u061f\3\2\2\2\u0606\u0607\7\u00ed")
buf.write("\2\2\u0607\u0608\7\4\2\2\u0608\u060d\5^\60\2\u0609\u060a")
buf.write("\7\6\2\2\u060a\u060c\5^\60\2\u060b\u0609\3\2\2\2\u060c")
buf.write("\u060f\3\2\2\2\u060d\u060b\3\2\2\2\u060d\u060e\3\2\2\2")
buf.write("\u060e\u0610\3\2\2\2\u060f\u060d\3\2\2\2\u0610\u0613\7")
buf.write("\5\2\2\u0611\u0612\7\u00fa\2\2\u0612\u0614\7\u00a4\2\2")
buf.write("\u0613\u0611\3\2\2\2\u0613\u0614\3\2\2\2\u0614\u061f\3")
buf.write("\2\2\2\u0615\u0616\7z\2\2\u0616\u0617\7\4\2\2\u0617\u0618")
buf.write("\5\16\b\2\u0618\u0619\7\5\2\2\u0619\u061f\3\2\2\2\u061a")
buf.write("\u061b\7\4\2\2\u061b\u061c\5<\37\2\u061c\u061d\7\5\2\2")
buf.write("\u061d\u061f\3\2\2\2\u061e\u05fe\3\2\2\2\u061e\u0602\3")
buf.write("\2\2\2\u061e\u0606\3\2\2\2\u061e\u0615\3\2\2\2\u061e\u061a")
buf.write("\3\2\2\2\u061f]\3\2\2\2\u0620\u0621\5`\61\2\u0621_\3\2")
buf.write("\2\2\u0622\u0623\b\61\1\2\u0623\u0625\5d\63\2\u0624\u0626")
buf.write("\5b\62\2\u0625\u0624\3\2\2\2\u0625\u0626\3\2\2\2\u0626")
buf.write("\u062a\3\2\2\2\u0627\u0628\7\u0097\2\2\u0628\u062a\5`")
buf.write("\61\5\u0629\u0622\3\2\2\2\u0629\u0627\3\2\2\2\u062a\u0633")
buf.write("\3\2\2\2\u062b\u062c\f\4\2\2\u062c\u062d\7\31\2\2\u062d")
buf.write("\u0632\5`\61\5\u062e\u062f\f\3\2\2\u062f\u0630\7\u00a2")
buf.write("\2\2\u0630\u0632\5`\61\4\u0631\u062b\3\2\2\2\u0631\u062e")
buf.write("\3\2\2\2\u0632\u0635\3\2\2\2\u0633\u0631\3\2\2\2\u0633")
buf.write("\u0634\3\2\2\2\u0634a\3\2\2\2\u0635\u0633\3\2\2\2\u0636")
buf.write("\u0637\5p9\2\u0637\u0638\5d\63\2\u0638\u0674\3\2\2\2\u0639")
buf.write("\u063a\5p9\2\u063a\u063b\5r:\2\u063b\u063c\7\4\2\2\u063c")
buf.write("\u063d\5\16\b\2\u063d\u063e\7\5\2\2\u063e\u0674\3\2\2")
buf.write("\2\u063f\u0641\7\u0097\2\2\u0640\u063f\3\2\2\2\u0640\u0641")
buf.write("\3\2\2\2\u0641\u0642\3\2\2\2\u0642\u0643\7!\2\2\u0643")
buf.write("\u0644\5d\63\2\u0644\u0645\7\31\2\2\u0645\u0646\5d\63")
buf.write("\2\u0646\u0674\3\2\2\2\u0647\u0649\7\u0097\2\2\u0648\u0647")
buf.write("\3\2\2\2\u0648\u0649\3\2\2\2\u0649\u064a\3\2\2\2\u064a")
buf.write("\u064b\7j\2\2\u064b\u064c\7\4\2\2\u064c\u0651\5^\60\2")
buf.write("\u064d\u064e\7\6\2\2\u064e\u0650\5^\60\2\u064f\u064d\3")
buf.write("\2\2\2\u0650\u0653\3\2\2\2\u0651\u064f\3\2\2\2\u0651\u0652")
buf.write("\3\2\2\2\u0652\u0654\3\2\2\2\u0653\u0651\3\2\2\2\u0654")
buf.write("\u0655\7\5\2\2\u0655\u0674\3\2\2\2\u0656\u0658\7\u0097")
buf.write("\2\2\u0657\u0656\3\2\2\2\u0657\u0658\3\2\2\2\u0658\u0659")
buf.write("\3\2\2\2\u0659\u065a\7j\2\2\u065a\u065b\7\4\2\2\u065b")
buf.write("\u065c\5\16\b\2\u065c\u065d\7\5\2\2\u065d\u0674\3\2\2")
buf.write("\2\u065e\u0660\7\u0097\2\2\u065f\u065e\3\2\2\2\u065f\u0660")
buf.write("\3\2\2\2\u0660\u0661\3\2\2\2\u0661\u0662\7}\2\2\u0662")
buf.write("\u0665\5d\63\2\u0663\u0664\7M\2\2\u0664\u0666\5d\63\2")
buf.write("\u0665\u0663\3\2\2\2\u0665\u0666\3\2\2\2\u0666\u0674\3")
buf.write("\2\2\2\u0667\u0669\7u\2\2\u0668\u066a\7\u0097\2\2\u0669")
buf.write("\u0668\3\2\2\2\u0669\u066a\3\2\2\2\u066a\u066b\3\2\2\2")
buf.write("\u066b\u0674\7\u0098\2\2\u066c\u066e\7u\2\2\u066d\u066f")
buf.write("\7\u0097\2\2\u066e\u066d\3\2\2\2\u066e\u066f\3\2\2\2\u066f")
buf.write("\u0670\3\2\2\2\u0670\u0671\7E\2\2\u0671\u0672\7\\\2\2")
buf.write("\u0672\u0674\5d\63\2\u0673\u0636\3\2\2\2\u0673\u0639\3")
buf.write("\2\2\2\u0673\u0640\3\2\2\2\u0673\u0648\3\2\2\2\u0673\u0657")
buf.write("\3\2\2\2\u0673\u065f\3\2\2\2\u0673\u0667\3\2\2\2\u0673")
buf.write("\u066c\3\2\2\2\u0674c\3\2\2\2\u0675\u0676\b\63\1\2\u0676")
buf.write("\u067a\5f\64\2\u0677\u0678\t\20\2\2\u0678\u067a\5d\63")
buf.write("\6\u0679\u0675\3\2\2\2\u0679\u0677\3\2\2\2\u067a\u0689")
buf.write("\3\2\2\2\u067b\u067c\f\5\2\2\u067c\u067d\t\21\2\2\u067d")
buf.write("\u0688\5d\63\6\u067e\u067f\f\4\2\2\u067f\u0680\t\20\2")
buf.write("\2\u0680\u0688\5d\63\5\u0681\u0682\f\3\2\2\u0682\u0683")
buf.write("\7\u010c\2\2\u0683\u0688\5d\63\4\u0684\u0685\f\7\2\2\u0685")
buf.write("\u0686\7\36\2\2\u0686\u0688\5n8\2\u0687\u067b\3\2\2\2")
buf.write("\u0687\u067e\3\2\2\2\u0687\u0681\3\2\2\2\u0687\u0684\3")
buf.write("\2\2\2\u0688\u068b\3\2\2\2\u0689\u0687\3\2\2\2\u0689\u068a")
buf.write("\3\2\2\2\u068ae\3\2\2\2\u068b\u0689\3\2\2\2\u068c\u068d")
buf.write("\b\64\1\2\u068d\u07ad\7\u0098\2\2\u068e\u07ad\5v<\2\u068f")
buf.write("\u0690\5\u00b2Z\2\u0690\u0691\5l\67\2\u0691\u07ad\3\2")
buf.write("\2\2\u0692\u0693\7G\2\2\u0693\u0694\7\u00b2\2\2\u0694")
buf.write("\u07ad\5l\67\2\u0695\u07ad\5\u00b4[\2\u0696\u07ad\5t;")
buf.write("\2\u0697\u07ad\5l\67\2\u0698\u07ad\7\u0110\2\2\u0699\u07ad")
buf.write("\7\u010d\2\2\u069a\u069b\7\u00b0\2\2\u069b\u069c\7\4\2")
buf.write("\2\u069c\u069d\5d\63\2\u069d\u069e\7j\2\2\u069e\u069f")
buf.write("\5d\63\2\u069f\u06a0\7\5\2\2\u06a0\u07ad\3\2\2\2\u06a1")
buf.write("\u06a2\7\4\2\2\u06a2\u06a5\5^\60\2\u06a3\u06a4\7\6\2\2")
buf.write("\u06a4\u06a6\5^\60\2\u06a5\u06a3\3\2\2\2\u06a6\u06a7\3")
buf.write("\2\2\2\u06a7\u06a5\3\2\2\2\u06a7\u06a8\3\2\2\2\u06a8\u06a9")
buf.write("\3\2\2\2\u06a9\u06aa\7\5\2\2\u06aa\u07ad\3\2\2\2\u06ab")
buf.write("\u06ac\7\u00c6\2\2\u06ac\u06ad\7\4\2\2\u06ad\u06b2\5^")
buf.write("\60\2\u06ae\u06af\7\6\2\2\u06af\u06b1\5^\60\2\u06b0\u06ae")
buf.write("\3\2\2\2\u06b1\u06b4\3\2\2\2\u06b2\u06b0\3\2\2\2\u06b2")
buf.write("\u06b3\3\2\2\2\u06b3\u06b5\3\2\2\2\u06b4\u06b2\3\2\2\2")
buf.write("\u06b5\u06b6\7\5\2\2\u06b6\u07ad\3\2\2\2\u06b7\u06b8\7")
buf.write("\177\2\2\u06b8\u06ba\7\4\2\2\u06b9\u06bb\58\35\2\u06ba")
buf.write("\u06b9\3\2\2\2\u06ba\u06bb\3\2\2\2\u06bb\u06bc\3\2\2\2")
buf.write("\u06bc\u06bf\5^\60\2\u06bd\u06be\7\6\2\2\u06be\u06c0\5")
buf.write("l\67\2\u06bf\u06bd\3\2\2\2\u06bf\u06c0\3\2\2\2\u06c0\u06c4")
buf.write("\3\2\2\2\u06c1\u06c2\7\u009e\2\2\u06c2\u06c3\7\u00a8\2")
buf.write("\2\u06c3\u06c5\5F$\2\u06c4\u06c1\3\2\2\2\u06c4\u06c5\3")
buf.write("\2\2\2\u06c5\u06c6\3\2\2\2\u06c6\u06c7\7\5\2\2\u06c7\u06c8")
buf.write("\7\u00fb\2\2\u06c8\u06c9\7c\2\2\u06c9\u06ca\7\4\2\2\u06ca")
buf.write("\u06cb\7\u00a3\2\2\u06cb\u06cc\7\"\2\2\u06cc\u06d1\5(")
buf.write("\25\2\u06cd\u06ce\7\6\2\2\u06ce\u06d0\5(\25\2\u06cf\u06cd")
buf.write("\3\2\2\2\u06d0\u06d3\3\2\2\2\u06d1\u06cf\3\2\2\2\u06d1")
buf.write("\u06d2\3\2\2\2\u06d2\u06d4\3\2\2\2\u06d3\u06d1\3\2\2\2")
buf.write("\u06d4\u06d5\7\5\2\2\u06d5\u07ad\3\2\2\2\u06d6\u06d8\5")
buf.write("h\65\2\u06d7\u06d6\3\2\2\2\u06d7\u06d8\3\2\2\2\u06d8\u06d9")
buf.write("\3\2\2\2\u06d9\u06da\5\u00a6T\2\u06da\u06de\7\4\2\2\u06db")
buf.write("\u06dc\5\u00b2Z\2\u06dc\u06dd\7\3\2\2\u06dd\u06df\3\2")
buf.write("\2\2\u06de\u06db\3\2\2\2\u06de\u06df\3\2\2\2\u06df\u06e0")
buf.write("\3\2\2\2\u06e0\u06e1\7\u0109\2\2\u06e1\u06e3\7\5\2\2\u06e2")
buf.write("\u06e4\5\u0084C\2\u06e3\u06e2\3\2\2\2\u06e3\u06e4\3\2")
buf.write("\2\2\u06e4\u06e6\3\2\2\2\u06e5\u06e7\5\u0088E\2\u06e6")
buf.write("\u06e5\3\2\2\2\u06e6\u06e7\3\2\2\2\u06e7\u07ad\3\2\2\2")
buf.write("\u06e8\u06ea\5h\65\2\u06e9\u06e8\3\2\2\2\u06e9\u06ea\3")
buf.write("\2\2\2\u06ea\u06eb\3\2\2\2\u06eb\u06ec\5\u00a6T\2\u06ec")
buf.write("\u06f8\7\4\2\2\u06ed\u06ef\58\35\2\u06ee\u06ed\3\2\2\2")
buf.write("\u06ee\u06ef\3\2\2\2\u06ef\u06f0\3\2\2\2\u06f0\u06f5\5")
buf.write("^\60\2\u06f1\u06f2\7\6\2\2\u06f2\u06f4\5^\60\2\u06f3\u06f1")
buf.write("\3\2\2\2\u06f4\u06f7\3\2\2\2\u06f5\u06f3\3\2\2\2\u06f5")
buf.write("\u06f6\3\2\2\2\u06f6\u06f9\3\2\2\2\u06f7\u06f5\3\2\2\2")
buf.write("\u06f8\u06ee\3\2\2\2\u06f8\u06f9\3\2\2\2\u06f9\u0704\3")
buf.write("\2\2\2\u06fa\u06fb\7\u00a3\2\2\u06fb\u06fc\7\"\2\2\u06fc")
buf.write("\u0701\5(\25\2\u06fd\u06fe\7\6\2\2\u06fe\u0700\5(\25\2")
buf.write("\u06ff\u06fd\3\2\2\2\u0700\u0703\3\2\2\2\u0701\u06ff\3")
buf.write("\2\2\2\u0701\u0702\3\2\2\2\u0702\u0705\3\2\2\2\u0703\u0701")
buf.write("\3\2\2\2\u0704\u06fa\3\2\2\2\u0704\u0705\3\2\2\2\u0705")
buf.write("\u0706\3\2\2\2\u0706\u0708\7\5\2\2\u0707\u0709\5\u0084")
buf.write("C\2\u0708\u0707\3\2\2\2\u0708\u0709\3\2\2\2\u0709\u070e")
buf.write("\3\2\2\2\u070a\u070c\5j\66\2\u070b\u070a\3\2\2\2\u070b")
buf.write("\u070c\3\2\2\2\u070c\u070d\3\2\2\2\u070d\u070f\5\u0088")
buf.write("E\2\u070e\u070b\3\2\2\2\u070e\u070f\3\2\2\2\u070f\u07ad")
buf.write("\3\2\2\2\u0710\u0711\5\u00b2Z\2\u0711\u0712\5\u0088E\2")
buf.write("\u0712\u07ad\3\2\2\2\u0713\u0714\5\u00b2Z\2\u0714\u0715")
buf.write("\7\b\2\2\u0715\u0716\5^\60\2\u0716\u07ad\3\2\2\2\u0717")
buf.write("\u0720\7\4\2\2\u0718\u071d\5\u00b2Z\2\u0719\u071a\7\6")
buf.write("\2\2\u071a\u071c\5\u00b2Z\2\u071b\u0719\3\2\2\2\u071c")
buf.write("\u071f\3\2\2\2\u071d\u071b\3\2\2\2\u071d\u071e\3\2\2\2")
buf.write("\u071e\u0721\3\2\2\2\u071f\u071d\3\2\2\2\u0720\u0718\3")
buf.write("\2\2\2\u0720\u0721\3\2\2\2\u0721\u0722\3\2\2\2\u0722\u0723")
buf.write("\7\5\2\2\u0723\u0724\7\b\2\2\u0724\u07ad\5^\60\2\u0725")
buf.write("\u0726\7\4\2\2\u0726\u0727\5\16\b\2\u0727\u0728\7\5\2")
buf.write("\2\u0728\u07ad\3\2\2\2\u0729\u072a\7Q\2\2\u072a\u072b")
buf.write("\7\4\2\2\u072b\u072c\5\16\b\2\u072c\u072d\7\5\2\2\u072d")
buf.write("\u07ad\3\2\2\2\u072e\u072f\7%\2\2\u072f\u0731\5^\60\2")
buf.write("\u0730\u0732\5\u0082B\2\u0731\u0730\3\2\2\2\u0732\u0733")
buf.write("\3\2\2\2\u0733\u0731\3\2\2\2\u0733\u0734\3\2\2\2\u0734")
buf.write("\u0737\3\2\2\2\u0735\u0736\7I\2\2\u0736\u0738\5^\60\2")
buf.write("\u0737\u0735\3\2\2\2\u0737\u0738\3\2\2\2\u0738\u0739\3")
buf.write("\2\2\2\u0739\u073a\7K\2\2\u073a\u07ad\3\2\2\2\u073b\u073d")
buf.write("\7%\2\2\u073c\u073e\5\u0082B\2\u073d\u073c\3\2\2\2\u073e")
buf.write("\u073f\3\2\2\2\u073f\u073d\3\2\2\2\u073f\u0740\3\2\2\2")
buf.write("\u0740\u0743\3\2\2\2\u0741\u0742\7I\2\2\u0742\u0744\5")
buf.write("^\60\2\u0743\u0741\3\2\2\2\u0743\u0744\3\2\2\2\u0744\u0745")
buf.write("\3\2\2\2\u0745\u0746\7K\2\2\u0746\u07ad\3\2\2\2\u0747")
buf.write("\u0748\7&\2\2\u0748\u0749\7\4\2\2\u0749\u074a\5^\60\2")
buf.write("\u074a\u074b\7\34\2\2\u074b\u074c\5|?\2\u074c\u074d\7")
buf.write("\5\2\2\u074d\u07ad\3\2\2\2\u074e\u074f\7\u00e6\2\2\u074f")
buf.write("\u0750\7\4\2\2\u0750\u0751\5^\60\2\u0751\u0752\7\34\2")
buf.write("\2\u0752\u0753\5|?\2\u0753\u0754\7\5\2\2\u0754\u07ad\3")
buf.write("\2\2\2\u0755\u0756\7\33\2\2\u0756\u075f\7\t\2\2\u0757")
buf.write("\u075c\5^\60\2\u0758\u0759\7\6\2\2\u0759\u075b\5^\60\2")
buf.write("\u075a\u0758\3\2\2\2\u075b\u075e\3\2\2\2\u075c\u075a\3")
buf.write("\2\2\2\u075c\u075d\3\2\2\2\u075d\u0760\3\2\2\2\u075e\u075c")
buf.write("\3\2\2\2\u075f\u0757\3\2\2\2\u075f\u0760\3\2\2\2\u0760")
buf.write("\u0761\3\2\2\2\u0761\u07ad\7\n\2\2\u0762\u07ad\5\u00b2")
buf.write("Z\2\u0763\u07ad\7\64\2\2\u0764\u0768\78\2\2\u0765\u0766")
buf.write("\7\4\2\2\u0766\u0767\7\u0111\2\2\u0767\u0769\7\5\2\2\u0768")
buf.write("\u0765\3\2\2\2\u0768\u0769\3\2\2\2\u0769\u07ad\3\2\2\2")
buf.write("\u076a\u076e\79\2\2\u076b\u076c\7\4\2\2\u076c\u076d\7")
buf.write("\u0111\2\2\u076d\u076f\7\5\2\2\u076e\u076b\3\2\2\2\u076e")
buf.write("\u076f\3\2\2\2\u076f\u07ad\3\2\2\2\u0770\u0774\7\u0081")
buf.write("\2\2\u0771\u0772\7\4\2\2\u0772\u0773\7\u0111\2\2\u0773")
buf.write("\u0775\7\5\2\2\u0774\u0771\3\2\2\2\u0774\u0775\3\2\2\2")
buf.write("\u0775\u07ad\3\2\2\2\u0776\u077a\7\u0082\2\2\u0777\u0778")
buf.write("\7\4\2\2\u0778\u0779\7\u0111\2\2\u0779\u077b\7\5\2\2\u077a")
buf.write("\u0777\3\2\2\2\u077a\u077b\3\2\2\2\u077b\u07ad\3\2\2\2")
buf.write("\u077c\u07ad\7:\2\2\u077d\u07ad\7\63\2\2\u077e\u07ad\7")
buf.write("\67\2\2\u077f\u07ad\7\65\2\2\u0780\u0781\7\u00d8\2\2\u0781")
buf.write("\u0782\7\4\2\2\u0782\u0783\5d\63\2\u0783\u0784\7\\\2\2")
buf.write("\u0784\u0787\5d\63\2\u0785\u0786\7Z\2\2\u0786\u0788\5")
buf.write("d\63\2\u0787\u0785\3\2\2\2\u0787\u0788\3\2\2\2\u0788\u0789")
buf.write("\3\2\2\2\u0789\u078a\7\5\2\2\u078a\u07ad\3\2\2\2\u078b")
buf.write("\u078c\7\u0096\2\2\u078c\u078d\7\4\2\2\u078d\u0790\5d")
buf.write("\63\2\u078e\u078f\7\6\2\2\u078f\u0791\5z>\2\u0790\u078e")
buf.write("\3\2\2\2\u0790\u0791\3\2\2\2\u0791\u0792\3\2\2\2\u0792")
buf.write("\u0793\7\5\2\2\u0793\u07ad\3\2\2\2\u0794\u0795\7S\2\2")
buf.write("\u0795\u0796\7\4\2\2\u0796\u0797\5\u00b2Z\2\u0797\u0798")
buf.write("\7\\\2\2\u0798\u0799\5d\63\2\u0799\u079a\7\5\2\2\u079a")
buf.write("\u07ad\3\2\2\2\u079b\u079c\7\4\2\2\u079c\u079d\5^\60\2")
buf.write("\u079d\u079e\7\5\2\2\u079e\u07ad\3\2\2\2\u079f\u07a0\7")
buf.write("d\2\2\u07a0\u07a9\7\4\2\2\u07a1\u07a6\5\u00a6T\2\u07a2")
buf.write("\u07a3\7\6\2\2\u07a3\u07a5\5\u00a6T\2\u07a4\u07a2\3\2")
buf.write("\2\2\u07a5\u07a8\3\2\2\2\u07a6\u07a4\3\2\2\2\u07a6\u07a7")
buf.write("\3\2\2\2\u07a7\u07aa\3\2\2\2\u07a8\u07a6\3\2\2\2\u07a9")
buf.write("\u07a1\3\2\2\2\u07a9\u07aa\3\2\2\2\u07aa\u07ab\3\2\2\2")
buf.write("\u07ab\u07ad\7\5\2\2\u07ac\u068c\3\2\2\2\u07ac\u068e\3")
buf.write("\2\2\2\u07ac\u068f\3\2\2\2\u07ac\u0692\3\2\2\2\u07ac\u0695")
buf.write("\3\2\2\2\u07ac\u0696\3\2\2\2\u07ac\u0697\3\2\2\2\u07ac")
buf.write("\u0698\3\2\2\2\u07ac\u0699\3\2\2\2\u07ac\u069a\3\2\2\2")
buf.write("\u07ac\u06a1\3\2\2\2\u07ac\u06ab\3\2\2\2\u07ac\u06b7\3")
buf.write("\2\2\2\u07ac\u06d7\3\2\2\2\u07ac\u06e9\3\2\2\2\u07ac\u0710")
buf.write("\3\2\2\2\u07ac\u0713\3\2\2\2\u07ac\u0717\3\2\2\2\u07ac")
buf.write("\u0725\3\2\2\2\u07ac\u0729\3\2\2\2\u07ac\u072e\3\2\2\2")
buf.write("\u07ac\u073b\3\2\2\2\u07ac\u0747\3\2\2\2\u07ac\u074e\3")
buf.write("\2\2\2\u07ac\u0755\3\2\2\2\u07ac\u0762\3\2\2\2\u07ac\u0763")
buf.write("\3\2\2\2\u07ac\u0764\3\2\2\2\u07ac\u076a\3\2\2\2\u07ac")
buf.write("\u0770\3\2\2\2\u07ac\u0776\3\2\2\2\u07ac\u077c\3\2\2\2")
buf.write("\u07ac\u077d\3\2\2\2\u07ac\u077e\3\2\2\2\u07ac\u077f\3")
buf.write("\2\2\2\u07ac\u0780\3\2\2\2\u07ac\u078b\3\2\2\2\u07ac\u0794")
buf.write("\3\2\2\2\u07ac\u079b\3\2\2\2\u07ac\u079f\3\2\2\2\u07ad")
buf.write("\u07b8\3\2\2\2\u07ae\u07af\f\23\2\2\u07af\u07b0\7\t\2")
buf.write("\2\u07b0\u07b1\5d\63\2\u07b1\u07b2\7\n\2\2\u07b2\u07b7")
buf.write("\3\2\2\2\u07b3\u07b4\f\21\2\2\u07b4\u07b5\7\3\2\2\u07b5")
buf.write("\u07b7\5\u00b2Z\2\u07b6\u07ae\3\2\2\2\u07b6\u07b3\3\2")
buf.write("\2\2\u07b7\u07ba\3\2\2\2\u07b8\u07b6\3\2\2\2\u07b8\u07b9")
buf.write("\3\2\2\2\u07b9g\3\2\2\2\u07ba\u07b8\3\2\2\2\u07bb\u07bc")
buf.write("\t\22\2\2\u07bci\3\2\2\2\u07bd\u07be\7i\2\2\u07be\u07c2")
buf.write("\7\u009a\2\2\u07bf\u07c0\7\u00be\2\2\u07c0\u07c2\7\u009a")
buf.write("\2\2\u07c1\u07bd\3\2\2\2\u07c1\u07bf\3\2\2\2\u07c2k\3")
buf.write("\2\2\2\u07c3\u07ca\7\u010e\2\2\u07c4\u07c7\7\u010f\2\2")
buf.write("\u07c5\u07c6\7\u00e8\2\2\u07c6\u07c8\7\u010e\2\2\u07c7")
buf.write("\u07c5\3\2\2\2\u07c7\u07c8\3\2\2\2\u07c8\u07ca\3\2\2\2")
buf.write("\u07c9\u07c3\3\2\2\2\u07c9\u07c4\3\2\2\2\u07cam\3\2\2")
buf.write("\2\u07cb\u07cc\7\u00e0\2\2\u07cc\u07cd\7\u0100\2\2\u07cd")
buf.write("\u07d2\5v<\2\u07ce\u07cf\7\u00e0\2\2\u07cf\u07d0\7\u0100")
buf.write("\2\2\u07d0\u07d2\5l\67\2\u07d1\u07cb\3\2\2\2\u07d1\u07ce")
buf.write("\3\2\2\2\u07d2o\3\2\2\2\u07d3\u07d4\t\23\2\2\u07d4q\3")
buf.write("\2\2\2\u07d5\u07d6\t\24\2\2\u07d6s\3\2\2\2\u07d7\u07d8")
buf.write("\t\25\2\2\u07d8u\3\2\2\2\u07d9\u07db\7q\2\2\u07da\u07dc")
buf.write("\t\20\2\2\u07db\u07da\3\2\2\2\u07db\u07dc\3\2\2\2\u07dc")
buf.write("\u07dd\3\2\2\2\u07dd\u07de\5l\67\2\u07de\u07e1\5x=\2\u07df")
buf.write("\u07e0\7\u00e2\2\2\u07e0\u07e2\5x=\2\u07e1\u07df\3\2\2")
buf.write("\2\u07e1\u07e2\3\2\2\2\u07e2w\3\2\2\2\u07e3\u07e4\t\26")
buf.write("\2\2\u07e4y\3\2\2\2\u07e5\u07e6\t\27\2\2\u07e6{\3\2\2")
buf.write("\2\u07e7\u07e8\b?\1\2\u07e8\u07e9\7\u00c6\2\2\u07e9\u07ea")
buf.write("\7\4\2\2\u07ea\u07ef\5~@\2\u07eb\u07ec\7\6\2\2\u07ec\u07ee")
buf.write("\5~@\2\u07ed\u07eb\3\2\2\2\u07ee\u07f1\3\2\2\2\u07ef\u07ed")
buf.write("\3\2\2\2\u07ef\u07f0\3\2\2\2\u07f0\u07f2\3\2\2\2\u07f1")
buf.write("\u07ef\3\2\2\2\u07f2\u07f3\7\5\2\2\u07f3\u0843\3\2\2\2")
buf.write("\u07f4\u07f5\7q\2\2\u07f5\u07f8\5x=\2\u07f6\u07f7\7\u00e2")
buf.write("\2\2\u07f7\u07f9\5x=\2\u07f8\u07f6\3\2\2\2\u07f8\u07f9")
buf.write("\3\2\2\2\u07f9\u0843\3\2\2\2\u07fa\u07ff\7\u00e1\2\2\u07fb")
buf.write("\u07fc\7\4\2\2\u07fc\u07fd\5\u0080A\2\u07fd\u07fe\7\5")
buf.write("\2\2\u07fe\u0800\3\2\2\2\u07ff\u07fb\3\2\2\2\u07ff\u0800")
buf.write("\3\2\2\2\u0800\u0804\3\2\2\2\u0801\u0802\7\u00fc\2\2\u0802")
buf.write("\u0803\7\u00e0\2\2\u0803\u0805\7\u0100\2\2\u0804\u0801")
buf.write("\3\2\2\2\u0804\u0805\3\2\2\2\u0805\u0843\3\2\2\2\u0806")
buf.write("\u080b\7\u00e1\2\2\u0807\u0808\7\4\2\2\u0808\u0809\5\u0080")
buf.write("A\2\u0809\u080a\7\5\2\2\u080a\u080c\3\2\2\2\u080b\u0807")
buf.write("\3\2\2\2\u080b\u080c\3\2\2\2\u080c\u080d\3\2\2\2\u080d")
buf.write("\u080e\7\u00fa\2\2\u080e\u080f\7\u00e0\2\2\u080f\u0843")
buf.write("\7\u0100\2\2\u0810\u0815\7\u00e0\2\2\u0811\u0812\7\4\2")
buf.write("\2\u0812\u0813\5\u0080A\2\u0813\u0814\7\5\2\2\u0814\u0816")
buf.write("\3\2\2\2\u0815\u0811\3\2\2\2\u0815\u0816\3\2\2\2\u0816")
buf.write("\u081a\3\2\2\2\u0817\u0818\7\u00fc\2\2\u0818\u0819\7\u00e0")
buf.write("\2\2\u0819\u081b\7\u0100\2\2\u081a\u0817\3\2\2\2\u081a")
buf.write("\u081b\3\2\2\2\u081b\u0843\3\2\2\2\u081c\u0821\7\u00e0")
buf.write("\2\2\u081d\u081e\7\4\2\2\u081e\u081f\5\u0080A\2\u081f")
buf.write("\u0820\7\5\2\2\u0820\u0822\3\2\2\2\u0821\u081d\3\2\2\2")
buf.write("\u0821\u0822\3\2\2\2\u0822\u0823\3\2\2\2\u0823\u0824\7")
buf.write("\u00fa\2\2\u0824\u0825\7\u00e0\2\2\u0825\u0843\7\u0100")
buf.write("\2\2\u0826\u0827\7G\2\2\u0827\u0843\7\u00b2\2\2\u0828")
buf.write("\u0829\7\33\2\2\u0829\u082a\7\u0103\2\2\u082a\u082b\5")
buf.write("|?\2\u082b\u082c\7\u0105\2\2\u082c\u0843\3\2\2\2\u082d")
buf.write("\u082e\7\u0084\2\2\u082e\u082f\7\u0103\2\2\u082f\u0830")
buf.write("\5|?\2\u0830\u0831\7\6\2\2\u0831\u0832\5|?\2\u0832\u0833")
buf.write("\7\u0105\2\2\u0833\u0843\3\2\2\2\u0834\u0840\5\u00b2Z")
buf.write("\2\u0835\u0836\7\4\2\2\u0836\u083b\5\u0080A\2\u0837\u0838")
buf.write("\7\6\2\2\u0838\u083a\5\u0080A\2\u0839\u0837\3\2\2\2\u083a")
buf.write("\u083d\3\2\2\2\u083b\u0839\3\2\2\2\u083b\u083c\3\2\2\2")
buf.write("\u083c\u083e\3\2\2\2\u083d\u083b\3\2\2\2\u083e\u083f\7")
buf.write("\5\2\2\u083f\u0841\3\2\2\2\u0840\u0835\3\2\2\2\u0840\u0841")
buf.write("\3\2\2\2\u0841\u0843\3\2\2\2\u0842\u07e7\3\2\2\2\u0842")
buf.write("\u07f4\3\2\2\2\u0842\u07fa\3\2\2\2\u0842\u0806\3\2\2\2")
buf.write("\u0842\u0810\3\2\2\2\u0842\u081c\3\2\2\2\u0842\u0826\3")
buf.write("\2\2\2\u0842\u0828\3\2\2\2\u0842\u082d\3\2\2\2\u0842\u0834")
buf.write("\3\2\2\2\u0843\u084d\3\2\2\2\u0844\u0845\f\4\2\2\u0845")
buf.write("\u0849\7\33\2\2\u0846\u0847\7\t\2\2\u0847\u0848\7\u0111")
buf.write("\2\2\u0848\u084a\7\n\2\2\u0849\u0846\3\2\2\2\u0849\u084a")
buf.write("\3\2\2\2\u084a\u084c\3\2\2\2\u084b\u0844\3\2\2\2\u084c")
buf.write("\u084f\3\2\2\2\u084d\u084b\3\2\2\2\u084d\u084e\3\2\2\2")
buf.write("\u084e}\3\2\2\2\u084f\u084d\3\2\2\2\u0850\u0855\5|?\2")
buf.write("\u0851\u0852\5\u00b2Z\2\u0852\u0853\5|?\2\u0853\u0855")
buf.write("\3\2\2\2\u0854\u0850\3\2\2\2\u0854\u0851\3\2\2\2\u0855")
buf.write("\177\3\2\2\2\u0856\u0859\7\u0111\2\2\u0857\u0859\5|?\2")
buf.write("\u0858\u0856\3\2\2\2\u0858\u0857\3\2\2\2\u0859\u0081\3")
buf.write("\2\2\2\u085a\u085b\7\u00f7\2\2\u085b\u085c\5^\60\2\u085c")
buf.write("\u085d\7\u00de\2\2\u085d\u085e\5^\60\2\u085e\u0083\3\2")
buf.write("\2\2\u085f\u0860\7V\2\2\u0860\u0861\7\4\2\2\u0861\u0862")
buf.write("\7\u00f8\2\2\u0862\u0863\5`\61\2\u0863\u0864\7\5\2\2\u0864")
buf.write("\u0085\3\2\2\2\u0865\u0866\7\u00f7\2\2\u0866\u0869\7\u0086")
buf.write("\2\2\u0867\u0868\7\31\2\2\u0868\u086a\5^\60\2\u0869\u0867")
buf.write("\3\2\2\2\u0869\u086a\3\2\2\2\u086a\u086b\3\2\2\2\u086b")
buf.write("\u086c\7\u00de\2\2\u086c\u086d\7\u00ee\2\2\u086d\u086e")
buf.write("\7\u00d1\2\2\u086e\u086f\5\u00b2Z\2\u086f\u0870\7\u0101")
buf.write("\2\2\u0870\u0878\5^\60\2\u0871\u0872\7\6\2\2\u0872\u0873")
buf.write("\5\u00b2Z\2\u0873\u0874\7\u0101\2\2\u0874\u0875\5^\60")
buf.write("\2\u0875\u0877\3\2\2\2\u0876\u0871\3\2\2\2\u0877\u087a")
buf.write("\3\2\2\2\u0878\u0876\3\2\2\2\u0878\u0879\3\2\2\2\u0879")
buf.write("\u08a6\3\2\2\2\u087a\u0878\3\2\2\2\u087b\u087c\7\u00f7")
buf.write("\2\2\u087c\u087f\7\u0086\2\2\u087d\u087e\7\31\2\2\u087e")
buf.write("\u0880\5^\60\2\u087f\u087d\3\2\2\2\u087f\u0880\3\2\2\2")
buf.write("\u0880\u0881\3\2\2\2\u0881\u0882\7\u00de\2\2\u0882\u08a6")
buf.write("\7@\2\2\u0883\u0884\7\u00f7\2\2\u0884\u0885\7\u0097\2")
buf.write("\2\u0885\u0888\7\u0086\2\2\u0886\u0887\7\31\2\2\u0887")
buf.write("\u0889\5^\60\2\u0888\u0886\3\2\2\2\u0888\u0889\3\2\2\2")
buf.write("\u0889\u088a\3\2\2\2\u088a\u088b\7\u00de\2\2\u088b\u0897")
buf.write("\7o\2\2\u088c\u088d\7\4\2\2\u088d\u0892\5\u00b2Z\2\u088e")
buf.write("\u088f\7\6\2\2\u088f\u0891\5\u00b2Z\2\u0890\u088e\3\2")
buf.write("\2\2\u0891\u0894\3\2\2\2\u0892\u0890\3\2\2\2\u0892\u0893")
buf.write("\3\2\2\2\u0893\u0895\3\2\2\2\u0894\u0892\3\2\2\2\u0895")
buf.write("\u0896\7\5\2\2\u0896\u0898\3\2\2\2\u0897\u088c\3\2\2\2")
buf.write("\u0897\u0898\3\2\2\2\u0898\u0899\3\2\2\2\u0899\u089a\7")
buf.write("\u00f3\2\2\u089a\u089b\7\4\2\2\u089b\u08a0\5^\60\2\u089c")
buf.write("\u089d\7\6\2\2\u089d\u089f\5^\60\2\u089e\u089c\3\2\2\2")
buf.write("\u089f\u08a2\3\2\2\2\u08a0\u089e\3\2\2\2\u08a0\u08a1\3")
buf.write("\2\2\2\u08a1\u08a3\3\2\2\2\u08a2\u08a0\3\2\2\2\u08a3\u08a4")
buf.write("\7\5\2\2\u08a4\u08a6\3\2\2\2\u08a5\u0865\3\2\2\2\u08a5")
buf.write("\u087b\3\2\2\2\u08a5\u0883\3\2\2\2\u08a6\u0087\3\2\2\2")
buf.write("\u08a7\u08ad\7\u00a7\2\2\u08a8\u08ae\5\u00b2Z\2\u08a9")
buf.write("\u08aa\7\4\2\2\u08aa\u08ab\5\64\33\2\u08ab\u08ac\7\5\2")
buf.write("\2\u08ac\u08ae\3\2\2\2\u08ad\u08a8\3\2\2\2\u08ad\u08a9")
buf.write("\3\2\2\2\u08ae\u0089\3\2\2\2\u08af\u08b0\7\u008a\2\2\u08b0")
buf.write("\u08b5\5L\'\2\u08b1\u08b2\7\6\2\2\u08b2\u08b4\5L\'\2\u08b3")
buf.write("\u08b1\3\2\2\2\u08b4\u08b7\3\2\2\2\u08b5\u08b3\3\2\2\2")
buf.write("\u08b5\u08b6\3\2\2\2\u08b6\u08b9\3\2\2\2\u08b7\u08b5\3")
buf.write("\2\2\2\u08b8\u08af\3\2\2\2\u08b8\u08b9\3\2\2\2\u08b9\u08ba")
buf.write("\3\2\2\2\u08ba\u08be\5\u008cG\2\u08bb\u08bc\7\25\2\2\u08bc")
buf.write("\u08bd\7\u0085\2\2\u08bd\u08bf\5R*\2\u08be\u08bb\3\2\2")
buf.write("\2\u08be\u08bf\3\2\2\2\u08bf\u08c1\3\2\2\2\u08c0\u08c2")
buf.write("\t\17\2\2\u08c1\u08c0\3\2\2\2\u08c1\u08c2\3\2\2\2\u08c2")
buf.write("\u08c8\3\2\2\2\u08c3\u08c4\7\u00ad\2\2\u08c4\u08c5\7\4")
buf.write("\2\2\u08c5\u08c6\5\u0090I\2\u08c6\u08c7\7\5\2\2\u08c7")
buf.write("\u08c9\3\2\2\2\u08c8\u08c3\3\2\2\2\u08c8\u08c9\3\2\2\2")
buf.write("\u08c9\u08d3\3\2\2\2\u08ca\u08cb\7\u00d7\2\2\u08cb\u08d0")
buf.write("\5T+\2\u08cc\u08cd\7\6\2\2\u08cd\u08cf\5T+\2\u08ce\u08cc")
buf.write("\3\2\2\2\u08cf\u08d2\3\2\2\2\u08d0\u08ce\3\2\2\2\u08d0")
buf.write("\u08d1\3\2\2\2\u08d1\u08d4\3\2\2\2\u08d2\u08d0\3\2\2\2")
buf.write("\u08d3\u08ca\3\2\2\2\u08d3\u08d4\3\2\2\2\u08d4\u08de\3")
buf.write("\2\2\2\u08d5\u08d6\7D\2\2\u08d6\u08db\5V,\2\u08d7\u08d8")
buf.write("\7\6\2\2\u08d8\u08da\5V,\2\u08d9\u08d7\3\2\2\2\u08da\u08dd")
buf.write("\3\2\2\2\u08db\u08d9\3\2\2\2\u08db\u08dc\3\2\2\2\u08dc")
buf.write("\u08df\3\2\2\2\u08dd\u08db\3\2\2\2\u08de\u08d5\3\2\2\2")
buf.write("\u08de\u08df\3\2\2\2\u08df\u008b\3\2\2\2\u08e0\u08e1\7")
buf.write("\u00b6\2\2\u08e1\u08f9\5\u008eH\2\u08e2\u08e3\7\u00c7")
buf.write("\2\2\u08e3\u08f9\5\u008eH\2\u08e4\u08e5\7e\2\2\u08e5\u08f9")
buf.write("\5\u008eH\2\u08e6\u08e7\7\u00b6\2\2\u08e7\u08e8\7!\2\2")
buf.write("\u08e8\u08e9\5\u008eH\2\u08e9\u08ea\7\31\2\2\u08ea\u08eb")
buf.write("\5\u008eH\2\u08eb\u08f9\3\2\2\2\u08ec\u08ed\7\u00c7\2")
buf.write("\2\u08ed\u08ee\7!\2\2\u08ee\u08ef\5\u008eH\2\u08ef\u08f0")
buf.write("\7\31\2\2\u08f0\u08f1\5\u008eH\2\u08f1\u08f9\3\2\2\2\u08f2")
buf.write("\u08f3\7e\2\2\u08f3\u08f4\7!\2\2\u08f4\u08f5\5\u008eH")
buf.write("\2\u08f5\u08f6\7\31\2\2\u08f6\u08f7\5\u008eH\2\u08f7\u08f9")
buf.write("\3\2\2\2\u08f8\u08e0\3\2\2\2\u08f8\u08e2\3\2\2\2\u08f8")
buf.write("\u08e4\3\2\2\2\u08f8\u08e6\3\2\2\2\u08f8\u08ec\3\2\2\2")
buf.write("\u08f8\u08f2\3\2\2\2\u08f9\u008d\3\2\2\2\u08fa\u08fb\7")
buf.write("\u00e9\2\2\u08fb\u0904\7\u00b1\2\2\u08fc\u08fd\7\u00e9")
buf.write("\2\2\u08fd\u0904\7Y\2\2\u08fe\u08ff\7\62\2\2\u08ff\u0904")
buf.write("\7\u00c6\2\2\u0900\u0901\5^\60\2\u0901\u0902\t\30\2\2")
buf.write("\u0902\u0904\3\2\2\2\u0903\u08fa\3\2\2\2\u0903\u08fc\3")
buf.write("\2\2\2\u0903\u08fe\3\2\2\2\u0903\u0900\3\2\2\2\u0904\u008f")
buf.write("\3\2\2\2\u0905\u0906\bI\1\2\u0906\u0908\5\u0092J\2\u0907")
buf.write("\u0909\5\u0094K\2\u0908\u0907\3\2\2\2\u0908\u0909\3\2")
buf.write("\2\2\u0909\u0911\3\2\2\2\u090a\u090b\f\4\2\2\u090b\u0910")
buf.write("\5\u0090I\5\u090c\u090d\f\3\2\2\u090d\u090e\7\13\2\2\u090e")
buf.write("\u0910\5\u0090I\4\u090f\u090a\3\2\2\2\u090f\u090c\3\2")
buf.write("\2\2\u0910\u0913\3\2\2\2\u0911\u090f\3\2\2\2\u0911\u0912")
buf.write("\3\2\2\2\u0912\u0091\3\2\2\2\u0913\u0911\3\2\2\2\u0914")
buf.write("\u092e\5\u00b2Z\2\u0915\u0916\7\4\2\2\u0916\u092e\7\5")
buf.write("\2\2\u0917\u0918\7\u00af\2\2\u0918\u0919\7\4\2\2\u0919")
buf.write("\u091e\5\u0090I\2\u091a\u091b\7\6\2\2\u091b\u091d\5\u0090")
buf.write("I\2\u091c\u091a\3\2\2\2\u091d\u0920\3\2\2\2\u091e\u091c")
buf.write("\3\2\2\2\u091e\u091f\3\2\2\2\u091f\u0921\3\2\2\2\u0920")
buf.write("\u091e\3\2\2\2\u0921\u0922\7\5\2\2\u0922\u092e\3\2\2\2")
buf.write("\u0923\u0924\7\4\2\2\u0924\u0925\5\u0090I\2\u0925\u0926")
buf.write("\7\5\2\2\u0926\u092e\3\2\2\2\u0927\u092e\7\f\2\2\u0928")
buf.write("\u092e\7\r\2\2\u0929\u092a\7\16\2\2\u092a\u092b\5\u0090")
buf.write("I\2\u092b\u092c\7\17\2\2\u092c\u092e\3\2\2\2\u092d\u0914")
buf.write("\3\2\2\2\u092d\u0915\3\2\2\2\u092d\u0917\3\2\2\2\u092d")
buf.write("\u0923\3\2\2\2\u092d\u0927\3\2\2\2\u092d\u0928\3\2\2\2")
buf.write("\u092d\u0929\3\2\2\2\u092e\u0093\3\2\2\2\u092f\u0931\7")
buf.write("\u0109\2\2\u0930\u0932\7\u010d\2\2\u0931\u0930\3\2\2\2")
buf.write("\u0931\u0932\3\2\2\2\u0932\u094e\3\2\2\2\u0933\u0935\7")
buf.write("\u0107\2\2\u0934\u0936\7\u010d\2\2\u0935\u0934\3\2\2\2")
buf.write("\u0935\u0936\3\2\2\2\u0936\u094e\3\2\2\2\u0937\u0939\7")
buf.write("\u010d\2\2\u0938\u093a\7\u010d\2\2\u0939\u0938\3\2\2\2")
buf.write("\u0939\u093a\3\2\2\2\u093a\u094e\3\2\2\2\u093b\u093c\7")
buf.write("\20\2\2\u093c\u093d\7\u0111\2\2\u093d\u093f\7\21\2\2\u093e")
buf.write("\u0940\7\u010d\2\2\u093f\u093e\3\2\2\2\u093f\u0940\3\2")
buf.write("\2\2\u0940\u094e\3\2\2\2\u0941\u0943\7\20\2\2\u0942\u0944")
buf.write("\7\u0111\2\2\u0943\u0942\3\2\2\2\u0943\u0944\3\2\2\2\u0944")
buf.write("\u0945\3\2\2\2\u0945\u0947\7\6\2\2\u0946\u0948\7\u0111")
buf.write("\2\2\u0947\u0946\3\2\2\2\u0947\u0948\3\2\2\2\u0948\u0949")
buf.write("\3\2\2\2\u0949\u094b\7\21\2\2\u094a\u094c\7\u010d\2\2")
buf.write("\u094b\u094a\3\2\2\2\u094b\u094c\3\2\2\2\u094c\u094e\3")
buf.write("\2\2\2\u094d\u092f\3\2\2\2\u094d\u0933\3\2\2\2\u094d\u0937")
buf.write("\3\2\2\2\u094d\u093b\3\2\2\2\u094d\u0941\3\2\2\2\u094e")
buf.write("\u0095\3\2\2\2\u094f\u0950\5\u00b2Z\2\u0950\u0951\7\u0101")
buf.write("\2\2\u0951\u0952\5^\60\2\u0952\u0097\3\2\2\2\u0953\u0954")
buf.write("\7[\2\2\u0954\u0958\t\31\2\2\u0955\u0956\7\u00e7\2\2\u0956")
buf.write("\u0958\t\32\2\2\u0957\u0953\3\2\2\2\u0957\u0955\3\2\2")
buf.write("\2\u0958\u0099\3\2\2\2\u0959\u095a\7v\2\2\u095a\u095b")
buf.write("\7|\2\2\u095b\u095f\5\u009cO\2\u095c\u095d\7\u00b7\2\2")
buf.write("\u095d\u095f\t\33\2\2\u095e\u0959\3\2\2\2\u095e\u095c")
buf.write("\3\2\2\2\u095f\u009b\3\2\2\2\u0960\u0961\7\u00b7\2\2\u0961")
buf.write("\u0968\7\u00ea\2\2\u0962\u0963\7\u00b7\2\2\u0963\u0968")
buf.write("\7,\2\2\u0964\u0965\7\u00bb\2\2\u0965\u0968\7\u00b7\2")
buf.write("\2\u0966\u0968\7\u00cf\2\2\u0967\u0960\3\2\2\2\u0967\u0962")
buf.write("\3\2\2\2\u0967\u0964\3\2\2\2\u0967\u0966\3\2\2\2\u0968")
buf.write("\u009d\3\2\2\2\u0969\u096f\5^\60\2\u096a\u096b\5\u00b2")
buf.write("Z\2\u096b\u096c\7\22\2\2\u096c\u096d\5^\60\2\u096d\u096f")
buf.write("\3\2\2\2\u096e\u0969\3\2\2\2\u096e\u096a\3\2\2\2\u096f")
buf.write("\u009f\3\2\2\2\u0970\u0971\5\u00b2Z\2\u0971\u0972\7\3")
buf.write("\2\2\u0972\u0973\5\u00b2Z\2\u0973\u0976\3\2\2\2\u0974")
buf.write("\u0976\5\u00b2Z\2\u0975\u0970\3\2\2\2\u0975\u0974\3\2")
buf.write("\2\2\u0976\u00a1\3\2\2\2\u0977\u097c\5\u00a0Q\2\u0978")
buf.write("\u0979\7\6\2\2\u0979\u097b\5\u00a0Q\2\u097a\u0978\3\2")
buf.write("\2\2\u097b\u097e\3\2\2\2\u097c\u097a\3\2\2\2\u097c\u097d")
buf.write("\3\2\2\2\u097d\u00a3\3\2\2\2\u097e\u097c\3\2\2\2\u097f")
buf.write("\u0980\t\34\2\2\u0980\u00a5\3\2\2\2\u0981\u0986\5\u00b2")
buf.write("Z\2\u0982\u0983\7\3\2\2\u0983\u0985\5\u00b2Z\2\u0984\u0982")
buf.write("\3\2\2\2\u0985\u0988\3\2\2\2\u0986\u0984\3\2\2\2\u0986")
buf.write("\u0987\3\2\2\2\u0987\u00a7\3\2\2\2\u0988\u0986\3\2\2\2")
buf.write("\u0989\u098a\7Z\2\2\u098a\u098b\5\u00aaV\2\u098b\u098c")
buf.write("\7\34\2\2\u098c\u098d\7\u009d\2\2\u098d\u098e\5d\63\2")
buf.write("\u098e\u00a9\3\2\2\2\u098f\u0990\t\35\2\2\u0990\u00ab")
buf.write("\3\2\2\2\u0991\u0995\5\u00aeX\2\u0992\u0995\7:\2\2\u0993")
buf.write("\u0995\7\66\2\2\u0994\u0991\3\2\2\2\u0994\u0992\3\2\2")
buf.write("\2\u0994\u0993\3\2\2\2\u0995\u00ad\3\2\2\2\u0996\u099c")
buf.write("\5\u00b2Z\2\u0997\u0998\7\u00f0\2\2\u0998\u099c\5\u00b2")
buf.write("Z\2\u0999\u099a\7\u00c2\2\2\u099a\u099c\5\u00b2Z\2\u099b")
buf.write("\u0996\3\2\2\2\u099b\u0997\3\2\2\2\u099b\u0999\3\2\2\2")
buf.write("\u099c\u00af\3\2\2\2\u099d\u09a2\5\u00b2Z\2\u099e\u099f")
buf.write("\7\6\2\2\u099f\u09a1\5\u00b2Z\2\u09a0\u099e\3\2\2\2\u09a1")
buf.write("\u09a4\3\2\2\2\u09a2\u09a0\3\2\2\2\u09a2\u09a3\3\2\2\2")
buf.write("\u09a3\u00b1\3\2\2\2\u09a4\u09a2\3\2\2\2\u09a5\u09ab\7")
buf.write("\u0114\2\2\u09a6\u09ab\7\u0116\2\2\u09a7\u09ab\5\u00b6")
buf.write("\\\2\u09a8\u09ab\7\u0117\2\2\u09a9\u09ab\7\u0115\2\2\u09aa")
buf.write("\u09a5\3\2\2\2\u09aa\u09a6\3\2\2\2\u09aa\u09a7\3\2\2\2")
buf.write("\u09aa\u09a8\3\2\2\2\u09aa\u09a9\3\2\2\2\u09ab\u00b3\3")
buf.write("\2\2\2\u09ac\u09ae\7\u0108\2\2\u09ad\u09ac\3\2\2\2\u09ad")
buf.write("\u09ae\3\2\2\2\u09ae\u09af\3\2\2\2\u09af\u09b9\7\u0112")
buf.write("\2\2\u09b0\u09b2\7\u0108\2\2\u09b1\u09b0\3\2\2\2\u09b1")
buf.write("\u09b2\3\2\2\2\u09b2\u09b3\3\2\2\2\u09b3\u09b9\7\u0113")
buf.write("\2\2\u09b4\u09b6\7\u0108\2\2\u09b5\u09b4\3\2\2\2\u09b5")
buf.write("\u09b6\3\2\2\2\u09b6\u09b7\3\2\2\2\u09b7\u09b9\7\u0111")
buf.write("\2\2\u09b8\u09ad\3\2\2\2\u09b8\u09b1\3\2\2\2\u09b8\u09b5")
buf.write("\3\2\2\2\u09b9\u00b5\3\2\2\2\u09ba\u09bb\t\36\2\2\u09bb")
buf.write("\u00b7\3\2\2\2\u0148\u00d4\u00d9\u00dd\u00e3\u00e7\u00fc")
buf.write("\u0100\u0104\u0108\u0110\u0114\u0117\u011e\u0127\u012d")
buf.write("\u0131\u0137\u013e\u0147\u0153\u015c\u0162\u016d\u0175")
buf.write("\u017d\u0184\u018e\u0195\u01b2\u01b5\u01b8\u01bc\u01c2")
buf.write("\u01c7\u01ce\u01d3\u01d7\u01df\u01e5\u01e9\u01f7\u01ff")
buf.write("\u020a\u0223\u0226\u0230\u0234\u023b\u0245\u024b\u0250")
buf.write("\u0254\u025a\u0263\u0269\u026d\u0274\u0278\u0280\u0285")
buf.write("\u0289\u0291\u0299\u029e\u02a2\u02ac\u02b3\u02b8\u02bc")
buf.write("\u02c6\u02c9\u02d2\u02d7\u02dd\u02f5\u02fb\u02fd\u0303")
buf.write("\u0309\u030b\u0313\u0315\u031b\u0321\u0323\u0332\u0337")
buf.write("\u033e\u034a\u034c\u0354\u0356\u0368\u036b\u036f\u0373")
buf.write("\u0385\u0388\u0398\u03a2\u03a7\u03ad\u03b0\u03b9\u03bb")
buf.write("\u03be\u03c4\u03cb\u03d0\u03d6\u03da\u03de\u03e4\u03ef")
buf.write("\u03fe\u0401\u0406\u0408\u040f\u0415\u0417\u041b\u0425")
buf.write("\u042b\u042e\u0430\u043c\u0443\u0447\u044b\u044f\u0456")
buf.write("\u045f\u0462\u0466\u046b\u046f\u0477\u047a\u047d\u0484")
buf.write("\u048f\u0492\u049c\u049f\u04aa\u04af\u04b7\u04ba\u04be")
buf.write("\u04c7\u04d0\u04d3\u04dc\u04df\u04e2\u04e6\u04f1\u04f4")
buf.write("\u04fb\u04fe\u0511\u0515\u0519\u051d\u0521\u0525\u0527")
buf.write("\u0532\u0537\u0540\u0547\u054a\u0550\u055c\u055f\u0568")
buf.write("\u056b\u0573\u0576\u0579\u057e\u0581\u058d\u0590\u0598")
buf.write("\u059d\u05a1\u05a3\u05a5\u05b4\u05b6\u05c1\u05d6\u05e0")
buf.write("\u05eb\u05ef\u05f1\u05f9\u0600\u060d\u0613\u061e\u0625")
buf.write("\u0629\u0631\u0633\u0640\u0648\u0651\u0657\u065f\u0665")
buf.write("\u0669\u066e\u0673\u0679\u0687\u0689\u06a7\u06b2\u06ba")
buf.write("\u06bf\u06c4\u06d1\u06d7\u06de\u06e3\u06e6\u06e9\u06ee")
buf.write("\u06f5\u06f8\u0701\u0704\u0708\u070b\u070e\u071d\u0720")
buf.write("\u0733\u0737\u073f\u0743\u075c\u075f\u0768\u076e\u0774")
buf.write("\u077a\u0787\u0790\u07a6\u07a9\u07ac\u07b6\u07b8\u07c1")
buf.write("\u07c7\u07c9\u07d1\u07db\u07e1\u07ef\u07f8\u07ff\u0804")
buf.write("\u080b\u0815\u081a\u0821\u083b\u0840\u0842\u0849\u084d")
buf.write("\u0854\u0858\u0869\u0878\u087f\u0888\u0892\u0897\u08a0")
buf.write("\u08a5\u08ad\u08b5\u08b8\u08be\u08c1\u08c8\u08d0\u08d3")
buf.write("\u08db\u08de\u08f8\u0903\u0908\u090f\u0911\u091e\u092d")
buf.write("\u0931\u0935\u0939\u093f\u0943\u0947\u094b\u094d\u0957")
buf.write("\u095e\u0967\u096e\u0975\u097c\u0986\u0994\u099b\u09a2")
buf.write("\u09aa\u09ad\u09b1\u09b5\u09b8")
return buf.getvalue()
class SqlBaseParser ( Parser ):
grammarFileName = "SqlBase.g4"
atn = ATNDeserializer().deserialize(serializedATN())
decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ]
sharedContextCache = PredictionContextCache()
literalNames = [ "<INVALID>", "'.'", "'('", "')'", "','", "'SKIP'",
"'->'", "'['", "']'", "'|'", "'^'", "'$'", "'{-'",
"'-}'", "'{'", "'}'", "'=>'", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "'NFC'", "'NFD'",
"'NFKC'", "'NFKD'", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "'='", "<INVALID>", "'<'",
"'<='", "'>'", "'>='", "'+'", "'-'", "'*'", "'/'",
"'%'", "'||'", "'?'" ]
symbolicNames = [ "<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "ADD", "ADMIN", "AFTER", "ALL", "ALTER",
"ANALYZE", "AND", "ANY", "ARRAY", "AS", "ASC", "AT",
"AUTHORIZATION", "BERNOULLI", "BETWEEN", "BY", "CALL",
"CASCADE", "CASE", "CAST", "CATALOGS", "COLUMN", "COLUMNS",
"COMMENT", "COMMIT", "COMMITTED", "CONSTRAINT", "COUNT",
"CREATE", "CROSS", "CUBE", "CURRENT", "CURRENT_CATALOG",
"CURRENT_DATE", "CURRENT_PATH", "CURRENT_ROLE", "CURRENT_SCHEMA",
"CURRENT_TIME", "CURRENT_TIMESTAMP", "CURRENT_USER",
"DATA", "DATE", "DAY", "DEALLOCATE", "DEFINER", "DELETE",
"DENY", "DESC", "DESCRIBE", "DEFINE", "DISTINCT",
"DISTRIBUTED", "DOUBLE", "DROP", "ELSE", "EMPTY",
"END", "ERROR", "ESCAPE", "EXCEPT", "EXCLUDING", "EXECUTE",
"EXISTS", "EXPLAIN", "EXTRACT", "FALSE", "FETCH",
"FILTER", "FINAL", "FIRST", "FOLLOWING", "FOR", "FORMAT",
"FROM", "FULL", "FUNCTIONS", "GRANT", "GRANTED", "GRANTS",
"GRAPHVIZ", "GROUP", "GROUPING", "GROUPS", "HAVING",
"HOUR", "IF", "IGNORE", "IN", "INCLUDING", "INITIAL",
"INNER", "INPUT", "INSERT", "INTERSECT", "INTERVAL",
"INTO", "INVOKER", "IO", "IS", "ISOLATION", "JOIN",
"JSON", "LAST", "LATERAL", "LEFT", "LEVEL", "LIKE",
"LIMIT", "LISTAGG", "LOCAL", "LOCALTIME", "LOCALTIMESTAMP",
"LOGICAL", "MAP", "MATCH", "MATCHED", "MATCHES", "MATCH_RECOGNIZE",
"MATERIALIZED", "MEASURES", "MERGE", "MINUTE", "MONTH",
"NATURAL", "NEXT", "NFC", "NFD", "NFKC", "NFKD", "NO",
"NONE", "NORMALIZE", "NOT", "NULL", "NULLIF", "NULLS",
"OFFSET", "OMIT", "OF", "ON", "ONE", "ONLY", "OPTION",
"OR", "ORDER", "ORDINALITY", "OUTER", "OUTPUT", "OVER",
"OVERFLOW", "PARTITION", "PARTITIONS", "PAST", "PATH",
"PATTERN", "PER", "PERMUTE", "POSITION", "PRECEDING",
"PRECISION", "PREPARE", "PRIVILEGES", "PROPERTIES",
"RANGE", "READ", "RECURSIVE", "REFRESH", "RENAME",
"REPEATABLE", "REPLACE", "RESET", "RESPECT", "RESTRICT",
"REVOKE", "RIGHT", "ROLE", "ROLES", "ROLLBACK", "ROLLUP",
"ROW", "ROWS", "RUNNING", "SCHEMA", "SCHEMAS", "SECOND",
"SECURITY", "SEEK", "SELECT", "SERIALIZABLE", "SESSION",
"SET", "SETS", "SHOW", "SOME", "START", "STATS", "SUBSET",
"SUBSTRING", "SYSTEM", "TABLE", "TABLES", "TABLESAMPLE",
"TEXT", "THEN", "TIES", "TIME", "TIMESTAMP", "TO",
"TRANSACTION", "TRUE", "TRUNCATE", "TRY_CAST", "TYPE",
"UESCAPE", "UNBOUNDED", "UNCOMMITTED", "UNION", "UNMATCHED",
"UNNEST", "UPDATE", "USE", "USER", "USING", "VALIDATE",
"VALUES", "VERBOSE", "VERSION", "VIEW", "WHEN", "WHERE",
"WINDOW", "WITH", "WITHIN", "WITHOUT", "WORK", "WRITE",
"YEAR", "ZONE", "EQ", "NEQ", "LT", "LTE", "GT", "GTE",
"PLUS", "MINUS", "ASTERISK", "SLASH", "PERCENT", "CONCAT",
"QUESTION_MARK", "STRING", "UNICODE_STRING", "BINARY_LITERAL",
"INTEGER_VALUE", "DECIMAL_VALUE", "DOUBLE_VALUE",
"IDENTIFIER", "DIGIT_IDENTIFIER", "QUOTED_IDENTIFIER",
"BACKQUOTED_IDENTIFIER", "SIMPLE_COMMENT", "BRACKETED_COMMENT",
"WS", "UNRECOGNIZED", "DELIMITER" ]
RULE_singleStatement = 0
RULE_standaloneExpression = 1
RULE_standalonePathSpecification = 2
RULE_standaloneType = 3
RULE_standaloneRowPattern = 4
RULE_statement = 5
RULE_query = 6
RULE_with_ = 7
RULE_tableElement = 8
RULE_columnDefinition = 9
RULE_likeClause = 10
RULE_properties = 11
RULE_propertyAssignments = 12
RULE_property_ = 13
RULE_queryNoWith = 14
RULE_limitRowCount = 15
RULE_rowCount = 16
RULE_queryTerm = 17
RULE_queryPrimary = 18
RULE_sortItem = 19
RULE_querySpecification = 20
RULE_groupBy = 21
RULE_groupingElement = 22
RULE_groupingSet = 23
RULE_windowDefinition = 24
RULE_windowSpecification = 25
RULE_namedQuery = 26
RULE_setQuantifier = 27
RULE_selectItem = 28
RULE_relation = 29
RULE_joinType = 30
RULE_joinCriteria = 31
RULE_sampledRelation = 32
RULE_sampleType = 33
RULE_listAggOverflowBehavior = 34
RULE_listaggCountIndication = 35
RULE_patternRecognition = 36
RULE_measureDefinition = 37
RULE_rowsPerMatch = 38
RULE_emptyMatchHandling = 39
RULE_skipTo = 40
RULE_subsetDefinition = 41
RULE_variableDefinition = 42
RULE_aliasedRelation = 43
RULE_columnAliases = 44
RULE_relationPrimary = 45
RULE_expression = 46
RULE_booleanExpression = 47
RULE_predicate = 48
RULE_valueExpression = 49
RULE_primaryExpression = 50
RULE_processingMode = 51
RULE_nullTreatment = 52
RULE_string = 53
RULE_timeZoneSpecifier = 54
RULE_comparisonOperator = 55
RULE_comparisonQuantifier = 56
RULE_booleanValue = 57
RULE_interval = 58
RULE_intervalField = 59
RULE_normalForm = 60
RULE_type_ = 61
RULE_rowField = 62
RULE_typeParameter = 63
RULE_whenClause = 64
RULE_filter_ = 65
RULE_mergeCase = 66
RULE_over = 67
RULE_windowFrame = 68
RULE_frameExtent = 69
RULE_frameBound = 70
RULE_rowPattern = 71
RULE_patternPrimary = 72
RULE_patternQuantifier = 73
RULE_updateAssignment = 74
RULE_explainOption = 75
RULE_transactionMode = 76
RULE_levelOfIsolation = 77
RULE_callArgument = 78
RULE_pathElement = 79
RULE_pathSpecification = 80
RULE_privilege = 81
RULE_qualifiedName = 82
RULE_queryPeriod = 83
RULE_rangeType = 84
RULE_grantor = 85
RULE_principal = 86
RULE_roles = 87
RULE_identifier = 88
RULE_number = 89
RULE_nonReserved = 90
ruleNames = [ "singleStatement", "standaloneExpression", "standalonePathSpecification",
"standaloneType", "standaloneRowPattern", "statement",
"query", "with_", "tableElement", "columnDefinition",
"likeClause", "properties", "propertyAssignments", "property_",
"queryNoWith", "limitRowCount", "rowCount", "queryTerm",
"queryPrimary", "sortItem", "querySpecification", "groupBy",
"groupingElement", "groupingSet", "windowDefinition",
"windowSpecification", "namedQuery", "setQuantifier",
"selectItem", "relation", "joinType", "joinCriteria",
"sampledRelation", "sampleType", "listAggOverflowBehavior",
"listaggCountIndication", "patternRecognition", "measureDefinition",
"rowsPerMatch", "emptyMatchHandling", "skipTo", "subsetDefinition",
"variableDefinition", "aliasedRelation", "columnAliases",
"relationPrimary", "expression", "booleanExpression",
"predicate", "valueExpression", "primaryExpression",
"processingMode", "nullTreatment", "string", "timeZoneSpecifier",
"comparisonOperator", "comparisonQuantifier", "booleanValue",
"interval", "intervalField", "normalForm", "type_", "rowField",
"typeParameter", "whenClause", "filter_", "mergeCase",
"over", "windowFrame", "frameExtent", "frameBound", "rowPattern",
"patternPrimary", "patternQuantifier", "updateAssignment",
"explainOption", "transactionMode", "levelOfIsolation",
"callArgument", "pathElement", "pathSpecification", "privilege",
"qualifiedName", "queryPeriod", "rangeType", "grantor",
"principal", "roles", "identifier", "number", "nonReserved" ]
EOF = Token.EOF
T__0=1
T__1=2
T__2=3
T__3=4
T__4=5
T__5=6
T__6=7
T__7=8
T__8=9
T__9=10
T__10=11
T__11=12
T__12=13
T__13=14
T__14=15
T__15=16
ADD=17
ADMIN=18
AFTER=19
ALL=20
ALTER=21
ANALYZE=22
AND=23
ANY=24
ARRAY=25
AS=26
ASC=27
AT=28
AUTHORIZATION=29
BERNOULLI=30
BETWEEN=31
BY=32
CALL=33
CASCADE=34
CASE=35
CAST=36
CATALOGS=37
COLUMN=38
COLUMNS=39
COMMENT=40
COMMIT=41
COMMITTED=42
CONSTRAINT=43
COUNT=44
CREATE=45
CROSS=46
CUBE=47
CURRENT=48
CURRENT_CATALOG=49
CURRENT_DATE=50
CURRENT_PATH=51
CURRENT_ROLE=52
CURRENT_SCHEMA=53
CURRENT_TIME=54
CURRENT_TIMESTAMP=55
CURRENT_USER=56
DATA=57
DATE=58
DAY=59
DEALLOCATE=60
DEFINER=61
DELETE=62
DENY=63
DESC=64
DESCRIBE=65
DEFINE=66
DISTINCT=67
DISTRIBUTED=68
DOUBLE=69
DROP=70
ELSE=71
EMPTY=72
END=73
ERROR=74
ESCAPE=75
EXCEPT=76
EXCLUDING=77
EXECUTE=78
EXISTS=79
EXPLAIN=80
EXTRACT=81
FALSE=82
FETCH=83
FILTER=84
FINAL=85
FIRST=86
FOLLOWING=87
FOR=88
FORMAT=89
FROM=90
FULL=91
FUNCTIONS=92
GRANT=93
GRANTED=94
GRANTS=95
GRAPHVIZ=96
GROUP=97
GROUPING=98
GROUPS=99
HAVING=100
HOUR=101
IF=102
IGNORE=103
IN=104
INCLUDING=105
INITIAL=106
INNER=107
INPUT=108
INSERT=109
INTERSECT=110
INTERVAL=111
INTO=112
INVOKER=113
IO=114
IS=115
ISOLATION=116
JOIN=117
JSON=118
LAST=119
LATERAL=120
LEFT=121
LEVEL=122
LIKE=123
LIMIT=124
LISTAGG=125
LOCAL=126
LOCALTIME=127
LOCALTIMESTAMP=128
LOGICAL=129
MAP=130
MATCH=131
MATCHED=132
MATCHES=133
MATCH_RECOGNIZE=134
MATERIALIZED=135
MEASURES=136
MERGE=137
MINUTE=138
MONTH=139
NATURAL=140
NEXT=141
NFC=142
NFD=143
NFKC=144
NFKD=145
NO=146
NONE=147
NORMALIZE=148
NOT=149
NULL=150
NULLIF=151
NULLS=152
OFFSET=153
OMIT=154
OF=155
ON=156
ONE=157
ONLY=158
OPTION=159
OR=160
ORDER=161
ORDINALITY=162
OUTER=163
OUTPUT=164
OVER=165
OVERFLOW=166
PARTITION=167
PARTITIONS=168
PAST=169
PATH=170
PATTERN=171
PER=172
PERMUTE=173
POSITION=174
PRECEDING=175
PRECISION=176
PREPARE=177
PRIVILEGES=178
PROPERTIES=179
RANGE=180
READ=181
RECURSIVE=182
REFRESH=183
RENAME=184
REPEATABLE=185
REPLACE=186
RESET=187
RESPECT=188
RESTRICT=189
REVOKE=190
RIGHT=191
ROLE=192
ROLES=193
ROLLBACK=194
ROLLUP=195
ROW=196
ROWS=197
RUNNING=198
SCHEMA=199
SCHEMAS=200
SECOND=201
SECURITY=202
SEEK=203
SELECT=204
SERIALIZABLE=205
SESSION=206
SET=207
SETS=208
SHOW=209
SOME=210
START=211
STATS=212
SUBSET=213
SUBSTRING=214
SYSTEM=215
TABLE=216
TABLES=217
TABLESAMPLE=218
TEXT=219
THEN=220
TIES=221
TIME=222
TIMESTAMP=223
TO=224
TRANSACTION=225
TRUE=226
TRUNCATE=227
TRY_CAST=228
TYPE=229
UESCAPE=230
UNBOUNDED=231
UNCOMMITTED=232
UNION=233
UNMATCHED=234
UNNEST=235
UPDATE=236
USE=237
USER=238
USING=239
VALIDATE=240
VALUES=241
VERBOSE=242
VERSION=243
VIEW=244
WHEN=245
WHERE=246
WINDOW=247
WITH=248
WITHIN=249
WITHOUT=250
WORK=251
WRITE=252
YEAR=253
ZONE=254
EQ=255
NEQ=256
LT=257
LTE=258
GT=259
GTE=260
PLUS=261
MINUS=262
ASTERISK=263
SLASH=264
PERCENT=265
CONCAT=266
QUESTION_MARK=267
STRING=268
UNICODE_STRING=269
BINARY_LITERAL=270
INTEGER_VALUE=271
DECIMAL_VALUE=272
DOUBLE_VALUE=273
IDENTIFIER=274
DIGIT_IDENTIFIER=275
QUOTED_IDENTIFIER=276
BACKQUOTED_IDENTIFIER=277
SIMPLE_COMMENT=278
BRACKETED_COMMENT=279
WS=280
UNRECOGNIZED=281
DELIMITER=282
    def __init__(self, input:TokenStream, output:TextIO = sys.stdout):
        """Create the parser over a token stream and attach the ATN simulator.

        Verifies the installed ANTLR runtime matches the version this file
        was generated with (4.9.2).
        """
        super().__init__(input, output)
        # Runtime/codegen version mismatch is a common source of subtle bugs;
        # this raises/warns if the installed runtime differs.
        self.checkVersion("4.9.2")
        # The simulator drives adaptive LL(*) prediction using the class-level
        # deserialized ATN and the shared DFA/prediction caches.
        self._interp = ParserATNSimulator(self, self.atn, self.decisionsToDFA, self.sharedContextCache)
        self._predicates = None
    class SingleStatementContext(ParserRuleContext):
        # Parse-tree node for grammar rule `singleStatement: statement EOF`.
        __slots__ = 'parser'
        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser
        def statement(self):
            # The single child statement subtree.
            return self.getTypedRuleContext(SqlBaseParser.StatementContext,0)
        def EOF(self):
            return self.getToken(SqlBaseParser.EOF, 0)
        def getRuleIndex(self):
            return SqlBaseParser.RULE_singleStatement
        def enterRule(self, listener:ParseTreeListener):
            # hasattr guard lets partial listeners omit methods for rules they ignore.
            if hasattr( listener, "enterSingleStatement" ):
                listener.enterSingleStatement(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitSingleStatement" ):
                listener.exitSingleStatement(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitSingleStatement" ):
                return visitor.visitSingleStatement(self)
            else:
                return visitor.visitChildren(self)
    def singleStatement(self):
        """Parse rule `singleStatement: statement EOF`; returns the context node.

        NOTE: the `self.state = N` assignments are generated ATN state numbers
        and must stay in sync with the serialized ATN — do not edit by hand.
        """
        localctx = SqlBaseParser.SingleStatementContext(self, self._ctx, self.state)
        self.enterRule(localctx, 0, self.RULE_singleStatement)
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 182
            self.statement()
            self.state = 183
            self.match(SqlBaseParser.EOF)
        except RecognitionException as re:
            # Standard ANTLR recovery: record the error on the context,
            # report it, then resynchronize the input.
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class StandaloneExpressionContext(ParserRuleContext):
        # Parse-tree node for `standaloneExpression: expression EOF`.
        __slots__ = 'parser'
        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser
        def expression(self):
            return self.getTypedRuleContext(SqlBaseParser.ExpressionContext,0)
        def EOF(self):
            return self.getToken(SqlBaseParser.EOF, 0)
        def getRuleIndex(self):
            return SqlBaseParser.RULE_standaloneExpression
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterStandaloneExpression" ):
                listener.enterStandaloneExpression(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitStandaloneExpression" ):
                listener.exitStandaloneExpression(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitStandaloneExpression" ):
                return visitor.visitStandaloneExpression(self)
            else:
                return visitor.visitChildren(self)
    def standaloneExpression(self):
        """Parse rule `standaloneExpression: expression EOF`.

        Entry point for parsing a lone SQL expression (no surrounding
        statement). State numbers are generated ATN states.
        """
        localctx = SqlBaseParser.StandaloneExpressionContext(self, self._ctx, self.state)
        self.enterRule(localctx, 2, self.RULE_standaloneExpression)
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 185
            self.expression()
            self.state = 186
            self.match(SqlBaseParser.EOF)
        except RecognitionException as re:
            # Standard ANTLR recovery path.
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class StandalonePathSpecificationContext(ParserRuleContext):
        # Parse-tree node for `standalonePathSpecification: pathSpecification EOF`.
        __slots__ = 'parser'
        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser
        def pathSpecification(self):
            return self.getTypedRuleContext(SqlBaseParser.PathSpecificationContext,0)
        def EOF(self):
            return self.getToken(SqlBaseParser.EOF, 0)
        def getRuleIndex(self):
            return SqlBaseParser.RULE_standalonePathSpecification
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterStandalonePathSpecification" ):
                listener.enterStandalonePathSpecification(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitStandalonePathSpecification" ):
                listener.exitStandalonePathSpecification(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitStandalonePathSpecification" ):
                return visitor.visitStandalonePathSpecification(self)
            else:
                return visitor.visitChildren(self)
    def standalonePathSpecification(self):
        """Parse rule `standalonePathSpecification: pathSpecification EOF`.

        Entry point for parsing a lone SQL path (e.g. a session path value).
        """
        localctx = SqlBaseParser.StandalonePathSpecificationContext(self, self._ctx, self.state)
        self.enterRule(localctx, 4, self.RULE_standalonePathSpecification)
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 188
            self.pathSpecification()
            self.state = 189
            self.match(SqlBaseParser.EOF)
        except RecognitionException as re:
            # Standard ANTLR recovery path.
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class StandaloneTypeContext(ParserRuleContext):
        # Parse-tree node for `standaloneType: type EOF`.
        __slots__ = 'parser'
        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser
        def type_(self):
            # `type_` (trailing underscore) avoids shadowing the builtin `type`.
            return self.getTypedRuleContext(SqlBaseParser.Type_Context,0)
        def EOF(self):
            return self.getToken(SqlBaseParser.EOF, 0)
        def getRuleIndex(self):
            return SqlBaseParser.RULE_standaloneType
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterStandaloneType" ):
                listener.enterStandaloneType(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitStandaloneType" ):
                listener.exitStandaloneType(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitStandaloneType" ):
                return visitor.visitStandaloneType(self)
            else:
                return visitor.visitChildren(self)
    def standaloneType(self):
        """Parse rule `standaloneType: type EOF`.

        Entry point for parsing a lone SQL type name. `type_` is
        left-recursive, hence the explicit precedence argument `0`.
        """
        localctx = SqlBaseParser.StandaloneTypeContext(self, self._ctx, self.state)
        self.enterRule(localctx, 6, self.RULE_standaloneType)
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 191
            self.type_(0)
            self.state = 192
            self.match(SqlBaseParser.EOF)
        except RecognitionException as re:
            # Standard ANTLR recovery path.
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class StandaloneRowPatternContext(ParserRuleContext):
        # Parse-tree node for `standaloneRowPattern: rowPattern EOF`
        # (MATCH_RECOGNIZE row-pattern syntax).
        __slots__ = 'parser'
        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser
        def rowPattern(self):
            return self.getTypedRuleContext(SqlBaseParser.RowPatternContext,0)
        def EOF(self):
            return self.getToken(SqlBaseParser.EOF, 0)
        def getRuleIndex(self):
            return SqlBaseParser.RULE_standaloneRowPattern
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterStandaloneRowPattern" ):
                listener.enterStandaloneRowPattern(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitStandaloneRowPattern" ):
                listener.exitStandaloneRowPattern(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitStandaloneRowPattern" ):
                return visitor.visitStandaloneRowPattern(self)
            else:
                return visitor.visitChildren(self)
    def standaloneRowPattern(self):
        """Parse rule `standaloneRowPattern: rowPattern EOF`.

        Entry point for parsing a lone row pattern. `rowPattern` is
        left-recursive, hence the precedence argument `0`.
        """
        localctx = SqlBaseParser.StandaloneRowPatternContext(self, self._ctx, self.state)
        self.enterRule(localctx, 8, self.RULE_standaloneRowPattern)
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 194
            self.rowPattern(0)
            self.state = 195
            self.match(SqlBaseParser.EOF)
        except RecognitionException as re:
            # Standard ANTLR recovery path.
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class StatementContext(ParserRuleContext):
        # Base node for the `statement` rule. Each labeled alternative in the
        # grammar (#explain, #prepare, #use, ...) gets its own subclass that
        # re-types an instance of this base via copyFrom.
        __slots__ = 'parser'
        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser
        def getRuleIndex(self):
            return SqlBaseParser.RULE_statement
        def copyFrom(self, ctx:ParserRuleContext):
            # Copies parse state from the generic context into the
            # alternative-specific subclass instance.
            super().copyFrom(ctx)
    class ExplainContext(StatementContext):
        # Labeled alternative #explain: EXPLAIN ('(' explainOption (',' explainOption)* ')')? statement
        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.StatementContext
            super().__init__(parser)
            self.copyFrom(ctx)
        def EXPLAIN(self):
            return self.getToken(SqlBaseParser.EXPLAIN, 0)
        def statement(self):
            return self.getTypedRuleContext(SqlBaseParser.StatementContext,0)
        def explainOption(self, i:int=None):
            # i=None returns every explainOption child; otherwise the i-th.
            if i is None:
                return self.getTypedRuleContexts(SqlBaseParser.ExplainOptionContext)
            else:
                return self.getTypedRuleContext(SqlBaseParser.ExplainOptionContext,i)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterExplain" ):
                listener.enterExplain(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitExplain" ):
                listener.exitExplain(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitExplain" ):
                return visitor.visitExplain(self)
            else:
                return visitor.visitChildren(self)
    class PrepareContext(StatementContext):
        # Labeled alternative #prepare: PREPARE identifier FROM statement
        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.StatementContext
            super().__init__(parser)
            self.copyFrom(ctx)
        def PREPARE(self):
            return self.getToken(SqlBaseParser.PREPARE, 0)
        def identifier(self):
            # Name of the prepared statement.
            return self.getTypedRuleContext(SqlBaseParser.IdentifierContext,0)
        def FROM(self):
            return self.getToken(SqlBaseParser.FROM, 0)
        def statement(self):
            return self.getTypedRuleContext(SqlBaseParser.StatementContext,0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterPrepare" ):
                listener.enterPrepare(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitPrepare" ):
                listener.exitPrepare(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitPrepare" ):
                return visitor.visitPrepare(self)
            else:
                return visitor.visitChildren(self)
    class DropMaterializedViewContext(StatementContext):
        # Labeled alternative #dropMaterializedView:
        #   DROP MATERIALIZED VIEW (IF EXISTS)? qualifiedName
        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.StatementContext
            super().__init__(parser)
            self.copyFrom(ctx)
        def DROP(self):
            return self.getToken(SqlBaseParser.DROP, 0)
        def MATERIALIZED(self):
            return self.getToken(SqlBaseParser.MATERIALIZED, 0)
        def VIEW(self):
            return self.getToken(SqlBaseParser.VIEW, 0)
        def qualifiedName(self):
            return self.getTypedRuleContext(SqlBaseParser.QualifiedNameContext,0)
        def IF(self):
            # Present only when the optional IF EXISTS clause was used.
            return self.getToken(SqlBaseParser.IF, 0)
        def EXISTS(self):
            return self.getToken(SqlBaseParser.EXISTS, 0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterDropMaterializedView" ):
                listener.enterDropMaterializedView(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitDropMaterializedView" ):
                listener.exitDropMaterializedView(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitDropMaterializedView" ):
                return visitor.visitDropMaterializedView(self)
            else:
                return visitor.visitChildren(self)
    class UseContext(StatementContext):
        # Labeled alternative #use: USE schema | USE catalog '.' schema
        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.StatementContext
            super().__init__(parser)
            # Rule labels: populated by the parser during the statement rule.
            self.schema = None # IdentifierContext
            self.catalog = None # IdentifierContext
            self.copyFrom(ctx)
        def USE(self):
            return self.getToken(SqlBaseParser.USE, 0)
        def identifier(self, i:int=None):
            # i=None returns all identifier children (catalog and/or schema).
            if i is None:
                return self.getTypedRuleContexts(SqlBaseParser.IdentifierContext)
            else:
                return self.getTypedRuleContext(SqlBaseParser.IdentifierContext,i)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterUse" ):
                listener.enterUse(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitUse" ):
                listener.exitUse(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitUse" ):
                return visitor.visitUse(self)
            else:
                return visitor.visitChildren(self)
    class DeallocateContext(StatementContext):
        # Labeled alternative #deallocate: DEALLOCATE PREPARE identifier
        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.StatementContext
            super().__init__(parser)
            self.copyFrom(ctx)
        def DEALLOCATE(self):
            return self.getToken(SqlBaseParser.DEALLOCATE, 0)
        def PREPARE(self):
            return self.getToken(SqlBaseParser.PREPARE, 0)
        def identifier(self):
            # Name of the prepared statement being deallocated.
            return self.getTypedRuleContext(SqlBaseParser.IdentifierContext,0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterDeallocate" ):
                listener.enterDeallocate(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitDeallocate" ):
                listener.exitDeallocate(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitDeallocate" ):
                return visitor.visitDeallocate(self)
            else:
                return visitor.visitChildren(self)
    class RenameTableContext(StatementContext):
        # Labeled alternative #renameTable:
        #   ALTER TABLE (IF EXISTS)? from=qualifiedName RENAME TO to=qualifiedName
        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.StatementContext
            super().__init__(parser)
            # Rule labels (`from_` has a trailing underscore: `from` is a keyword).
            self.from_ = None # QualifiedNameContext
            self.to = None # QualifiedNameContext
            self.copyFrom(ctx)
        def ALTER(self):
            return self.getToken(SqlBaseParser.ALTER, 0)
        def TABLE(self):
            return self.getToken(SqlBaseParser.TABLE, 0)
        def RENAME(self):
            return self.getToken(SqlBaseParser.RENAME, 0)
        def TO(self):
            return self.getToken(SqlBaseParser.TO, 0)
        def qualifiedName(self, i:int=None):
            # i=None returns both names (source and target); otherwise the i-th.
            if i is None:
                return self.getTypedRuleContexts(SqlBaseParser.QualifiedNameContext)
            else:
                return self.getTypedRuleContext(SqlBaseParser.QualifiedNameContext,i)
        def IF(self):
            return self.getToken(SqlBaseParser.IF, 0)
        def EXISTS(self):
            return self.getToken(SqlBaseParser.EXISTS, 0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterRenameTable" ):
                listener.enterRenameTable(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitRenameTable" ):
                listener.exitRenameTable(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitRenameTable" ):
                return visitor.visitRenameTable(self)
            else:
                return visitor.visitChildren(self)
    class CommitContext(StatementContext):
        # Labeled alternative #commit: COMMIT WORK?
        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.StatementContext
            super().__init__(parser)
            self.copyFrom(ctx)
        def COMMIT(self):
            return self.getToken(SqlBaseParser.COMMIT, 0)
        def WORK(self):
            # Optional WORK keyword; None when absent.
            return self.getToken(SqlBaseParser.WORK, 0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterCommit" ):
                listener.enterCommit(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitCommit" ):
                listener.exitCommit(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitCommit" ):
                return visitor.visitCommit(self)
            else:
                return visitor.visitChildren(self)
    class CreateRoleContext(StatementContext):
        # Labeled alternative #createRole:
        #   CREATE ROLE name (WITH ADMIN grantor)? (IN catalog)?
        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.StatementContext
            super().__init__(parser)
            # Rule labels populated during parsing.
            self.name = None # IdentifierContext
            self.catalog = None # IdentifierContext
            self.copyFrom(ctx)
        def CREATE(self):
            return self.getToken(SqlBaseParser.CREATE, 0)
        def ROLE(self):
            return self.getToken(SqlBaseParser.ROLE, 0)
        def identifier(self, i:int=None):
            # i=None returns all identifiers (role name and optional catalog).
            if i is None:
                return self.getTypedRuleContexts(SqlBaseParser.IdentifierContext)
            else:
                return self.getTypedRuleContext(SqlBaseParser.IdentifierContext,i)
        def WITH(self):
            return self.getToken(SqlBaseParser.WITH, 0)
        def ADMIN(self):
            return self.getToken(SqlBaseParser.ADMIN, 0)
        def grantor(self):
            return self.getTypedRuleContext(SqlBaseParser.GrantorContext,0)
        def IN(self):
            return self.getToken(SqlBaseParser.IN, 0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterCreateRole" ):
                listener.enterCreateRole(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitCreateRole" ):
                listener.exitCreateRole(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitCreateRole" ):
                return visitor.visitCreateRole(self)
            else:
                return visitor.visitChildren(self)
    class DropColumnContext(StatementContext):
        # Labeled alternative #dropColumn:
        #   ALTER TABLE (IF EXISTS)? tableName DROP COLUMN (IF EXISTS)? column
        # Both IF and EXISTS can occur twice, hence the indexed token accessors.
        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.StatementContext
            super().__init__(parser)
            # Rule labels populated during parsing.
            self.tableName = None # QualifiedNameContext
            self.column = None # QualifiedNameContext
            self.copyFrom(ctx)
        def ALTER(self):
            return self.getToken(SqlBaseParser.ALTER, 0)
        def TABLE(self):
            return self.getToken(SqlBaseParser.TABLE, 0)
        def DROP(self):
            return self.getToken(SqlBaseParser.DROP, 0)
        def COLUMN(self):
            return self.getToken(SqlBaseParser.COLUMN, 0)
        def qualifiedName(self, i:int=None):
            if i is None:
                return self.getTypedRuleContexts(SqlBaseParser.QualifiedNameContext)
            else:
                return self.getTypedRuleContext(SqlBaseParser.QualifiedNameContext,i)
        def IF(self, i:int=None):
            if i is None:
                return self.getTokens(SqlBaseParser.IF)
            else:
                return self.getToken(SqlBaseParser.IF, i)
        def EXISTS(self, i:int=None):
            if i is None:
                return self.getTokens(SqlBaseParser.EXISTS)
            else:
                return self.getToken(SqlBaseParser.EXISTS, i)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterDropColumn" ):
                listener.enterDropColumn(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitDropColumn" ):
                listener.exitDropColumn(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitDropColumn" ):
                return visitor.visitDropColumn(self)
            else:
                return visitor.visitChildren(self)
    class DropViewContext(StatementContext):
        # Labeled alternative #dropView: DROP VIEW (IF EXISTS)? qualifiedName
        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.StatementContext
            super().__init__(parser)
            self.copyFrom(ctx)
        def DROP(self):
            return self.getToken(SqlBaseParser.DROP, 0)
        def VIEW(self):
            return self.getToken(SqlBaseParser.VIEW, 0)
        def qualifiedName(self):
            return self.getTypedRuleContext(SqlBaseParser.QualifiedNameContext,0)
        def IF(self):
            # Present only when the optional IF EXISTS clause was used.
            return self.getToken(SqlBaseParser.IF, 0)
        def EXISTS(self):
            return self.getToken(SqlBaseParser.EXISTS, 0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterDropView" ):
                listener.enterDropView(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitDropView" ):
                listener.exitDropView(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitDropView" ):
                return visitor.visitDropView(self)
            else:
                return visitor.visitChildren(self)
    class ShowTablesContext(StatementContext):
        # Labeled alternative #showTables:
        #   SHOW TABLES ((FROM | IN) qualifiedName)?
        #   (LIKE pattern=string (ESCAPE escape=string)?)?
        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.StatementContext
            super().__init__(parser)
            # Rule labels populated during parsing.
            self.pattern = None # StringContext
            self.escape = None # StringContext
            self.copyFrom(ctx)
        def SHOW(self):
            return self.getToken(SqlBaseParser.SHOW, 0)
        def TABLES(self):
            return self.getToken(SqlBaseParser.TABLES, 0)
        def qualifiedName(self):
            return self.getTypedRuleContext(SqlBaseParser.QualifiedNameContext,0)
        def LIKE(self):
            return self.getToken(SqlBaseParser.LIKE, 0)
        def FROM(self):
            return self.getToken(SqlBaseParser.FROM, 0)
        def IN(self):
            return self.getToken(SqlBaseParser.IN, 0)
        def string(self, i:int=None):
            # i=None returns all string children (LIKE pattern and optional escape).
            if i is None:
                return self.getTypedRuleContexts(SqlBaseParser.StringContext)
            else:
                return self.getTypedRuleContext(SqlBaseParser.StringContext,i)
        def ESCAPE(self):
            return self.getToken(SqlBaseParser.ESCAPE, 0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterShowTables" ):
                listener.enterShowTables(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitShowTables" ):
                listener.exitShowTables(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitShowTables" ):
                return visitor.visitShowTables(self)
            else:
                return visitor.visitChildren(self)
    class SetViewAuthorizationContext(StatementContext):
        # Labeled alternative #setViewAuthorization:
        #   ALTER VIEW from=qualifiedName SET AUTHORIZATION principal
        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.StatementContext
            super().__init__(parser)
            # Rule label (`from_`: `from` is a Python keyword).
            self.from_ = None # QualifiedNameContext
            self.copyFrom(ctx)
        def ALTER(self):
            return self.getToken(SqlBaseParser.ALTER, 0)
        def VIEW(self):
            return self.getToken(SqlBaseParser.VIEW, 0)
        def SET(self):
            return self.getToken(SqlBaseParser.SET, 0)
        def AUTHORIZATION(self):
            return self.getToken(SqlBaseParser.AUTHORIZATION, 0)
        def principal(self):
            # New owner of the view.
            return self.getTypedRuleContext(SqlBaseParser.PrincipalContext,0)
        def qualifiedName(self):
            return self.getTypedRuleContext(SqlBaseParser.QualifiedNameContext,0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterSetViewAuthorization" ):
                listener.enterSetViewAuthorization(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitSetViewAuthorization" ):
                listener.exitSetViewAuthorization(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitSetViewAuthorization" ):
                return visitor.visitSetViewAuthorization(self)
            else:
                return visitor.visitChildren(self)
    class ShowCatalogsContext(StatementContext):
        """Generated ANTLR context for the 'ShowCatalogs' alternative of the
        statement rule (SHOW CATALOGS with optional LIKE/ESCAPE filter).
        """

        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.StatementContext
            super().__init__(parser)
            self.pattern = None # StringContext
            self.escape = None # StringContext
            self.copyFrom(ctx)
        def SHOW(self):
            return self.getToken(SqlBaseParser.SHOW, 0)
        def CATALOGS(self):
            return self.getToken(SqlBaseParser.CATALOGS, 0)
        def LIKE(self):
            return self.getToken(SqlBaseParser.LIKE, 0)
        def string(self, i:int=None):
            if i is None:
                return self.getTypedRuleContexts(SqlBaseParser.StringContext)
            else:
                return self.getTypedRuleContext(SqlBaseParser.StringContext,i)
        def ESCAPE(self):
            return self.getToken(SqlBaseParser.ESCAPE, 0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterShowCatalogs" ):
                listener.enterShowCatalogs(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitShowCatalogs" ):
                listener.exitShowCatalogs(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitShowCatalogs" ):
                return visitor.visitShowCatalogs(self)
            else:
                return visitor.visitChildren(self)
    class ShowRolesContext(StatementContext):
        """Generated ANTLR context for the 'ShowRoles' alternative of the
        statement rule (SHOW [CURRENT] ROLES with optional FROM/IN catalog).
        """

        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.StatementContext
            super().__init__(parser)
            self.copyFrom(ctx)
        def SHOW(self):
            return self.getToken(SqlBaseParser.SHOW, 0)
        def ROLES(self):
            return self.getToken(SqlBaseParser.ROLES, 0)
        def CURRENT(self):
            return self.getToken(SqlBaseParser.CURRENT, 0)
        def identifier(self):
            return self.getTypedRuleContext(SqlBaseParser.IdentifierContext,0)
        def FROM(self):
            return self.getToken(SqlBaseParser.FROM, 0)
        def IN(self):
            return self.getToken(SqlBaseParser.IN, 0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterShowRoles" ):
                listener.enterShowRoles(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitShowRoles" ):
                listener.exitShowRoles(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitShowRoles" ):
                return visitor.visitShowRoles(self)
            else:
                return visitor.visitChildren(self)
    class MergeContext(StatementContext):
        """Generated ANTLR context for the 'Merge' alternative of the statement
        rule (MERGE INTO target [AS alias] USING relation ON condition mergeCase...).
        """

        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.StatementContext
            super().__init__(parser)
            self.copyFrom(ctx)
        def MERGE(self):
            return self.getToken(SqlBaseParser.MERGE, 0)
        def INTO(self):
            return self.getToken(SqlBaseParser.INTO, 0)
        def qualifiedName(self):
            return self.getTypedRuleContext(SqlBaseParser.QualifiedNameContext,0)
        def USING(self):
            return self.getToken(SqlBaseParser.USING, 0)
        def relation(self):
            return self.getTypedRuleContext(SqlBaseParser.RelationContext,0)
        def ON(self):
            return self.getToken(SqlBaseParser.ON, 0)
        def expression(self):
            return self.getTypedRuleContext(SqlBaseParser.ExpressionContext,0)
        def identifier(self):
            return self.getTypedRuleContext(SqlBaseParser.IdentifierContext,0)
        def mergeCase(self, i:int=None):
            if i is None:
                return self.getTypedRuleContexts(SqlBaseParser.MergeCaseContext)
            else:
                return self.getTypedRuleContext(SqlBaseParser.MergeCaseContext,i)
        def AS(self):
            return self.getToken(SqlBaseParser.AS, 0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterMerge" ):
                listener.enterMerge(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitMerge" ):
                listener.exitMerge(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitMerge" ):
                return visitor.visitMerge(self)
            else:
                return visitor.visitChildren(self)
    class RenameColumnContext(StatementContext):
        """Generated ANTLR context for the 'RenameColumn' alternative of the
        statement rule (ALTER TABLE ... RENAME COLUMN from TO to, with optional
        IF EXISTS clauses).
        """

        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.StatementContext
            super().__init__(parser)
            self.tableName = None # QualifiedNameContext
            self.from_ = None # IdentifierContext
            self.to = None # IdentifierContext
            self.copyFrom(ctx)
        def ALTER(self):
            return self.getToken(SqlBaseParser.ALTER, 0)
        def TABLE(self):
            return self.getToken(SqlBaseParser.TABLE, 0)
        def RENAME(self):
            return self.getToken(SqlBaseParser.RENAME, 0)
        def COLUMN(self):
            return self.getToken(SqlBaseParser.COLUMN, 0)
        def TO(self):
            return self.getToken(SqlBaseParser.TO, 0)
        def qualifiedName(self):
            return self.getTypedRuleContext(SqlBaseParser.QualifiedNameContext,0)
        def identifier(self, i:int=None):
            if i is None:
                return self.getTypedRuleContexts(SqlBaseParser.IdentifierContext)
            else:
                return self.getTypedRuleContext(SqlBaseParser.IdentifierContext,i)
        def IF(self, i:int=None):
            if i is None:
                return self.getTokens(SqlBaseParser.IF)
            else:
                return self.getToken(SqlBaseParser.IF, i)
        def EXISTS(self, i:int=None):
            if i is None:
                return self.getTokens(SqlBaseParser.EXISTS)
            else:
                return self.getToken(SqlBaseParser.EXISTS, i)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterRenameColumn" ):
                listener.enterRenameColumn(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitRenameColumn" ):
                listener.exitRenameColumn(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitRenameColumn" ):
                return visitor.visitRenameColumn(self)
            else:
                return visitor.visitChildren(self)
    class CommentColumnContext(StatementContext):
        """Generated ANTLR context for the 'CommentColumn' alternative of the
        statement rule (COMMENT ON COLUMN ... IS string | NULL).
        """

        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.StatementContext
            super().__init__(parser)
            self.copyFrom(ctx)
        def COMMENT(self):
            return self.getToken(SqlBaseParser.COMMENT, 0)
        def ON(self):
            return self.getToken(SqlBaseParser.ON, 0)
        def COLUMN(self):
            return self.getToken(SqlBaseParser.COLUMN, 0)
        def qualifiedName(self):
            return self.getTypedRuleContext(SqlBaseParser.QualifiedNameContext,0)
        def IS(self):
            return self.getToken(SqlBaseParser.IS, 0)
        def string(self):
            return self.getTypedRuleContext(SqlBaseParser.StringContext,0)
        def NULL(self):
            return self.getToken(SqlBaseParser.NULL, 0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterCommentColumn" ):
                listener.enterCommentColumn(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitCommentColumn" ):
                listener.exitCommentColumn(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitCommentColumn" ):
                return visitor.visitCommentColumn(self)
            else:
                return visitor.visitChildren(self)
    class RevokeRolesContext(StatementContext):
        """Generated ANTLR context for the 'RevokeRoles' alternative of the
        statement rule (REVOKE [ADMIN OPTION FOR] roles FROM principals
        [GRANTED BY grantor] [IN catalog]).
        """

        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.StatementContext
            super().__init__(parser)
            self.catalog = None # IdentifierContext
            self.copyFrom(ctx)
        def REVOKE(self):
            return self.getToken(SqlBaseParser.REVOKE, 0)
        def roles(self):
            return self.getTypedRuleContext(SqlBaseParser.RolesContext,0)
        def FROM(self):
            return self.getToken(SqlBaseParser.FROM, 0)
        def principal(self, i:int=None):
            if i is None:
                return self.getTypedRuleContexts(SqlBaseParser.PrincipalContext)
            else:
                return self.getTypedRuleContext(SqlBaseParser.PrincipalContext,i)
        def ADMIN(self):
            return self.getToken(SqlBaseParser.ADMIN, 0)
        def OPTION(self):
            return self.getToken(SqlBaseParser.OPTION, 0)
        def FOR(self):
            return self.getToken(SqlBaseParser.FOR, 0)
        def GRANTED(self):
            return self.getToken(SqlBaseParser.GRANTED, 0)
        def BY(self):
            return self.getToken(SqlBaseParser.BY, 0)
        def grantor(self):
            return self.getTypedRuleContext(SqlBaseParser.GrantorContext,0)
        def IN(self):
            return self.getToken(SqlBaseParser.IN, 0)
        def identifier(self):
            return self.getTypedRuleContext(SqlBaseParser.IdentifierContext,0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterRevokeRoles" ):
                listener.enterRevokeRoles(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitRevokeRoles" ):
                listener.exitRevokeRoles(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitRevokeRoles" ):
                return visitor.visitRevokeRoles(self)
            else:
                return visitor.visitChildren(self)
    class ShowCreateTableContext(StatementContext):
        """Generated ANTLR context for the 'ShowCreateTable' alternative of the
        statement rule (SHOW CREATE TABLE qualifiedName).
        """

        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.StatementContext
            super().__init__(parser)
            self.copyFrom(ctx)
        def SHOW(self):
            return self.getToken(SqlBaseParser.SHOW, 0)
        def CREATE(self):
            return self.getToken(SqlBaseParser.CREATE, 0)
        def TABLE(self):
            return self.getToken(SqlBaseParser.TABLE, 0)
        def qualifiedName(self):
            return self.getTypedRuleContext(SqlBaseParser.QualifiedNameContext,0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterShowCreateTable" ):
                listener.enterShowCreateTable(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitShowCreateTable" ):
                listener.exitShowCreateTable(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitShowCreateTable" ):
                return visitor.visitShowCreateTable(self)
            else:
                return visitor.visitChildren(self)
    class ShowColumnsContext(StatementContext):
        """Generated ANTLR context for the 'ShowColumns' alternative of the
        statement rule (SHOW COLUMNS / DESCRIBE / DESC of a table, with optional
        LIKE/ESCAPE filter).
        """

        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.StatementContext
            super().__init__(parser)
            self.pattern = None # StringContext
            self.escape = None # StringContext
            self.copyFrom(ctx)
        def SHOW(self):
            return self.getToken(SqlBaseParser.SHOW, 0)
        def COLUMNS(self):
            return self.getToken(SqlBaseParser.COLUMNS, 0)
        def FROM(self):
            return self.getToken(SqlBaseParser.FROM, 0)
        def IN(self):
            return self.getToken(SqlBaseParser.IN, 0)
        def qualifiedName(self):
            return self.getTypedRuleContext(SqlBaseParser.QualifiedNameContext,0)
        def LIKE(self):
            return self.getToken(SqlBaseParser.LIKE, 0)
        def string(self, i:int=None):
            if i is None:
                return self.getTypedRuleContexts(SqlBaseParser.StringContext)
            else:
                return self.getTypedRuleContext(SqlBaseParser.StringContext,i)
        def ESCAPE(self):
            return self.getToken(SqlBaseParser.ESCAPE, 0)
        def DESCRIBE(self):
            return self.getToken(SqlBaseParser.DESCRIBE, 0)
        def DESC(self):
            return self.getToken(SqlBaseParser.DESC, 0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterShowColumns" ):
                listener.enterShowColumns(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitShowColumns" ):
                listener.exitShowColumns(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitShowColumns" ):
                return visitor.visitShowColumns(self)
            else:
                return visitor.visitChildren(self)
    class ShowRoleGrantsContext(StatementContext):
        """Generated ANTLR context for the 'ShowRoleGrants' alternative of the
        statement rule (SHOW ROLE GRANTS with optional FROM/IN catalog).
        """

        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.StatementContext
            super().__init__(parser)
            self.copyFrom(ctx)
        def SHOW(self):
            return self.getToken(SqlBaseParser.SHOW, 0)
        def ROLE(self):
            return self.getToken(SqlBaseParser.ROLE, 0)
        def GRANTS(self):
            return self.getToken(SqlBaseParser.GRANTS, 0)
        def identifier(self):
            return self.getTypedRuleContext(SqlBaseParser.IdentifierContext,0)
        def FROM(self):
            return self.getToken(SqlBaseParser.FROM, 0)
        def IN(self):
            return self.getToken(SqlBaseParser.IN, 0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterShowRoleGrants" ):
                listener.enterShowRoleGrants(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitShowRoleGrants" ):
                listener.exitShowRoleGrants(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitShowRoleGrants" ):
                return visitor.visitShowRoleGrants(self)
            else:
                return visitor.visitChildren(self)
    class AddColumnContext(StatementContext):
        """Generated ANTLR context for the 'AddColumn' alternative of the
        statement rule (ALTER TABLE ... ADD COLUMN columnDefinition, with
        optional IF [NOT] EXISTS clauses).
        """

        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.StatementContext
            super().__init__(parser)
            self.tableName = None # QualifiedNameContext
            self.column = None # ColumnDefinitionContext
            self.copyFrom(ctx)
        def ALTER(self):
            return self.getToken(SqlBaseParser.ALTER, 0)
        def TABLE(self):
            return self.getToken(SqlBaseParser.TABLE, 0)
        def ADD(self):
            return self.getToken(SqlBaseParser.ADD, 0)
        def COLUMN(self):
            return self.getToken(SqlBaseParser.COLUMN, 0)
        def qualifiedName(self):
            return self.getTypedRuleContext(SqlBaseParser.QualifiedNameContext,0)
        def columnDefinition(self):
            return self.getTypedRuleContext(SqlBaseParser.ColumnDefinitionContext,0)
        def IF(self, i:int=None):
            if i is None:
                return self.getTokens(SqlBaseParser.IF)
            else:
                return self.getToken(SqlBaseParser.IF, i)
        def EXISTS(self, i:int=None):
            if i is None:
                return self.getTokens(SqlBaseParser.EXISTS)
            else:
                return self.getToken(SqlBaseParser.EXISTS, i)
        def NOT(self):
            return self.getToken(SqlBaseParser.NOT, 0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterAddColumn" ):
                listener.enterAddColumn(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitAddColumn" ):
                listener.exitAddColumn(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitAddColumn" ):
                return visitor.visitAddColumn(self)
            else:
                return visitor.visitChildren(self)
    class DenyContext(StatementContext):
        """Generated ANTLR context for the 'Deny' alternative of the statement
        rule (DENY privileges | ALL PRIVILEGES ON [SCHEMA|TABLE] name TO grantee).
        """

        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.StatementContext
            super().__init__(parser)
            self.grantee = None # PrincipalContext
            self.copyFrom(ctx)
        def DENY(self):
            return self.getToken(SqlBaseParser.DENY, 0)
        def ON(self):
            return self.getToken(SqlBaseParser.ON, 0)
        def qualifiedName(self):
            return self.getTypedRuleContext(SqlBaseParser.QualifiedNameContext,0)
        def TO(self):
            return self.getToken(SqlBaseParser.TO, 0)
        def principal(self):
            return self.getTypedRuleContext(SqlBaseParser.PrincipalContext,0)
        def privilege(self, i:int=None):
            if i is None:
                return self.getTypedRuleContexts(SqlBaseParser.PrivilegeContext)
            else:
                return self.getTypedRuleContext(SqlBaseParser.PrivilegeContext,i)
        def ALL(self):
            return self.getToken(SqlBaseParser.ALL, 0)
        def PRIVILEGES(self):
            return self.getToken(SqlBaseParser.PRIVILEGES, 0)
        def SCHEMA(self):
            return self.getToken(SqlBaseParser.SCHEMA, 0)
        def TABLE(self):
            return self.getToken(SqlBaseParser.TABLE, 0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterDeny" ):
                listener.enterDeny(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitDeny" ):
                listener.exitDeny(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitDeny" ):
                return visitor.visitDeny(self)
            else:
                return visitor.visitChildren(self)
    class ResetSessionContext(StatementContext):
        """Generated ANTLR context for the 'ResetSession' alternative of the
        statement rule (RESET SESSION qualifiedName).
        """

        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.StatementContext
            super().__init__(parser)
            self.copyFrom(ctx)
        def RESET(self):
            return self.getToken(SqlBaseParser.RESET, 0)
        def SESSION(self):
            return self.getToken(SqlBaseParser.SESSION, 0)
        def qualifiedName(self):
            return self.getTypedRuleContext(SqlBaseParser.QualifiedNameContext,0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterResetSession" ):
                listener.enterResetSession(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitResetSession" ):
                listener.exitResetSession(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitResetSession" ):
                return visitor.visitResetSession(self)
            else:
                return visitor.visitChildren(self)
    class InsertIntoContext(StatementContext):
        """Generated ANTLR context for the 'InsertInto' alternative of the
        statement rule (INSERT INTO table [columnAliases] query).
        """

        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.StatementContext
            super().__init__(parser)
            self.copyFrom(ctx)
        def INSERT(self):
            return self.getToken(SqlBaseParser.INSERT, 0)
        def INTO(self):
            return self.getToken(SqlBaseParser.INTO, 0)
        def qualifiedName(self):
            return self.getTypedRuleContext(SqlBaseParser.QualifiedNameContext,0)
        def query(self):
            return self.getTypedRuleContext(SqlBaseParser.QueryContext,0)
        def columnAliases(self):
            return self.getTypedRuleContext(SqlBaseParser.ColumnAliasesContext,0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterInsertInto" ):
                listener.enterInsertInto(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitInsertInto" ):
                listener.exitInsertInto(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitInsertInto" ):
                return visitor.visitInsertInto(self)
            else:
                return visitor.visitChildren(self)
    class ShowSessionContext(StatementContext):
        """Generated ANTLR context for the 'ShowSession' alternative of the
        statement rule (SHOW SESSION with optional LIKE/ESCAPE filter).
        """

        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.StatementContext
            super().__init__(parser)
            self.pattern = None # StringContext
            self.escape = None # StringContext
            self.copyFrom(ctx)
        def SHOW(self):
            return self.getToken(SqlBaseParser.SHOW, 0)
        def SESSION(self):
            return self.getToken(SqlBaseParser.SESSION, 0)
        def LIKE(self):
            return self.getToken(SqlBaseParser.LIKE, 0)
        def string(self, i:int=None):
            if i is None:
                return self.getTypedRuleContexts(SqlBaseParser.StringContext)
            else:
                return self.getTypedRuleContext(SqlBaseParser.StringContext,i)
        def ESCAPE(self):
            return self.getToken(SqlBaseParser.ESCAPE, 0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterShowSession" ):
                listener.enterShowSession(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitShowSession" ):
                listener.exitShowSession(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitShowSession" ):
                return visitor.visitShowSession(self)
            else:
                return visitor.visitChildren(self)
    class CreateSchemaContext(StatementContext):
        """Generated ANTLR context for the 'CreateSchema' alternative of the
        statement rule (CREATE SCHEMA [IF NOT EXISTS] name
        [AUTHORIZATION principal] [WITH properties]).
        """

        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.StatementContext
            super().__init__(parser)
            self.copyFrom(ctx)
        def CREATE(self):
            return self.getToken(SqlBaseParser.CREATE, 0)
        def SCHEMA(self):
            return self.getToken(SqlBaseParser.SCHEMA, 0)
        def qualifiedName(self):
            return self.getTypedRuleContext(SqlBaseParser.QualifiedNameContext,0)
        def IF(self):
            return self.getToken(SqlBaseParser.IF, 0)
        def NOT(self):
            return self.getToken(SqlBaseParser.NOT, 0)
        def EXISTS(self):
            return self.getToken(SqlBaseParser.EXISTS, 0)
        def AUTHORIZATION(self):
            return self.getToken(SqlBaseParser.AUTHORIZATION, 0)
        def principal(self):
            return self.getTypedRuleContext(SqlBaseParser.PrincipalContext,0)
        def WITH(self):
            return self.getToken(SqlBaseParser.WITH, 0)
        def properties(self):
            return self.getTypedRuleContext(SqlBaseParser.PropertiesContext,0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterCreateSchema" ):
                listener.enterCreateSchema(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitCreateSchema" ):
                listener.exitCreateSchema(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitCreateSchema" ):
                return visitor.visitCreateSchema(self)
            else:
                return visitor.visitChildren(self)
    class ExplainAnalyzeContext(StatementContext):
        """Generated ANTLR context for the 'ExplainAnalyze' alternative of the
        statement rule (EXPLAIN ANALYZE [VERBOSE] statement).
        """

        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.StatementContext
            super().__init__(parser)
            self.copyFrom(ctx)
        def EXPLAIN(self):
            return self.getToken(SqlBaseParser.EXPLAIN, 0)
        def ANALYZE(self):
            return self.getToken(SqlBaseParser.ANALYZE, 0)
        def statement(self):
            return self.getTypedRuleContext(SqlBaseParser.StatementContext,0)
        def VERBOSE(self):
            return self.getToken(SqlBaseParser.VERBOSE, 0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterExplainAnalyze" ):
                listener.enterExplainAnalyze(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitExplainAnalyze" ):
                listener.exitExplainAnalyze(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitExplainAnalyze" ):
                return visitor.visitExplainAnalyze(self)
            else:
                return visitor.visitChildren(self)
    class ExecuteContext(StatementContext):
        """Generated ANTLR context for the 'Execute' alternative of the
        statement rule (EXECUTE name [USING expression, ...]).
        """

        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.StatementContext
            super().__init__(parser)
            self.copyFrom(ctx)
        def EXECUTE(self):
            return self.getToken(SqlBaseParser.EXECUTE, 0)
        def identifier(self):
            return self.getTypedRuleContext(SqlBaseParser.IdentifierContext,0)
        def USING(self):
            return self.getToken(SqlBaseParser.USING, 0)
        def expression(self, i:int=None):
            if i is None:
                return self.getTypedRuleContexts(SqlBaseParser.ExpressionContext)
            else:
                return self.getTypedRuleContext(SqlBaseParser.ExpressionContext,i)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterExecute" ):
                listener.enterExecute(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitExecute" ):
                listener.exitExecute(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitExecute" ):
                return visitor.visitExecute(self)
            else:
                return visitor.visitChildren(self)
    class RenameSchemaContext(StatementContext):
        """Generated ANTLR context for the 'RenameSchema' alternative of the
        statement rule (ALTER SCHEMA name RENAME TO identifier).
        """

        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.StatementContext
            super().__init__(parser)
            self.copyFrom(ctx)
        def ALTER(self):
            return self.getToken(SqlBaseParser.ALTER, 0)
        def SCHEMA(self):
            return self.getToken(SqlBaseParser.SCHEMA, 0)
        def qualifiedName(self):
            return self.getTypedRuleContext(SqlBaseParser.QualifiedNameContext,0)
        def RENAME(self):
            return self.getToken(SqlBaseParser.RENAME, 0)
        def TO(self):
            return self.getToken(SqlBaseParser.TO, 0)
        def identifier(self):
            return self.getTypedRuleContext(SqlBaseParser.IdentifierContext,0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterRenameSchema" ):
                listener.enterRenameSchema(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitRenameSchema" ):
                listener.exitRenameSchema(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitRenameSchema" ):
                return visitor.visitRenameSchema(self)
            else:
                return visitor.visitChildren(self)
    class DropRoleContext(StatementContext):
        """Generated ANTLR context for the 'DropRole' alternative of the
        statement rule (DROP ROLE name [IN catalog]).
        """

        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.StatementContext
            super().__init__(parser)
            self.name = None # IdentifierContext
            self.catalog = None # IdentifierContext
            self.copyFrom(ctx)
        def DROP(self):
            return self.getToken(SqlBaseParser.DROP, 0)
        def ROLE(self):
            return self.getToken(SqlBaseParser.ROLE, 0)
        def identifier(self, i:int=None):
            if i is None:
                return self.getTypedRuleContexts(SqlBaseParser.IdentifierContext)
            else:
                return self.getTypedRuleContext(SqlBaseParser.IdentifierContext,i)
        def IN(self):
            return self.getToken(SqlBaseParser.IN, 0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterDropRole" ):
                listener.enterDropRole(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitDropRole" ):
                listener.exitDropRole(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitDropRole" ):
                return visitor.visitDropRole(self)
            else:
                return visitor.visitChildren(self)
    class AnalyzeContext(StatementContext):
        """Generated ANTLR context for the 'Analyze' alternative of the
        statement rule (ANALYZE table [WITH properties]).
        """

        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.StatementContext
            super().__init__(parser)
            self.copyFrom(ctx)
        def ANALYZE(self):
            return self.getToken(SqlBaseParser.ANALYZE, 0)
        def qualifiedName(self):
            return self.getTypedRuleContext(SqlBaseParser.QualifiedNameContext,0)
        def WITH(self):
            return self.getToken(SqlBaseParser.WITH, 0)
        def properties(self):
            return self.getTypedRuleContext(SqlBaseParser.PropertiesContext,0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterAnalyze" ):
                listener.enterAnalyze(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitAnalyze" ):
                listener.exitAnalyze(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitAnalyze" ):
                return visitor.visitAnalyze(self)
            else:
                return visitor.visitChildren(self)
    class SetRoleContext(StatementContext):
        """Generated ANTLR context for the 'SetRole' alternative of the
        statement rule (SET ROLE ALL | NONE | role [IN catalog]).
        """

        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.StatementContext
            super().__init__(parser)
            self.role = None # IdentifierContext
            self.catalog = None # IdentifierContext
            self.copyFrom(ctx)
        def SET(self):
            return self.getToken(SqlBaseParser.SET, 0)
        def ROLE(self):
            return self.getToken(SqlBaseParser.ROLE, 0)
        def ALL(self):
            return self.getToken(SqlBaseParser.ALL, 0)
        def NONE(self):
            return self.getToken(SqlBaseParser.NONE, 0)
        def identifier(self, i:int=None):
            if i is None:
                return self.getTypedRuleContexts(SqlBaseParser.IdentifierContext)
            else:
                return self.getTypedRuleContext(SqlBaseParser.IdentifierContext,i)
        def IN(self):
            return self.getToken(SqlBaseParser.IN, 0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterSetRole" ):
                listener.enterSetRole(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitSetRole" ):
                listener.exitSetRole(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitSetRole" ):
                return visitor.visitSetRole(self)
            else:
                return visitor.visitChildren(self)
    class ShowGrantsContext(StatementContext):
        """Generated ANTLR context for the 'ShowGrants' alternative of the
        statement rule (SHOW GRANTS [ON [TABLE] qualifiedName]).
        """

        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.StatementContext
            super().__init__(parser)
            self.copyFrom(ctx)
        def SHOW(self):
            return self.getToken(SqlBaseParser.SHOW, 0)
        def GRANTS(self):
            return self.getToken(SqlBaseParser.GRANTS, 0)
        def ON(self):
            return self.getToken(SqlBaseParser.ON, 0)
        def qualifiedName(self):
            return self.getTypedRuleContext(SqlBaseParser.QualifiedNameContext,0)
        def TABLE(self):
            return self.getToken(SqlBaseParser.TABLE, 0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterShowGrants" ):
                listener.enterShowGrants(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitShowGrants" ):
                listener.exitShowGrants(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitShowGrants" ):
                return visitor.visitShowGrants(self)
            else:
                return visitor.visitChildren(self)
    class DropSchemaContext(StatementContext):
        """Generated ANTLR context for the 'DropSchema' alternative of the
        statement rule (DROP SCHEMA [IF EXISTS] name [CASCADE | RESTRICT]).
        """

        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.StatementContext
            super().__init__(parser)
            self.copyFrom(ctx)
        def DROP(self):
            return self.getToken(SqlBaseParser.DROP, 0)
        def SCHEMA(self):
            return self.getToken(SqlBaseParser.SCHEMA, 0)
        def qualifiedName(self):
            return self.getTypedRuleContext(SqlBaseParser.QualifiedNameContext,0)
        def IF(self):
            return self.getToken(SqlBaseParser.IF, 0)
        def EXISTS(self):
            return self.getToken(SqlBaseParser.EXISTS, 0)
        def CASCADE(self):
            return self.getToken(SqlBaseParser.CASCADE, 0)
        def RESTRICT(self):
            return self.getToken(SqlBaseParser.RESTRICT, 0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterDropSchema" ):
                listener.enterDropSchema(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitDropSchema" ):
                listener.exitDropSchema(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitDropSchema" ):
                return visitor.visitDropSchema(self)
            else:
                return visitor.visitChildren(self)
    class SetTableAuthorizationContext(StatementContext):
        """Generated ANTLR context for the 'SetTableAuthorization' alternative of
        the statement rule (ALTER TABLE name SET AUTHORIZATION principal).
        """

        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.StatementContext
            super().__init__(parser)
            self.tableName = None # QualifiedNameContext
            self.copyFrom(ctx)
        def ALTER(self):
            return self.getToken(SqlBaseParser.ALTER, 0)
        def TABLE(self):
            return self.getToken(SqlBaseParser.TABLE, 0)
        def SET(self):
            return self.getToken(SqlBaseParser.SET, 0)
        def AUTHORIZATION(self):
            return self.getToken(SqlBaseParser.AUTHORIZATION, 0)
        def principal(self):
            return self.getTypedRuleContext(SqlBaseParser.PrincipalContext,0)
        def qualifiedName(self):
            return self.getTypedRuleContext(SqlBaseParser.QualifiedNameContext,0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterSetTableAuthorization" ):
                listener.enterSetTableAuthorization(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitSetTableAuthorization" ):
                listener.exitSetTableAuthorization(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitSetTableAuthorization" ):
                return visitor.visitSetTableAuthorization(self)
            else:
                return visitor.visitChildren(self)
    class ShowCreateViewContext(StatementContext):
        """Generated ANTLR context for the 'ShowCreateView' alternative of the
        statement rule (SHOW CREATE VIEW qualifiedName).
        """

        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.StatementContext
            super().__init__(parser)
            self.copyFrom(ctx)
        def SHOW(self):
            return self.getToken(SqlBaseParser.SHOW, 0)
        def CREATE(self):
            return self.getToken(SqlBaseParser.CREATE, 0)
        def VIEW(self):
            return self.getToken(SqlBaseParser.VIEW, 0)
        def qualifiedName(self):
            return self.getTypedRuleContext(SqlBaseParser.QualifiedNameContext,0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterShowCreateView" ):
                listener.enterShowCreateView(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitShowCreateView" ):
                listener.exitShowCreateView(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitShowCreateView" ):
                return visitor.visitShowCreateView(self)
            else:
                return visitor.visitChildren(self)
    class CreateTableContext(StatementContext):
        """Generated ANTLR context for the 'CreateTable' alternative of the
        statement rule (CREATE TABLE [IF NOT EXISTS] name (tableElement, ...)
        [COMMENT string] [WITH properties]).
        """

        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.StatementContext
            super().__init__(parser)
            self.copyFrom(ctx)
        def CREATE(self):
            return self.getToken(SqlBaseParser.CREATE, 0)
        def TABLE(self):
            return self.getToken(SqlBaseParser.TABLE, 0)
        def qualifiedName(self):
            return self.getTypedRuleContext(SqlBaseParser.QualifiedNameContext,0)
        def tableElement(self, i:int=None):
            if i is None:
                return self.getTypedRuleContexts(SqlBaseParser.TableElementContext)
            else:
                return self.getTypedRuleContext(SqlBaseParser.TableElementContext,i)
        def IF(self):
            return self.getToken(SqlBaseParser.IF, 0)
        def NOT(self):
            return self.getToken(SqlBaseParser.NOT, 0)
        def EXISTS(self):
            return self.getToken(SqlBaseParser.EXISTS, 0)
        def COMMENT(self):
            return self.getToken(SqlBaseParser.COMMENT, 0)
        def string(self):
            return self.getTypedRuleContext(SqlBaseParser.StringContext,0)
        def WITH(self):
            return self.getToken(SqlBaseParser.WITH, 0)
        def properties(self):
            return self.getTypedRuleContext(SqlBaseParser.PropertiesContext,0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterCreateTable" ):
                listener.enterCreateTable(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitCreateTable" ):
                listener.exitCreateTable(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitCreateTable" ):
                return visitor.visitCreateTable(self)
            else:
                return visitor.visitChildren(self)
    class StartTransactionContext(StatementContext):
        """Generated ANTLR context for the 'StartTransaction' alternative of the
        statement rule (START TRANSACTION [transactionMode, ...]).
        """

        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.StatementContext
            super().__init__(parser)
            self.copyFrom(ctx)
        def START(self):
            return self.getToken(SqlBaseParser.START, 0)
        def TRANSACTION(self):
            return self.getToken(SqlBaseParser.TRANSACTION, 0)
        def transactionMode(self, i:int=None):
            if i is None:
                return self.getTypedRuleContexts(SqlBaseParser.TransactionModeContext)
            else:
                return self.getTypedRuleContext(SqlBaseParser.TransactionModeContext,i)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterStartTransaction" ):
                listener.enterStartTransaction(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitStartTransaction" ):
                listener.exitStartTransaction(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitStartTransaction" ):
                return visitor.visitStartTransaction(self)
            else:
                return visitor.visitChildren(self)
    class CreateTableAsSelectContext(StatementContext):
        """Generated ANTLR context for the 'CreateTableAsSelect' alternative of
        the statement rule (CREATE TABLE [IF NOT EXISTS] name [columnAliases]
        [COMMENT string] [WITH properties] AS query [WITH [NO] DATA]).
        """

        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.StatementContext
            super().__init__(parser)
            self.copyFrom(ctx)
        def CREATE(self):
            return self.getToken(SqlBaseParser.CREATE, 0)
        def TABLE(self):
            return self.getToken(SqlBaseParser.TABLE, 0)
        def qualifiedName(self):
            return self.getTypedRuleContext(SqlBaseParser.QualifiedNameContext,0)
        def AS(self):
            return self.getToken(SqlBaseParser.AS, 0)
        def query(self):
            return self.getTypedRuleContext(SqlBaseParser.QueryContext,0)
        def IF(self):
            return self.getToken(SqlBaseParser.IF, 0)
        def NOT(self):
            return self.getToken(SqlBaseParser.NOT, 0)
        def EXISTS(self):
            return self.getToken(SqlBaseParser.EXISTS, 0)
        def columnAliases(self):
            return self.getTypedRuleContext(SqlBaseParser.ColumnAliasesContext,0)
        def COMMENT(self):
            return self.getToken(SqlBaseParser.COMMENT, 0)
        def string(self):
            return self.getTypedRuleContext(SqlBaseParser.StringContext,0)
        def WITH(self, i:int=None):
            if i is None:
                return self.getTokens(SqlBaseParser.WITH)
            else:
                return self.getToken(SqlBaseParser.WITH, i)
        def properties(self):
            return self.getTypedRuleContext(SqlBaseParser.PropertiesContext,0)
        def DATA(self):
            return self.getToken(SqlBaseParser.DATA, 0)
        def NO(self):
            return self.getToken(SqlBaseParser.NO, 0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterCreateTableAsSelect" ):
                listener.enterCreateTableAsSelect(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitCreateTableAsSelect" ):
                listener.exitCreateTableAsSelect(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitCreateTableAsSelect" ):
                return visitor.visitCreateTableAsSelect(self)
            else:
                return visitor.visitChildren(self)
    class ShowStatsContext(StatementContext):
        """Parse-tree context for SHOW STATS FOR qualifiedName (table form)."""
        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.StatementContext
            super().__init__(parser)
            self.copyFrom(ctx)
        def SHOW(self):
            return self.getToken(SqlBaseParser.SHOW, 0)
        def STATS(self):
            return self.getToken(SqlBaseParser.STATS, 0)
        def FOR(self):
            return self.getToken(SqlBaseParser.FOR, 0)
        def qualifiedName(self):
            return self.getTypedRuleContext(SqlBaseParser.QualifiedNameContext,0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterShowStats" ):
                listener.enterShowStats(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitShowStats" ):
                listener.exitShowStats(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitShowStats" ):
                return visitor.visitShowStats(self)
            else:
                return visitor.visitChildren(self)
    class ShowCreateSchemaContext(StatementContext):
        """Parse-tree context for SHOW CREATE SCHEMA qualifiedName."""
        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.StatementContext
            super().__init__(parser)
            self.copyFrom(ctx)
        def SHOW(self):
            return self.getToken(SqlBaseParser.SHOW, 0)
        def CREATE(self):
            return self.getToken(SqlBaseParser.CREATE, 0)
        def SCHEMA(self):
            return self.getToken(SqlBaseParser.SCHEMA, 0)
        def qualifiedName(self):
            return self.getTypedRuleContext(SqlBaseParser.QualifiedNameContext,0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterShowCreateSchema" ):
                listener.enterShowCreateSchema(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitShowCreateSchema" ):
                listener.exitShowCreateSchema(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitShowCreateSchema" ):
                return visitor.visitShowCreateSchema(self)
            else:
                return visitor.visitChildren(self)
    class RevokeContext(StatementContext):
        """Parse-tree context for REVOKE [GRANT OPTION FOR] privileges ON [SCHEMA|TABLE] name FROM principal; `grantee` holds the FROM principal."""
        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.StatementContext
            super().__init__(parser)
            self.grantee = None # PrincipalContext
            self.copyFrom(ctx)
        def REVOKE(self):
            return self.getToken(SqlBaseParser.REVOKE, 0)
        def ON(self):
            return self.getToken(SqlBaseParser.ON, 0)
        def qualifiedName(self):
            return self.getTypedRuleContext(SqlBaseParser.QualifiedNameContext,0)
        def FROM(self):
            return self.getToken(SqlBaseParser.FROM, 0)
        def principal(self):
            return self.getTypedRuleContext(SqlBaseParser.PrincipalContext,0)
        def privilege(self, i:int=None):
            # With no index, return all privilege children; otherwise the i-th one.
            if i is None:
                return self.getTypedRuleContexts(SqlBaseParser.PrivilegeContext)
            else:
                return self.getTypedRuleContext(SqlBaseParser.PrivilegeContext,i)
        def ALL(self):
            return self.getToken(SqlBaseParser.ALL, 0)
        def PRIVILEGES(self):
            return self.getToken(SqlBaseParser.PRIVILEGES, 0)
        def GRANT(self):
            return self.getToken(SqlBaseParser.GRANT, 0)
        def OPTION(self):
            return self.getToken(SqlBaseParser.OPTION, 0)
        def FOR(self):
            return self.getToken(SqlBaseParser.FOR, 0)
        def SCHEMA(self):
            return self.getToken(SqlBaseParser.SCHEMA, 0)
        def TABLE(self):
            return self.getToken(SqlBaseParser.TABLE, 0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterRevoke" ):
                listener.enterRevoke(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitRevoke" ):
                listener.exitRevoke(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitRevoke" ):
                return visitor.visitRevoke(self)
            else:
                return visitor.visitChildren(self)
    class UpdateContext(StatementContext):
        """Parse-tree context for UPDATE name SET assignments [WHERE condition]; `where` holds the optional WHERE expression."""
        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.StatementContext
            super().__init__(parser)
            self.where = None # BooleanExpressionContext
            self.copyFrom(ctx)
        def UPDATE(self):
            return self.getToken(SqlBaseParser.UPDATE, 0)
        def qualifiedName(self):
            return self.getTypedRuleContext(SqlBaseParser.QualifiedNameContext,0)
        def SET(self):
            return self.getToken(SqlBaseParser.SET, 0)
        def updateAssignment(self, i:int=None):
            # With no index, return all updateAssignment children; otherwise the i-th one.
            if i is None:
                return self.getTypedRuleContexts(SqlBaseParser.UpdateAssignmentContext)
            else:
                return self.getTypedRuleContext(SqlBaseParser.UpdateAssignmentContext,i)
        def WHERE(self):
            return self.getToken(SqlBaseParser.WHERE, 0)
        def booleanExpression(self):
            return self.getTypedRuleContext(SqlBaseParser.BooleanExpressionContext,0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterUpdate" ):
                listener.enterUpdate(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitUpdate" ):
                listener.exitUpdate(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitUpdate" ):
                return visitor.visitUpdate(self)
            else:
                return visitor.visitChildren(self)
    class TableExecuteContext(StatementContext):
        """Parse-tree context for ALTER TABLE name EXECUTE procedure(args) [WHERE condition]; labeled fields hold the table name, procedure identifier and WHERE expression."""
        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.StatementContext
            super().__init__(parser)
            self.tableName = None # QualifiedNameContext
            self.procedureName = None # IdentifierContext
            self.where = None # BooleanExpressionContext
            self.copyFrom(ctx)
        def ALTER(self):
            return self.getToken(SqlBaseParser.ALTER, 0)
        def TABLE(self):
            return self.getToken(SqlBaseParser.TABLE, 0)
        def EXECUTE(self):
            return self.getToken(SqlBaseParser.EXECUTE, 0)
        def qualifiedName(self):
            return self.getTypedRuleContext(SqlBaseParser.QualifiedNameContext,0)
        def identifier(self):
            return self.getTypedRuleContext(SqlBaseParser.IdentifierContext,0)
        def WHERE(self):
            return self.getToken(SqlBaseParser.WHERE, 0)
        def booleanExpression(self):
            return self.getTypedRuleContext(SqlBaseParser.BooleanExpressionContext,0)
        def callArgument(self, i:int=None):
            # With no index, return all callArgument children; otherwise the i-th one.
            if i is None:
                return self.getTypedRuleContexts(SqlBaseParser.CallArgumentContext)
            else:
                return self.getTypedRuleContext(SqlBaseParser.CallArgumentContext,i)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterTableExecute" ):
                listener.enterTableExecute(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitTableExecute" ):
                listener.exitTableExecute(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitTableExecute" ):
                return visitor.visitTableExecute(self)
            else:
                return visitor.visitChildren(self)
    class DeleteContext(StatementContext):
        """Parse-tree context for DELETE FROM name [WHERE condition]."""
        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.StatementContext
            super().__init__(parser)
            self.copyFrom(ctx)
        def DELETE(self):
            return self.getToken(SqlBaseParser.DELETE, 0)
        def FROM(self):
            return self.getToken(SqlBaseParser.FROM, 0)
        def qualifiedName(self):
            return self.getTypedRuleContext(SqlBaseParser.QualifiedNameContext,0)
        def WHERE(self):
            return self.getToken(SqlBaseParser.WHERE, 0)
        def booleanExpression(self):
            return self.getTypedRuleContext(SqlBaseParser.BooleanExpressionContext,0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterDelete" ):
                listener.enterDelete(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitDelete" ):
                listener.exitDelete(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitDelete" ):
                return visitor.visitDelete(self)
            else:
                return visitor.visitChildren(self)
    class DescribeInputContext(StatementContext):
        """Parse-tree context for DESCRIBE INPUT identifier (prepared-statement input description)."""
        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.StatementContext
            super().__init__(parser)
            self.copyFrom(ctx)
        def DESCRIBE(self):
            return self.getToken(SqlBaseParser.DESCRIBE, 0)
        def INPUT(self):
            return self.getToken(SqlBaseParser.INPUT, 0)
        def identifier(self):
            return self.getTypedRuleContext(SqlBaseParser.IdentifierContext,0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterDescribeInput" ):
                listener.enterDescribeInput(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitDescribeInput" ):
                listener.exitDescribeInput(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitDescribeInput" ):
                return visitor.visitDescribeInput(self)
            else:
                return visitor.visitChildren(self)
    class ShowStatsForQueryContext(StatementContext):
        """Parse-tree context for SHOW STATS FOR (query) — the query form, as opposed to ShowStatsContext's table form."""
        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.StatementContext
            super().__init__(parser)
            self.copyFrom(ctx)
        def SHOW(self):
            return self.getToken(SqlBaseParser.SHOW, 0)
        def STATS(self):
            return self.getToken(SqlBaseParser.STATS, 0)
        def FOR(self):
            return self.getToken(SqlBaseParser.FOR, 0)
        def query(self):
            return self.getTypedRuleContext(SqlBaseParser.QueryContext,0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterShowStatsForQuery" ):
                listener.enterShowStatsForQuery(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitShowStatsForQuery" ):
                listener.exitShowStatsForQuery(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitShowStatsForQuery" ):
                return visitor.visitShowStatsForQuery(self)
            else:
                return visitor.visitChildren(self)
    class StatementDefaultContext(StatementContext):
        """Parse-tree context for a plain query used as a statement (the default alternative)."""
        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.StatementContext
            super().__init__(parser)
            self.copyFrom(ctx)
        def query(self):
            return self.getTypedRuleContext(SqlBaseParser.QueryContext,0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterStatementDefault" ):
                listener.enterStatementDefault(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitStatementDefault" ):
                listener.exitStatementDefault(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitStatementDefault" ):
                return visitor.visitStatementDefault(self)
            else:
                return visitor.visitChildren(self)
    class SetTimeZoneContext(StatementContext):
        """Parse-tree context for SET TIME ZONE LOCAL | expression."""
        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.StatementContext
            super().__init__(parser)
            self.copyFrom(ctx)
        def SET(self):
            return self.getToken(SqlBaseParser.SET, 0)
        def TIME(self):
            return self.getToken(SqlBaseParser.TIME, 0)
        def ZONE(self):
            return self.getToken(SqlBaseParser.ZONE, 0)
        def LOCAL(self):
            return self.getToken(SqlBaseParser.LOCAL, 0)
        def expression(self):
            return self.getTypedRuleContext(SqlBaseParser.ExpressionContext,0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterSetTimeZone" ):
                listener.enterSetTimeZone(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitSetTimeZone" ):
                listener.exitSetTimeZone(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitSetTimeZone" ):
                return visitor.visitSetTimeZone(self)
            else:
                return visitor.visitChildren(self)
    class TruncateTableContext(StatementContext):
        """Parse-tree context for TRUNCATE TABLE qualifiedName."""
        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.StatementContext
            super().__init__(parser)
            self.copyFrom(ctx)
        def TRUNCATE(self):
            return self.getToken(SqlBaseParser.TRUNCATE, 0)
        def TABLE(self):
            return self.getToken(SqlBaseParser.TABLE, 0)
        def qualifiedName(self):
            return self.getTypedRuleContext(SqlBaseParser.QualifiedNameContext,0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterTruncateTable" ):
                listener.enterTruncateTable(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitTruncateTable" ):
                listener.exitTruncateTable(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitTruncateTable" ):
                return visitor.visitTruncateTable(self)
            else:
                return visitor.visitChildren(self)
    class CreateMaterializedViewContext(StatementContext):
        """Parse-tree context for CREATE [OR REPLACE] MATERIALIZED VIEW [IF NOT EXISTS] name [COMMENT string] [WITH properties] AS query."""
        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.StatementContext
            super().__init__(parser)
            self.copyFrom(ctx)
        def CREATE(self):
            return self.getToken(SqlBaseParser.CREATE, 0)
        def MATERIALIZED(self):
            return self.getToken(SqlBaseParser.MATERIALIZED, 0)
        def VIEW(self):
            return self.getToken(SqlBaseParser.VIEW, 0)
        def qualifiedName(self):
            return self.getTypedRuleContext(SqlBaseParser.QualifiedNameContext,0)
        def AS(self):
            return self.getToken(SqlBaseParser.AS, 0)
        def query(self):
            return self.getTypedRuleContext(SqlBaseParser.QueryContext,0)
        def OR(self):
            return self.getToken(SqlBaseParser.OR, 0)
        def REPLACE(self):
            return self.getToken(SqlBaseParser.REPLACE, 0)
        def IF(self):
            return self.getToken(SqlBaseParser.IF, 0)
        def NOT(self):
            return self.getToken(SqlBaseParser.NOT, 0)
        def EXISTS(self):
            return self.getToken(SqlBaseParser.EXISTS, 0)
        def COMMENT(self):
            return self.getToken(SqlBaseParser.COMMENT, 0)
        def string(self):
            return self.getTypedRuleContext(SqlBaseParser.StringContext,0)
        def WITH(self):
            return self.getToken(SqlBaseParser.WITH, 0)
        def properties(self):
            return self.getTypedRuleContext(SqlBaseParser.PropertiesContext,0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterCreateMaterializedView" ):
                listener.enterCreateMaterializedView(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitCreateMaterializedView" ):
                listener.exitCreateMaterializedView(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitCreateMaterializedView" ):
                return visitor.visitCreateMaterializedView(self)
            else:
                return visitor.visitChildren(self)
    class SetSessionContext(StatementContext):
        """Parse-tree context for SET SESSION qualifiedName = expression."""
        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.StatementContext
            super().__init__(parser)
            self.copyFrom(ctx)
        def SET(self):
            return self.getToken(SqlBaseParser.SET, 0)
        def SESSION(self):
            return self.getToken(SqlBaseParser.SESSION, 0)
        def qualifiedName(self):
            return self.getTypedRuleContext(SqlBaseParser.QualifiedNameContext,0)
        def EQ(self):
            return self.getToken(SqlBaseParser.EQ, 0)
        def expression(self):
            return self.getTypedRuleContext(SqlBaseParser.ExpressionContext,0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterSetSession" ):
                listener.enterSetSession(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitSetSession" ):
                listener.exitSetSession(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitSetSession" ):
                return visitor.visitSetSession(self)
            else:
                return visitor.visitChildren(self)
    class CreateViewContext(StatementContext):
        """Parse-tree context for CREATE [OR REPLACE] VIEW name [COMMENT string] [SECURITY DEFINER|INVOKER] AS query."""
        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.StatementContext
            super().__init__(parser)
            self.copyFrom(ctx)
        def CREATE(self):
            return self.getToken(SqlBaseParser.CREATE, 0)
        def VIEW(self):
            return self.getToken(SqlBaseParser.VIEW, 0)
        def qualifiedName(self):
            return self.getTypedRuleContext(SqlBaseParser.QualifiedNameContext,0)
        def AS(self):
            return self.getToken(SqlBaseParser.AS, 0)
        def query(self):
            return self.getTypedRuleContext(SqlBaseParser.QueryContext,0)
        def OR(self):
            return self.getToken(SqlBaseParser.OR, 0)
        def REPLACE(self):
            return self.getToken(SqlBaseParser.REPLACE, 0)
        def COMMENT(self):
            return self.getToken(SqlBaseParser.COMMENT, 0)
        def string(self):
            return self.getTypedRuleContext(SqlBaseParser.StringContext,0)
        def SECURITY(self):
            return self.getToken(SqlBaseParser.SECURITY, 0)
        def DEFINER(self):
            return self.getToken(SqlBaseParser.DEFINER, 0)
        def INVOKER(self):
            return self.getToken(SqlBaseParser.INVOKER, 0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterCreateView" ):
                listener.enterCreateView(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitCreateView" ):
                listener.exitCreateView(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitCreateView" ):
                return visitor.visitCreateView(self)
            else:
                return visitor.visitChildren(self)
    class RenameMaterializedViewContext(StatementContext):
        """Parse-tree context for ALTER MATERIALIZED VIEW [IF EXISTS] from RENAME TO to; `from_` and `to` label the two qualified names."""
        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.StatementContext
            super().__init__(parser)
            self.from_ = None # QualifiedNameContext
            self.to = None # QualifiedNameContext
            self.copyFrom(ctx)
        def ALTER(self):
            return self.getToken(SqlBaseParser.ALTER, 0)
        def MATERIALIZED(self):
            return self.getToken(SqlBaseParser.MATERIALIZED, 0)
        def VIEW(self):
            return self.getToken(SqlBaseParser.VIEW, 0)
        def RENAME(self):
            return self.getToken(SqlBaseParser.RENAME, 0)
        def TO(self):
            return self.getToken(SqlBaseParser.TO, 0)
        def qualifiedName(self, i:int=None):
            # Two qualified names appear (source and target), so the accessor is indexed.
            if i is None:
                return self.getTypedRuleContexts(SqlBaseParser.QualifiedNameContext)
            else:
                return self.getTypedRuleContext(SqlBaseParser.QualifiedNameContext,i)
        def IF(self):
            return self.getToken(SqlBaseParser.IF, 0)
        def EXISTS(self):
            return self.getToken(SqlBaseParser.EXISTS, 0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterRenameMaterializedView" ):
                listener.enterRenameMaterializedView(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitRenameMaterializedView" ):
                listener.exitRenameMaterializedView(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitRenameMaterializedView" ):
                return visitor.visitRenameMaterializedView(self)
            else:
                return visitor.visitChildren(self)
    class ShowSchemasContext(StatementContext):
        """Parse-tree context for SHOW SCHEMAS [FROM|IN catalog] [LIKE pattern [ESCAPE escape]]; `pattern` and `escape` label the two string operands."""
        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.StatementContext
            super().__init__(parser)
            self.pattern = None # StringContext
            self.escape = None # StringContext
            self.copyFrom(ctx)
        def SHOW(self):
            return self.getToken(SqlBaseParser.SHOW, 0)
        def SCHEMAS(self):
            return self.getToken(SqlBaseParser.SCHEMAS, 0)
        def identifier(self):
            return self.getTypedRuleContext(SqlBaseParser.IdentifierContext,0)
        def LIKE(self):
            return self.getToken(SqlBaseParser.LIKE, 0)
        def FROM(self):
            return self.getToken(SqlBaseParser.FROM, 0)
        def IN(self):
            return self.getToken(SqlBaseParser.IN, 0)
        def string(self, i:int=None):
            # Up to two strings appear (LIKE pattern and ESCAPE), so the accessor is indexed.
            if i is None:
                return self.getTypedRuleContexts(SqlBaseParser.StringContext)
            else:
                return self.getTypedRuleContext(SqlBaseParser.StringContext,i)
        def ESCAPE(self):
            return self.getToken(SqlBaseParser.ESCAPE, 0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterShowSchemas" ):
                listener.enterShowSchemas(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitShowSchemas" ):
                listener.exitShowSchemas(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitShowSchemas" ):
                return visitor.visitShowSchemas(self)
            else:
                return visitor.visitChildren(self)
    class DropTableContext(StatementContext):
        """Parse-tree context for DROP TABLE [IF EXISTS] qualifiedName."""
        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.StatementContext
            super().__init__(parser)
            self.copyFrom(ctx)
        def DROP(self):
            return self.getToken(SqlBaseParser.DROP, 0)
        def TABLE(self):
            return self.getToken(SqlBaseParser.TABLE, 0)
        def qualifiedName(self):
            return self.getTypedRuleContext(SqlBaseParser.QualifiedNameContext,0)
        def IF(self):
            return self.getToken(SqlBaseParser.IF, 0)
        def EXISTS(self):
            return self.getToken(SqlBaseParser.EXISTS, 0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterDropTable" ):
                listener.enterDropTable(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitDropTable" ):
                listener.exitDropTable(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitDropTable" ):
                return visitor.visitDropTable(self)
            else:
                return visitor.visitChildren(self)
    class SetSchemaAuthorizationContext(StatementContext):
        """Parse-tree context for ALTER SCHEMA name SET AUTHORIZATION principal."""
        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.StatementContext
            super().__init__(parser)
            self.copyFrom(ctx)
        def ALTER(self):
            return self.getToken(SqlBaseParser.ALTER, 0)
        def SCHEMA(self):
            return self.getToken(SqlBaseParser.SCHEMA, 0)
        def qualifiedName(self):
            return self.getTypedRuleContext(SqlBaseParser.QualifiedNameContext,0)
        def SET(self):
            return self.getToken(SqlBaseParser.SET, 0)
        def AUTHORIZATION(self):
            return self.getToken(SqlBaseParser.AUTHORIZATION, 0)
        def principal(self):
            return self.getTypedRuleContext(SqlBaseParser.PrincipalContext,0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterSetSchemaAuthorization" ):
                listener.enterSetSchemaAuthorization(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitSetSchemaAuthorization" ):
                listener.exitSetSchemaAuthorization(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitSetSchemaAuthorization" ):
                return visitor.visitSetSchemaAuthorization(self)
            else:
                return visitor.visitChildren(self)
    class RollbackContext(StatementContext):
        """Parse-tree context for ROLLBACK [WORK]."""
        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.StatementContext
            super().__init__(parser)
            self.copyFrom(ctx)
        def ROLLBACK(self):
            return self.getToken(SqlBaseParser.ROLLBACK, 0)
        def WORK(self):
            return self.getToken(SqlBaseParser.WORK, 0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterRollback" ):
                listener.enterRollback(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitRollback" ):
                listener.exitRollback(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitRollback" ):
                return visitor.visitRollback(self)
            else:
                return visitor.visitChildren(self)
    class CommentTableContext(StatementContext):
        """Parse-tree context for COMMENT ON TABLE name IS string | NULL."""
        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.StatementContext
            super().__init__(parser)
            self.copyFrom(ctx)
        def COMMENT(self):
            return self.getToken(SqlBaseParser.COMMENT, 0)
        def ON(self):
            return self.getToken(SqlBaseParser.ON, 0)
        def TABLE(self):
            return self.getToken(SqlBaseParser.TABLE, 0)
        def qualifiedName(self):
            return self.getTypedRuleContext(SqlBaseParser.QualifiedNameContext,0)
        def IS(self):
            return self.getToken(SqlBaseParser.IS, 0)
        def string(self):
            return self.getTypedRuleContext(SqlBaseParser.StringContext,0)
        def NULL(self):
            return self.getToken(SqlBaseParser.NULL, 0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterCommentTable" ):
                listener.enterCommentTable(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitCommentTable" ):
                listener.exitCommentTable(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitCommentTable" ):
                return visitor.visitCommentTable(self)
            else:
                return visitor.visitChildren(self)
    class RenameViewContext(StatementContext):
        """Parse-tree context for ALTER VIEW from RENAME TO to; `from_` and `to` label the two qualified names."""
        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.StatementContext
            super().__init__(parser)
            self.from_ = None # QualifiedNameContext
            self.to = None # QualifiedNameContext
            self.copyFrom(ctx)
        def ALTER(self):
            return self.getToken(SqlBaseParser.ALTER, 0)
        def VIEW(self):
            return self.getToken(SqlBaseParser.VIEW, 0)
        def RENAME(self):
            return self.getToken(SqlBaseParser.RENAME, 0)
        def TO(self):
            return self.getToken(SqlBaseParser.TO, 0)
        def qualifiedName(self, i:int=None):
            # Two qualified names appear (source and target), so the accessor is indexed.
            if i is None:
                return self.getTypedRuleContexts(SqlBaseParser.QualifiedNameContext)
            else:
                return self.getTypedRuleContext(SqlBaseParser.QualifiedNameContext,i)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterRenameView" ):
                listener.enterRenameView(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitRenameView" ):
                listener.exitRenameView(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitRenameView" ):
                return visitor.visitRenameView(self)
            else:
                return visitor.visitChildren(self)
    class SetPathContext(StatementContext):
        """Parse-tree context for SET PATH pathSpecification."""
        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.StatementContext
            super().__init__(parser)
            self.copyFrom(ctx)
        def SET(self):
            return self.getToken(SqlBaseParser.SET, 0)
        def PATH(self):
            return self.getToken(SqlBaseParser.PATH, 0)
        def pathSpecification(self):
            return self.getTypedRuleContext(SqlBaseParser.PathSpecificationContext,0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterSetPath" ):
                listener.enterSetPath(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitSetPath" ):
                listener.exitSetPath(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitSetPath" ):
                return visitor.visitSetPath(self)
            else:
                return visitor.visitChildren(self)
    class GrantRolesContext(StatementContext):
        """Parse-tree context for GRANT roles TO principals [WITH ADMIN OPTION] [GRANTED BY grantor] [IN catalog]; `catalog` labels the optional IN identifier."""
        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.StatementContext
            super().__init__(parser)
            self.catalog = None # IdentifierContext
            self.copyFrom(ctx)
        def GRANT(self):
            return self.getToken(SqlBaseParser.GRANT, 0)
        def roles(self):
            return self.getTypedRuleContext(SqlBaseParser.RolesContext,0)
        def TO(self):
            return self.getToken(SqlBaseParser.TO, 0)
        def principal(self, i:int=None):
            # With no index, return all grantee principals; otherwise the i-th one.
            if i is None:
                return self.getTypedRuleContexts(SqlBaseParser.PrincipalContext)
            else:
                return self.getTypedRuleContext(SqlBaseParser.PrincipalContext,i)
        def WITH(self):
            return self.getToken(SqlBaseParser.WITH, 0)
        def ADMIN(self):
            return self.getToken(SqlBaseParser.ADMIN, 0)
        def OPTION(self):
            return self.getToken(SqlBaseParser.OPTION, 0)
        def GRANTED(self):
            return self.getToken(SqlBaseParser.GRANTED, 0)
        def BY(self):
            return self.getToken(SqlBaseParser.BY, 0)
        def grantor(self):
            return self.getTypedRuleContext(SqlBaseParser.GrantorContext,0)
        def IN(self):
            return self.getToken(SqlBaseParser.IN, 0)
        def identifier(self):
            return self.getTypedRuleContext(SqlBaseParser.IdentifierContext,0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterGrantRoles" ):
                listener.enterGrantRoles(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitGrantRoles" ):
                listener.exitGrantRoles(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitGrantRoles" ):
                return visitor.visitGrantRoles(self)
            else:
                return visitor.visitChildren(self)
    class CallContext(StatementContext):
        """Parse-tree context for CALL qualifiedName(arguments...)."""
        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.StatementContext
            super().__init__(parser)
            self.copyFrom(ctx)
        def CALL(self):
            return self.getToken(SqlBaseParser.CALL, 0)
        def qualifiedName(self):
            return self.getTypedRuleContext(SqlBaseParser.QualifiedNameContext,0)
        def callArgument(self, i:int=None):
            # With no index, return all callArgument children; otherwise the i-th one.
            if i is None:
                return self.getTypedRuleContexts(SqlBaseParser.CallArgumentContext)
            else:
                return self.getTypedRuleContext(SqlBaseParser.CallArgumentContext,i)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterCall" ):
                listener.enterCall(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitCall" ):
                listener.exitCall(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitCall" ):
                return visitor.visitCall(self)
            else:
                return visitor.visitChildren(self)
    class RefreshMaterializedViewContext(StatementContext):
        """Parse-tree context for REFRESH MATERIALIZED VIEW qualifiedName."""
        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.StatementContext
            super().__init__(parser)
            self.copyFrom(ctx)
        def REFRESH(self):
            return self.getToken(SqlBaseParser.REFRESH, 0)
        def MATERIALIZED(self):
            return self.getToken(SqlBaseParser.MATERIALIZED, 0)
        def VIEW(self):
            return self.getToken(SqlBaseParser.VIEW, 0)
        def qualifiedName(self):
            return self.getTypedRuleContext(SqlBaseParser.QualifiedNameContext,0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterRefreshMaterializedView" ):
                listener.enterRefreshMaterializedView(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitRefreshMaterializedView" ):
                listener.exitRefreshMaterializedView(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitRefreshMaterializedView" ):
                return visitor.visitRefreshMaterializedView(self)
            else:
                return visitor.visitChildren(self)
    class ShowCreateMaterializedViewContext(StatementContext):
        """Parse-tree context for SHOW CREATE MATERIALIZED VIEW qualifiedName."""
        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.StatementContext
            super().__init__(parser)
            self.copyFrom(ctx)
        def SHOW(self):
            return self.getToken(SqlBaseParser.SHOW, 0)
        def CREATE(self):
            return self.getToken(SqlBaseParser.CREATE, 0)
        def MATERIALIZED(self):
            return self.getToken(SqlBaseParser.MATERIALIZED, 0)
        def VIEW(self):
            return self.getToken(SqlBaseParser.VIEW, 0)
        def qualifiedName(self):
            return self.getTypedRuleContext(SqlBaseParser.QualifiedNameContext,0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterShowCreateMaterializedView" ):
                listener.enterShowCreateMaterializedView(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitShowCreateMaterializedView" ):
                listener.exitShowCreateMaterializedView(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitShowCreateMaterializedView" ):
                return visitor.visitShowCreateMaterializedView(self)
            else:
                return visitor.visitChildren(self)
    class ShowFunctionsContext(StatementContext):
        """Parse-tree context for SHOW FUNCTIONS [LIKE pattern [ESCAPE escape]]; `pattern` and `escape` label the two string operands."""
        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.StatementContext
            super().__init__(parser)
            self.pattern = None # StringContext
            self.escape = None # StringContext
            self.copyFrom(ctx)
        def SHOW(self):
            return self.getToken(SqlBaseParser.SHOW, 0)
        def FUNCTIONS(self):
            return self.getToken(SqlBaseParser.FUNCTIONS, 0)
        def LIKE(self):
            return self.getToken(SqlBaseParser.LIKE, 0)
        def string(self, i:int=None):
            # Up to two strings appear (LIKE pattern and ESCAPE), so the accessor is indexed.
            if i is None:
                return self.getTypedRuleContexts(SqlBaseParser.StringContext)
            else:
                return self.getTypedRuleContext(SqlBaseParser.StringContext,i)
        def ESCAPE(self):
            return self.getToken(SqlBaseParser.ESCAPE, 0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterShowFunctions" ):
                listener.enterShowFunctions(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitShowFunctions" ):
                listener.exitShowFunctions(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitShowFunctions" ):
                return visitor.visitShowFunctions(self)
            else:
                return visitor.visitChildren(self)
    class DescribeOutputContext(StatementContext):
        """Parse-tree context for DESCRIBE OUTPUT identifier (prepared-statement output description)."""
        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.StatementContext
            super().__init__(parser)
            self.copyFrom(ctx)
        def DESCRIBE(self):
            return self.getToken(SqlBaseParser.DESCRIBE, 0)
        def OUTPUT(self):
            return self.getToken(SqlBaseParser.OUTPUT, 0)
        def identifier(self):
            return self.getTypedRuleContext(SqlBaseParser.IdentifierContext,0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterDescribeOutput" ):
                listener.enterDescribeOutput(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitDescribeOutput" ):
                listener.exitDescribeOutput(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitDescribeOutput" ):
                return visitor.visitDescribeOutput(self)
            else:
                return visitor.visitChildren(self)
    class GrantContext(StatementContext):
        """Parse-tree context for GRANT privileges ON [SCHEMA|TABLE] name TO principal [WITH GRANT OPTION]; `grantee` labels the TO principal."""
        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.StatementContext
            super().__init__(parser)
            self.grantee = None # PrincipalContext
            self.copyFrom(ctx)
        def GRANT(self, i:int=None):
            # GRANT can occur twice (the statement keyword and WITH GRANT OPTION), so it is indexed.
            if i is None:
                return self.getTokens(SqlBaseParser.GRANT)
            else:
                return self.getToken(SqlBaseParser.GRANT, i)
        def ON(self):
            return self.getToken(SqlBaseParser.ON, 0)
        def qualifiedName(self):
            return self.getTypedRuleContext(SqlBaseParser.QualifiedNameContext,0)
        def TO(self):
            return self.getToken(SqlBaseParser.TO, 0)
        def principal(self):
            return self.getTypedRuleContext(SqlBaseParser.PrincipalContext,0)
        def privilege(self, i:int=None):
            # With no index, return all privilege children; otherwise the i-th one.
            if i is None:
                return self.getTypedRuleContexts(SqlBaseParser.PrivilegeContext)
            else:
                return self.getTypedRuleContext(SqlBaseParser.PrivilegeContext,i)
        def ALL(self):
            return self.getToken(SqlBaseParser.ALL, 0)
        def PRIVILEGES(self):
            return self.getToken(SqlBaseParser.PRIVILEGES, 0)
        def WITH(self):
            return self.getToken(SqlBaseParser.WITH, 0)
        def OPTION(self):
            return self.getToken(SqlBaseParser.OPTION, 0)
        def SCHEMA(self):
            return self.getToken(SqlBaseParser.SCHEMA, 0)
        def TABLE(self):
            return self.getToken(SqlBaseParser.TABLE, 0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterGrant" ):
                listener.enterGrant(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitGrant" ):
                listener.exitGrant(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitGrant" ):
                return visitor.visitGrant(self)
            else:
                return visitor.visitChildren(self)
class SetTablePropertiesContext(StatementContext):
    """Parse-tree context for the ALTER TABLE ... SET PROPERTIES alternative."""

    def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.StatementContext
        super().__init__(parser)
        self.tableName = None # QualifiedNameContext
        self.copyFrom(ctx)

    def ALTER(self):
        """Return the ALTER terminal token."""
        return self.getToken(SqlBaseParser.ALTER, 0)

    def TABLE(self):
        """Return the TABLE terminal token."""
        return self.getToken(SqlBaseParser.TABLE, 0)

    def SET(self):
        """Return the SET terminal token."""
        return self.getToken(SqlBaseParser.SET, 0)

    def PROPERTIES(self):
        """Return the PROPERTIES terminal token."""
        return self.getToken(SqlBaseParser.PROPERTIES, 0)

    def propertyAssignments(self):
        """Return the child propertyAssignments rule context."""
        return self.getTypedRuleContext(SqlBaseParser.PropertyAssignmentsContext, 0)

    def qualifiedName(self):
        """Return the child qualifiedName rule context."""
        return self.getTypedRuleContext(SqlBaseParser.QualifiedNameContext, 0)

    def enterRule(self, listener:ParseTreeListener):
        """Notify *listener* on rule entry when it defines the hook."""
        if not hasattr(listener, "enterSetTableProperties"):
            return
        listener.enterSetTableProperties(self)

    def exitRule(self, listener:ParseTreeListener):
        """Notify *listener* on rule exit when it defines the hook."""
        if not hasattr(listener, "exitSetTableProperties"):
            return
        listener.exitSetTableProperties(self)

    def accept(self, visitor:ParseTreeVisitor):
        """Dispatch to the visitor's SetTableProperties hook, else visit children."""
        if not hasattr(visitor, "visitSetTableProperties"):
            return visitor.visitChildren(self)
        return visitor.visitSetTableProperties(self)
def statement(self):
localctx = SqlBaseParser.StatementContext(self, self._ctx, self.state)
self.enterRule(localctx, 10, self.RULE_statement)
self._la = 0 # Token type
try:
self.state = 953
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,104,self._ctx)
if la_ == 1:
localctx = SqlBaseParser.StatementDefaultContext(self, localctx)
self.enterOuterAlt(localctx, 1)
self.state = 197
self.query()
elif la_ == 2:
localctx = SqlBaseParser.UseContext(self, localctx)
self.enterOuterAlt(localctx, 2)
self.state = 198
self.match(SqlBaseParser.USE)
self.state = 199
localctx.schema = self.identifier()
elif la_ == 3:
localctx = SqlBaseParser.UseContext(self, localctx)
self.enterOuterAlt(localctx, 3)
self.state = 200
self.match(SqlBaseParser.USE)
self.state = 201
localctx.catalog = self.identifier()
self.state = 202
self.match(SqlBaseParser.T__0)
self.state = 203
localctx.schema = self.identifier()
elif la_ == 4:
localctx = SqlBaseParser.CreateSchemaContext(self, localctx)
self.enterOuterAlt(localctx, 4)
self.state = 205
self.match(SqlBaseParser.CREATE)
self.state = 206
self.match(SqlBaseParser.SCHEMA)
self.state = 210
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,0,self._ctx)
if la_ == 1:
self.state = 207
self.match(SqlBaseParser.IF)
self.state = 208
self.match(SqlBaseParser.NOT)
self.state = 209
self.match(SqlBaseParser.EXISTS)
self.state = 212
self.qualifiedName()
self.state = 215
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==SqlBaseParser.AUTHORIZATION:
self.state = 213
self.match(SqlBaseParser.AUTHORIZATION)
self.state = 214
self.principal()
self.state = 219
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==SqlBaseParser.WITH:
self.state = 217
self.match(SqlBaseParser.WITH)
self.state = 218
self.properties()
elif la_ == 5:
localctx = SqlBaseParser.DropSchemaContext(self, localctx)
self.enterOuterAlt(localctx, 5)
self.state = 221
self.match(SqlBaseParser.DROP)
self.state = 222
self.match(SqlBaseParser.SCHEMA)
self.state = 225
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,3,self._ctx)
if la_ == 1:
self.state = 223
self.match(SqlBaseParser.IF)
self.state = 224
self.match(SqlBaseParser.EXISTS)
self.state = 227
self.qualifiedName()
self.state = 229
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==SqlBaseParser.CASCADE or _la==SqlBaseParser.RESTRICT:
self.state = 228
_la = self._input.LA(1)
if not(_la==SqlBaseParser.CASCADE or _la==SqlBaseParser.RESTRICT):
self._errHandler.recoverInline(self)
else:
self._errHandler.reportMatch(self)
self.consume()
elif la_ == 6:
localctx = SqlBaseParser.RenameSchemaContext(self, localctx)
self.enterOuterAlt(localctx, 6)
self.state = 231
self.match(SqlBaseParser.ALTER)
self.state = 232
self.match(SqlBaseParser.SCHEMA)
self.state = 233
self.qualifiedName()
self.state = 234
self.match(SqlBaseParser.RENAME)
self.state = 235
self.match(SqlBaseParser.TO)
self.state = 236
self.identifier()
elif la_ == 7:
localctx = SqlBaseParser.SetSchemaAuthorizationContext(self, localctx)
self.enterOuterAlt(localctx, 7)
self.state = 238
self.match(SqlBaseParser.ALTER)
self.state = 239
self.match(SqlBaseParser.SCHEMA)
self.state = 240
self.qualifiedName()
self.state = 241
self.match(SqlBaseParser.SET)
self.state = 242
self.match(SqlBaseParser.AUTHORIZATION)
self.state = 243
self.principal()
elif la_ == 8:
localctx = SqlBaseParser.CreateTableAsSelectContext(self, localctx)
self.enterOuterAlt(localctx, 8)
self.state = 245
self.match(SqlBaseParser.CREATE)
self.state = 246
self.match(SqlBaseParser.TABLE)
self.state = 250
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,5,self._ctx)
if la_ == 1:
self.state = 247
self.match(SqlBaseParser.IF)
self.state = 248
self.match(SqlBaseParser.NOT)
self.state = 249
self.match(SqlBaseParser.EXISTS)
self.state = 252
self.qualifiedName()
self.state = 254
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==SqlBaseParser.T__1:
self.state = 253
self.columnAliases()
self.state = 258
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==SqlBaseParser.COMMENT:
self.state = 256
self.match(SqlBaseParser.COMMENT)
self.state = 257
self.string()
self.state = 262
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==SqlBaseParser.WITH:
self.state = 260
self.match(SqlBaseParser.WITH)
self.state = 261
self.properties()
self.state = 264
self.match(SqlBaseParser.AS)
self.state = 270
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,9,self._ctx)
if la_ == 1:
self.state = 265
self.query()
elif la_ == 2:
self.state = 266
self.match(SqlBaseParser.T__1)
self.state = 267
self.query()
self.state = 268
self.match(SqlBaseParser.T__2)
self.state = 277
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==SqlBaseParser.WITH:
self.state = 272
self.match(SqlBaseParser.WITH)
self.state = 274
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==SqlBaseParser.NO:
self.state = 273
self.match(SqlBaseParser.NO)
self.state = 276
self.match(SqlBaseParser.DATA)
elif la_ == 9:
localctx = SqlBaseParser.CreateTableContext(self, localctx)
self.enterOuterAlt(localctx, 9)
self.state = 279
self.match(SqlBaseParser.CREATE)
self.state = 280
self.match(SqlBaseParser.TABLE)
self.state = 284
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,12,self._ctx)
if la_ == 1:
self.state = 281
self.match(SqlBaseParser.IF)
self.state = 282
self.match(SqlBaseParser.NOT)
self.state = 283
self.match(SqlBaseParser.EXISTS)
self.state = 286
self.qualifiedName()
self.state = 287
self.match(SqlBaseParser.T__1)
self.state = 288
self.tableElement()
self.state = 293
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la==SqlBaseParser.T__3:
self.state = 289
self.match(SqlBaseParser.T__3)
self.state = 290
self.tableElement()
self.state = 295
self._errHandler.sync(self)
_la = self._input.LA(1)
self.state = 296
self.match(SqlBaseParser.T__2)
self.state = 299
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==SqlBaseParser.COMMENT:
self.state = 297
self.match(SqlBaseParser.COMMENT)
self.state = 298
self.string()
self.state = 303
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==SqlBaseParser.WITH:
self.state = 301
self.match(SqlBaseParser.WITH)
self.state = 302
self.properties()
elif la_ == 10:
localctx = SqlBaseParser.DropTableContext(self, localctx)
self.enterOuterAlt(localctx, 10)
self.state = 305
self.match(SqlBaseParser.DROP)
self.state = 306
self.match(SqlBaseParser.TABLE)
self.state = 309
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,16,self._ctx)
if la_ == 1:
self.state = 307
self.match(SqlBaseParser.IF)
self.state = 308
self.match(SqlBaseParser.EXISTS)
self.state = 311
self.qualifiedName()
elif la_ == 11:
localctx = SqlBaseParser.InsertIntoContext(self, localctx)
self.enterOuterAlt(localctx, 11)
self.state = 312
self.match(SqlBaseParser.INSERT)
self.state = 313
self.match(SqlBaseParser.INTO)
self.state = 314
self.qualifiedName()
self.state = 316
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,17,self._ctx)
if la_ == 1:
self.state = 315
self.columnAliases()
self.state = 318
self.query()
elif la_ == 12:
localctx = SqlBaseParser.DeleteContext(self, localctx)
self.enterOuterAlt(localctx, 12)
self.state = 320
self.match(SqlBaseParser.DELETE)
self.state = 321
self.match(SqlBaseParser.FROM)
self.state = 322
self.qualifiedName()
self.state = 325
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==SqlBaseParser.WHERE:
self.state = 323
self.match(SqlBaseParser.WHERE)
self.state = 324
self.booleanExpression(0)
elif la_ == 13:
localctx = SqlBaseParser.TruncateTableContext(self, localctx)
self.enterOuterAlt(localctx, 13)
self.state = 327
self.match(SqlBaseParser.TRUNCATE)
self.state = 328
self.match(SqlBaseParser.TABLE)
self.state = 329
self.qualifiedName()
elif la_ == 14:
localctx = SqlBaseParser.CommentTableContext(self, localctx)
self.enterOuterAlt(localctx, 14)
self.state = 330
self.match(SqlBaseParser.COMMENT)
self.state = 331
self.match(SqlBaseParser.ON)
self.state = 332
self.match(SqlBaseParser.TABLE)
self.state = 333
self.qualifiedName()
self.state = 334
self.match(SqlBaseParser.IS)
self.state = 337
self._errHandler.sync(self)
token = self._input.LA(1)
if token in [SqlBaseParser.STRING, SqlBaseParser.UNICODE_STRING]:
self.state = 335
self.string()
elif token in [SqlBaseParser.NULL]:
self.state = 336
self.match(SqlBaseParser.NULL)
else:
raise NoViableAltException(self)
elif la_ == 15:
localctx = SqlBaseParser.CommentColumnContext(self, localctx)
self.enterOuterAlt(localctx, 15)
self.state = 339
self.match(SqlBaseParser.COMMENT)
self.state = 340
self.match(SqlBaseParser.ON)
self.state = 341
self.match(SqlBaseParser.COLUMN)
self.state = 342
self.qualifiedName()
self.state = 343
self.match(SqlBaseParser.IS)
self.state = 346
self._errHandler.sync(self)
token = self._input.LA(1)
if token in [SqlBaseParser.STRING, SqlBaseParser.UNICODE_STRING]:
self.state = 344
self.string()
elif token in [SqlBaseParser.NULL]:
self.state = 345
self.match(SqlBaseParser.NULL)
else:
raise NoViableAltException(self)
elif la_ == 16:
localctx = SqlBaseParser.RenameTableContext(self, localctx)
self.enterOuterAlt(localctx, 16)
self.state = 348
self.match(SqlBaseParser.ALTER)
self.state = 349
self.match(SqlBaseParser.TABLE)
self.state = 352
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,21,self._ctx)
if la_ == 1:
self.state = 350
self.match(SqlBaseParser.IF)
self.state = 351
self.match(SqlBaseParser.EXISTS)
self.state = 354
localctx.from_ = self.qualifiedName()
self.state = 355
self.match(SqlBaseParser.RENAME)
self.state = 356
self.match(SqlBaseParser.TO)
self.state = 357
localctx.to = self.qualifiedName()
elif la_ == 17:
localctx = SqlBaseParser.AddColumnContext(self, localctx)
self.enterOuterAlt(localctx, 17)
self.state = 359
self.match(SqlBaseParser.ALTER)
self.state = 360
self.match(SqlBaseParser.TABLE)
self.state = 363
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,22,self._ctx)
if la_ == 1:
self.state = 361
self.match(SqlBaseParser.IF)
self.state = 362
self.match(SqlBaseParser.EXISTS)
self.state = 365
localctx.tableName = self.qualifiedName()
self.state = 366
self.match(SqlBaseParser.ADD)
self.state = 367
self.match(SqlBaseParser.COLUMN)
self.state = 371
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,23,self._ctx)
if la_ == 1:
self.state = 368
self.match(SqlBaseParser.IF)
self.state = 369
self.match(SqlBaseParser.NOT)
self.state = 370
self.match(SqlBaseParser.EXISTS)
self.state = 373
localctx.column = self.columnDefinition()
elif la_ == 18:
localctx = SqlBaseParser.RenameColumnContext(self, localctx)
self.enterOuterAlt(localctx, 18)
self.state = 375
self.match(SqlBaseParser.ALTER)
self.state = 376
self.match(SqlBaseParser.TABLE)
self.state = 379
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,24,self._ctx)
if la_ == 1:
self.state = 377
self.match(SqlBaseParser.IF)
self.state = 378
self.match(SqlBaseParser.EXISTS)
self.state = 381
localctx.tableName = self.qualifiedName()
self.state = 382
self.match(SqlBaseParser.RENAME)
self.state = 383
self.match(SqlBaseParser.COLUMN)
self.state = 386
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,25,self._ctx)
if la_ == 1:
self.state = 384
self.match(SqlBaseParser.IF)
self.state = 385
self.match(SqlBaseParser.EXISTS)
self.state = 388
localctx.from_ = self.identifier()
self.state = 389
self.match(SqlBaseParser.TO)
self.state = 390
localctx.to = self.identifier()
elif la_ == 19:
localctx = SqlBaseParser.DropColumnContext(self, localctx)
self.enterOuterAlt(localctx, 19)
self.state = 392
self.match(SqlBaseParser.ALTER)
self.state = 393
self.match(SqlBaseParser.TABLE)
self.state = 396
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,26,self._ctx)
if la_ == 1:
self.state = 394
self.match(SqlBaseParser.IF)
self.state = 395
self.match(SqlBaseParser.EXISTS)
self.state = 398
localctx.tableName = self.qualifiedName()
self.state = 399
self.match(SqlBaseParser.DROP)
self.state = 400
self.match(SqlBaseParser.COLUMN)
self.state = 403
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,27,self._ctx)
if la_ == 1:
self.state = 401
self.match(SqlBaseParser.IF)
self.state = 402
self.match(SqlBaseParser.EXISTS)
self.state = 405
localctx.column = self.qualifiedName()
elif la_ == 20:
localctx = SqlBaseParser.SetTableAuthorizationContext(self, localctx)
self.enterOuterAlt(localctx, 20)
self.state = 407
self.match(SqlBaseParser.ALTER)
self.state = 408
self.match(SqlBaseParser.TABLE)
self.state = 409
localctx.tableName = self.qualifiedName()
self.state = 410
self.match(SqlBaseParser.SET)
self.state = 411
self.match(SqlBaseParser.AUTHORIZATION)
self.state = 412
self.principal()
elif la_ == 21:
localctx = SqlBaseParser.SetTablePropertiesContext(self, localctx)
self.enterOuterAlt(localctx, 21)
self.state = 414
self.match(SqlBaseParser.ALTER)
self.state = 415
self.match(SqlBaseParser.TABLE)
self.state = 416
localctx.tableName = self.qualifiedName()
self.state = 417
self.match(SqlBaseParser.SET)
self.state = 418
self.match(SqlBaseParser.PROPERTIES)
self.state = 419
self.propertyAssignments()
elif la_ == 22:
localctx = SqlBaseParser.TableExecuteContext(self, localctx)
self.enterOuterAlt(localctx, 22)
self.state = 421
self.match(SqlBaseParser.ALTER)
self.state = 422
self.match(SqlBaseParser.TABLE)
self.state = 423
localctx.tableName = self.qualifiedName()
self.state = 424
self.match(SqlBaseParser.EXECUTE)
self.state = 425
localctx.procedureName = self.identifier()
self.state = 438
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==SqlBaseParser.T__1:
self.state = 426
self.match(SqlBaseParser.T__1)
self.state = 435
self._errHandler.sync(self)
_la = self._input.LA(1)
if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << SqlBaseParser.T__1) | (1 << SqlBaseParser.ADD) | (1 << SqlBaseParser.ADMIN) | (1 << SqlBaseParser.AFTER) | (1 << SqlBaseParser.ALL) | (1 << SqlBaseParser.ANALYZE) | (1 << SqlBaseParser.ANY) | (1 << SqlBaseParser.ARRAY) | (1 << SqlBaseParser.ASC) | (1 << SqlBaseParser.AT) | (1 << SqlBaseParser.AUTHORIZATION) | (1 << SqlBaseParser.BERNOULLI) | (1 << SqlBaseParser.CALL) | (1 << SqlBaseParser.CASCADE) | (1 << SqlBaseParser.CASE) | (1 << SqlBaseParser.CAST) | (1 << SqlBaseParser.CATALOGS) | (1 << SqlBaseParser.COLUMN) | (1 << SqlBaseParser.COLUMNS) | (1 << SqlBaseParser.COMMENT) | (1 << SqlBaseParser.COMMIT) | (1 << SqlBaseParser.COMMITTED) | (1 << SqlBaseParser.COUNT) | (1 << SqlBaseParser.CURRENT) | (1 << SqlBaseParser.CURRENT_CATALOG) | (1 << SqlBaseParser.CURRENT_DATE) | (1 << SqlBaseParser.CURRENT_PATH) | (1 << SqlBaseParser.CURRENT_SCHEMA) | (1 << SqlBaseParser.CURRENT_TIME) | (1 << SqlBaseParser.CURRENT_TIMESTAMP) | (1 << SqlBaseParser.CURRENT_USER) | (1 << SqlBaseParser.DATA) | (1 << SqlBaseParser.DATE) | (1 << SqlBaseParser.DAY) | (1 << SqlBaseParser.DEFINER) | (1 << SqlBaseParser.DENY))) != 0) or ((((_la - 64)) & ~0x3f) == 0 and ((1 << (_la - 64)) & ((1 << (SqlBaseParser.DESC - 64)) | (1 << (SqlBaseParser.DEFINE - 64)) | (1 << (SqlBaseParser.DISTRIBUTED - 64)) | (1 << (SqlBaseParser.DOUBLE - 64)) | (1 << (SqlBaseParser.EMPTY - 64)) | (1 << (SqlBaseParser.ERROR - 64)) | (1 << (SqlBaseParser.EXCLUDING - 64)) | (1 << (SqlBaseParser.EXISTS - 64)) | (1 << (SqlBaseParser.EXPLAIN - 64)) | (1 << (SqlBaseParser.EXTRACT - 64)) | (1 << (SqlBaseParser.FALSE - 64)) | (1 << (SqlBaseParser.FETCH - 64)) | (1 << (SqlBaseParser.FILTER - 64)) | (1 << (SqlBaseParser.FINAL - 64)) | (1 << (SqlBaseParser.FIRST - 64)) | (1 << (SqlBaseParser.FOLLOWING - 64)) | (1 << (SqlBaseParser.FORMAT - 64)) | (1 << (SqlBaseParser.FUNCTIONS - 64)) | (1 << (SqlBaseParser.GRANT - 64)) | (1 << (SqlBaseParser.GRANTED - 64)) | (1 << (SqlBaseParser.GRANTS 
- 64)) | (1 << (SqlBaseParser.GRAPHVIZ - 64)) | (1 << (SqlBaseParser.GROUPING - 64)) | (1 << (SqlBaseParser.GROUPS - 64)) | (1 << (SqlBaseParser.HOUR - 64)) | (1 << (SqlBaseParser.IF - 64)) | (1 << (SqlBaseParser.IGNORE - 64)) | (1 << (SqlBaseParser.INCLUDING - 64)) | (1 << (SqlBaseParser.INITIAL - 64)) | (1 << (SqlBaseParser.INPUT - 64)) | (1 << (SqlBaseParser.INTERVAL - 64)) | (1 << (SqlBaseParser.INVOKER - 64)) | (1 << (SqlBaseParser.IO - 64)) | (1 << (SqlBaseParser.ISOLATION - 64)) | (1 << (SqlBaseParser.JSON - 64)) | (1 << (SqlBaseParser.LAST - 64)) | (1 << (SqlBaseParser.LATERAL - 64)) | (1 << (SqlBaseParser.LEVEL - 64)) | (1 << (SqlBaseParser.LIMIT - 64)) | (1 << (SqlBaseParser.LISTAGG - 64)) | (1 << (SqlBaseParser.LOCAL - 64)) | (1 << (SqlBaseParser.LOCALTIME - 64)))) != 0) or ((((_la - 128)) & ~0x3f) == 0 and ((1 << (_la - 128)) & ((1 << (SqlBaseParser.LOCALTIMESTAMP - 128)) | (1 << (SqlBaseParser.LOGICAL - 128)) | (1 << (SqlBaseParser.MAP - 128)) | (1 << (SqlBaseParser.MATCH - 128)) | (1 << (SqlBaseParser.MATCHED - 128)) | (1 << (SqlBaseParser.MATCHES - 128)) | (1 << (SqlBaseParser.MATCH_RECOGNIZE - 128)) | (1 << (SqlBaseParser.MATERIALIZED - 128)) | (1 << (SqlBaseParser.MEASURES - 128)) | (1 << (SqlBaseParser.MERGE - 128)) | (1 << (SqlBaseParser.MINUTE - 128)) | (1 << (SqlBaseParser.MONTH - 128)) | (1 << (SqlBaseParser.NEXT - 128)) | (1 << (SqlBaseParser.NFC - 128)) | (1 << (SqlBaseParser.NFD - 128)) | (1 << (SqlBaseParser.NFKC - 128)) | (1 << (SqlBaseParser.NFKD - 128)) | (1 << (SqlBaseParser.NO - 128)) | (1 << (SqlBaseParser.NONE - 128)) | (1 << (SqlBaseParser.NORMALIZE - 128)) | (1 << (SqlBaseParser.NOT - 128)) | (1 << (SqlBaseParser.NULL - 128)) | (1 << (SqlBaseParser.NULLIF - 128)) | (1 << (SqlBaseParser.NULLS - 128)) | (1 << (SqlBaseParser.OFFSET - 128)) | (1 << (SqlBaseParser.OMIT - 128)) | (1 << (SqlBaseParser.OF - 128)) | (1 << (SqlBaseParser.ONE - 128)) | (1 << (SqlBaseParser.ONLY - 128)) | (1 << (SqlBaseParser.OPTION - 128)) | (1 << 
(SqlBaseParser.ORDINALITY - 128)) | (1 << (SqlBaseParser.OUTPUT - 128)) | (1 << (SqlBaseParser.OVER - 128)) | (1 << (SqlBaseParser.OVERFLOW - 128)) | (1 << (SqlBaseParser.PARTITION - 128)) | (1 << (SqlBaseParser.PARTITIONS - 128)) | (1 << (SqlBaseParser.PAST - 128)) | (1 << (SqlBaseParser.PATH - 128)) | (1 << (SqlBaseParser.PATTERN - 128)) | (1 << (SqlBaseParser.PER - 128)) | (1 << (SqlBaseParser.PERMUTE - 128)) | (1 << (SqlBaseParser.POSITION - 128)) | (1 << (SqlBaseParser.PRECEDING - 128)) | (1 << (SqlBaseParser.PRECISION - 128)) | (1 << (SqlBaseParser.PRIVILEGES - 128)) | (1 << (SqlBaseParser.PROPERTIES - 128)) | (1 << (SqlBaseParser.RANGE - 128)) | (1 << (SqlBaseParser.READ - 128)) | (1 << (SqlBaseParser.REFRESH - 128)) | (1 << (SqlBaseParser.RENAME - 128)) | (1 << (SqlBaseParser.REPEATABLE - 128)) | (1 << (SqlBaseParser.REPLACE - 128)) | (1 << (SqlBaseParser.RESET - 128)) | (1 << (SqlBaseParser.RESPECT - 128)) | (1 << (SqlBaseParser.RESTRICT - 128)) | (1 << (SqlBaseParser.REVOKE - 128)))) != 0) or ((((_la - 192)) & ~0x3f) == 0 and ((1 << (_la - 192)) & ((1 << (SqlBaseParser.ROLE - 192)) | (1 << (SqlBaseParser.ROLES - 192)) | (1 << (SqlBaseParser.ROLLBACK - 192)) | (1 << (SqlBaseParser.ROW - 192)) | (1 << (SqlBaseParser.ROWS - 192)) | (1 << (SqlBaseParser.RUNNING - 192)) | (1 << (SqlBaseParser.SCHEMA - 192)) | (1 << (SqlBaseParser.SCHEMAS - 192)) | (1 << (SqlBaseParser.SECOND - 192)) | (1 << (SqlBaseParser.SECURITY - 192)) | (1 << (SqlBaseParser.SEEK - 192)) | (1 << (SqlBaseParser.SERIALIZABLE - 192)) | (1 << (SqlBaseParser.SESSION - 192)) | (1 << (SqlBaseParser.SET - 192)) | (1 << (SqlBaseParser.SETS - 192)) | (1 << (SqlBaseParser.SHOW - 192)) | (1 << (SqlBaseParser.SOME - 192)) | (1 << (SqlBaseParser.START - 192)) | (1 << (SqlBaseParser.STATS - 192)) | (1 << (SqlBaseParser.SUBSET - 192)) | (1 << (SqlBaseParser.SUBSTRING - 192)) | (1 << (SqlBaseParser.SYSTEM - 192)) | (1 << (SqlBaseParser.TABLES - 192)) | (1 << (SqlBaseParser.TABLESAMPLE - 192)) | (1 << 
(SqlBaseParser.TEXT - 192)) | (1 << (SqlBaseParser.TIES - 192)) | (1 << (SqlBaseParser.TIME - 192)) | (1 << (SqlBaseParser.TIMESTAMP - 192)) | (1 << (SqlBaseParser.TO - 192)) | (1 << (SqlBaseParser.TRANSACTION - 192)) | (1 << (SqlBaseParser.TRUE - 192)) | (1 << (SqlBaseParser.TRUNCATE - 192)) | (1 << (SqlBaseParser.TRY_CAST - 192)) | (1 << (SqlBaseParser.TYPE - 192)) | (1 << (SqlBaseParser.UNBOUNDED - 192)) | (1 << (SqlBaseParser.UNCOMMITTED - 192)) | (1 << (SqlBaseParser.UNMATCHED - 192)) | (1 << (SqlBaseParser.UPDATE - 192)) | (1 << (SqlBaseParser.USE - 192)) | (1 << (SqlBaseParser.USER - 192)) | (1 << (SqlBaseParser.VALIDATE - 192)) | (1 << (SqlBaseParser.VERBOSE - 192)) | (1 << (SqlBaseParser.VERSION - 192)) | (1 << (SqlBaseParser.VIEW - 192)) | (1 << (SqlBaseParser.WINDOW - 192)) | (1 << (SqlBaseParser.WITHIN - 192)) | (1 << (SqlBaseParser.WITHOUT - 192)) | (1 << (SqlBaseParser.WORK - 192)) | (1 << (SqlBaseParser.WRITE - 192)) | (1 << (SqlBaseParser.YEAR - 192)) | (1 << (SqlBaseParser.ZONE - 192)))) != 0) or ((((_la - 261)) & ~0x3f) == 0 and ((1 << (_la - 261)) & ((1 << (SqlBaseParser.PLUS - 261)) | (1 << (SqlBaseParser.MINUS - 261)) | (1 << (SqlBaseParser.QUESTION_MARK - 261)) | (1 << (SqlBaseParser.STRING - 261)) | (1 << (SqlBaseParser.UNICODE_STRING - 261)) | (1 << (SqlBaseParser.BINARY_LITERAL - 261)) | (1 << (SqlBaseParser.INTEGER_VALUE - 261)) | (1 << (SqlBaseParser.DECIMAL_VALUE - 261)) | (1 << (SqlBaseParser.DOUBLE_VALUE - 261)) | (1 << (SqlBaseParser.IDENTIFIER - 261)) | (1 << (SqlBaseParser.DIGIT_IDENTIFIER - 261)) | (1 << (SqlBaseParser.QUOTED_IDENTIFIER - 261)) | (1 << (SqlBaseParser.BACKQUOTED_IDENTIFIER - 261)))) != 0):
self.state = 427
self.callArgument()
self.state = 432
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la==SqlBaseParser.T__3:
self.state = 428
self.match(SqlBaseParser.T__3)
self.state = 429
self.callArgument()
self.state = 434
self._errHandler.sync(self)
_la = self._input.LA(1)
self.state = 437
self.match(SqlBaseParser.T__2)
self.state = 442
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==SqlBaseParser.WHERE:
self.state = 440
self.match(SqlBaseParser.WHERE)
self.state = 441
localctx.where = self.booleanExpression(0)
elif la_ == 23:
localctx = SqlBaseParser.AnalyzeContext(self, localctx)
self.enterOuterAlt(localctx, 23)
self.state = 444
self.match(SqlBaseParser.ANALYZE)
self.state = 445
self.qualifiedName()
self.state = 448
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==SqlBaseParser.WITH:
self.state = 446
self.match(SqlBaseParser.WITH)
self.state = 447
self.properties()
elif la_ == 24:
localctx = SqlBaseParser.CreateMaterializedViewContext(self, localctx)
self.enterOuterAlt(localctx, 24)
self.state = 450
self.match(SqlBaseParser.CREATE)
self.state = 453
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==SqlBaseParser.OR:
self.state = 451
self.match(SqlBaseParser.OR)
self.state = 452
self.match(SqlBaseParser.REPLACE)
self.state = 455
self.match(SqlBaseParser.MATERIALIZED)
self.state = 456
self.match(SqlBaseParser.VIEW)
self.state = 460
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,34,self._ctx)
if la_ == 1:
self.state = 457
self.match(SqlBaseParser.IF)
self.state = 458
self.match(SqlBaseParser.NOT)
self.state = 459
self.match(SqlBaseParser.EXISTS)
self.state = 462
self.qualifiedName()
self.state = 465
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==SqlBaseParser.COMMENT:
self.state = 463
self.match(SqlBaseParser.COMMENT)
self.state = 464
self.string()
self.state = 469
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==SqlBaseParser.WITH:
self.state = 467
self.match(SqlBaseParser.WITH)
self.state = 468
self.properties()
self.state = 471
self.match(SqlBaseParser.AS)
self.state = 472
self.query()
elif la_ == 25:
localctx = SqlBaseParser.CreateViewContext(self, localctx)
self.enterOuterAlt(localctx, 25)
self.state = 474
self.match(SqlBaseParser.CREATE)
self.state = 477
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==SqlBaseParser.OR:
self.state = 475
self.match(SqlBaseParser.OR)
self.state = 476
self.match(SqlBaseParser.REPLACE)
self.state = 479
self.match(SqlBaseParser.VIEW)
self.state = 480
self.qualifiedName()
self.state = 483
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==SqlBaseParser.COMMENT:
self.state = 481
self.match(SqlBaseParser.COMMENT)
self.state = 482
self.string()
self.state = 487
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==SqlBaseParser.SECURITY:
self.state = 485
self.match(SqlBaseParser.SECURITY)
self.state = 486
_la = self._input.LA(1)
if not(_la==SqlBaseParser.DEFINER or _la==SqlBaseParser.INVOKER):
self._errHandler.recoverInline(self)
else:
self._errHandler.reportMatch(self)
self.consume()
self.state = 489
self.match(SqlBaseParser.AS)
self.state = 490
self.query()
elif la_ == 26:
localctx = SqlBaseParser.RefreshMaterializedViewContext(self, localctx)
self.enterOuterAlt(localctx, 26)
self.state = 492
self.match(SqlBaseParser.REFRESH)
self.state = 493
self.match(SqlBaseParser.MATERIALIZED)
self.state = 494
self.match(SqlBaseParser.VIEW)
self.state = 495
self.qualifiedName()
elif la_ == 27:
localctx = SqlBaseParser.DropMaterializedViewContext(self, localctx)
self.enterOuterAlt(localctx, 27)
self.state = 496
self.match(SqlBaseParser.DROP)
self.state = 497
self.match(SqlBaseParser.MATERIALIZED)
self.state = 498
self.match(SqlBaseParser.VIEW)
self.state = 501
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,40,self._ctx)
if la_ == 1:
self.state = 499
self.match(SqlBaseParser.IF)
self.state = 500
self.match(SqlBaseParser.EXISTS)
self.state = 503
self.qualifiedName()
elif la_ == 28:
localctx = SqlBaseParser.RenameMaterializedViewContext(self, localctx)
self.enterOuterAlt(localctx, 28)
self.state = 504
self.match(SqlBaseParser.ALTER)
self.state = 505
self.match(SqlBaseParser.MATERIALIZED)
self.state = 506
self.match(SqlBaseParser.VIEW)
self.state = 509
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,41,self._ctx)
if la_ == 1:
self.state = 507
self.match(SqlBaseParser.IF)
self.state = 508
self.match(SqlBaseParser.EXISTS)
self.state = 511
localctx.from_ = self.qualifiedName()
self.state = 512
self.match(SqlBaseParser.RENAME)
self.state = 513
self.match(SqlBaseParser.TO)
self.state = 514
localctx.to = self.qualifiedName()
elif la_ == 29:
localctx = SqlBaseParser.DropViewContext(self, localctx)
self.enterOuterAlt(localctx, 29)
self.state = 516
self.match(SqlBaseParser.DROP)
self.state = 517
self.match(SqlBaseParser.VIEW)
self.state = 520
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,42,self._ctx)
if la_ == 1:
self.state = 518
self.match(SqlBaseParser.IF)
self.state = 519
self.match(SqlBaseParser.EXISTS)
self.state = 522
self.qualifiedName()
elif la_ == 30:
localctx = SqlBaseParser.RenameViewContext(self, localctx)
self.enterOuterAlt(localctx, 30)
self.state = 523
self.match(SqlBaseParser.ALTER)
self.state = 524
self.match(SqlBaseParser.VIEW)
self.state = 525
localctx.from_ = self.qualifiedName()
self.state = 526
self.match(SqlBaseParser.RENAME)
self.state = 527
self.match(SqlBaseParser.TO)
self.state = 528
localctx.to = self.qualifiedName()
elif la_ == 31:
localctx = SqlBaseParser.SetViewAuthorizationContext(self, localctx)
self.enterOuterAlt(localctx, 31)
self.state = 530
self.match(SqlBaseParser.ALTER)
self.state = 531
self.match(SqlBaseParser.VIEW)
self.state = 532
localctx.from_ = self.qualifiedName()
self.state = 533
self.match(SqlBaseParser.SET)
self.state = 534
self.match(SqlBaseParser.AUTHORIZATION)
self.state = 535
self.principal()
elif la_ == 32:
localctx = SqlBaseParser.CallContext(self, localctx)
self.enterOuterAlt(localctx, 32)
self.state = 537
self.match(SqlBaseParser.CALL)
self.state = 538
self.qualifiedName()
self.state = 539
self.match(SqlBaseParser.T__1)
self.state = 548
self._errHandler.sync(self)
_la = self._input.LA(1)
if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << SqlBaseParser.T__1) | (1 << SqlBaseParser.ADD) | (1 << SqlBaseParser.ADMIN) | (1 << SqlBaseParser.AFTER) | (1 << SqlBaseParser.ALL) | (1 << SqlBaseParser.ANALYZE) | (1 << SqlBaseParser.ANY) | (1 << SqlBaseParser.ARRAY) | (1 << SqlBaseParser.ASC) | (1 << SqlBaseParser.AT) | (1 << SqlBaseParser.AUTHORIZATION) | (1 << SqlBaseParser.BERNOULLI) | (1 << SqlBaseParser.CALL) | (1 << SqlBaseParser.CASCADE) | (1 << SqlBaseParser.CASE) | (1 << SqlBaseParser.CAST) | (1 << SqlBaseParser.CATALOGS) | (1 << SqlBaseParser.COLUMN) | (1 << SqlBaseParser.COLUMNS) | (1 << SqlBaseParser.COMMENT) | (1 << SqlBaseParser.COMMIT) | (1 << SqlBaseParser.COMMITTED) | (1 << SqlBaseParser.COUNT) | (1 << SqlBaseParser.CURRENT) | (1 << SqlBaseParser.CURRENT_CATALOG) | (1 << SqlBaseParser.CURRENT_DATE) | (1 << SqlBaseParser.CURRENT_PATH) | (1 << SqlBaseParser.CURRENT_SCHEMA) | (1 << SqlBaseParser.CURRENT_TIME) | (1 << SqlBaseParser.CURRENT_TIMESTAMP) | (1 << SqlBaseParser.CURRENT_USER) | (1 << SqlBaseParser.DATA) | (1 << SqlBaseParser.DATE) | (1 << SqlBaseParser.DAY) | (1 << SqlBaseParser.DEFINER) | (1 << SqlBaseParser.DENY))) != 0) or ((((_la - 64)) & ~0x3f) == 0 and ((1 << (_la - 64)) & ((1 << (SqlBaseParser.DESC - 64)) | (1 << (SqlBaseParser.DEFINE - 64)) | (1 << (SqlBaseParser.DISTRIBUTED - 64)) | (1 << (SqlBaseParser.DOUBLE - 64)) | (1 << (SqlBaseParser.EMPTY - 64)) | (1 << (SqlBaseParser.ERROR - 64)) | (1 << (SqlBaseParser.EXCLUDING - 64)) | (1 << (SqlBaseParser.EXISTS - 64)) | (1 << (SqlBaseParser.EXPLAIN - 64)) | (1 << (SqlBaseParser.EXTRACT - 64)) | (1 << (SqlBaseParser.FALSE - 64)) | (1 << (SqlBaseParser.FETCH - 64)) | (1 << (SqlBaseParser.FILTER - 64)) | (1 << (SqlBaseParser.FINAL - 64)) | (1 << (SqlBaseParser.FIRST - 64)) | (1 << (SqlBaseParser.FOLLOWING - 64)) | (1 << (SqlBaseParser.FORMAT - 64)) | (1 << (SqlBaseParser.FUNCTIONS - 64)) | (1 << (SqlBaseParser.GRANT - 64)) | (1 << (SqlBaseParser.GRANTED - 64)) | (1 << (SqlBaseParser.GRANTS 
- 64)) | (1 << (SqlBaseParser.GRAPHVIZ - 64)) | (1 << (SqlBaseParser.GROUPING - 64)) | (1 << (SqlBaseParser.GROUPS - 64)) | (1 << (SqlBaseParser.HOUR - 64)) | (1 << (SqlBaseParser.IF - 64)) | (1 << (SqlBaseParser.IGNORE - 64)) | (1 << (SqlBaseParser.INCLUDING - 64)) | (1 << (SqlBaseParser.INITIAL - 64)) | (1 << (SqlBaseParser.INPUT - 64)) | (1 << (SqlBaseParser.INTERVAL - 64)) | (1 << (SqlBaseParser.INVOKER - 64)) | (1 << (SqlBaseParser.IO - 64)) | (1 << (SqlBaseParser.ISOLATION - 64)) | (1 << (SqlBaseParser.JSON - 64)) | (1 << (SqlBaseParser.LAST - 64)) | (1 << (SqlBaseParser.LATERAL - 64)) | (1 << (SqlBaseParser.LEVEL - 64)) | (1 << (SqlBaseParser.LIMIT - 64)) | (1 << (SqlBaseParser.LISTAGG - 64)) | (1 << (SqlBaseParser.LOCAL - 64)) | (1 << (SqlBaseParser.LOCALTIME - 64)))) != 0) or ((((_la - 128)) & ~0x3f) == 0 and ((1 << (_la - 128)) & ((1 << (SqlBaseParser.LOCALTIMESTAMP - 128)) | (1 << (SqlBaseParser.LOGICAL - 128)) | (1 << (SqlBaseParser.MAP - 128)) | (1 << (SqlBaseParser.MATCH - 128)) | (1 << (SqlBaseParser.MATCHED - 128)) | (1 << (SqlBaseParser.MATCHES - 128)) | (1 << (SqlBaseParser.MATCH_RECOGNIZE - 128)) | (1 << (SqlBaseParser.MATERIALIZED - 128)) | (1 << (SqlBaseParser.MEASURES - 128)) | (1 << (SqlBaseParser.MERGE - 128)) | (1 << (SqlBaseParser.MINUTE - 128)) | (1 << (SqlBaseParser.MONTH - 128)) | (1 << (SqlBaseParser.NEXT - 128)) | (1 << (SqlBaseParser.NFC - 128)) | (1 << (SqlBaseParser.NFD - 128)) | (1 << (SqlBaseParser.NFKC - 128)) | (1 << (SqlBaseParser.NFKD - 128)) | (1 << (SqlBaseParser.NO - 128)) | (1 << (SqlBaseParser.NONE - 128)) | (1 << (SqlBaseParser.NORMALIZE - 128)) | (1 << (SqlBaseParser.NOT - 128)) | (1 << (SqlBaseParser.NULL - 128)) | (1 << (SqlBaseParser.NULLIF - 128)) | (1 << (SqlBaseParser.NULLS - 128)) | (1 << (SqlBaseParser.OFFSET - 128)) | (1 << (SqlBaseParser.OMIT - 128)) | (1 << (SqlBaseParser.OF - 128)) | (1 << (SqlBaseParser.ONE - 128)) | (1 << (SqlBaseParser.ONLY - 128)) | (1 << (SqlBaseParser.OPTION - 128)) | (1 << 
(SqlBaseParser.ORDINALITY - 128)) | (1 << (SqlBaseParser.OUTPUT - 128)) | (1 << (SqlBaseParser.OVER - 128)) | (1 << (SqlBaseParser.OVERFLOW - 128)) | (1 << (SqlBaseParser.PARTITION - 128)) | (1 << (SqlBaseParser.PARTITIONS - 128)) | (1 << (SqlBaseParser.PAST - 128)) | (1 << (SqlBaseParser.PATH - 128)) | (1 << (SqlBaseParser.PATTERN - 128)) | (1 << (SqlBaseParser.PER - 128)) | (1 << (SqlBaseParser.PERMUTE - 128)) | (1 << (SqlBaseParser.POSITION - 128)) | (1 << (SqlBaseParser.PRECEDING - 128)) | (1 << (SqlBaseParser.PRECISION - 128)) | (1 << (SqlBaseParser.PRIVILEGES - 128)) | (1 << (SqlBaseParser.PROPERTIES - 128)) | (1 << (SqlBaseParser.RANGE - 128)) | (1 << (SqlBaseParser.READ - 128)) | (1 << (SqlBaseParser.REFRESH - 128)) | (1 << (SqlBaseParser.RENAME - 128)) | (1 << (SqlBaseParser.REPEATABLE - 128)) | (1 << (SqlBaseParser.REPLACE - 128)) | (1 << (SqlBaseParser.RESET - 128)) | (1 << (SqlBaseParser.RESPECT - 128)) | (1 << (SqlBaseParser.RESTRICT - 128)) | (1 << (SqlBaseParser.REVOKE - 128)))) != 0) or ((((_la - 192)) & ~0x3f) == 0 and ((1 << (_la - 192)) & ((1 << (SqlBaseParser.ROLE - 192)) | (1 << (SqlBaseParser.ROLES - 192)) | (1 << (SqlBaseParser.ROLLBACK - 192)) | (1 << (SqlBaseParser.ROW - 192)) | (1 << (SqlBaseParser.ROWS - 192)) | (1 << (SqlBaseParser.RUNNING - 192)) | (1 << (SqlBaseParser.SCHEMA - 192)) | (1 << (SqlBaseParser.SCHEMAS - 192)) | (1 << (SqlBaseParser.SECOND - 192)) | (1 << (SqlBaseParser.SECURITY - 192)) | (1 << (SqlBaseParser.SEEK - 192)) | (1 << (SqlBaseParser.SERIALIZABLE - 192)) | (1 << (SqlBaseParser.SESSION - 192)) | (1 << (SqlBaseParser.SET - 192)) | (1 << (SqlBaseParser.SETS - 192)) | (1 << (SqlBaseParser.SHOW - 192)) | (1 << (SqlBaseParser.SOME - 192)) | (1 << (SqlBaseParser.START - 192)) | (1 << (SqlBaseParser.STATS - 192)) | (1 << (SqlBaseParser.SUBSET - 192)) | (1 << (SqlBaseParser.SUBSTRING - 192)) | (1 << (SqlBaseParser.SYSTEM - 192)) | (1 << (SqlBaseParser.TABLES - 192)) | (1 << (SqlBaseParser.TABLESAMPLE - 192)) | (1 << 
(SqlBaseParser.TEXT - 192)) | (1 << (SqlBaseParser.TIES - 192)) | (1 << (SqlBaseParser.TIME - 192)) | (1 << (SqlBaseParser.TIMESTAMP - 192)) | (1 << (SqlBaseParser.TO - 192)) | (1 << (SqlBaseParser.TRANSACTION - 192)) | (1 << (SqlBaseParser.TRUE - 192)) | (1 << (SqlBaseParser.TRUNCATE - 192)) | (1 << (SqlBaseParser.TRY_CAST - 192)) | (1 << (SqlBaseParser.TYPE - 192)) | (1 << (SqlBaseParser.UNBOUNDED - 192)) | (1 << (SqlBaseParser.UNCOMMITTED - 192)) | (1 << (SqlBaseParser.UNMATCHED - 192)) | (1 << (SqlBaseParser.UPDATE - 192)) | (1 << (SqlBaseParser.USE - 192)) | (1 << (SqlBaseParser.USER - 192)) | (1 << (SqlBaseParser.VALIDATE - 192)) | (1 << (SqlBaseParser.VERBOSE - 192)) | (1 << (SqlBaseParser.VERSION - 192)) | (1 << (SqlBaseParser.VIEW - 192)) | (1 << (SqlBaseParser.WINDOW - 192)) | (1 << (SqlBaseParser.WITHIN - 192)) | (1 << (SqlBaseParser.WITHOUT - 192)) | (1 << (SqlBaseParser.WORK - 192)) | (1 << (SqlBaseParser.WRITE - 192)) | (1 << (SqlBaseParser.YEAR - 192)) | (1 << (SqlBaseParser.ZONE - 192)))) != 0) or ((((_la - 261)) & ~0x3f) == 0 and ((1 << (_la - 261)) & ((1 << (SqlBaseParser.PLUS - 261)) | (1 << (SqlBaseParser.MINUS - 261)) | (1 << (SqlBaseParser.QUESTION_MARK - 261)) | (1 << (SqlBaseParser.STRING - 261)) | (1 << (SqlBaseParser.UNICODE_STRING - 261)) | (1 << (SqlBaseParser.BINARY_LITERAL - 261)) | (1 << (SqlBaseParser.INTEGER_VALUE - 261)) | (1 << (SqlBaseParser.DECIMAL_VALUE - 261)) | (1 << (SqlBaseParser.DOUBLE_VALUE - 261)) | (1 << (SqlBaseParser.IDENTIFIER - 261)) | (1 << (SqlBaseParser.DIGIT_IDENTIFIER - 261)) | (1 << (SqlBaseParser.QUOTED_IDENTIFIER - 261)) | (1 << (SqlBaseParser.BACKQUOTED_IDENTIFIER - 261)))) != 0):
self.state = 540
self.callArgument()
self.state = 545
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la==SqlBaseParser.T__3:
self.state = 541
self.match(SqlBaseParser.T__3)
self.state = 542
self.callArgument()
self.state = 547
self._errHandler.sync(self)
_la = self._input.LA(1)
self.state = 550
self.match(SqlBaseParser.T__2)
elif la_ == 33:
localctx = SqlBaseParser.CreateRoleContext(self, localctx)
self.enterOuterAlt(localctx, 33)
self.state = 552
self.match(SqlBaseParser.CREATE)
self.state = 553
self.match(SqlBaseParser.ROLE)
self.state = 554
localctx.name = self.identifier()
self.state = 558
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==SqlBaseParser.WITH:
self.state = 555
self.match(SqlBaseParser.WITH)
self.state = 556
self.match(SqlBaseParser.ADMIN)
self.state = 557
self.grantor()
self.state = 562
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==SqlBaseParser.IN:
self.state = 560
self.match(SqlBaseParser.IN)
self.state = 561
localctx.catalog = self.identifier()
elif la_ == 34:
localctx = SqlBaseParser.DropRoleContext(self, localctx)
self.enterOuterAlt(localctx, 34)
self.state = 564
self.match(SqlBaseParser.DROP)
self.state = 565
self.match(SqlBaseParser.ROLE)
self.state = 566
localctx.name = self.identifier()
self.state = 569
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==SqlBaseParser.IN:
self.state = 567
self.match(SqlBaseParser.IN)
self.state = 568
localctx.catalog = self.identifier()
elif la_ == 35:
localctx = SqlBaseParser.GrantRolesContext(self, localctx)
self.enterOuterAlt(localctx, 35)
self.state = 571
self.match(SqlBaseParser.GRANT)
self.state = 572
self.roles()
self.state = 573
self.match(SqlBaseParser.TO)
self.state = 574
self.principal()
self.state = 579
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la==SqlBaseParser.T__3:
self.state = 575
self.match(SqlBaseParser.T__3)
self.state = 576
self.principal()
self.state = 581
self._errHandler.sync(self)
_la = self._input.LA(1)
self.state = 585
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==SqlBaseParser.WITH:
self.state = 582
self.match(SqlBaseParser.WITH)
self.state = 583
self.match(SqlBaseParser.ADMIN)
self.state = 584
self.match(SqlBaseParser.OPTION)
self.state = 590
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==SqlBaseParser.GRANTED:
self.state = 587
self.match(SqlBaseParser.GRANTED)
self.state = 588
self.match(SqlBaseParser.BY)
self.state = 589
self.grantor()
self.state = 594
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==SqlBaseParser.IN:
self.state = 592
self.match(SqlBaseParser.IN)
self.state = 593
localctx.catalog = self.identifier()
elif la_ == 36:
localctx = SqlBaseParser.RevokeRolesContext(self, localctx)
self.enterOuterAlt(localctx, 36)
self.state = 596
self.match(SqlBaseParser.REVOKE)
self.state = 600
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,52,self._ctx)
if la_ == 1:
self.state = 597
self.match(SqlBaseParser.ADMIN)
self.state = 598
self.match(SqlBaseParser.OPTION)
self.state = 599
self.match(SqlBaseParser.FOR)
self.state = 602
self.roles()
self.state = 603
self.match(SqlBaseParser.FROM)
self.state = 604
self.principal()
self.state = 609
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la==SqlBaseParser.T__3:
self.state = 605
self.match(SqlBaseParser.T__3)
self.state = 606
self.principal()
self.state = 611
self._errHandler.sync(self)
_la = self._input.LA(1)
self.state = 615
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==SqlBaseParser.GRANTED:
self.state = 612
self.match(SqlBaseParser.GRANTED)
self.state = 613
self.match(SqlBaseParser.BY)
self.state = 614
self.grantor()
self.state = 619
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==SqlBaseParser.IN:
self.state = 617
self.match(SqlBaseParser.IN)
self.state = 618
localctx.catalog = self.identifier()
elif la_ == 37:
localctx = SqlBaseParser.SetRoleContext(self, localctx)
self.enterOuterAlt(localctx, 37)
self.state = 621
self.match(SqlBaseParser.SET)
self.state = 622
self.match(SqlBaseParser.ROLE)
self.state = 626
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,56,self._ctx)
if la_ == 1:
self.state = 623
self.match(SqlBaseParser.ALL)
elif la_ == 2:
self.state = 624
self.match(SqlBaseParser.NONE)
elif la_ == 3:
self.state = 625
localctx.role = self.identifier()
self.state = 630
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==SqlBaseParser.IN:
self.state = 628
self.match(SqlBaseParser.IN)
self.state = 629
localctx.catalog = self.identifier()
elif la_ == 38:
localctx = SqlBaseParser.GrantContext(self, localctx)
self.enterOuterAlt(localctx, 38)
self.state = 632
self.match(SqlBaseParser.GRANT)
self.state = 643
self._errHandler.sync(self)
token = self._input.LA(1)
if token in [SqlBaseParser.CREATE, SqlBaseParser.DELETE, SqlBaseParser.INSERT, SqlBaseParser.SELECT, SqlBaseParser.UPDATE]:
self.state = 633
self.privilege()
self.state = 638
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la==SqlBaseParser.T__3:
self.state = 634
self.match(SqlBaseParser.T__3)
self.state = 635
self.privilege()
self.state = 640
self._errHandler.sync(self)
_la = self._input.LA(1)
elif token in [SqlBaseParser.ALL]:
self.state = 641
self.match(SqlBaseParser.ALL)
self.state = 642
self.match(SqlBaseParser.PRIVILEGES)
else:
raise NoViableAltException(self)
self.state = 645
self.match(SqlBaseParser.ON)
self.state = 647
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,60,self._ctx)
if la_ == 1:
self.state = 646
_la = self._input.LA(1)
if not(_la==SqlBaseParser.SCHEMA or _la==SqlBaseParser.TABLE):
self._errHandler.recoverInline(self)
else:
self._errHandler.reportMatch(self)
self.consume()
self.state = 649
self.qualifiedName()
self.state = 650
self.match(SqlBaseParser.TO)
self.state = 651
localctx.grantee = self.principal()
self.state = 655
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==SqlBaseParser.WITH:
self.state = 652
self.match(SqlBaseParser.WITH)
self.state = 653
self.match(SqlBaseParser.GRANT)
self.state = 654
self.match(SqlBaseParser.OPTION)
elif la_ == 39:
localctx = SqlBaseParser.DenyContext(self, localctx)
self.enterOuterAlt(localctx, 39)
self.state = 657
self.match(SqlBaseParser.DENY)
self.state = 668
self._errHandler.sync(self)
token = self._input.LA(1)
if token in [SqlBaseParser.CREATE, SqlBaseParser.DELETE, SqlBaseParser.INSERT, SqlBaseParser.SELECT, SqlBaseParser.UPDATE]:
self.state = 658
self.privilege()
self.state = 663
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la==SqlBaseParser.T__3:
self.state = 659
self.match(SqlBaseParser.T__3)
self.state = 660
self.privilege()
self.state = 665
self._errHandler.sync(self)
_la = self._input.LA(1)
elif token in [SqlBaseParser.ALL]:
self.state = 666
self.match(SqlBaseParser.ALL)
self.state = 667
self.match(SqlBaseParser.PRIVILEGES)
else:
raise NoViableAltException(self)
self.state = 670
self.match(SqlBaseParser.ON)
self.state = 672
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,64,self._ctx)
if la_ == 1:
self.state = 671
_la = self._input.LA(1)
if not(_la==SqlBaseParser.SCHEMA or _la==SqlBaseParser.TABLE):
self._errHandler.recoverInline(self)
else:
self._errHandler.reportMatch(self)
self.consume()
self.state = 674
self.qualifiedName()
self.state = 675
self.match(SqlBaseParser.TO)
self.state = 676
localctx.grantee = self.principal()
elif la_ == 40:
localctx = SqlBaseParser.RevokeContext(self, localctx)
self.enterOuterAlt(localctx, 40)
self.state = 678
self.match(SqlBaseParser.REVOKE)
self.state = 682
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==SqlBaseParser.GRANT:
self.state = 679
self.match(SqlBaseParser.GRANT)
self.state = 680
self.match(SqlBaseParser.OPTION)
self.state = 681
self.match(SqlBaseParser.FOR)
self.state = 694
self._errHandler.sync(self)
token = self._input.LA(1)
if token in [SqlBaseParser.CREATE, SqlBaseParser.DELETE, SqlBaseParser.INSERT, SqlBaseParser.SELECT, SqlBaseParser.UPDATE]:
self.state = 684
self.privilege()
self.state = 689
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la==SqlBaseParser.T__3:
self.state = 685
self.match(SqlBaseParser.T__3)
self.state = 686
self.privilege()
self.state = 691
self._errHandler.sync(self)
_la = self._input.LA(1)
elif token in [SqlBaseParser.ALL]:
self.state = 692
self.match(SqlBaseParser.ALL)
self.state = 693
self.match(SqlBaseParser.PRIVILEGES)
else:
raise NoViableAltException(self)
self.state = 696
self.match(SqlBaseParser.ON)
self.state = 698
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,68,self._ctx)
if la_ == 1:
self.state = 697
_la = self._input.LA(1)
if not(_la==SqlBaseParser.SCHEMA or _la==SqlBaseParser.TABLE):
self._errHandler.recoverInline(self)
else:
self._errHandler.reportMatch(self)
self.consume()
self.state = 700
self.qualifiedName()
self.state = 701
self.match(SqlBaseParser.FROM)
self.state = 702
localctx.grantee = self.principal()
elif la_ == 41:
localctx = SqlBaseParser.ShowGrantsContext(self, localctx)
self.enterOuterAlt(localctx, 41)
self.state = 704
self.match(SqlBaseParser.SHOW)
self.state = 705
self.match(SqlBaseParser.GRANTS)
self.state = 711
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==SqlBaseParser.ON:
self.state = 706
self.match(SqlBaseParser.ON)
self.state = 708
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==SqlBaseParser.TABLE:
self.state = 707
self.match(SqlBaseParser.TABLE)
self.state = 710
self.qualifiedName()
elif la_ == 42:
localctx = SqlBaseParser.ExplainContext(self, localctx)
self.enterOuterAlt(localctx, 42)
self.state = 713
self.match(SqlBaseParser.EXPLAIN)
self.state = 725
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,72,self._ctx)
if la_ == 1:
self.state = 714
self.match(SqlBaseParser.T__1)
self.state = 715
self.explainOption()
self.state = 720
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la==SqlBaseParser.T__3:
self.state = 716
self.match(SqlBaseParser.T__3)
self.state = 717
self.explainOption()
self.state = 722
self._errHandler.sync(self)
_la = self._input.LA(1)
self.state = 723
self.match(SqlBaseParser.T__2)
self.state = 727
self.statement()
elif la_ == 43:
localctx = SqlBaseParser.ExplainAnalyzeContext(self, localctx)
self.enterOuterAlt(localctx, 43)
self.state = 728
self.match(SqlBaseParser.EXPLAIN)
self.state = 729
self.match(SqlBaseParser.ANALYZE)
self.state = 731
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==SqlBaseParser.VERBOSE:
self.state = 730
self.match(SqlBaseParser.VERBOSE)
self.state = 733
self.statement()
elif la_ == 44:
localctx = SqlBaseParser.ShowCreateTableContext(self, localctx)
self.enterOuterAlt(localctx, 44)
self.state = 734
self.match(SqlBaseParser.SHOW)
self.state = 735
self.match(SqlBaseParser.CREATE)
self.state = 736
self.match(SqlBaseParser.TABLE)
self.state = 737
self.qualifiedName()
elif la_ == 45:
localctx = SqlBaseParser.ShowCreateSchemaContext(self, localctx)
self.enterOuterAlt(localctx, 45)
self.state = 738
self.match(SqlBaseParser.SHOW)
self.state = 739
self.match(SqlBaseParser.CREATE)
self.state = 740
self.match(SqlBaseParser.SCHEMA)
self.state = 741
self.qualifiedName()
elif la_ == 46:
localctx = SqlBaseParser.ShowCreateViewContext(self, localctx)
self.enterOuterAlt(localctx, 46)
self.state = 742
self.match(SqlBaseParser.SHOW)
self.state = 743
self.match(SqlBaseParser.CREATE)
self.state = 744
self.match(SqlBaseParser.VIEW)
self.state = 745
self.qualifiedName()
elif la_ == 47:
localctx = SqlBaseParser.ShowCreateMaterializedViewContext(self, localctx)
self.enterOuterAlt(localctx, 47)
self.state = 746
self.match(SqlBaseParser.SHOW)
self.state = 747
self.match(SqlBaseParser.CREATE)
self.state = 748
self.match(SqlBaseParser.MATERIALIZED)
self.state = 749
self.match(SqlBaseParser.VIEW)
self.state = 750
self.qualifiedName()
elif la_ == 48:
localctx = SqlBaseParser.ShowTablesContext(self, localctx)
self.enterOuterAlt(localctx, 48)
self.state = 751
self.match(SqlBaseParser.SHOW)
self.state = 752
self.match(SqlBaseParser.TABLES)
self.state = 755
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==SqlBaseParser.FROM or _la==SqlBaseParser.IN:
self.state = 753
_la = self._input.LA(1)
if not(_la==SqlBaseParser.FROM or _la==SqlBaseParser.IN):
self._errHandler.recoverInline(self)
else:
self._errHandler.reportMatch(self)
self.consume()
self.state = 754
self.qualifiedName()
self.state = 763
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==SqlBaseParser.LIKE:
self.state = 757
self.match(SqlBaseParser.LIKE)
self.state = 758
localctx.pattern = self.string()
self.state = 761
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==SqlBaseParser.ESCAPE:
self.state = 759
self.match(SqlBaseParser.ESCAPE)
self.state = 760
localctx.escape = self.string()
elif la_ == 49:
localctx = SqlBaseParser.ShowSchemasContext(self, localctx)
self.enterOuterAlt(localctx, 49)
self.state = 765
self.match(SqlBaseParser.SHOW)
self.state = 766
self.match(SqlBaseParser.SCHEMAS)
self.state = 769
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==SqlBaseParser.FROM or _la==SqlBaseParser.IN:
self.state = 767
_la = self._input.LA(1)
if not(_la==SqlBaseParser.FROM or _la==SqlBaseParser.IN):
self._errHandler.recoverInline(self)
else:
self._errHandler.reportMatch(self)
self.consume()
self.state = 768
self.identifier()
self.state = 777
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==SqlBaseParser.LIKE:
self.state = 771
self.match(SqlBaseParser.LIKE)
self.state = 772
localctx.pattern = self.string()
self.state = 775
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==SqlBaseParser.ESCAPE:
self.state = 773
self.match(SqlBaseParser.ESCAPE)
self.state = 774
localctx.escape = self.string()
elif la_ == 50:
localctx = SqlBaseParser.ShowCatalogsContext(self, localctx)
self.enterOuterAlt(localctx, 50)
self.state = 779
self.match(SqlBaseParser.SHOW)
self.state = 780
self.match(SqlBaseParser.CATALOGS)
self.state = 787
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==SqlBaseParser.LIKE:
self.state = 781
self.match(SqlBaseParser.LIKE)
self.state = 782
localctx.pattern = self.string()
self.state = 785
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==SqlBaseParser.ESCAPE:
self.state = 783
self.match(SqlBaseParser.ESCAPE)
self.state = 784
localctx.escape = self.string()
elif la_ == 51:
localctx = SqlBaseParser.ShowColumnsContext(self, localctx)
self.enterOuterAlt(localctx, 51)
self.state = 789
self.match(SqlBaseParser.SHOW)
self.state = 790
self.match(SqlBaseParser.COLUMNS)
self.state = 791
_la = self._input.LA(1)
if not(_la==SqlBaseParser.FROM or _la==SqlBaseParser.IN):
self._errHandler.recoverInline(self)
else:
self._errHandler.reportMatch(self)
self.consume()
self.state = 793
self._errHandler.sync(self)
_la = self._input.LA(1)
if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << SqlBaseParser.ADD) | (1 << SqlBaseParser.ADMIN) | (1 << SqlBaseParser.AFTER) | (1 << SqlBaseParser.ALL) | (1 << SqlBaseParser.ANALYZE) | (1 << SqlBaseParser.ANY) | (1 << SqlBaseParser.ARRAY) | (1 << SqlBaseParser.ASC) | (1 << SqlBaseParser.AT) | (1 << SqlBaseParser.AUTHORIZATION) | (1 << SqlBaseParser.BERNOULLI) | (1 << SqlBaseParser.CALL) | (1 << SqlBaseParser.CASCADE) | (1 << SqlBaseParser.CATALOGS) | (1 << SqlBaseParser.COLUMN) | (1 << SqlBaseParser.COLUMNS) | (1 << SqlBaseParser.COMMENT) | (1 << SqlBaseParser.COMMIT) | (1 << SqlBaseParser.COMMITTED) | (1 << SqlBaseParser.COUNT) | (1 << SqlBaseParser.CURRENT) | (1 << SqlBaseParser.DATA) | (1 << SqlBaseParser.DATE) | (1 << SqlBaseParser.DAY) | (1 << SqlBaseParser.DEFINER) | (1 << SqlBaseParser.DENY))) != 0) or ((((_la - 64)) & ~0x3f) == 0 and ((1 << (_la - 64)) & ((1 << (SqlBaseParser.DESC - 64)) | (1 << (SqlBaseParser.DEFINE - 64)) | (1 << (SqlBaseParser.DISTRIBUTED - 64)) | (1 << (SqlBaseParser.DOUBLE - 64)) | (1 << (SqlBaseParser.EMPTY - 64)) | (1 << (SqlBaseParser.ERROR - 64)) | (1 << (SqlBaseParser.EXCLUDING - 64)) | (1 << (SqlBaseParser.EXPLAIN - 64)) | (1 << (SqlBaseParser.FETCH - 64)) | (1 << (SqlBaseParser.FILTER - 64)) | (1 << (SqlBaseParser.FINAL - 64)) | (1 << (SqlBaseParser.FIRST - 64)) | (1 << (SqlBaseParser.FOLLOWING - 64)) | (1 << (SqlBaseParser.FORMAT - 64)) | (1 << (SqlBaseParser.FUNCTIONS - 64)) | (1 << (SqlBaseParser.GRANT - 64)) | (1 << (SqlBaseParser.GRANTED - 64)) | (1 << (SqlBaseParser.GRANTS - 64)) | (1 << (SqlBaseParser.GRAPHVIZ - 64)) | (1 << (SqlBaseParser.GROUPS - 64)) | (1 << (SqlBaseParser.HOUR - 64)) | (1 << (SqlBaseParser.IF - 64)) | (1 << (SqlBaseParser.IGNORE - 64)) | (1 << (SqlBaseParser.INCLUDING - 64)) | (1 << (SqlBaseParser.INITIAL - 64)) | (1 << (SqlBaseParser.INPUT - 64)) | (1 << (SqlBaseParser.INTERVAL - 64)) | (1 << (SqlBaseParser.INVOKER - 64)) | (1 << (SqlBaseParser.IO - 64)) | (1 << (SqlBaseParser.ISOLATION - 64)) | (1 
<< (SqlBaseParser.JSON - 64)) | (1 << (SqlBaseParser.LAST - 64)) | (1 << (SqlBaseParser.LATERAL - 64)) | (1 << (SqlBaseParser.LEVEL - 64)) | (1 << (SqlBaseParser.LIMIT - 64)) | (1 << (SqlBaseParser.LOCAL - 64)))) != 0) or ((((_la - 129)) & ~0x3f) == 0 and ((1 << (_la - 129)) & ((1 << (SqlBaseParser.LOGICAL - 129)) | (1 << (SqlBaseParser.MAP - 129)) | (1 << (SqlBaseParser.MATCH - 129)) | (1 << (SqlBaseParser.MATCHED - 129)) | (1 << (SqlBaseParser.MATCHES - 129)) | (1 << (SqlBaseParser.MATCH_RECOGNIZE - 129)) | (1 << (SqlBaseParser.MATERIALIZED - 129)) | (1 << (SqlBaseParser.MEASURES - 129)) | (1 << (SqlBaseParser.MERGE - 129)) | (1 << (SqlBaseParser.MINUTE - 129)) | (1 << (SqlBaseParser.MONTH - 129)) | (1 << (SqlBaseParser.NEXT - 129)) | (1 << (SqlBaseParser.NFC - 129)) | (1 << (SqlBaseParser.NFD - 129)) | (1 << (SqlBaseParser.NFKC - 129)) | (1 << (SqlBaseParser.NFKD - 129)) | (1 << (SqlBaseParser.NO - 129)) | (1 << (SqlBaseParser.NONE - 129)) | (1 << (SqlBaseParser.NULLIF - 129)) | (1 << (SqlBaseParser.NULLS - 129)) | (1 << (SqlBaseParser.OFFSET - 129)) | (1 << (SqlBaseParser.OMIT - 129)) | (1 << (SqlBaseParser.OF - 129)) | (1 << (SqlBaseParser.ONE - 129)) | (1 << (SqlBaseParser.ONLY - 129)) | (1 << (SqlBaseParser.OPTION - 129)) | (1 << (SqlBaseParser.ORDINALITY - 129)) | (1 << (SqlBaseParser.OUTPUT - 129)) | (1 << (SqlBaseParser.OVER - 129)) | (1 << (SqlBaseParser.OVERFLOW - 129)) | (1 << (SqlBaseParser.PARTITION - 129)) | (1 << (SqlBaseParser.PARTITIONS - 129)) | (1 << (SqlBaseParser.PAST - 129)) | (1 << (SqlBaseParser.PATH - 129)) | (1 << (SqlBaseParser.PATTERN - 129)) | (1 << (SqlBaseParser.PER - 129)) | (1 << (SqlBaseParser.PERMUTE - 129)) | (1 << (SqlBaseParser.POSITION - 129)) | (1 << (SqlBaseParser.PRECEDING - 129)) | (1 << (SqlBaseParser.PRECISION - 129)) | (1 << (SqlBaseParser.PRIVILEGES - 129)) | (1 << (SqlBaseParser.PROPERTIES - 129)) | (1 << (SqlBaseParser.RANGE - 129)) | (1 << (SqlBaseParser.READ - 129)) | (1 << (SqlBaseParser.REFRESH - 129)) | (1 << 
(SqlBaseParser.RENAME - 129)) | (1 << (SqlBaseParser.REPEATABLE - 129)) | (1 << (SqlBaseParser.REPLACE - 129)) | (1 << (SqlBaseParser.RESET - 129)) | (1 << (SqlBaseParser.RESPECT - 129)) | (1 << (SqlBaseParser.RESTRICT - 129)) | (1 << (SqlBaseParser.REVOKE - 129)) | (1 << (SqlBaseParser.ROLE - 129)))) != 0) or ((((_la - 193)) & ~0x3f) == 0 and ((1 << (_la - 193)) & ((1 << (SqlBaseParser.ROLES - 193)) | (1 << (SqlBaseParser.ROLLBACK - 193)) | (1 << (SqlBaseParser.ROW - 193)) | (1 << (SqlBaseParser.ROWS - 193)) | (1 << (SqlBaseParser.RUNNING - 193)) | (1 << (SqlBaseParser.SCHEMA - 193)) | (1 << (SqlBaseParser.SCHEMAS - 193)) | (1 << (SqlBaseParser.SECOND - 193)) | (1 << (SqlBaseParser.SECURITY - 193)) | (1 << (SqlBaseParser.SEEK - 193)) | (1 << (SqlBaseParser.SERIALIZABLE - 193)) | (1 << (SqlBaseParser.SESSION - 193)) | (1 << (SqlBaseParser.SET - 193)) | (1 << (SqlBaseParser.SETS - 193)) | (1 << (SqlBaseParser.SHOW - 193)) | (1 << (SqlBaseParser.SOME - 193)) | (1 << (SqlBaseParser.START - 193)) | (1 << (SqlBaseParser.STATS - 193)) | (1 << (SqlBaseParser.SUBSET - 193)) | (1 << (SqlBaseParser.SUBSTRING - 193)) | (1 << (SqlBaseParser.SYSTEM - 193)) | (1 << (SqlBaseParser.TABLES - 193)) | (1 << (SqlBaseParser.TABLESAMPLE - 193)) | (1 << (SqlBaseParser.TEXT - 193)) | (1 << (SqlBaseParser.TIES - 193)) | (1 << (SqlBaseParser.TIME - 193)) | (1 << (SqlBaseParser.TIMESTAMP - 193)) | (1 << (SqlBaseParser.TO - 193)) | (1 << (SqlBaseParser.TRANSACTION - 193)) | (1 << (SqlBaseParser.TRUNCATE - 193)) | (1 << (SqlBaseParser.TRY_CAST - 193)) | (1 << (SqlBaseParser.TYPE - 193)) | (1 << (SqlBaseParser.UNBOUNDED - 193)) | (1 << (SqlBaseParser.UNCOMMITTED - 193)) | (1 << (SqlBaseParser.UNMATCHED - 193)) | (1 << (SqlBaseParser.UPDATE - 193)) | (1 << (SqlBaseParser.USE - 193)) | (1 << (SqlBaseParser.USER - 193)) | (1 << (SqlBaseParser.VALIDATE - 193)) | (1 << (SqlBaseParser.VERBOSE - 193)) | (1 << (SqlBaseParser.VERSION - 193)) | (1 << (SqlBaseParser.VIEW - 193)) | (1 << 
(SqlBaseParser.WINDOW - 193)) | (1 << (SqlBaseParser.WITHIN - 193)) | (1 << (SqlBaseParser.WITHOUT - 193)) | (1 << (SqlBaseParser.WORK - 193)) | (1 << (SqlBaseParser.WRITE - 193)) | (1 << (SqlBaseParser.YEAR - 193)) | (1 << (SqlBaseParser.ZONE - 193)))) != 0) or ((((_la - 274)) & ~0x3f) == 0 and ((1 << (_la - 274)) & ((1 << (SqlBaseParser.IDENTIFIER - 274)) | (1 << (SqlBaseParser.DIGIT_IDENTIFIER - 274)) | (1 << (SqlBaseParser.QUOTED_IDENTIFIER - 274)) | (1 << (SqlBaseParser.BACKQUOTED_IDENTIFIER - 274)))) != 0):
self.state = 792
self.qualifiedName()
self.state = 801
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==SqlBaseParser.LIKE:
self.state = 795
self.match(SqlBaseParser.LIKE)
self.state = 796
localctx.pattern = self.string()
self.state = 799
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==SqlBaseParser.ESCAPE:
self.state = 797
self.match(SqlBaseParser.ESCAPE)
self.state = 798
localctx.escape = self.string()
elif la_ == 52:
localctx = SqlBaseParser.ShowStatsContext(self, localctx)
self.enterOuterAlt(localctx, 52)
self.state = 803
self.match(SqlBaseParser.SHOW)
self.state = 804
self.match(SqlBaseParser.STATS)
self.state = 805
self.match(SqlBaseParser.FOR)
self.state = 806
self.qualifiedName()
elif la_ == 53:
localctx = SqlBaseParser.ShowStatsForQueryContext(self, localctx)
self.enterOuterAlt(localctx, 53)
self.state = 807
self.match(SqlBaseParser.SHOW)
self.state = 808
self.match(SqlBaseParser.STATS)
self.state = 809
self.match(SqlBaseParser.FOR)
self.state = 810
self.match(SqlBaseParser.T__1)
self.state = 811
self.query()
self.state = 812
self.match(SqlBaseParser.T__2)
elif la_ == 54:
localctx = SqlBaseParser.ShowRolesContext(self, localctx)
self.enterOuterAlt(localctx, 54)
self.state = 814
self.match(SqlBaseParser.SHOW)
self.state = 816
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==SqlBaseParser.CURRENT:
self.state = 815
self.match(SqlBaseParser.CURRENT)
self.state = 818
self.match(SqlBaseParser.ROLES)
self.state = 821
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==SqlBaseParser.FROM or _la==SqlBaseParser.IN:
self.state = 819
_la = self._input.LA(1)
if not(_la==SqlBaseParser.FROM or _la==SqlBaseParser.IN):
self._errHandler.recoverInline(self)
else:
self._errHandler.reportMatch(self)
self.consume()
self.state = 820
self.identifier()
elif la_ == 55:
localctx = SqlBaseParser.ShowRoleGrantsContext(self, localctx)
self.enterOuterAlt(localctx, 55)
self.state = 823
self.match(SqlBaseParser.SHOW)
self.state = 824
self.match(SqlBaseParser.ROLE)
self.state = 825
self.match(SqlBaseParser.GRANTS)
self.state = 828
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==SqlBaseParser.FROM or _la==SqlBaseParser.IN:
self.state = 826
_la = self._input.LA(1)
if not(_la==SqlBaseParser.FROM or _la==SqlBaseParser.IN):
self._errHandler.recoverInline(self)
else:
self._errHandler.reportMatch(self)
self.consume()
self.state = 827
self.identifier()
elif la_ == 56:
localctx = SqlBaseParser.ShowColumnsContext(self, localctx)
self.enterOuterAlt(localctx, 56)
self.state = 830
self.match(SqlBaseParser.DESCRIBE)
self.state = 831
self.qualifiedName()
elif la_ == 57:
localctx = SqlBaseParser.ShowColumnsContext(self, localctx)
self.enterOuterAlt(localctx, 57)
self.state = 832
self.match(SqlBaseParser.DESC)
self.state = 833
self.qualifiedName()
elif la_ == 58:
localctx = SqlBaseParser.ShowFunctionsContext(self, localctx)
self.enterOuterAlt(localctx, 58)
self.state = 834
self.match(SqlBaseParser.SHOW)
self.state = 835
self.match(SqlBaseParser.FUNCTIONS)
self.state = 842
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==SqlBaseParser.LIKE:
self.state = 836
self.match(SqlBaseParser.LIKE)
self.state = 837
localctx.pattern = self.string()
self.state = 840
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==SqlBaseParser.ESCAPE:
self.state = 838
self.match(SqlBaseParser.ESCAPE)
self.state = 839
localctx.escape = self.string()
elif la_ == 59:
localctx = SqlBaseParser.ShowSessionContext(self, localctx)
self.enterOuterAlt(localctx, 59)
self.state = 844
self.match(SqlBaseParser.SHOW)
self.state = 845
self.match(SqlBaseParser.SESSION)
self.state = 852
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==SqlBaseParser.LIKE:
self.state = 846
self.match(SqlBaseParser.LIKE)
self.state = 847
localctx.pattern = self.string()
self.state = 850
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==SqlBaseParser.ESCAPE:
self.state = 848
self.match(SqlBaseParser.ESCAPE)
self.state = 849
localctx.escape = self.string()
elif la_ == 60:
localctx = SqlBaseParser.SetSessionContext(self, localctx)
self.enterOuterAlt(localctx, 60)
self.state = 854
self.match(SqlBaseParser.SET)
self.state = 855
self.match(SqlBaseParser.SESSION)
self.state = 856
self.qualifiedName()
self.state = 857
self.match(SqlBaseParser.EQ)
self.state = 858
self.expression()
elif la_ == 61:
localctx = SqlBaseParser.ResetSessionContext(self, localctx)
self.enterOuterAlt(localctx, 61)
self.state = 860
self.match(SqlBaseParser.RESET)
self.state = 861
self.match(SqlBaseParser.SESSION)
self.state = 862
self.qualifiedName()
elif la_ == 62:
localctx = SqlBaseParser.StartTransactionContext(self, localctx)
self.enterOuterAlt(localctx, 62)
self.state = 863
self.match(SqlBaseParser.START)
self.state = 864
self.match(SqlBaseParser.TRANSACTION)
self.state = 873
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==SqlBaseParser.ISOLATION or _la==SqlBaseParser.READ:
self.state = 865
self.transactionMode()
self.state = 870
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la==SqlBaseParser.T__3:
self.state = 866
self.match(SqlBaseParser.T__3)
self.state = 867
self.transactionMode()
self.state = 872
self._errHandler.sync(self)
_la = self._input.LA(1)
elif la_ == 63:
localctx = SqlBaseParser.CommitContext(self, localctx)
self.enterOuterAlt(localctx, 63)
self.state = 875
self.match(SqlBaseParser.COMMIT)
self.state = 877
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==SqlBaseParser.WORK:
self.state = 876
self.match(SqlBaseParser.WORK)
elif la_ == 64:
localctx = SqlBaseParser.RollbackContext(self, localctx)
self.enterOuterAlt(localctx, 64)
self.state = 879
self.match(SqlBaseParser.ROLLBACK)
self.state = 881
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==SqlBaseParser.WORK:
self.state = 880
self.match(SqlBaseParser.WORK)
elif la_ == 65:
localctx = SqlBaseParser.PrepareContext(self, localctx)
self.enterOuterAlt(localctx, 65)
self.state = 883
self.match(SqlBaseParser.PREPARE)
self.state = 884
self.identifier()
self.state = 885
self.match(SqlBaseParser.FROM)
self.state = 886
self.statement()
elif la_ == 66:
localctx = SqlBaseParser.DeallocateContext(self, localctx)
self.enterOuterAlt(localctx, 66)
self.state = 888
self.match(SqlBaseParser.DEALLOCATE)
self.state = 889
self.match(SqlBaseParser.PREPARE)
self.state = 890
self.identifier()
elif la_ == 67:
localctx = SqlBaseParser.ExecuteContext(self, localctx)
self.enterOuterAlt(localctx, 67)
self.state = 891
self.match(SqlBaseParser.EXECUTE)
self.state = 892
self.identifier()
self.state = 902
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==SqlBaseParser.USING:
self.state = 893
self.match(SqlBaseParser.USING)
self.state = 894
self.expression()
self.state = 899
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la==SqlBaseParser.T__3:
self.state = 895
self.match(SqlBaseParser.T__3)
self.state = 896
self.expression()
self.state = 901
self._errHandler.sync(self)
_la = self._input.LA(1)
elif la_ == 68:
localctx = SqlBaseParser.DescribeInputContext(self, localctx)
self.enterOuterAlt(localctx, 68)
self.state = 904
self.match(SqlBaseParser.DESCRIBE)
self.state = 905
self.match(SqlBaseParser.INPUT)
self.state = 906
self.identifier()
elif la_ == 69:
localctx = SqlBaseParser.DescribeOutputContext(self, localctx)
self.enterOuterAlt(localctx, 69)
self.state = 907
self.match(SqlBaseParser.DESCRIBE)
self.state = 908
self.match(SqlBaseParser.OUTPUT)
self.state = 909
self.identifier()
elif la_ == 70:
localctx = SqlBaseParser.SetPathContext(self, localctx)
self.enterOuterAlt(localctx, 70)
self.state = 910
self.match(SqlBaseParser.SET)
self.state = 911
self.match(SqlBaseParser.PATH)
self.state = 912
self.pathSpecification()
elif la_ == 71:
localctx = SqlBaseParser.SetTimeZoneContext(self, localctx)
self.enterOuterAlt(localctx, 71)
self.state = 913
self.match(SqlBaseParser.SET)
self.state = 914
self.match(SqlBaseParser.TIME)
self.state = 915
self.match(SqlBaseParser.ZONE)
self.state = 918
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,98,self._ctx)
if la_ == 1:
self.state = 916
self.match(SqlBaseParser.LOCAL)
elif la_ == 2:
self.state = 917
self.expression()
elif la_ == 72:
localctx = SqlBaseParser.UpdateContext(self, localctx)
self.enterOuterAlt(localctx, 72)
self.state = 920
self.match(SqlBaseParser.UPDATE)
self.state = 921
self.qualifiedName()
self.state = 922
self.match(SqlBaseParser.SET)
self.state = 923
self.updateAssignment()
self.state = 928
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la==SqlBaseParser.T__3:
self.state = 924
self.match(SqlBaseParser.T__3)
self.state = 925
self.updateAssignment()
self.state = 930
self._errHandler.sync(self)
_la = self._input.LA(1)
self.state = 933
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==SqlBaseParser.WHERE:
self.state = 931
self.match(SqlBaseParser.WHERE)
self.state = 932
localctx.where = self.booleanExpression(0)
elif la_ == 73:
localctx = SqlBaseParser.MergeContext(self, localctx)
self.enterOuterAlt(localctx, 73)
self.state = 935
self.match(SqlBaseParser.MERGE)
self.state = 936
self.match(SqlBaseParser.INTO)
self.state = 937
self.qualifiedName()
self.state = 942
self._errHandler.sync(self)
_la = self._input.LA(1)
if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << SqlBaseParser.ADD) | (1 << SqlBaseParser.ADMIN) | (1 << SqlBaseParser.AFTER) | (1 << SqlBaseParser.ALL) | (1 << SqlBaseParser.ANALYZE) | (1 << SqlBaseParser.ANY) | (1 << SqlBaseParser.ARRAY) | (1 << SqlBaseParser.AS) | (1 << SqlBaseParser.ASC) | (1 << SqlBaseParser.AT) | (1 << SqlBaseParser.AUTHORIZATION) | (1 << SqlBaseParser.BERNOULLI) | (1 << SqlBaseParser.CALL) | (1 << SqlBaseParser.CASCADE) | (1 << SqlBaseParser.CATALOGS) | (1 << SqlBaseParser.COLUMN) | (1 << SqlBaseParser.COLUMNS) | (1 << SqlBaseParser.COMMENT) | (1 << SqlBaseParser.COMMIT) | (1 << SqlBaseParser.COMMITTED) | (1 << SqlBaseParser.COUNT) | (1 << SqlBaseParser.CURRENT) | (1 << SqlBaseParser.DATA) | (1 << SqlBaseParser.DATE) | (1 << SqlBaseParser.DAY) | (1 << SqlBaseParser.DEFINER) | (1 << SqlBaseParser.DENY))) != 0) or ((((_la - 64)) & ~0x3f) == 0 and ((1 << (_la - 64)) & ((1 << (SqlBaseParser.DESC - 64)) | (1 << (SqlBaseParser.DEFINE - 64)) | (1 << (SqlBaseParser.DISTRIBUTED - 64)) | (1 << (SqlBaseParser.DOUBLE - 64)) | (1 << (SqlBaseParser.EMPTY - 64)) | (1 << (SqlBaseParser.ERROR - 64)) | (1 << (SqlBaseParser.EXCLUDING - 64)) | (1 << (SqlBaseParser.EXPLAIN - 64)) | (1 << (SqlBaseParser.FETCH - 64)) | (1 << (SqlBaseParser.FILTER - 64)) | (1 << (SqlBaseParser.FINAL - 64)) | (1 << (SqlBaseParser.FIRST - 64)) | (1 << (SqlBaseParser.FOLLOWING - 64)) | (1 << (SqlBaseParser.FORMAT - 64)) | (1 << (SqlBaseParser.FUNCTIONS - 64)) | (1 << (SqlBaseParser.GRANT - 64)) | (1 << (SqlBaseParser.GRANTED - 64)) | (1 << (SqlBaseParser.GRANTS - 64)) | (1 << (SqlBaseParser.GRAPHVIZ - 64)) | (1 << (SqlBaseParser.GROUPS - 64)) | (1 << (SqlBaseParser.HOUR - 64)) | (1 << (SqlBaseParser.IF - 64)) | (1 << (SqlBaseParser.IGNORE - 64)) | (1 << (SqlBaseParser.INCLUDING - 64)) | (1 << (SqlBaseParser.INITIAL - 64)) | (1 << (SqlBaseParser.INPUT - 64)) | (1 << (SqlBaseParser.INTERVAL - 64)) | (1 << (SqlBaseParser.INVOKER - 64)) | (1 << (SqlBaseParser.IO - 64)) | (1 << 
(SqlBaseParser.ISOLATION - 64)) | (1 << (SqlBaseParser.JSON - 64)) | (1 << (SqlBaseParser.LAST - 64)) | (1 << (SqlBaseParser.LATERAL - 64)) | (1 << (SqlBaseParser.LEVEL - 64)) | (1 << (SqlBaseParser.LIMIT - 64)) | (1 << (SqlBaseParser.LOCAL - 64)))) != 0) or ((((_la - 129)) & ~0x3f) == 0 and ((1 << (_la - 129)) & ((1 << (SqlBaseParser.LOGICAL - 129)) | (1 << (SqlBaseParser.MAP - 129)) | (1 << (SqlBaseParser.MATCH - 129)) | (1 << (SqlBaseParser.MATCHED - 129)) | (1 << (SqlBaseParser.MATCHES - 129)) | (1 << (SqlBaseParser.MATCH_RECOGNIZE - 129)) | (1 << (SqlBaseParser.MATERIALIZED - 129)) | (1 << (SqlBaseParser.MEASURES - 129)) | (1 << (SqlBaseParser.MERGE - 129)) | (1 << (SqlBaseParser.MINUTE - 129)) | (1 << (SqlBaseParser.MONTH - 129)) | (1 << (SqlBaseParser.NEXT - 129)) | (1 << (SqlBaseParser.NFC - 129)) | (1 << (SqlBaseParser.NFD - 129)) | (1 << (SqlBaseParser.NFKC - 129)) | (1 << (SqlBaseParser.NFKD - 129)) | (1 << (SqlBaseParser.NO - 129)) | (1 << (SqlBaseParser.NONE - 129)) | (1 << (SqlBaseParser.NULLIF - 129)) | (1 << (SqlBaseParser.NULLS - 129)) | (1 << (SqlBaseParser.OFFSET - 129)) | (1 << (SqlBaseParser.OMIT - 129)) | (1 << (SqlBaseParser.OF - 129)) | (1 << (SqlBaseParser.ONE - 129)) | (1 << (SqlBaseParser.ONLY - 129)) | (1 << (SqlBaseParser.OPTION - 129)) | (1 << (SqlBaseParser.ORDINALITY - 129)) | (1 << (SqlBaseParser.OUTPUT - 129)) | (1 << (SqlBaseParser.OVER - 129)) | (1 << (SqlBaseParser.OVERFLOW - 129)) | (1 << (SqlBaseParser.PARTITION - 129)) | (1 << (SqlBaseParser.PARTITIONS - 129)) | (1 << (SqlBaseParser.PAST - 129)) | (1 << (SqlBaseParser.PATH - 129)) | (1 << (SqlBaseParser.PATTERN - 129)) | (1 << (SqlBaseParser.PER - 129)) | (1 << (SqlBaseParser.PERMUTE - 129)) | (1 << (SqlBaseParser.POSITION - 129)) | (1 << (SqlBaseParser.PRECEDING - 129)) | (1 << (SqlBaseParser.PRECISION - 129)) | (1 << (SqlBaseParser.PRIVILEGES - 129)) | (1 << (SqlBaseParser.PROPERTIES - 129)) | (1 << (SqlBaseParser.RANGE - 129)) | (1 << (SqlBaseParser.READ - 129)) | (1 << 
(SqlBaseParser.REFRESH - 129)) | (1 << (SqlBaseParser.RENAME - 129)) | (1 << (SqlBaseParser.REPEATABLE - 129)) | (1 << (SqlBaseParser.REPLACE - 129)) | (1 << (SqlBaseParser.RESET - 129)) | (1 << (SqlBaseParser.RESPECT - 129)) | (1 << (SqlBaseParser.RESTRICT - 129)) | (1 << (SqlBaseParser.REVOKE - 129)) | (1 << (SqlBaseParser.ROLE - 129)))) != 0) or ((((_la - 193)) & ~0x3f) == 0 and ((1 << (_la - 193)) & ((1 << (SqlBaseParser.ROLES - 193)) | (1 << (SqlBaseParser.ROLLBACK - 193)) | (1 << (SqlBaseParser.ROW - 193)) | (1 << (SqlBaseParser.ROWS - 193)) | (1 << (SqlBaseParser.RUNNING - 193)) | (1 << (SqlBaseParser.SCHEMA - 193)) | (1 << (SqlBaseParser.SCHEMAS - 193)) | (1 << (SqlBaseParser.SECOND - 193)) | (1 << (SqlBaseParser.SECURITY - 193)) | (1 << (SqlBaseParser.SEEK - 193)) | (1 << (SqlBaseParser.SERIALIZABLE - 193)) | (1 << (SqlBaseParser.SESSION - 193)) | (1 << (SqlBaseParser.SET - 193)) | (1 << (SqlBaseParser.SETS - 193)) | (1 << (SqlBaseParser.SHOW - 193)) | (1 << (SqlBaseParser.SOME - 193)) | (1 << (SqlBaseParser.START - 193)) | (1 << (SqlBaseParser.STATS - 193)) | (1 << (SqlBaseParser.SUBSET - 193)) | (1 << (SqlBaseParser.SUBSTRING - 193)) | (1 << (SqlBaseParser.SYSTEM - 193)) | (1 << (SqlBaseParser.TABLES - 193)) | (1 << (SqlBaseParser.TABLESAMPLE - 193)) | (1 << (SqlBaseParser.TEXT - 193)) | (1 << (SqlBaseParser.TIES - 193)) | (1 << (SqlBaseParser.TIME - 193)) | (1 << (SqlBaseParser.TIMESTAMP - 193)) | (1 << (SqlBaseParser.TO - 193)) | (1 << (SqlBaseParser.TRANSACTION - 193)) | (1 << (SqlBaseParser.TRUNCATE - 193)) | (1 << (SqlBaseParser.TRY_CAST - 193)) | (1 << (SqlBaseParser.TYPE - 193)) | (1 << (SqlBaseParser.UNBOUNDED - 193)) | (1 << (SqlBaseParser.UNCOMMITTED - 193)) | (1 << (SqlBaseParser.UNMATCHED - 193)) | (1 << (SqlBaseParser.UPDATE - 193)) | (1 << (SqlBaseParser.USE - 193)) | (1 << (SqlBaseParser.USER - 193)) | (1 << (SqlBaseParser.VALIDATE - 193)) | (1 << (SqlBaseParser.VERBOSE - 193)) | (1 << (SqlBaseParser.VERSION - 193)) | (1 << 
(SqlBaseParser.VIEW - 193)) | (1 << (SqlBaseParser.WINDOW - 193)) | (1 << (SqlBaseParser.WITHIN - 193)) | (1 << (SqlBaseParser.WITHOUT - 193)) | (1 << (SqlBaseParser.WORK - 193)) | (1 << (SqlBaseParser.WRITE - 193)) | (1 << (SqlBaseParser.YEAR - 193)) | (1 << (SqlBaseParser.ZONE - 193)))) != 0) or ((((_la - 274)) & ~0x3f) == 0 and ((1 << (_la - 274)) & ((1 << (SqlBaseParser.IDENTIFIER - 274)) | (1 << (SqlBaseParser.DIGIT_IDENTIFIER - 274)) | (1 << (SqlBaseParser.QUOTED_IDENTIFIER - 274)) | (1 << (SqlBaseParser.BACKQUOTED_IDENTIFIER - 274)))) != 0):
self.state = 939
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==SqlBaseParser.AS:
self.state = 938
self.match(SqlBaseParser.AS)
self.state = 941
self.identifier()
self.state = 944
self.match(SqlBaseParser.USING)
self.state = 945
self.relation(0)
self.state = 946
self.match(SqlBaseParser.ON)
self.state = 947
self.expression()
self.state = 949
self._errHandler.sync(self)
_la = self._input.LA(1)
while True:
self.state = 948
self.mergeCase()
self.state = 951
self._errHandler.sync(self)
_la = self._input.LA(1)
if not (_la==SqlBaseParser.WHEN):
break
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class QueryContext(ParserRuleContext):
    """Parse-tree context for the `query` rule: an optional WITH clause followed by
    a queryNoWith body.  Generated by ANTLR 4 — do not edit by hand."""
    __slots__ = 'parser'

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def queryNoWith(self):
        # Child context for the query body (after any WITH clause).
        return self.getTypedRuleContext(SqlBaseParser.QueryNoWithContext,0)

    def with_(self):
        # Optional WITH-clause child; None when the query has no WITH clause.
        return self.getTypedRuleContext(SqlBaseParser.With_Context,0)

    def getRuleIndex(self):
        return SqlBaseParser.RULE_query

    def enterRule(self, listener:ParseTreeListener):
        # Dispatch to the listener only if it implements the hook (duck-typed).
        if hasattr( listener, "enterQuery" ):
            listener.enterQuery(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitQuery" ):
            listener.exitQuery(self)

    def accept(self, visitor:ParseTreeVisitor):
        # Visitor pattern: use a specific visitQuery if present, else generic traversal.
        if hasattr( visitor, "visitQuery" ):
            return visitor.visitQuery(self)
        else:
            return visitor.visitChildren(self)
def query(self):
    """Parse rule `query`: with_? queryNoWith.

    Generated by ANTLR 4 — the `self.state = N` assignments track ATN states
    and must not be altered by hand.  Returns the populated QueryContext;
    recognition errors are recorded on the context and recovered from.
    """
    localctx = SqlBaseParser.QueryContext(self, self._ctx, self.state)
    self.enterRule(localctx, 12, self.RULE_query)
    self._la = 0 # Token type
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 956
        self._errHandler.sync(self)
        _la = self._input.LA(1)
        # Optional leading WITH clause.
        if _la==SqlBaseParser.WITH:
            self.state = 955
            self.with_()

        self.state = 958
        self.queryNoWith()
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class With_Context(ParserRuleContext):
    """Parse-tree context for the `with_` rule: WITH RECURSIVE? namedQuery (',' namedQuery)*.
    Generated by ANTLR 4 — do not edit by hand."""
    __slots__ = 'parser'

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def WITH(self):
        return self.getToken(SqlBaseParser.WITH, 0)

    def namedQuery(self, i:int=None):
        # With no index, return all named-query children; with an index, the i-th one.
        if i is None:
            return self.getTypedRuleContexts(SqlBaseParser.NamedQueryContext)
        else:
            return self.getTypedRuleContext(SqlBaseParser.NamedQueryContext,i)

    def RECURSIVE(self):
        # Optional RECURSIVE keyword token; None when absent.
        return self.getToken(SqlBaseParser.RECURSIVE, 0)

    def getRuleIndex(self):
        return SqlBaseParser.RULE_with_

    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterWith_" ):
            listener.enterWith_(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitWith_" ):
            listener.exitWith_(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitWith_" ):
            return visitor.visitWith_(self)
        else:
            return visitor.visitChildren(self)
def with_(self):
    """Parse rule `with_`: WITH RECURSIVE? namedQuery (',' namedQuery)*.

    Generated by ANTLR 4; `self.state = N` values are ATN bookkeeping.
    """
    localctx = SqlBaseParser.With_Context(self, self._ctx, self.state)
    self.enterRule(localctx, 14, self.RULE_with_)
    self._la = 0 # Token type
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 960
        self.match(SqlBaseParser.WITH)
        self.state = 962
        self._errHandler.sync(self)
        _la = self._input.LA(1)
        # Optional RECURSIVE keyword.
        if _la==SqlBaseParser.RECURSIVE:
            self.state = 961
            self.match(SqlBaseParser.RECURSIVE)

        self.state = 964
        self.namedQuery()
        self.state = 969
        self._errHandler.sync(self)
        _la = self._input.LA(1)
        # T__3 is an unnamed literal token (the list separator, presumably ',').
        while _la==SqlBaseParser.T__3:
            self.state = 965
            self.match(SqlBaseParser.T__3)
            self.state = 966
            self.namedQuery()
            self.state = 971
            self._errHandler.sync(self)
            _la = self._input.LA(1)

    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class TableElementContext(ParserRuleContext):
    """Parse-tree context for the `tableElement` rule: either a columnDefinition
    or a likeClause.  Generated by ANTLR 4 — do not edit by hand."""
    __slots__ = 'parser'

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def columnDefinition(self):
        # Present when this element is a column definition; None otherwise.
        return self.getTypedRuleContext(SqlBaseParser.ColumnDefinitionContext,0)

    def likeClause(self):
        # Present when this element is a LIKE clause; None otherwise.
        return self.getTypedRuleContext(SqlBaseParser.LikeClauseContext,0)

    def getRuleIndex(self):
        return SqlBaseParser.RULE_tableElement

    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterTableElement" ):
            listener.enterTableElement(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitTableElement" ):
            listener.exitTableElement(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitTableElement" ):
            return visitor.visitTableElement(self)
        else:
            return visitor.visitChildren(self)
def tableElement(self):
    """Parse rule `tableElement`: columnDefinition | likeClause.

    Generated by ANTLR 4.  Dispatches on the lookahead token: any
    non-reserved keyword / identifier starts a column definition,
    LIKE starts a likeClause; anything else is a syntax error.
    """
    localctx = SqlBaseParser.TableElementContext(self, self._ctx, self.state)
    self.enterRule(localctx, 16, self.RULE_tableElement)
    try:
        self.state = 974
        self._errHandler.sync(self)
        token = self._input.LA(1)
        # Long membership list = every token that may legally begin an identifier
        # (non-reserved keywords are usable as column names in this grammar).
        if token in [SqlBaseParser.ADD, SqlBaseParser.ADMIN, SqlBaseParser.AFTER, SqlBaseParser.ALL, SqlBaseParser.ANALYZE, SqlBaseParser.ANY, SqlBaseParser.ARRAY, SqlBaseParser.ASC, SqlBaseParser.AT, SqlBaseParser.AUTHORIZATION, SqlBaseParser.BERNOULLI, SqlBaseParser.CALL, SqlBaseParser.CASCADE, SqlBaseParser.CATALOGS, SqlBaseParser.COLUMN, SqlBaseParser.COLUMNS, SqlBaseParser.COMMENT, SqlBaseParser.COMMIT, SqlBaseParser.COMMITTED, SqlBaseParser.COUNT, SqlBaseParser.CURRENT, SqlBaseParser.DATA, SqlBaseParser.DATE, SqlBaseParser.DAY, SqlBaseParser.DEFINER, SqlBaseParser.DENY, SqlBaseParser.DESC, SqlBaseParser.DEFINE, SqlBaseParser.DISTRIBUTED, SqlBaseParser.DOUBLE, SqlBaseParser.EMPTY, SqlBaseParser.ERROR, SqlBaseParser.EXCLUDING, SqlBaseParser.EXPLAIN, SqlBaseParser.FETCH, SqlBaseParser.FILTER, SqlBaseParser.FINAL, SqlBaseParser.FIRST, SqlBaseParser.FOLLOWING, SqlBaseParser.FORMAT, SqlBaseParser.FUNCTIONS, SqlBaseParser.GRANT, SqlBaseParser.GRANTED, SqlBaseParser.GRANTS, SqlBaseParser.GRAPHVIZ, SqlBaseParser.GROUPS, SqlBaseParser.HOUR, SqlBaseParser.IF, SqlBaseParser.IGNORE, SqlBaseParser.INCLUDING, SqlBaseParser.INITIAL, SqlBaseParser.INPUT, SqlBaseParser.INTERVAL, SqlBaseParser.INVOKER, SqlBaseParser.IO, SqlBaseParser.ISOLATION, SqlBaseParser.JSON, SqlBaseParser.LAST, SqlBaseParser.LATERAL, SqlBaseParser.LEVEL, SqlBaseParser.LIMIT, SqlBaseParser.LOCAL, SqlBaseParser.LOGICAL, SqlBaseParser.MAP, SqlBaseParser.MATCH, SqlBaseParser.MATCHED, SqlBaseParser.MATCHES, SqlBaseParser.MATCH_RECOGNIZE, SqlBaseParser.MATERIALIZED, SqlBaseParser.MEASURES, SqlBaseParser.MERGE, SqlBaseParser.MINUTE, SqlBaseParser.MONTH, SqlBaseParser.NEXT, SqlBaseParser.NFC, SqlBaseParser.NFD, SqlBaseParser.NFKC, SqlBaseParser.NFKD, SqlBaseParser.NO, SqlBaseParser.NONE, SqlBaseParser.NULLIF, SqlBaseParser.NULLS, SqlBaseParser.OFFSET, SqlBaseParser.OMIT, SqlBaseParser.OF, SqlBaseParser.ONE, SqlBaseParser.ONLY, SqlBaseParser.OPTION, SqlBaseParser.ORDINALITY, SqlBaseParser.OUTPUT, SqlBaseParser.OVER, SqlBaseParser.OVERFLOW, SqlBaseParser.PARTITION, SqlBaseParser.PARTITIONS, SqlBaseParser.PAST, SqlBaseParser.PATH, SqlBaseParser.PATTERN, SqlBaseParser.PER, SqlBaseParser.PERMUTE, SqlBaseParser.POSITION, SqlBaseParser.PRECEDING, SqlBaseParser.PRECISION, SqlBaseParser.PRIVILEGES, SqlBaseParser.PROPERTIES, SqlBaseParser.RANGE, SqlBaseParser.READ, SqlBaseParser.REFRESH, SqlBaseParser.RENAME, SqlBaseParser.REPEATABLE, SqlBaseParser.REPLACE, SqlBaseParser.RESET, SqlBaseParser.RESPECT, SqlBaseParser.RESTRICT, SqlBaseParser.REVOKE, SqlBaseParser.ROLE, SqlBaseParser.ROLES, SqlBaseParser.ROLLBACK, SqlBaseParser.ROW, SqlBaseParser.ROWS, SqlBaseParser.RUNNING, SqlBaseParser.SCHEMA, SqlBaseParser.SCHEMAS, SqlBaseParser.SECOND, SqlBaseParser.SECURITY, SqlBaseParser.SEEK, SqlBaseParser.SERIALIZABLE, SqlBaseParser.SESSION, SqlBaseParser.SET, SqlBaseParser.SETS, SqlBaseParser.SHOW, SqlBaseParser.SOME, SqlBaseParser.START, SqlBaseParser.STATS, SqlBaseParser.SUBSET, SqlBaseParser.SUBSTRING, SqlBaseParser.SYSTEM, SqlBaseParser.TABLES, SqlBaseParser.TABLESAMPLE, SqlBaseParser.TEXT, SqlBaseParser.TIES, SqlBaseParser.TIME, SqlBaseParser.TIMESTAMP, SqlBaseParser.TO, SqlBaseParser.TRANSACTION, SqlBaseParser.TRUNCATE, SqlBaseParser.TRY_CAST, SqlBaseParser.TYPE, SqlBaseParser.UNBOUNDED, SqlBaseParser.UNCOMMITTED, SqlBaseParser.UNMATCHED, SqlBaseParser.UPDATE, SqlBaseParser.USE, SqlBaseParser.USER, SqlBaseParser.VALIDATE, SqlBaseParser.VERBOSE, SqlBaseParser.VERSION, SqlBaseParser.VIEW, SqlBaseParser.WINDOW, SqlBaseParser.WITHIN, SqlBaseParser.WITHOUT, SqlBaseParser.WORK, SqlBaseParser.WRITE, SqlBaseParser.YEAR, SqlBaseParser.ZONE, SqlBaseParser.IDENTIFIER, SqlBaseParser.DIGIT_IDENTIFIER, SqlBaseParser.QUOTED_IDENTIFIER, SqlBaseParser.BACKQUOTED_IDENTIFIER]:
            self.enterOuterAlt(localctx, 1)
            self.state = 972
            self.columnDefinition()
        elif token in [SqlBaseParser.LIKE]:
            self.enterOuterAlt(localctx, 2)
            self.state = 973
            self.likeClause()
        else:
            raise NoViableAltException(self)

    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class ColumnDefinitionContext(ParserRuleContext):
    """Parse-tree context for `columnDefinition`:
    identifier type_ (NOT NULL)? (COMMENT string)? (WITH properties)?.
    Generated by ANTLR 4 — do not edit by hand."""
    __slots__ = 'parser'

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def identifier(self):
        # The column name.
        return self.getTypedRuleContext(SqlBaseParser.IdentifierContext,0)

    def type_(self):
        # The column's data type.
        return self.getTypedRuleContext(SqlBaseParser.Type_Context,0)

    def NOT(self):
        # Present only when NOT NULL was specified.
        return self.getToken(SqlBaseParser.NOT, 0)

    def NULL(self):
        return self.getToken(SqlBaseParser.NULL, 0)

    def COMMENT(self):
        # Present only when a COMMENT clause was given.
        return self.getToken(SqlBaseParser.COMMENT, 0)

    def string(self):
        # The comment text (when COMMENT is present).
        return self.getTypedRuleContext(SqlBaseParser.StringContext,0)

    def WITH(self):
        # Present only when a WITH properties clause was given.
        return self.getToken(SqlBaseParser.WITH, 0)

    def properties(self):
        return self.getTypedRuleContext(SqlBaseParser.PropertiesContext,0)

    def getRuleIndex(self):
        return SqlBaseParser.RULE_columnDefinition

    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterColumnDefinition" ):
            listener.enterColumnDefinition(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitColumnDefinition" ):
            listener.exitColumnDefinition(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitColumnDefinition" ):
            return visitor.visitColumnDefinition(self)
        else:
            return visitor.visitChildren(self)
def columnDefinition(self):
    """Parse rule `columnDefinition`:
    identifier type_ (NOT NULL)? (COMMENT string)? (WITH properties)?.

    Generated by ANTLR 4; `self.state = N` values are ATN bookkeeping.
    """
    localctx = SqlBaseParser.ColumnDefinitionContext(self, self._ctx, self.state)
    self.enterRule(localctx, 18, self.RULE_columnDefinition)
    self._la = 0 # Token type
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 976
        self.identifier()
        self.state = 977
        self.type_(0)
        self.state = 980
        self._errHandler.sync(self)
        _la = self._input.LA(1)
        # Optional NOT NULL constraint.
        if _la==SqlBaseParser.NOT:
            self.state = 978
            self.match(SqlBaseParser.NOT)
            self.state = 979
            self.match(SqlBaseParser.NULL)

        self.state = 984
        self._errHandler.sync(self)
        _la = self._input.LA(1)
        # Optional COMMENT 'text'.
        if _la==SqlBaseParser.COMMENT:
            self.state = 982
            self.match(SqlBaseParser.COMMENT)
            self.state = 983
            self.string()

        self.state = 988
        self._errHandler.sync(self)
        _la = self._input.LA(1)
        # Optional WITH ( property list ).
        if _la==SqlBaseParser.WITH:
            self.state = 986
            self.match(SqlBaseParser.WITH)
            self.state = 987
            self.properties()

    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class LikeClauseContext(ParserRuleContext):
    """Parse-tree context for `likeClause`:
    LIKE qualifiedName ((INCLUDING | EXCLUDING) PROPERTIES)?.
    Generated by ANTLR 4 — do not edit by hand."""
    __slots__ = 'parser'

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser
        self.optionType = None # Token

    def LIKE(self):
        return self.getToken(SqlBaseParser.LIKE, 0)

    def qualifiedName(self):
        # The table being copied from.
        return self.getTypedRuleContext(SqlBaseParser.QualifiedNameContext,0)

    def PROPERTIES(self):
        # Present only when an INCLUDING/EXCLUDING PROPERTIES option was given.
        return self.getToken(SqlBaseParser.PROPERTIES, 0)

    def INCLUDING(self):
        return self.getToken(SqlBaseParser.INCLUDING, 0)

    def EXCLUDING(self):
        return self.getToken(SqlBaseParser.EXCLUDING, 0)

    def getRuleIndex(self):
        return SqlBaseParser.RULE_likeClause

    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterLikeClause" ):
            listener.enterLikeClause(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitLikeClause" ):
            listener.exitLikeClause(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitLikeClause" ):
            return visitor.visitLikeClause(self)
        else:
            return visitor.visitChildren(self)
def likeClause(self):
    """Parse rule `likeClause`:
    LIKE qualifiedName ((INCLUDING | EXCLUDING) PROPERTIES)?.

    The chosen INCLUDING/EXCLUDING token is stored as `localctx.optionType`.
    Generated by ANTLR 4; `self.state = N` values are ATN bookkeeping.
    """
    localctx = SqlBaseParser.LikeClauseContext(self, self._ctx, self.state)
    self.enterRule(localctx, 20, self.RULE_likeClause)
    self._la = 0 # Token type
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 990
        self.match(SqlBaseParser.LIKE)
        self.state = 991
        self.qualifiedName()
        self.state = 994
        self._errHandler.sync(self)
        _la = self._input.LA(1)
        # Optional property-copy mode.
        if _la==SqlBaseParser.EXCLUDING or _la==SqlBaseParser.INCLUDING:
            self.state = 992
            # Capture the actual option token (labelled `optionType` in the grammar).
            localctx.optionType = self._input.LT(1)
            _la = self._input.LA(1)
            if not(_la==SqlBaseParser.EXCLUDING or _la==SqlBaseParser.INCLUDING):
                localctx.optionType = self._errHandler.recoverInline(self)
            else:
                self._errHandler.reportMatch(self)
                self.consume()
            self.state = 993
            self.match(SqlBaseParser.PROPERTIES)

    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class PropertiesContext(ParserRuleContext):
    """Parse-tree context for `properties`: a parenthesised propertyAssignments list.
    Generated by ANTLR 4 — do not edit by hand."""
    __slots__ = 'parser'

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def propertyAssignments(self):
        return self.getTypedRuleContext(SqlBaseParser.PropertyAssignmentsContext,0)

    def getRuleIndex(self):
        return SqlBaseParser.RULE_properties

    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterProperties" ):
            listener.enterProperties(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitProperties" ):
            listener.exitProperties(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitProperties" ):
            return visitor.visitProperties(self)
        else:
            return visitor.visitChildren(self)
def properties(self):
    """Parse rule `properties`: T__1 propertyAssignments T__2.

    T__1/T__2 are unnamed literal tokens (the surrounding parentheses,
    presumably '(' and ')').  Generated by ANTLR 4.
    """
    localctx = SqlBaseParser.PropertiesContext(self, self._ctx, self.state)
    self.enterRule(localctx, 22, self.RULE_properties)
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 996
        self.match(SqlBaseParser.T__1)
        self.state = 997
        self.propertyAssignments()
        self.state = 998
        self.match(SqlBaseParser.T__2)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class PropertyAssignmentsContext(ParserRuleContext):
    """Parse-tree context for `propertyAssignments`: property_ (',' property_)*.
    Generated by ANTLR 4 — do not edit by hand."""
    __slots__ = 'parser'

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def property_(self, i:int=None):
        # With no index, return all property children; with an index, the i-th one.
        if i is None:
            return self.getTypedRuleContexts(SqlBaseParser.Property_Context)
        else:
            return self.getTypedRuleContext(SqlBaseParser.Property_Context,i)

    def getRuleIndex(self):
        return SqlBaseParser.RULE_propertyAssignments

    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterPropertyAssignments" ):
            listener.enterPropertyAssignments(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitPropertyAssignments" ):
            listener.exitPropertyAssignments(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitPropertyAssignments" ):
            return visitor.visitPropertyAssignments(self)
        else:
            return visitor.visitChildren(self)
def propertyAssignments(self):
    """Parse rule `propertyAssignments`: property_ (T__3 property_)*.

    T__3 is an unnamed literal token (the list separator, presumably ',').
    Generated by ANTLR 4; `self.state = N` values are ATN bookkeeping.
    """
    localctx = SqlBaseParser.PropertyAssignmentsContext(self, self._ctx, self.state)
    self.enterRule(localctx, 24, self.RULE_propertyAssignments)
    self._la = 0 # Token type
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 1000
        self.property_()
        self.state = 1005
        self._errHandler.sync(self)
        _la = self._input.LA(1)
        # Consume comma-separated continuation properties.
        while _la==SqlBaseParser.T__3:
            self.state = 1001
            self.match(SqlBaseParser.T__3)
            self.state = 1002
            self.property_()
            self.state = 1007
            self._errHandler.sync(self)
            _la = self._input.LA(1)

    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class Property_Context(ParserRuleContext):
    """Parse-tree context for `property_`: identifier EQ expression
    (a single key = value assignment).  Generated by ANTLR 4 — do not edit by hand."""
    __slots__ = 'parser'

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def identifier(self):
        # The property name (left-hand side).
        return self.getTypedRuleContext(SqlBaseParser.IdentifierContext,0)

    def EQ(self):
        return self.getToken(SqlBaseParser.EQ, 0)

    def expression(self):
        # The property value (right-hand side).
        return self.getTypedRuleContext(SqlBaseParser.ExpressionContext,0)

    def getRuleIndex(self):
        return SqlBaseParser.RULE_property_

    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterProperty_" ):
            listener.enterProperty_(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitProperty_" ):
            listener.exitProperty_(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitProperty_" ):
            return visitor.visitProperty_(self)
        else:
            return visitor.visitChildren(self)
def property_(self):
    """Parse rule `property_`: identifier EQ expression.

    Generated by ANTLR 4.
    """
    localctx = SqlBaseParser.Property_Context(self, self._ctx, self.state)
    self.enterRule(localctx, 26, self.RULE_property_)
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 1008
        self.identifier()
        self.state = 1009
        self.match(SqlBaseParser.EQ)
        self.state = 1010
        self.expression()
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class QueryNoWithContext(ParserRuleContext):
    """Parse-tree context for `queryNoWith`:
    queryTerm (ORDER BY sortItem list)? (OFFSET ...)? (LIMIT ... | FETCH ...)?.
    The labelled sub-results are exposed as `offset`, `limit` and `fetchFirst`.
    Generated by ANTLR 4 — do not edit by hand."""
    __slots__ = 'parser'

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser
        self.offset = None # RowCountContext
        self.limit = None # LimitRowCountContext
        self.fetchFirst = None # RowCountContext

    def queryTerm(self):
        return self.getTypedRuleContext(SqlBaseParser.QueryTermContext,0)

    def ORDER(self):
        return self.getToken(SqlBaseParser.ORDER, 0)

    def BY(self):
        return self.getToken(SqlBaseParser.BY, 0)

    def sortItem(self, i:int=None):
        # With no index, return all sort items; with an index, the i-th one.
        if i is None:
            return self.getTypedRuleContexts(SqlBaseParser.SortItemContext)
        else:
            return self.getTypedRuleContext(SqlBaseParser.SortItemContext,i)

    def OFFSET(self):
        return self.getToken(SqlBaseParser.OFFSET, 0)

    def rowCount(self, i:int=None):
        # Row-count children: may appear for both OFFSET and FETCH FIRST.
        if i is None:
            return self.getTypedRuleContexts(SqlBaseParser.RowCountContext)
        else:
            return self.getTypedRuleContext(SqlBaseParser.RowCountContext,i)

    def LIMIT(self):
        return self.getToken(SqlBaseParser.LIMIT, 0)

    def FETCH(self):
        return self.getToken(SqlBaseParser.FETCH, 0)

    def limitRowCount(self):
        return self.getTypedRuleContext(SqlBaseParser.LimitRowCountContext,0)

    def FIRST(self):
        return self.getToken(SqlBaseParser.FIRST, 0)

    def NEXT(self):
        return self.getToken(SqlBaseParser.NEXT, 0)

    def ROW(self, i:int=None):
        # ROW may appear after both OFFSET and FETCH, hence the indexed accessor.
        if i is None:
            return self.getTokens(SqlBaseParser.ROW)
        else:
            return self.getToken(SqlBaseParser.ROW, i)

    def ROWS(self, i:int=None):
        if i is None:
            return self.getTokens(SqlBaseParser.ROWS)
        else:
            return self.getToken(SqlBaseParser.ROWS, i)

    def ONLY(self):
        return self.getToken(SqlBaseParser.ONLY, 0)

    def WITH(self):
        # WITH here belongs to the FETCH ... WITH TIES option, not a WITH clause.
        return self.getToken(SqlBaseParser.WITH, 0)

    def TIES(self):
        return self.getToken(SqlBaseParser.TIES, 0)

    def getRuleIndex(self):
        return SqlBaseParser.RULE_queryNoWith

    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterQueryNoWith" ):
            listener.enterQueryNoWith(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitQueryNoWith" ):
            listener.exitQueryNoWith(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitQueryNoWith" ):
            return visitor.visitQueryNoWith(self)
        else:
            return visitor.visitChildren(self)
def queryNoWith(self):
    """Parse rule `queryNoWith`:
    queryTerm
      (ORDER BY sortItem (',' sortItem)*)?
      (OFFSET rowCount (ROW | ROWS)?)?
      ( LIMIT limitRowCount
      | FETCH (FIRST | NEXT) rowCount? (ROW | ROWS) (ONLY | WITH TIES)
      )?

    Populates `localctx.offset`, `localctx.limit` and `localctx.fetchFirst`.
    Generated by ANTLR 4; `self.state = N` values are ATN bookkeeping.
    """
    localctx = SqlBaseParser.QueryNoWithContext(self, self._ctx, self.state)
    self.enterRule(localctx, 28, self.RULE_queryNoWith)
    self._la = 0 # Token type
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 1012
        self.queryTerm(0)
        self.state = 1023
        self._errHandler.sync(self)
        _la = self._input.LA(1)
        # Optional ORDER BY with a comma-separated sort-item list.
        if _la==SqlBaseParser.ORDER:
            self.state = 1013
            self.match(SqlBaseParser.ORDER)
            self.state = 1014
            self.match(SqlBaseParser.BY)
            self.state = 1015
            self.sortItem()
            self.state = 1020
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            # T__3 is an unnamed literal token (the list separator, presumably ',').
            while _la==SqlBaseParser.T__3:
                self.state = 1016
                self.match(SqlBaseParser.T__3)
                self.state = 1017
                self.sortItem()
                self.state = 1022
                self._errHandler.sync(self)
                _la = self._input.LA(1)

        self.state = 1030
        self._errHandler.sync(self)
        _la = self._input.LA(1)
        # Optional OFFSET n (ROW | ROWS)?.
        if _la==SqlBaseParser.OFFSET:
            self.state = 1025
            self.match(SqlBaseParser.OFFSET)
            self.state = 1026
            localctx.offset = self.rowCount()
            self.state = 1028
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            if _la==SqlBaseParser.ROW or _la==SqlBaseParser.ROWS:
                self.state = 1027
                _la = self._input.LA(1)
                # Token-set match: accept either ROW or ROWS.
                if not(_la==SqlBaseParser.ROW or _la==SqlBaseParser.ROWS):
                    self._errHandler.recoverInline(self)
                else:
                    self._errHandler.reportMatch(self)
                    self.consume()

        self.state = 1045
        self._errHandler.sync(self)
        token = self._input.LA(1)
        # Optional row-limiting clause: LIMIT n or FETCH FIRST/NEXT n ROW(S) ONLY | WITH TIES.
        if token in [SqlBaseParser.LIMIT]:
            self.state = 1032
            self.match(SqlBaseParser.LIMIT)
            self.state = 1033
            localctx.limit = self.limitRowCount()
        elif token in [SqlBaseParser.FETCH]:
            self.state = 1034
            self.match(SqlBaseParser.FETCH)
            self.state = 1035
            _la = self._input.LA(1)
            # Token-set match: accept either FIRST or NEXT.
            if not(_la==SqlBaseParser.FIRST or _la==SqlBaseParser.NEXT):
                self._errHandler.recoverInline(self)
            else:
                self._errHandler.reportMatch(self)
                self.consume()
            self.state = 1037
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            # The fetch count is optional (parameter marker or integer literal).
            if _la==SqlBaseParser.QUESTION_MARK or _la==SqlBaseParser.INTEGER_VALUE:
                self.state = 1036
                localctx.fetchFirst = self.rowCount()

            self.state = 1039
            _la = self._input.LA(1)
            if not(_la==SqlBaseParser.ROW or _la==SqlBaseParser.ROWS):
                self._errHandler.recoverInline(self)
            else:
                self._errHandler.reportMatch(self)
                self.consume()
            self.state = 1043
            self._errHandler.sync(self)
            token = self._input.LA(1)
            # FETCH must end with ONLY or WITH TIES.
            if token in [SqlBaseParser.ONLY]:
                self.state = 1040
                self.match(SqlBaseParser.ONLY)
            elif token in [SqlBaseParser.WITH]:
                self.state = 1041
                self.match(SqlBaseParser.WITH)
                self.state = 1042
                self.match(SqlBaseParser.TIES)
            else:
                raise NoViableAltException(self)
        elif token in [SqlBaseParser.EOF, SqlBaseParser.T__2, SqlBaseParser.WITH]:
            # Follow set of the optional clause: no LIMIT/FETCH present.
            pass
        else:
            pass

    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class LimitRowCountContext(ParserRuleContext):
    """Parse-tree context for `limitRowCount`: ALL | rowCount.
    Generated by ANTLR 4 — do not edit by hand."""
    __slots__ = 'parser'

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def ALL(self):
        # Present when LIMIT ALL was written; None otherwise.
        return self.getToken(SqlBaseParser.ALL, 0)

    def rowCount(self):
        # Present when a numeric/parameter row count was written; None otherwise.
        return self.getTypedRuleContext(SqlBaseParser.RowCountContext,0)

    def getRuleIndex(self):
        return SqlBaseParser.RULE_limitRowCount

    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterLimitRowCount" ):
            listener.enterLimitRowCount(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitLimitRowCount" ):
            listener.exitLimitRowCount(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitLimitRowCount" ):
            return visitor.visitLimitRowCount(self)
        else:
            return visitor.visitChildren(self)
    def limitRowCount(self):
        """Parse the ``limitRowCount`` rule: ``ALL`` | rowCount.

        ANTLR-generated; the ``self.state`` assignments are ATN state
        numbers and must not be edited by hand.
        """
        localctx = SqlBaseParser.LimitRowCountContext(self, self._ctx, self.state)
        self.enterRule(localctx, 30, self.RULE_limitRowCount)
        try:
            self.state = 1049
            self._errHandler.sync(self)
            token = self._input.LA(1)  # one token of lookahead selects the alternative
            if token in [SqlBaseParser.ALL]:
                # Alternative 1: the ALL keyword.
                self.enterOuterAlt(localctx, 1)
                self.state = 1047
                self.match(SqlBaseParser.ALL)
            elif token in [SqlBaseParser.QUESTION_MARK, SqlBaseParser.INTEGER_VALUE]:
                # Alternative 2: an integer literal or a '?' parameter.
                self.enterOuterAlt(localctx, 2)
                self.state = 1048
                self.rowCount()
            else:
                raise NoViableAltException(self)
        except RecognitionException as re:
            # Standard ANTLR recovery: record, report, resynchronize.
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
class RowCountContext(ParserRuleContext):
__slots__ = 'parser'
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def INTEGER_VALUE(self):
return self.getToken(SqlBaseParser.INTEGER_VALUE, 0)
def QUESTION_MARK(self):
return self.getToken(SqlBaseParser.QUESTION_MARK, 0)
def getRuleIndex(self):
return SqlBaseParser.RULE_rowCount
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterRowCount" ):
listener.enterRowCount(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitRowCount" ):
listener.exitRowCount(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitRowCount" ):
return visitor.visitRowCount(self)
else:
return visitor.visitChildren(self)
    def rowCount(self):
        """Parse the ``rowCount`` rule: a single INTEGER_VALUE or QUESTION_MARK token."""
        localctx = SqlBaseParser.RowCountContext(self, self._ctx, self.state)
        self.enterRule(localctx, 32, self.RULE_rowCount)
        self._la = 0 # Token type
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 1051
            _la = self._input.LA(1)
            # Accept exactly one of the two token types; anything else is a
            # single-token mismatch handled by inline recovery.
            if not(_la==SqlBaseParser.QUESTION_MARK or _la==SqlBaseParser.INTEGER_VALUE):
                self._errHandler.recoverInline(self)
            else:
                self._errHandler.reportMatch(self)
                self.consume()
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
class QueryTermContext(ParserRuleContext):
__slots__ = 'parser'
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def getRuleIndex(self):
return SqlBaseParser.RULE_queryTerm
def copyFrom(self, ctx:ParserRuleContext):
super().copyFrom(ctx)
class QueryTermDefaultContext(QueryTermContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.QueryTermContext
super().__init__(parser)
self.copyFrom(ctx)
def queryPrimary(self):
return self.getTypedRuleContext(SqlBaseParser.QueryPrimaryContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterQueryTermDefault" ):
listener.enterQueryTermDefault(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitQueryTermDefault" ):
listener.exitQueryTermDefault(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitQueryTermDefault" ):
return visitor.visitQueryTermDefault(self)
else:
return visitor.visitChildren(self)
class SetOperationContext(QueryTermContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.QueryTermContext
super().__init__(parser)
self.left = None # QueryTermContext
self.operator = None # Token
self.right = None # QueryTermContext
self.copyFrom(ctx)
def queryTerm(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(SqlBaseParser.QueryTermContext)
else:
return self.getTypedRuleContext(SqlBaseParser.QueryTermContext,i)
def INTERSECT(self):
return self.getToken(SqlBaseParser.INTERSECT, 0)
def setQuantifier(self):
return self.getTypedRuleContext(SqlBaseParser.SetQuantifierContext,0)
def UNION(self):
return self.getToken(SqlBaseParser.UNION, 0)
def EXCEPT(self):
return self.getToken(SqlBaseParser.EXCEPT, 0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterSetOperation" ):
listener.enterSetOperation(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitSetOperation" ):
listener.exitSetOperation(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitSetOperation" ):
return visitor.visitSetOperation(self)
else:
return visitor.visitChildren(self)
    def queryTerm(self, _p:int=0):
        """Parse the left-recursive ``queryTerm`` rule.

        Grammar shape: ``queryPrimary ((INTERSECT | UNION | EXCEPT)
        [setQuantifier] queryTerm)*`` where INTERSECT binds tighter
        (precedence 2) than UNION/EXCEPT (precedence 1).  This is
        ANTLR's standard left-recursion rewrite: a primed context loop
        driven by adaptive prediction.  Do not edit state numbers by hand.
        """
        _parentctx = self._ctx
        _parentState = self.state
        localctx = SqlBaseParser.QueryTermContext(self, self._ctx, _parentState)
        _prevctx = localctx
        _startState = 34
        self.enterRecursionRule(localctx, 34, self.RULE_queryTerm, _p)
        self._la = 0 # Token type
        try:
            self.enterOuterAlt(localctx, 1)
            localctx = SqlBaseParser.QueryTermDefaultContext(self, localctx)
            self._ctx = localctx
            _prevctx = localctx
            self.state = 1054
            self.queryPrimary()
            self._ctx.stop = self._input.LT(-1)
            self.state = 1070
            self._errHandler.sync(self)
            _alt = self._interp.adaptivePredict(self._input,125,self._ctx)
            # One iteration per trailing set operation; prediction decides
            # whether another INTERSECT/UNION/EXCEPT clause follows.
            while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
                if _alt==1:
                    if self._parseListeners is not None:
                        self.triggerExitRuleEvent()
                    _prevctx = localctx
                    self.state = 1068
                    self._errHandler.sync(self)
                    la_ = self._interp.adaptivePredict(self._input,124,self._ctx)
                    if la_ == 1:
                        # INTERSECT [ALL | DISTINCT] queryTerm  (precedence 2)
                        localctx = SqlBaseParser.SetOperationContext(self, SqlBaseParser.QueryTermContext(self, _parentctx, _parentState))
                        localctx.left = _prevctx
                        self.pushNewRecursionContext(localctx, _startState, self.RULE_queryTerm)
                        self.state = 1056
                        if not self.precpred(self._ctx, 2):
                            from antlr4.error.Errors import (
                                FailedPredicateException,
                            )
                            raise FailedPredicateException(self, "self.precpred(self._ctx, 2)")
                        self.state = 1057
                        localctx.operator = self.match(SqlBaseParser.INTERSECT)
                        self.state = 1059
                        self._errHandler.sync(self)
                        _la = self._input.LA(1)
                        if _la==SqlBaseParser.ALL or _la==SqlBaseParser.DISTINCT:
                            # Optional set quantifier.
                            self.state = 1058
                            self.setQuantifier()
                        self.state = 1061
                        localctx.right = self.queryTerm(3)
                    elif la_ == 2:
                        # (EXCEPT | UNION) [ALL | DISTINCT] queryTerm  (precedence 1)
                        localctx = SqlBaseParser.SetOperationContext(self, SqlBaseParser.QueryTermContext(self, _parentctx, _parentState))
                        localctx.left = _prevctx
                        self.pushNewRecursionContext(localctx, _startState, self.RULE_queryTerm)
                        self.state = 1062
                        if not self.precpred(self._ctx, 1):
                            from antlr4.error.Errors import (
                                FailedPredicateException,
                            )
                            raise FailedPredicateException(self, "self.precpred(self._ctx, 1)")
                        self.state = 1063
                        localctx.operator = self._input.LT(1)
                        _la = self._input.LA(1)
                        if not(_la==SqlBaseParser.EXCEPT or _la==SqlBaseParser.UNION):
                            localctx.operator = self._errHandler.recoverInline(self)
                        else:
                            self._errHandler.reportMatch(self)
                            self.consume()
                        self.state = 1065
                        self._errHandler.sync(self)
                        _la = self._input.LA(1)
                        if _la==SqlBaseParser.ALL or _la==SqlBaseParser.DISTINCT:
                            # Optional set quantifier.
                            self.state = 1064
                            self.setQuantifier()
                        self.state = 1067
                        localctx.right = self.queryTerm(2)
                self.state = 1072
                self._errHandler.sync(self)
                _alt = self._interp.adaptivePredict(self._input,125,self._ctx)
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.unrollRecursionContexts(_parentctx)
        return localctx
class QueryPrimaryContext(ParserRuleContext):
__slots__ = 'parser'
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def getRuleIndex(self):
return SqlBaseParser.RULE_queryPrimary
def copyFrom(self, ctx:ParserRuleContext):
super().copyFrom(ctx)
class SubqueryContext(QueryPrimaryContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.QueryPrimaryContext
super().__init__(parser)
self.copyFrom(ctx)
def queryNoWith(self):
return self.getTypedRuleContext(SqlBaseParser.QueryNoWithContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterSubquery" ):
listener.enterSubquery(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitSubquery" ):
listener.exitSubquery(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitSubquery" ):
return visitor.visitSubquery(self)
else:
return visitor.visitChildren(self)
class QueryPrimaryDefaultContext(QueryPrimaryContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.QueryPrimaryContext
super().__init__(parser)
self.copyFrom(ctx)
def querySpecification(self):
return self.getTypedRuleContext(SqlBaseParser.QuerySpecificationContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterQueryPrimaryDefault" ):
listener.enterQueryPrimaryDefault(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitQueryPrimaryDefault" ):
listener.exitQueryPrimaryDefault(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitQueryPrimaryDefault" ):
return visitor.visitQueryPrimaryDefault(self)
else:
return visitor.visitChildren(self)
class TableContext(QueryPrimaryContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.QueryPrimaryContext
super().__init__(parser)
self.copyFrom(ctx)
def TABLE(self):
return self.getToken(SqlBaseParser.TABLE, 0)
def qualifiedName(self):
return self.getTypedRuleContext(SqlBaseParser.QualifiedNameContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterTable" ):
listener.enterTable(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitTable" ):
listener.exitTable(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitTable" ):
return visitor.visitTable(self)
else:
return visitor.visitChildren(self)
class InlineTableContext(QueryPrimaryContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.QueryPrimaryContext
super().__init__(parser)
self.copyFrom(ctx)
def VALUES(self):
return self.getToken(SqlBaseParser.VALUES, 0)
def expression(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(SqlBaseParser.ExpressionContext)
else:
return self.getTypedRuleContext(SqlBaseParser.ExpressionContext,i)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterInlineTable" ):
listener.enterInlineTable(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitInlineTable" ):
listener.exitInlineTable(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitInlineTable" ):
return visitor.visitInlineTable(self)
else:
return visitor.visitChildren(self)
    def queryPrimary(self):
        """Parse the ``queryPrimary`` rule.

        Alternatives: a SELECT specification, ``TABLE qualifiedName``,
        ``VALUES expression (',' expression)*``, or a parenthesized
        subquery.  ANTLR-generated; state numbers are ATN states.
        """
        localctx = SqlBaseParser.QueryPrimaryContext(self, self._ctx, self.state)
        self.enterRule(localctx, 36, self.RULE_queryPrimary)
        try:
            self.state = 1089
            self._errHandler.sync(self)
            token = self._input.LA(1)  # one token of lookahead selects the alternative
            if token in [SqlBaseParser.SELECT]:
                localctx = SqlBaseParser.QueryPrimaryDefaultContext(self, localctx)
                self.enterOuterAlt(localctx, 1)
                self.state = 1073
                self.querySpecification()
            elif token in [SqlBaseParser.TABLE]:
                localctx = SqlBaseParser.TableContext(self, localctx)
                self.enterOuterAlt(localctx, 2)
                self.state = 1074
                self.match(SqlBaseParser.TABLE)
                self.state = 1075
                self.qualifiedName()
            elif token in [SqlBaseParser.VALUES]:
                localctx = SqlBaseParser.InlineTableContext(self, localctx)
                self.enterOuterAlt(localctx, 3)
                self.state = 1076
                self.match(SqlBaseParser.VALUES)
                self.state = 1077
                self.expression()
                self.state = 1082
                self._errHandler.sync(self)
                _alt = self._interp.adaptivePredict(self._input,126,self._ctx)
                # Separator-delimited list of additional row expressions
                # (T__3 is the generated name of the list-separator token).
                while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
                    if _alt==1:
                        self.state = 1078
                        self.match(SqlBaseParser.T__3)
                        self.state = 1079
                        self.expression()
                    self.state = 1084
                    self._errHandler.sync(self)
                    _alt = self._interp.adaptivePredict(self._input,126,self._ctx)
            elif token in [SqlBaseParser.T__1]:
                # Parenthesized subquery: T__1 queryNoWith T__2
                # (T__1/T__2 are the generated names of the delimiter tokens).
                localctx = SqlBaseParser.SubqueryContext(self, localctx)
                self.enterOuterAlt(localctx, 4)
                self.state = 1085
                self.match(SqlBaseParser.T__1)
                self.state = 1086
                self.queryNoWith()
                self.state = 1087
                self.match(SqlBaseParser.T__2)
            else:
                raise NoViableAltException(self)
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
class SortItemContext(ParserRuleContext):
__slots__ = 'parser'
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
self.ordering = None # Token
self.nullOrdering = None # Token
def expression(self):
return self.getTypedRuleContext(SqlBaseParser.ExpressionContext,0)
def NULLS(self):
return self.getToken(SqlBaseParser.NULLS, 0)
def ASC(self):
return self.getToken(SqlBaseParser.ASC, 0)
def DESC(self):
return self.getToken(SqlBaseParser.DESC, 0)
def FIRST(self):
return self.getToken(SqlBaseParser.FIRST, 0)
def LAST(self):
return self.getToken(SqlBaseParser.LAST, 0)
def getRuleIndex(self):
return SqlBaseParser.RULE_sortItem
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterSortItem" ):
listener.enterSortItem(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitSortItem" ):
listener.exitSortItem(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitSortItem" ):
return visitor.visitSortItem(self)
else:
return visitor.visitChildren(self)
    def sortItem(self):
        """Parse ``sortItem``: expression [ASC|DESC] [NULLS (FIRST|LAST)]."""
        localctx = SqlBaseParser.SortItemContext(self, self._ctx, self.state)
        self.enterRule(localctx, 38, self.RULE_sortItem)
        self._la = 0 # Token type
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 1091
            self.expression()
            self.state = 1093
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            if _la==SqlBaseParser.ASC or _la==SqlBaseParser.DESC:
                # Optional ordering direction, captured on localctx.ordering.
                self.state = 1092
                localctx.ordering = self._input.LT(1)
                _la = self._input.LA(1)
                if not(_la==SqlBaseParser.ASC or _la==SqlBaseParser.DESC):
                    localctx.ordering = self._errHandler.recoverInline(self)
                else:
                    self._errHandler.reportMatch(self)
                    self.consume()
            self.state = 1097
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            if _la==SqlBaseParser.NULLS:
                # Optional NULLS FIRST/LAST clause, captured on
                # localctx.nullOrdering.
                self.state = 1095
                self.match(SqlBaseParser.NULLS)
                self.state = 1096
                localctx.nullOrdering = self._input.LT(1)
                _la = self._input.LA(1)
                if not(_la==SqlBaseParser.FIRST or _la==SqlBaseParser.LAST):
                    localctx.nullOrdering = self._errHandler.recoverInline(self)
                else:
                    self._errHandler.reportMatch(self)
                    self.consume()
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
class QuerySpecificationContext(ParserRuleContext):
__slots__ = 'parser'
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
self.where = None # BooleanExpressionContext
self.having = None # BooleanExpressionContext
def SELECT(self):
return self.getToken(SqlBaseParser.SELECT, 0)
def selectItem(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(SqlBaseParser.SelectItemContext)
else:
return self.getTypedRuleContext(SqlBaseParser.SelectItemContext,i)
def setQuantifier(self):
return self.getTypedRuleContext(SqlBaseParser.SetQuantifierContext,0)
def FROM(self):
return self.getToken(SqlBaseParser.FROM, 0)
def relation(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(SqlBaseParser.RelationContext)
else:
return self.getTypedRuleContext(SqlBaseParser.RelationContext,i)
def WHERE(self):
return self.getToken(SqlBaseParser.WHERE, 0)
def GROUP(self):
return self.getToken(SqlBaseParser.GROUP, 0)
def BY(self):
return self.getToken(SqlBaseParser.BY, 0)
def groupBy(self):
return self.getTypedRuleContext(SqlBaseParser.GroupByContext,0)
def HAVING(self):
return self.getToken(SqlBaseParser.HAVING, 0)
def WINDOW(self):
return self.getToken(SqlBaseParser.WINDOW, 0)
def windowDefinition(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(SqlBaseParser.WindowDefinitionContext)
else:
return self.getTypedRuleContext(SqlBaseParser.WindowDefinitionContext,i)
def booleanExpression(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(SqlBaseParser.BooleanExpressionContext)
else:
return self.getTypedRuleContext(SqlBaseParser.BooleanExpressionContext,i)
def getRuleIndex(self):
return SqlBaseParser.RULE_querySpecification
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterQuerySpecification" ):
listener.enterQuerySpecification(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitQuerySpecification" ):
listener.exitQuerySpecification(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitQuerySpecification" ):
return visitor.visitQuerySpecification(self)
else:
return visitor.visitChildren(self)
    def querySpecification(self):
        """Parse the ``querySpecification`` rule:

        ``SELECT [setQuantifier] selectItem (',' selectItem)*``
        ``[FROM relation (',' relation)*] [WHERE booleanExpression]``
        ``[GROUP BY groupBy] [HAVING booleanExpression]``
        ``[WINDOW windowDefinition (',' windowDefinition)*]``

        ANTLR-generated; each optional clause is gated by an
        adaptivePredict decision and state numbers are ATN states.
        """
        localctx = SqlBaseParser.QuerySpecificationContext(self, self._ctx, self.state)
        self.enterRule(localctx, 40, self.RULE_querySpecification)
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 1099
            self.match(SqlBaseParser.SELECT)
            self.state = 1101
            self._errHandler.sync(self)
            la_ = self._interp.adaptivePredict(self._input,130,self._ctx)
            if la_ == 1:
                # Optional ALL/DISTINCT quantifier.
                self.state = 1100
                self.setQuantifier()
            # Select list: at least one item, separator-delimited.
            self.state = 1103
            self.selectItem()
            self.state = 1108
            self._errHandler.sync(self)
            _alt = self._interp.adaptivePredict(self._input,131,self._ctx)
            while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
                if _alt==1:
                    self.state = 1104
                    self.match(SqlBaseParser.T__3)
                    self.state = 1105
                    self.selectItem()
                self.state = 1110
                self._errHandler.sync(self)
                _alt = self._interp.adaptivePredict(self._input,131,self._ctx)
            self.state = 1120
            self._errHandler.sync(self)
            la_ = self._interp.adaptivePredict(self._input,133,self._ctx)
            if la_ == 1:
                # Optional FROM clause with a separator-delimited relation list.
                self.state = 1111
                self.match(SqlBaseParser.FROM)
                self.state = 1112
                self.relation(0)
                self.state = 1117
                self._errHandler.sync(self)
                _alt = self._interp.adaptivePredict(self._input,132,self._ctx)
                while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
                    if _alt==1:
                        self.state = 1113
                        self.match(SqlBaseParser.T__3)
                        self.state = 1114
                        self.relation(0)
                    self.state = 1119
                    self._errHandler.sync(self)
                    _alt = self._interp.adaptivePredict(self._input,132,self._ctx)
            self.state = 1124
            self._errHandler.sync(self)
            la_ = self._interp.adaptivePredict(self._input,134,self._ctx)
            if la_ == 1:
                # Optional WHERE clause; predicate stored on localctx.where.
                self.state = 1122
                self.match(SqlBaseParser.WHERE)
                self.state = 1123
                localctx.where = self.booleanExpression(0)
            self.state = 1129
            self._errHandler.sync(self)
            la_ = self._interp.adaptivePredict(self._input,135,self._ctx)
            if la_ == 1:
                # Optional GROUP BY clause.
                self.state = 1126
                self.match(SqlBaseParser.GROUP)
                self.state = 1127
                self.match(SqlBaseParser.BY)
                self.state = 1128
                self.groupBy()
            self.state = 1133
            self._errHandler.sync(self)
            la_ = self._interp.adaptivePredict(self._input,136,self._ctx)
            if la_ == 1:
                # Optional HAVING clause; predicate stored on localctx.having.
                self.state = 1131
                self.match(SqlBaseParser.HAVING)
                self.state = 1132
                localctx.having = self.booleanExpression(0)
            self.state = 1144
            self._errHandler.sync(self)
            la_ = self._interp.adaptivePredict(self._input,138,self._ctx)
            if la_ == 1:
                # Optional WINDOW clause with one or more named definitions.
                self.state = 1135
                self.match(SqlBaseParser.WINDOW)
                self.state = 1136
                self.windowDefinition()
                self.state = 1141
                self._errHandler.sync(self)
                _alt = self._interp.adaptivePredict(self._input,137,self._ctx)
                while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
                    if _alt==1:
                        self.state = 1137
                        self.match(SqlBaseParser.T__3)
                        self.state = 1138
                        self.windowDefinition()
                    self.state = 1143
                    self._errHandler.sync(self)
                    _alt = self._interp.adaptivePredict(self._input,137,self._ctx)
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
class GroupByContext(ParserRuleContext):
__slots__ = 'parser'
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def groupingElement(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(SqlBaseParser.GroupingElementContext)
else:
return self.getTypedRuleContext(SqlBaseParser.GroupingElementContext,i)
def setQuantifier(self):
return self.getTypedRuleContext(SqlBaseParser.SetQuantifierContext,0)
def getRuleIndex(self):
return SqlBaseParser.RULE_groupBy
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterGroupBy" ):
listener.enterGroupBy(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitGroupBy" ):
listener.exitGroupBy(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitGroupBy" ):
return visitor.visitGroupBy(self)
else:
return visitor.visitChildren(self)
    def groupBy(self):
        """Parse ``groupBy``: [setQuantifier] groupingElement (',' groupingElement)*."""
        localctx = SqlBaseParser.GroupByContext(self, self._ctx, self.state)
        self.enterRule(localctx, 42, self.RULE_groupBy)
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 1147
            self._errHandler.sync(self)
            la_ = self._interp.adaptivePredict(self._input,139,self._ctx)
            if la_ == 1:
                # Optional ALL/DISTINCT quantifier.
                self.state = 1146
                self.setQuantifier()
            # At least one grouping element, separator-delimited.
            self.state = 1149
            self.groupingElement()
            self.state = 1154
            self._errHandler.sync(self)
            _alt = self._interp.adaptivePredict(self._input,140,self._ctx)
            while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
                if _alt==1:
                    self.state = 1150
                    self.match(SqlBaseParser.T__3)
                    self.state = 1151
                    self.groupingElement()
                self.state = 1156
                self._errHandler.sync(self)
                _alt = self._interp.adaptivePredict(self._input,140,self._ctx)
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
class GroupingElementContext(ParserRuleContext):
__slots__ = 'parser'
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def getRuleIndex(self):
return SqlBaseParser.RULE_groupingElement
def copyFrom(self, ctx:ParserRuleContext):
super().copyFrom(ctx)
class MultipleGroupingSetsContext(GroupingElementContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.GroupingElementContext
super().__init__(parser)
self.copyFrom(ctx)
def GROUPING(self):
return self.getToken(SqlBaseParser.GROUPING, 0)
def SETS(self):
return self.getToken(SqlBaseParser.SETS, 0)
def groupingSet(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(SqlBaseParser.GroupingSetContext)
else:
return self.getTypedRuleContext(SqlBaseParser.GroupingSetContext,i)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterMultipleGroupingSets" ):
listener.enterMultipleGroupingSets(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitMultipleGroupingSets" ):
listener.exitMultipleGroupingSets(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitMultipleGroupingSets" ):
return visitor.visitMultipleGroupingSets(self)
else:
return visitor.visitChildren(self)
class SingleGroupingSetContext(GroupingElementContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.GroupingElementContext
super().__init__(parser)
self.copyFrom(ctx)
def groupingSet(self):
return self.getTypedRuleContext(SqlBaseParser.GroupingSetContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterSingleGroupingSet" ):
listener.enterSingleGroupingSet(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitSingleGroupingSet" ):
listener.exitSingleGroupingSet(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitSingleGroupingSet" ):
return visitor.visitSingleGroupingSet(self)
else:
return visitor.visitChildren(self)
class CubeContext(GroupingElementContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.GroupingElementContext
super().__init__(parser)
self.copyFrom(ctx)
def CUBE(self):
return self.getToken(SqlBaseParser.CUBE, 0)
def expression(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(SqlBaseParser.ExpressionContext)
else:
return self.getTypedRuleContext(SqlBaseParser.ExpressionContext,i)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterCube" ):
listener.enterCube(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitCube" ):
listener.exitCube(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitCube" ):
return visitor.visitCube(self)
else:
return visitor.visitChildren(self)
class RollupContext(GroupingElementContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.GroupingElementContext
super().__init__(parser)
self.copyFrom(ctx)
def ROLLUP(self):
return self.getToken(SqlBaseParser.ROLLUP, 0)
def expression(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(SqlBaseParser.ExpressionContext)
else:
return self.getTypedRuleContext(SqlBaseParser.ExpressionContext,i)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterRollup" ):
listener.enterRollup(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitRollup" ):
listener.exitRollup(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitRollup" ):
return visitor.visitRollup(self)
else:
return visitor.visitChildren(self)
def groupingElement(self):
    """Parse rule groupingElement (rule index 44).

    Four labelled alternatives, chosen by adaptive prediction:
      1. SingleGroupingSet   : groupingSet
      2. Rollup              : ROLLUP '(' (expression (',' expression)*)? ')'
      3. Cube                : CUBE   '(' (expression (',' expression)*)? ')'
      4. MultipleGroupingSets: GROUPING SETS '(' groupingSet (',' groupingSet)* ')'
    (T__1/T__2/T__3 are the grammar's unnamed literal tokens — presumably
    '(' / ')' / ',' — TODO confirm against the generated literalNames table.)

    Returns the populated GroupingElementContext subclass for the matched
    alternative; on a RecognitionException the error is reported/recovered
    and the (partial) context is still returned with .exception set.
    """
    localctx = SqlBaseParser.GroupingElementContext(self, self._ctx, self.state)
    self.enterRule(localctx, 44, self.RULE_groupingElement)
    self._la = 0 # Token type
    try:
        self.state = 1197
        self._errHandler.sync(self)
        # Look ahead (decision 146) to pick one of the four alternatives.
        la_ = self._interp.adaptivePredict(self._input,146,self._ctx)
        if la_ == 1:
            # Alternative 1: a bare grouping set.
            localctx = SqlBaseParser.SingleGroupingSetContext(self, localctx)
            self.enterOuterAlt(localctx, 1)
            self.state = 1157
            self.groupingSet()

        elif la_ == 2:
            # Alternative 2: ROLLUP with an optional expression list.
            localctx = SqlBaseParser.RollupContext(self, localctx)
            self.enterOuterAlt(localctx, 2)
            self.state = 1158
            self.match(SqlBaseParser.ROLLUP)
            self.state = 1159
            self.match(SqlBaseParser.T__1)
            self.state = 1168
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            # The expression list is optional: parse it only when the next
            # token is in the FOLLOW set of an expression start (the bitmask
            # below encodes that set in 64-token windows).
            if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << SqlBaseParser.T__1) | (1 << SqlBaseParser.ADD) | (1 << SqlBaseParser.ADMIN) | (1 << SqlBaseParser.AFTER) | (1 << SqlBaseParser.ALL) | (1 << SqlBaseParser.ANALYZE) | (1 << SqlBaseParser.ANY) | (1 << SqlBaseParser.ARRAY) | (1 << SqlBaseParser.ASC) | (1 << SqlBaseParser.AT) | (1 << SqlBaseParser.AUTHORIZATION) | (1 << SqlBaseParser.BERNOULLI) | (1 << SqlBaseParser.CALL) | (1 << SqlBaseParser.CASCADE) | (1 << SqlBaseParser.CASE) | (1 << SqlBaseParser.CAST) | (1 << SqlBaseParser.CATALOGS) | (1 << SqlBaseParser.COLUMN) | (1 << SqlBaseParser.COLUMNS) | (1 << SqlBaseParser.COMMENT) | (1 << SqlBaseParser.COMMIT) | (1 << SqlBaseParser.COMMITTED) | (1 << SqlBaseParser.COUNT) | (1 << SqlBaseParser.CURRENT) | (1 << SqlBaseParser.CURRENT_CATALOG) | (1 << SqlBaseParser.CURRENT_DATE) | (1 << SqlBaseParser.CURRENT_PATH) | (1 << SqlBaseParser.CURRENT_SCHEMA) | (1 << SqlBaseParser.CURRENT_TIME) | (1 << SqlBaseParser.CURRENT_TIMESTAMP) | (1 << SqlBaseParser.CURRENT_USER) | (1 << SqlBaseParser.DATA) | (1 << SqlBaseParser.DATE) | (1 << SqlBaseParser.DAY) | (1 << SqlBaseParser.DEFINER) | (1 << SqlBaseParser.DENY))) != 0) or ((((_la - 64)) & ~0x3f) == 0 and ((1 << (_la - 64)) & ((1 << (SqlBaseParser.DESC - 64)) | (1 << (SqlBaseParser.DEFINE - 64)) | (1 << (SqlBaseParser.DISTRIBUTED - 64)) | (1 << (SqlBaseParser.DOUBLE - 64)) | (1 << (SqlBaseParser.EMPTY - 64)) | (1 << (SqlBaseParser.ERROR - 64)) | (1 << (SqlBaseParser.EXCLUDING - 64)) | (1 << (SqlBaseParser.EXISTS - 64)) | (1 << (SqlBaseParser.EXPLAIN - 64)) | (1 << (SqlBaseParser.EXTRACT - 64)) | (1 << (SqlBaseParser.FALSE - 64)) | (1 << (SqlBaseParser.FETCH - 64)) | (1 << (SqlBaseParser.FILTER - 64)) | (1 << (SqlBaseParser.FINAL - 64)) | (1 << (SqlBaseParser.FIRST - 64)) | (1 << (SqlBaseParser.FOLLOWING - 64)) | (1 << (SqlBaseParser.FORMAT - 64)) | (1 << (SqlBaseParser.FUNCTIONS - 64)) | (1 << (SqlBaseParser.GRANT - 64)) | (1 << (SqlBaseParser.GRANTED - 64)) | (1 << (SqlBaseParser.GRANTS - 64)) | (1 << (SqlBaseParser.GRAPHVIZ - 64)) | (1 << (SqlBaseParser.GROUPING - 64)) | (1 << (SqlBaseParser.GROUPS - 64)) | (1 << (SqlBaseParser.HOUR - 64)) | (1 << (SqlBaseParser.IF - 64)) | (1 << (SqlBaseParser.IGNORE - 64)) | (1 << (SqlBaseParser.INCLUDING - 64)) | (1 << (SqlBaseParser.INITIAL - 64)) | (1 << (SqlBaseParser.INPUT - 64)) | (1 << (SqlBaseParser.INTERVAL - 64)) | (1 << (SqlBaseParser.INVOKER - 64)) | (1 << (SqlBaseParser.IO - 64)) | (1 << (SqlBaseParser.ISOLATION - 64)) | (1 << (SqlBaseParser.JSON - 64)) | (1 << (SqlBaseParser.LAST - 64)) | (1 << (SqlBaseParser.LATERAL - 64)) | (1 << (SqlBaseParser.LEVEL - 64)) | (1 << (SqlBaseParser.LIMIT - 64)) | (1 << (SqlBaseParser.LISTAGG - 64)) | (1 << (SqlBaseParser.LOCAL - 64)) | (1 << (SqlBaseParser.LOCALTIME - 64)))) != 0) or ((((_la - 128)) & ~0x3f) == 0 and ((1 << (_la - 128)) & ((1 << (SqlBaseParser.LOCALTIMESTAMP - 128)) | (1 << (SqlBaseParser.LOGICAL - 128)) | (1 << (SqlBaseParser.MAP - 128)) | (1 << (SqlBaseParser.MATCH - 128)) | (1 << (SqlBaseParser.MATCHED - 128)) | (1 << (SqlBaseParser.MATCHES - 128)) | (1 << (SqlBaseParser.MATCH_RECOGNIZE - 128)) | (1 << (SqlBaseParser.MATERIALIZED - 128)) | (1 << (SqlBaseParser.MEASURES - 128)) | (1 << (SqlBaseParser.MERGE - 128)) | (1 << (SqlBaseParser.MINUTE - 128)) | (1 << (SqlBaseParser.MONTH - 128)) | (1 << (SqlBaseParser.NEXT - 128)) | (1 << (SqlBaseParser.NFC - 128)) | (1 << (SqlBaseParser.NFD - 128)) | (1 << (SqlBaseParser.NFKC - 128)) | (1 << (SqlBaseParser.NFKD - 128)) | (1 << (SqlBaseParser.NO - 128)) | (1 << (SqlBaseParser.NONE - 128)) | (1 << (SqlBaseParser.NORMALIZE - 128)) | (1 << (SqlBaseParser.NOT - 128)) | (1 << (SqlBaseParser.NULL - 128)) | (1 << (SqlBaseParser.NULLIF - 128)) | (1 << (SqlBaseParser.NULLS - 128)) | (1 << (SqlBaseParser.OFFSET - 128)) | (1 << (SqlBaseParser.OMIT - 128)) | (1 << (SqlBaseParser.OF - 128)) | (1 << (SqlBaseParser.ONE - 128)) | (1 << (SqlBaseParser.ONLY - 128)) | (1 << (SqlBaseParser.OPTION - 128)) | (1 << (SqlBaseParser.ORDINALITY - 128)) | (1 << (SqlBaseParser.OUTPUT - 128)) | (1 << (SqlBaseParser.OVER - 128)) | (1 << (SqlBaseParser.OVERFLOW - 128)) | (1 << (SqlBaseParser.PARTITION - 128)) | (1 << (SqlBaseParser.PARTITIONS - 128)) | (1 << (SqlBaseParser.PAST - 128)) | (1 << (SqlBaseParser.PATH - 128)) | (1 << (SqlBaseParser.PATTERN - 128)) | (1 << (SqlBaseParser.PER - 128)) | (1 << (SqlBaseParser.PERMUTE - 128)) | (1 << (SqlBaseParser.POSITION - 128)) | (1 << (SqlBaseParser.PRECEDING - 128)) | (1 << (SqlBaseParser.PRECISION - 128)) | (1 << (SqlBaseParser.PRIVILEGES - 128)) | (1 << (SqlBaseParser.PROPERTIES - 128)) | (1 << (SqlBaseParser.RANGE - 128)) | (1 << (SqlBaseParser.READ - 128)) | (1 << (SqlBaseParser.REFRESH - 128)) | (1 << (SqlBaseParser.RENAME - 128)) | (1 << (SqlBaseParser.REPEATABLE - 128)) | (1 << (SqlBaseParser.REPLACE - 128)) | (1 << (SqlBaseParser.RESET - 128)) | (1 << (SqlBaseParser.RESPECT - 128)) | (1 << (SqlBaseParser.RESTRICT - 128)) | (1 << (SqlBaseParser.REVOKE - 128)))) != 0) or ((((_la - 192)) & ~0x3f) == 0 and ((1 << (_la - 192)) & ((1 << (SqlBaseParser.ROLE - 192)) | (1 << (SqlBaseParser.ROLES - 192)) | (1 << (SqlBaseParser.ROLLBACK - 192)) | (1 << (SqlBaseParser.ROW - 192)) | (1 << (SqlBaseParser.ROWS - 192)) | (1 << (SqlBaseParser.RUNNING - 192)) | (1 << (SqlBaseParser.SCHEMA - 192)) | (1 << (SqlBaseParser.SCHEMAS - 192)) | (1 << (SqlBaseParser.SECOND - 192)) | (1 << (SqlBaseParser.SECURITY - 192)) | (1 << (SqlBaseParser.SEEK - 192)) | (1 << (SqlBaseParser.SERIALIZABLE - 192)) | (1 << (SqlBaseParser.SESSION - 192)) | (1 << (SqlBaseParser.SET - 192)) | (1 << (SqlBaseParser.SETS - 192)) | (1 << (SqlBaseParser.SHOW - 192)) | (1 << (SqlBaseParser.SOME - 192)) | (1 << (SqlBaseParser.START - 192)) | (1 << (SqlBaseParser.STATS - 192)) | (1 << (SqlBaseParser.SUBSET - 192)) | (1 << (SqlBaseParser.SUBSTRING - 192)) | (1 << (SqlBaseParser.SYSTEM - 192)) | (1 << (SqlBaseParser.TABLES - 192)) | (1 << (SqlBaseParser.TABLESAMPLE - 192)) | (1 << (SqlBaseParser.TEXT - 192)) | (1 << (SqlBaseParser.TIES - 192)) | (1 << (SqlBaseParser.TIME - 192)) | (1 << (SqlBaseParser.TIMESTAMP - 192)) | (1 << (SqlBaseParser.TO - 192)) | (1 << (SqlBaseParser.TRANSACTION - 192)) | (1 << (SqlBaseParser.TRUE - 192)) | (1 << (SqlBaseParser.TRUNCATE - 192)) | (1 << (SqlBaseParser.TRY_CAST - 192)) | (1 << (SqlBaseParser.TYPE - 192)) | (1 << (SqlBaseParser.UNBOUNDED - 192)) | (1 << (SqlBaseParser.UNCOMMITTED - 192)) | (1 << (SqlBaseParser.UNMATCHED - 192)) | (1 << (SqlBaseParser.UPDATE - 192)) | (1 << (SqlBaseParser.USE - 192)) | (1 << (SqlBaseParser.USER - 192)) | (1 << (SqlBaseParser.VALIDATE - 192)) | (1 << (SqlBaseParser.VERBOSE - 192)) | (1 << (SqlBaseParser.VERSION - 192)) | (1 << (SqlBaseParser.VIEW - 192)) | (1 << (SqlBaseParser.WINDOW - 192)) | (1 << (SqlBaseParser.WITHIN - 192)) | (1 << (SqlBaseParser.WITHOUT - 192)) | (1 << (SqlBaseParser.WORK - 192)) | (1 << (SqlBaseParser.WRITE - 192)) | (1 << (SqlBaseParser.YEAR - 192)) | (1 << (SqlBaseParser.ZONE - 192)))) != 0) or ((((_la - 261)) & ~0x3f) == 0 and ((1 << (_la - 261)) & ((1 << (SqlBaseParser.PLUS - 261)) | (1 << (SqlBaseParser.MINUS - 261)) | (1 << (SqlBaseParser.QUESTION_MARK - 261)) | (1 << (SqlBaseParser.STRING - 261)) | (1 << (SqlBaseParser.UNICODE_STRING - 261)) | (1 << (SqlBaseParser.BINARY_LITERAL - 261)) | (1 << (SqlBaseParser.INTEGER_VALUE - 261)) | (1 << (SqlBaseParser.DECIMAL_VALUE - 261)) | (1 << (SqlBaseParser.DOUBLE_VALUE - 261)) | (1 << (SqlBaseParser.IDENTIFIER - 261)) | (1 << (SqlBaseParser.DIGIT_IDENTIFIER - 261)) | (1 << (SqlBaseParser.QUOTED_IDENTIFIER - 261)) | (1 << (SqlBaseParser.BACKQUOTED_IDENTIFIER - 261)))) != 0):
                self.state = 1160
                self.expression()
                self.state = 1165
                self._errHandler.sync(self)
                _la = self._input.LA(1)
                # Zero or more further ','-separated expressions.
                while _la==SqlBaseParser.T__3:
                    self.state = 1161
                    self.match(SqlBaseParser.T__3)
                    self.state = 1162
                    self.expression()
                    self.state = 1167
                    self._errHandler.sync(self)
                    _la = self._input.LA(1)


            self.state = 1170
            self.match(SqlBaseParser.T__2)

        elif la_ == 3:
            # Alternative 3: CUBE with an optional expression list.
            localctx = SqlBaseParser.CubeContext(self, localctx)
            self.enterOuterAlt(localctx, 3)
            self.state = 1171
            self.match(SqlBaseParser.CUBE)
            self.state = 1172
            self.match(SqlBaseParser.T__1)
            self.state = 1181
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            # Same optional expression-list guard as the ROLLUP branch.
            if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << SqlBaseParser.T__1) | (1 << SqlBaseParser.ADD) | (1 << SqlBaseParser.ADMIN) | (1 << SqlBaseParser.AFTER) | (1 << SqlBaseParser.ALL) | (1 << SqlBaseParser.ANALYZE) | (1 << SqlBaseParser.ANY) | (1 << SqlBaseParser.ARRAY) | (1 << SqlBaseParser.ASC) | (1 << SqlBaseParser.AT) | (1 << SqlBaseParser.AUTHORIZATION) | (1 << SqlBaseParser.BERNOULLI) | (1 << SqlBaseParser.CALL) | (1 << SqlBaseParser.CASCADE) | (1 << SqlBaseParser.CASE) | (1 << SqlBaseParser.CAST) | (1 << SqlBaseParser.CATALOGS) | (1 << SqlBaseParser.COLUMN) | (1 << SqlBaseParser.COLUMNS) | (1 << SqlBaseParser.COMMENT) | (1 << SqlBaseParser.COMMIT) | (1 << SqlBaseParser.COMMITTED) | (1 << SqlBaseParser.COUNT) | (1 << SqlBaseParser.CURRENT) | (1 << SqlBaseParser.CURRENT_CATALOG) | (1 << SqlBaseParser.CURRENT_DATE) | (1 << SqlBaseParser.CURRENT_PATH) | (1 << SqlBaseParser.CURRENT_SCHEMA) | (1 << SqlBaseParser.CURRENT_TIME) | (1 << SqlBaseParser.CURRENT_TIMESTAMP) | (1 << SqlBaseParser.CURRENT_USER) | (1 << SqlBaseParser.DATA) | (1 << SqlBaseParser.DATE) | (1 << SqlBaseParser.DAY) | (1 << SqlBaseParser.DEFINER) | (1 << SqlBaseParser.DENY))) != 0) or ((((_la - 64)) & ~0x3f) == 0 and ((1 << (_la - 64)) & ((1 << (SqlBaseParser.DESC - 64)) | (1 << (SqlBaseParser.DEFINE - 64)) | (1 << (SqlBaseParser.DISTRIBUTED - 64)) | (1 << (SqlBaseParser.DOUBLE - 64)) | (1 << (SqlBaseParser.EMPTY - 64)) | (1 << (SqlBaseParser.ERROR - 64)) | (1 << (SqlBaseParser.EXCLUDING - 64)) | (1 << (SqlBaseParser.EXISTS - 64)) | (1 << (SqlBaseParser.EXPLAIN - 64)) | (1 << (SqlBaseParser.EXTRACT - 64)) | (1 << (SqlBaseParser.FALSE - 64)) | (1 << (SqlBaseParser.FETCH - 64)) | (1 << (SqlBaseParser.FILTER - 64)) | (1 << (SqlBaseParser.FINAL - 64)) | (1 << (SqlBaseParser.FIRST - 64)) | (1 << (SqlBaseParser.FOLLOWING - 64)) | (1 << (SqlBaseParser.FORMAT - 64)) | (1 << (SqlBaseParser.FUNCTIONS - 64)) | (1 << (SqlBaseParser.GRANT - 64)) | (1 << (SqlBaseParser.GRANTED - 64)) | (1 << (SqlBaseParser.GRANTS - 64)) | (1 << (SqlBaseParser.GRAPHVIZ - 64)) | (1 << (SqlBaseParser.GROUPING - 64)) | (1 << (SqlBaseParser.GROUPS - 64)) | (1 << (SqlBaseParser.HOUR - 64)) | (1 << (SqlBaseParser.IF - 64)) | (1 << (SqlBaseParser.IGNORE - 64)) | (1 << (SqlBaseParser.INCLUDING - 64)) | (1 << (SqlBaseParser.INITIAL - 64)) | (1 << (SqlBaseParser.INPUT - 64)) | (1 << (SqlBaseParser.INTERVAL - 64)) | (1 << (SqlBaseParser.INVOKER - 64)) | (1 << (SqlBaseParser.IO - 64)) | (1 << (SqlBaseParser.ISOLATION - 64)) | (1 << (SqlBaseParser.JSON - 64)) | (1 << (SqlBaseParser.LAST - 64)) | (1 << (SqlBaseParser.LATERAL - 64)) | (1 << (SqlBaseParser.LEVEL - 64)) | (1 << (SqlBaseParser.LIMIT - 64)) | (1 << (SqlBaseParser.LISTAGG - 64)) | (1 << (SqlBaseParser.LOCAL - 64)) | (1 << (SqlBaseParser.LOCALTIME - 64)))) != 0) or ((((_la - 128)) & ~0x3f) == 0 and ((1 << (_la - 128)) & ((1 << (SqlBaseParser.LOCALTIMESTAMP - 128)) | (1 << (SqlBaseParser.LOGICAL - 128)) | (1 << (SqlBaseParser.MAP - 128)) | (1 << (SqlBaseParser.MATCH - 128)) | (1 << (SqlBaseParser.MATCHED - 128)) | (1 << (SqlBaseParser.MATCHES - 128)) | (1 << (SqlBaseParser.MATCH_RECOGNIZE - 128)) | (1 << (SqlBaseParser.MATERIALIZED - 128)) | (1 << (SqlBaseParser.MEASURES - 128)) | (1 << (SqlBaseParser.MERGE - 128)) | (1 << (SqlBaseParser.MINUTE - 128)) | (1 << (SqlBaseParser.MONTH - 128)) | (1 << (SqlBaseParser.NEXT - 128)) | (1 << (SqlBaseParser.NFC - 128)) | (1 << (SqlBaseParser.NFD - 128)) | (1 << (SqlBaseParser.NFKC - 128)) | (1 << (SqlBaseParser.NFKD - 128)) | (1 << (SqlBaseParser.NO - 128)) | (1 << (SqlBaseParser.NONE - 128)) | (1 << (SqlBaseParser.NORMALIZE - 128)) | (1 << (SqlBaseParser.NOT - 128)) | (1 << (SqlBaseParser.NULL - 128)) | (1 << (SqlBaseParser.NULLIF - 128)) | (1 << (SqlBaseParser.NULLS - 128)) | (1 << (SqlBaseParser.OFFSET - 128)) | (1 << (SqlBaseParser.OMIT - 128)) | (1 << (SqlBaseParser.OF - 128)) | (1 << (SqlBaseParser.ONE - 128)) | (1 << (SqlBaseParser.ONLY - 128)) | (1 << (SqlBaseParser.OPTION - 128)) | (1 << (SqlBaseParser.ORDINALITY - 128)) | (1 << (SqlBaseParser.OUTPUT - 128)) | (1 << (SqlBaseParser.OVER - 128)) | (1 << (SqlBaseParser.OVERFLOW - 128)) | (1 << (SqlBaseParser.PARTITION - 128)) | (1 << (SqlBaseParser.PARTITIONS - 128)) | (1 << (SqlBaseParser.PAST - 128)) | (1 << (SqlBaseParser.PATH - 128)) | (1 << (SqlBaseParser.PATTERN - 128)) | (1 << (SqlBaseParser.PER - 128)) | (1 << (SqlBaseParser.PERMUTE - 128)) | (1 << (SqlBaseParser.POSITION - 128)) | (1 << (SqlBaseParser.PRECEDING - 128)) | (1 << (SqlBaseParser.PRECISION - 128)) | (1 << (SqlBaseParser.PRIVILEGES - 128)) | (1 << (SqlBaseParser.PROPERTIES - 128)) | (1 << (SqlBaseParser.RANGE - 128)) | (1 << (SqlBaseParser.READ - 128)) | (1 << (SqlBaseParser.REFRESH - 128)) | (1 << (SqlBaseParser.RENAME - 128)) | (1 << (SqlBaseParser.REPEATABLE - 128)) | (1 << (SqlBaseParser.REPLACE - 128)) | (1 << (SqlBaseParser.RESET - 128)) | (1 << (SqlBaseParser.RESPECT - 128)) | (1 << (SqlBaseParser.RESTRICT - 128)) | (1 << (SqlBaseParser.REVOKE - 128)))) != 0) or ((((_la - 192)) & ~0x3f) == 0 and ((1 << (_la - 192)) & ((1 << (SqlBaseParser.ROLE - 192)) | (1 << (SqlBaseParser.ROLES - 192)) | (1 << (SqlBaseParser.ROLLBACK - 192)) | (1 << (SqlBaseParser.ROW - 192)) | (1 << (SqlBaseParser.ROWS - 192)) | (1 << (SqlBaseParser.RUNNING - 192)) | (1 << (SqlBaseParser.SCHEMA - 192)) | (1 << (SqlBaseParser.SCHEMAS - 192)) | (1 << (SqlBaseParser.SECOND - 192)) | (1 << (SqlBaseParser.SECURITY - 192)) | (1 << (SqlBaseParser.SEEK - 192)) | (1 << (SqlBaseParser.SERIALIZABLE - 192)) | (1 << (SqlBaseParser.SESSION - 192)) | (1 << (SqlBaseParser.SET - 192)) | (1 << (SqlBaseParser.SETS - 192)) | (1 << (SqlBaseParser.SHOW - 192)) | (1 << (SqlBaseParser.SOME - 192)) | (1 << (SqlBaseParser.START - 192)) | (1 << (SqlBaseParser.STATS - 192)) | (1 << (SqlBaseParser.SUBSET - 192)) | (1 << (SqlBaseParser.SUBSTRING - 192)) | (1 << (SqlBaseParser.SYSTEM - 192)) | (1 << (SqlBaseParser.TABLES - 192)) | (1 << (SqlBaseParser.TABLESAMPLE - 192)) | (1 << (SqlBaseParser.TEXT - 192)) | (1 << (SqlBaseParser.TIES - 192)) | (1 << (SqlBaseParser.TIME - 192)) | (1 << (SqlBaseParser.TIMESTAMP - 192)) | (1 << (SqlBaseParser.TO - 192)) | (1 << (SqlBaseParser.TRANSACTION - 192)) | (1 << (SqlBaseParser.TRUE - 192)) | (1 << (SqlBaseParser.TRUNCATE - 192)) | (1 << (SqlBaseParser.TRY_CAST - 192)) | (1 << (SqlBaseParser.TYPE - 192)) | (1 << (SqlBaseParser.UNBOUNDED - 192)) | (1 << (SqlBaseParser.UNCOMMITTED - 192)) | (1 << (SqlBaseParser.UNMATCHED - 192)) | (1 << (SqlBaseParser.UPDATE - 192)) | (1 << (SqlBaseParser.USE - 192)) | (1 << (SqlBaseParser.USER - 192)) | (1 << (SqlBaseParser.VALIDATE - 192)) | (1 << (SqlBaseParser.VERBOSE - 192)) | (1 << (SqlBaseParser.VERSION - 192)) | (1 << (SqlBaseParser.VIEW - 192)) | (1 << (SqlBaseParser.WINDOW - 192)) | (1 << (SqlBaseParser.WITHIN - 192)) | (1 << (SqlBaseParser.WITHOUT - 192)) | (1 << (SqlBaseParser.WORK - 192)) | (1 << (SqlBaseParser.WRITE - 192)) | (1 << (SqlBaseParser.YEAR - 192)) | (1 << (SqlBaseParser.ZONE - 192)))) != 0) or ((((_la - 261)) & ~0x3f) == 0 and ((1 << (_la - 261)) & ((1 << (SqlBaseParser.PLUS - 261)) | (1 << (SqlBaseParser.MINUS - 261)) | (1 << (SqlBaseParser.QUESTION_MARK - 261)) | (1 << (SqlBaseParser.STRING - 261)) | (1 << (SqlBaseParser.UNICODE_STRING - 261)) | (1 << (SqlBaseParser.BINARY_LITERAL - 261)) | (1 << (SqlBaseParser.INTEGER_VALUE - 261)) | (1 << (SqlBaseParser.DECIMAL_VALUE - 261)) | (1 << (SqlBaseParser.DOUBLE_VALUE - 261)) | (1 << (SqlBaseParser.IDENTIFIER - 261)) | (1 << (SqlBaseParser.DIGIT_IDENTIFIER - 261)) | (1 << (SqlBaseParser.QUOTED_IDENTIFIER - 261)) | (1 << (SqlBaseParser.BACKQUOTED_IDENTIFIER - 261)))) != 0):
                self.state = 1173
                self.expression()
                self.state = 1178
                self._errHandler.sync(self)
                _la = self._input.LA(1)
                # Zero or more further ','-separated expressions.
                while _la==SqlBaseParser.T__3:
                    self.state = 1174
                    self.match(SqlBaseParser.T__3)
                    self.state = 1175
                    self.expression()
                    self.state = 1180
                    self._errHandler.sync(self)
                    _la = self._input.LA(1)


            self.state = 1183
            self.match(SqlBaseParser.T__2)

        elif la_ == 4:
            # Alternative 4: GROUPING SETS with one or more grouping sets.
            localctx = SqlBaseParser.MultipleGroupingSetsContext(self, localctx)
            self.enterOuterAlt(localctx, 4)
            self.state = 1184
            self.match(SqlBaseParser.GROUPING)
            self.state = 1185
            self.match(SqlBaseParser.SETS)
            self.state = 1186
            self.match(SqlBaseParser.T__1)
            self.state = 1187
            self.groupingSet()
            self.state = 1192
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            # Zero or more further ','-separated grouping sets.
            while _la==SqlBaseParser.T__3:
                self.state = 1188
                self.match(SqlBaseParser.T__3)
                self.state = 1189
                self.groupingSet()
                self.state = 1194
                self._errHandler.sync(self)
                _la = self._input.LA(1)

            self.state = 1195
            self.match(SqlBaseParser.T__2)


    except RecognitionException as re:
        # Standard ANTLR recovery: record the error on the context and resync.
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class GroupingSetContext(ParserRuleContext):
    """Parse-tree node for the groupingSet rule: either a parenthesised
    (possibly empty) expression list or a single bare expression.
    """

    __slots__ = 'parser'

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def expression(self, i:int=None):
        # With no index, return every expression child; otherwise the i-th.
        if i is not None:
            return self.getTypedRuleContext(SqlBaseParser.ExpressionContext, i)
        return self.getTypedRuleContexts(SqlBaseParser.ExpressionContext)

    def getRuleIndex(self):
        return SqlBaseParser.RULE_groupingSet

    def enterRule(self, listener:ParseTreeListener):
        enter = getattr(listener, "enterGroupingSet", None)
        if enter is not None:
            enter(self)

    def exitRule(self, listener:ParseTreeListener):
        leave = getattr(listener, "exitGroupingSet", None)
        if leave is not None:
            leave(self)

    def accept(self, visitor:ParseTreeVisitor):
        if not hasattr(visitor, "visitGroupingSet"):
            return visitor.visitChildren(self)
        return visitor.visitGroupingSet(self)
def groupingSet(self):
    """Parse rule groupingSet (rule index 46).

    Two alternatives, chosen by adaptive prediction (decision 149):
      1. '(' (expression (',' expression)*)? ')'  -- possibly empty list
      2. expression                               -- a single bare expression
    (T__1/T__2/T__3 are the grammar's unnamed literal tokens — presumably
    '(' / ')' / ',' — TODO confirm against the generated literalNames table.)

    Returns the populated GroupingSetContext; on a RecognitionException the
    error is reported/recovered and the context is returned with .exception set.
    """
    localctx = SqlBaseParser.GroupingSetContext(self, self._ctx, self.state)
    self.enterRule(localctx, 46, self.RULE_groupingSet)
    self._la = 0 # Token type
    try:
        self.state = 1212
        self._errHandler.sync(self)
        la_ = self._interp.adaptivePredict(self._input,149,self._ctx)
        if la_ == 1:
            # Alternative 1: parenthesised, possibly empty, expression list.
            self.enterOuterAlt(localctx, 1)
            self.state = 1199
            self.match(SqlBaseParser.T__1)
            self.state = 1208
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            # Parse the expression list only when the lookahead token can
            # start an expression (the bitmask encodes that FIRST set in
            # 64-token windows).
            if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << SqlBaseParser.T__1) | (1 << SqlBaseParser.ADD) | (1 << SqlBaseParser.ADMIN) | (1 << SqlBaseParser.AFTER) | (1 << SqlBaseParser.ALL) | (1 << SqlBaseParser.ANALYZE) | (1 << SqlBaseParser.ANY) | (1 << SqlBaseParser.ARRAY) | (1 << SqlBaseParser.ASC) | (1 << SqlBaseParser.AT) | (1 << SqlBaseParser.AUTHORIZATION) | (1 << SqlBaseParser.BERNOULLI) | (1 << SqlBaseParser.CALL) | (1 << SqlBaseParser.CASCADE) | (1 << SqlBaseParser.CASE) | (1 << SqlBaseParser.CAST) | (1 << SqlBaseParser.CATALOGS) | (1 << SqlBaseParser.COLUMN) | (1 << SqlBaseParser.COLUMNS) | (1 << SqlBaseParser.COMMENT) | (1 << SqlBaseParser.COMMIT) | (1 << SqlBaseParser.COMMITTED) | (1 << SqlBaseParser.COUNT) | (1 << SqlBaseParser.CURRENT) | (1 << SqlBaseParser.CURRENT_CATALOG) | (1 << SqlBaseParser.CURRENT_DATE) | (1 << SqlBaseParser.CURRENT_PATH) | (1 << SqlBaseParser.CURRENT_SCHEMA) | (1 << SqlBaseParser.CURRENT_TIME) | (1 << SqlBaseParser.CURRENT_TIMESTAMP) | (1 << SqlBaseParser.CURRENT_USER) | (1 << SqlBaseParser.DATA) | (1 << SqlBaseParser.DATE) | (1 << SqlBaseParser.DAY) | (1 << SqlBaseParser.DEFINER) | (1 << SqlBaseParser.DENY))) != 0) or ((((_la - 64)) & ~0x3f) == 0 and ((1 << (_la - 64)) & ((1 << (SqlBaseParser.DESC - 64)) | (1 << (SqlBaseParser.DEFINE - 64)) | (1 << (SqlBaseParser.DISTRIBUTED - 64)) | (1 << (SqlBaseParser.DOUBLE - 64)) | (1 << (SqlBaseParser.EMPTY - 64)) | (1 << (SqlBaseParser.ERROR - 64)) | (1 << (SqlBaseParser.EXCLUDING - 64)) | (1 << (SqlBaseParser.EXISTS - 64)) | (1 << (SqlBaseParser.EXPLAIN - 64)) | (1 << (SqlBaseParser.EXTRACT - 64)) | (1 << (SqlBaseParser.FALSE - 64)) | (1 << (SqlBaseParser.FETCH - 64)) | (1 << (SqlBaseParser.FILTER - 64)) | (1 << (SqlBaseParser.FINAL - 64)) | (1 << (SqlBaseParser.FIRST - 64)) | (1 << (SqlBaseParser.FOLLOWING - 64)) | (1 << (SqlBaseParser.FORMAT - 64)) | (1 << (SqlBaseParser.FUNCTIONS - 64)) | (1 << (SqlBaseParser.GRANT - 64)) | (1 << (SqlBaseParser.GRANTED - 64)) | (1 << (SqlBaseParser.GRANTS - 64)) | (1 << (SqlBaseParser.GRAPHVIZ - 64)) | (1 << (SqlBaseParser.GROUPING - 64)) | (1 << (SqlBaseParser.GROUPS - 64)) | (1 << (SqlBaseParser.HOUR - 64)) | (1 << (SqlBaseParser.IF - 64)) | (1 << (SqlBaseParser.IGNORE - 64)) | (1 << (SqlBaseParser.INCLUDING - 64)) | (1 << (SqlBaseParser.INITIAL - 64)) | (1 << (SqlBaseParser.INPUT - 64)) | (1 << (SqlBaseParser.INTERVAL - 64)) | (1 << (SqlBaseParser.INVOKER - 64)) | (1 << (SqlBaseParser.IO - 64)) | (1 << (SqlBaseParser.ISOLATION - 64)) | (1 << (SqlBaseParser.JSON - 64)) | (1 << (SqlBaseParser.LAST - 64)) | (1 << (SqlBaseParser.LATERAL - 64)) | (1 << (SqlBaseParser.LEVEL - 64)) | (1 << (SqlBaseParser.LIMIT - 64)) | (1 << (SqlBaseParser.LISTAGG - 64)) | (1 << (SqlBaseParser.LOCAL - 64)) | (1 << (SqlBaseParser.LOCALTIME - 64)))) != 0) or ((((_la - 128)) & ~0x3f) == 0 and ((1 << (_la - 128)) & ((1 << (SqlBaseParser.LOCALTIMESTAMP - 128)) | (1 << (SqlBaseParser.LOGICAL - 128)) | (1 << (SqlBaseParser.MAP - 128)) | (1 << (SqlBaseParser.MATCH - 128)) | (1 << (SqlBaseParser.MATCHED - 128)) | (1 << (SqlBaseParser.MATCHES - 128)) | (1 << (SqlBaseParser.MATCH_RECOGNIZE - 128)) | (1 << (SqlBaseParser.MATERIALIZED - 128)) | (1 << (SqlBaseParser.MEASURES - 128)) | (1 << (SqlBaseParser.MERGE - 128)) | (1 << (SqlBaseParser.MINUTE - 128)) | (1 << (SqlBaseParser.MONTH - 128)) | (1 << (SqlBaseParser.NEXT - 128)) | (1 << (SqlBaseParser.NFC - 128)) | (1 << (SqlBaseParser.NFD - 128)) | (1 << (SqlBaseParser.NFKC - 128)) | (1 << (SqlBaseParser.NFKD - 128)) | (1 << (SqlBaseParser.NO - 128)) | (1 << (SqlBaseParser.NONE - 128)) | (1 << (SqlBaseParser.NORMALIZE - 128)) | (1 << (SqlBaseParser.NOT - 128)) | (1 << (SqlBaseParser.NULL - 128)) | (1 << (SqlBaseParser.NULLIF - 128)) | (1 << (SqlBaseParser.NULLS - 128)) | (1 << (SqlBaseParser.OFFSET - 128)) | (1 << (SqlBaseParser.OMIT - 128)) | (1 << (SqlBaseParser.OF - 128)) | (1 << (SqlBaseParser.ONE - 128)) | (1 << (SqlBaseParser.ONLY - 128)) | (1 << (SqlBaseParser.OPTION - 128)) | (1 << (SqlBaseParser.ORDINALITY - 128)) | (1 << (SqlBaseParser.OUTPUT - 128)) | (1 << (SqlBaseParser.OVER - 128)) | (1 << (SqlBaseParser.OVERFLOW - 128)) | (1 << (SqlBaseParser.PARTITION - 128)) | (1 << (SqlBaseParser.PARTITIONS - 128)) | (1 << (SqlBaseParser.PAST - 128)) | (1 << (SqlBaseParser.PATH - 128)) | (1 << (SqlBaseParser.PATTERN - 128)) | (1 << (SqlBaseParser.PER - 128)) | (1 << (SqlBaseParser.PERMUTE - 128)) | (1 << (SqlBaseParser.POSITION - 128)) | (1 << (SqlBaseParser.PRECEDING - 128)) | (1 << (SqlBaseParser.PRECISION - 128)) | (1 << (SqlBaseParser.PRIVILEGES - 128)) | (1 << (SqlBaseParser.PROPERTIES - 128)) | (1 << (SqlBaseParser.RANGE - 128)) | (1 << (SqlBaseParser.READ - 128)) | (1 << (SqlBaseParser.REFRESH - 128)) | (1 << (SqlBaseParser.RENAME - 128)) | (1 << (SqlBaseParser.REPEATABLE - 128)) | (1 << (SqlBaseParser.REPLACE - 128)) | (1 << (SqlBaseParser.RESET - 128)) | (1 << (SqlBaseParser.RESPECT - 128)) | (1 << (SqlBaseParser.RESTRICT - 128)) | (1 << (SqlBaseParser.REVOKE - 128)))) != 0) or ((((_la - 192)) & ~0x3f) == 0 and ((1 << (_la - 192)) & ((1 << (SqlBaseParser.ROLE - 192)) | (1 << (SqlBaseParser.ROLES - 192)) | (1 << (SqlBaseParser.ROLLBACK - 192)) | (1 << (SqlBaseParser.ROW - 192)) | (1 << (SqlBaseParser.ROWS - 192)) | (1 << (SqlBaseParser.RUNNING - 192)) | (1 << (SqlBaseParser.SCHEMA - 192)) | (1 << (SqlBaseParser.SCHEMAS - 192)) | (1 << (SqlBaseParser.SECOND - 192)) | (1 << (SqlBaseParser.SECURITY - 192)) | (1 << (SqlBaseParser.SEEK - 192)) | (1 << (SqlBaseParser.SERIALIZABLE - 192)) | (1 << (SqlBaseParser.SESSION - 192)) | (1 << (SqlBaseParser.SET - 192)) | (1 << (SqlBaseParser.SETS - 192)) | (1 << (SqlBaseParser.SHOW - 192)) | (1 << (SqlBaseParser.SOME - 192)) | (1 << (SqlBaseParser.START - 192)) | (1 << (SqlBaseParser.STATS - 192)) | (1 << (SqlBaseParser.SUBSET - 192)) | (1 << (SqlBaseParser.SUBSTRING - 192)) | (1 << (SqlBaseParser.SYSTEM - 192)) | (1 << (SqlBaseParser.TABLES - 192)) | (1 << (SqlBaseParser.TABLESAMPLE - 192)) | (1 << (SqlBaseParser.TEXT - 192)) | (1 << (SqlBaseParser.TIES - 192)) | (1 << (SqlBaseParser.TIME - 192)) | (1 << (SqlBaseParser.TIMESTAMP - 192)) | (1 << (SqlBaseParser.TO - 192)) | (1 << (SqlBaseParser.TRANSACTION - 192)) | (1 << (SqlBaseParser.TRUE - 192)) | (1 << (SqlBaseParser.TRUNCATE - 192)) | (1 << (SqlBaseParser.TRY_CAST - 192)) | (1 << (SqlBaseParser.TYPE - 192)) | (1 << (SqlBaseParser.UNBOUNDED - 192)) | (1 << (SqlBaseParser.UNCOMMITTED - 192)) | (1 << (SqlBaseParser.UNMATCHED - 192)) | (1 << (SqlBaseParser.UPDATE - 192)) | (1 << (SqlBaseParser.USE - 192)) | (1 << (SqlBaseParser.USER - 192)) | (1 << (SqlBaseParser.VALIDATE - 192)) | (1 << (SqlBaseParser.VERBOSE - 192)) | (1 << (SqlBaseParser.VERSION - 192)) | (1 << (SqlBaseParser.VIEW - 192)) | (1 << (SqlBaseParser.WINDOW - 192)) | (1 << (SqlBaseParser.WITHIN - 192)) | (1 << (SqlBaseParser.WITHOUT - 192)) | (1 << (SqlBaseParser.WORK - 192)) | (1 << (SqlBaseParser.WRITE - 192)) | (1 << (SqlBaseParser.YEAR - 192)) | (1 << (SqlBaseParser.ZONE - 192)))) != 0) or ((((_la - 261)) & ~0x3f) == 0 and ((1 << (_la - 261)) & ((1 << (SqlBaseParser.PLUS - 261)) | (1 << (SqlBaseParser.MINUS - 261)) | (1 << (SqlBaseParser.QUESTION_MARK - 261)) | (1 << (SqlBaseParser.STRING - 261)) | (1 << (SqlBaseParser.UNICODE_STRING - 261)) | (1 << (SqlBaseParser.BINARY_LITERAL - 261)) | (1 << (SqlBaseParser.INTEGER_VALUE - 261)) | (1 << (SqlBaseParser.DECIMAL_VALUE - 261)) | (1 << (SqlBaseParser.DOUBLE_VALUE - 261)) | (1 << (SqlBaseParser.IDENTIFIER - 261)) | (1 << (SqlBaseParser.DIGIT_IDENTIFIER - 261)) | (1 << (SqlBaseParser.QUOTED_IDENTIFIER - 261)) | (1 << (SqlBaseParser.BACKQUOTED_IDENTIFIER - 261)))) != 0):
                self.state = 1200
                self.expression()
                self.state = 1205
                self._errHandler.sync(self)
                _la = self._input.LA(1)
                # Zero or more further ','-separated expressions.
                while _la==SqlBaseParser.T__3:
                    self.state = 1201
                    self.match(SqlBaseParser.T__3)
                    self.state = 1202
                    self.expression()
                    self.state = 1207
                    self._errHandler.sync(self)
                    _la = self._input.LA(1)


            self.state = 1210
            self.match(SqlBaseParser.T__2)

        elif la_ == 2:
            # Alternative 2: a single bare expression.
            self.enterOuterAlt(localctx, 2)
            self.state = 1211
            self.expression()

    except RecognitionException as re:
        # Standard ANTLR recovery: record the error on the context and resync.
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class WindowDefinitionContext(ParserRuleContext):
    """Parse-tree node for windowDefinition:
    name=identifier AS '(' windowSpecification ')'.
    """

    __slots__ = 'parser'

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser
        self.name = None # IdentifierContext -- the grammar's 'name' label

    def AS(self):
        return self.getToken(SqlBaseParser.AS, 0)

    def windowSpecification(self):
        return self.getTypedRuleContext(SqlBaseParser.WindowSpecificationContext, 0)

    def identifier(self):
        return self.getTypedRuleContext(SqlBaseParser.IdentifierContext, 0)

    def getRuleIndex(self):
        return SqlBaseParser.RULE_windowDefinition

    def enterRule(self, listener:ParseTreeListener):
        enter = getattr(listener, "enterWindowDefinition", None)
        if enter is not None:
            enter(self)

    def exitRule(self, listener:ParseTreeListener):
        leave = getattr(listener, "exitWindowDefinition", None)
        if leave is not None:
            leave(self)

    def accept(self, visitor:ParseTreeVisitor):
        if not hasattr(visitor, "visitWindowDefinition"):
            return visitor.visitChildren(self)
        return visitor.visitWindowDefinition(self)
def windowDefinition(self):
    """Parse rule windowDefinition (rule index 48):
    name=identifier AS '(' windowSpecification ')'.
    (T__1/T__2 are unnamed literal tokens — presumably '(' and ')'.)

    Returns the populated WindowDefinitionContext; on a RecognitionException
    the error is reported/recovered and the context is returned with
    .exception set.
    """
    localctx = SqlBaseParser.WindowDefinitionContext(self, self._ctx, self.state)
    self.enterRule(localctx, 48, self.RULE_windowDefinition)
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 1214
        # The window's name, stored on the labelled 'name' field.
        localctx.name = self.identifier()
        self.state = 1215
        self.match(SqlBaseParser.AS)
        self.state = 1216
        self.match(SqlBaseParser.T__1)
        self.state = 1217
        self.windowSpecification()
        self.state = 1218
        self.match(SqlBaseParser.T__2)
    except RecognitionException as re:
        # Standard ANTLR recovery: record the error on the context and resync.
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class WindowSpecificationContext(ParserRuleContext):
    """Parse-tree node for windowSpecification:
    (existingWindowName=identifier)?
    (PARTITION BY expression (',' expression)*)?
    (ORDER BY sortItem (',' sortItem)*)?
    windowFrame?
    """

    __slots__ = 'parser'

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser
        self.existingWindowName = None # IdentifierContext
        self._expression = None # ExpressionContext
        self.partition = list() # of ExpressionContexts

    def PARTITION(self):
        return self.getToken(SqlBaseParser.PARTITION, 0)

    def BY(self, i:int=None):
        # BY may appear twice (PARTITION BY / ORDER BY); return all or the i-th.
        if i is not None:
            return self.getToken(SqlBaseParser.BY, i)
        return self.getTokens(SqlBaseParser.BY)

    def ORDER(self):
        return self.getToken(SqlBaseParser.ORDER, 0)

    def sortItem(self, i:int=None):
        if i is not None:
            return self.getTypedRuleContext(SqlBaseParser.SortItemContext, i)
        return self.getTypedRuleContexts(SqlBaseParser.SortItemContext)

    def windowFrame(self):
        return self.getTypedRuleContext(SqlBaseParser.WindowFrameContext, 0)

    def identifier(self):
        return self.getTypedRuleContext(SqlBaseParser.IdentifierContext, 0)

    def expression(self, i:int=None):
        if i is not None:
            return self.getTypedRuleContext(SqlBaseParser.ExpressionContext, i)
        return self.getTypedRuleContexts(SqlBaseParser.ExpressionContext)

    def getRuleIndex(self):
        return SqlBaseParser.RULE_windowSpecification

    def enterRule(self, listener:ParseTreeListener):
        enter = getattr(listener, "enterWindowSpecification", None)
        if enter is not None:
            enter(self)

    def exitRule(self, listener:ParseTreeListener):
        leave = getattr(listener, "exitWindowSpecification", None)
        if leave is not None:
            leave(self)

    def accept(self, visitor:ParseTreeVisitor):
        if not hasattr(visitor, "visitWindowSpecification"):
            return visitor.visitChildren(self)
        return visitor.visitWindowSpecification(self)
def windowSpecification(self):
    """Parse rule windowSpecification (rule index 50):
    (existingWindowName=identifier)?
    (PARTITION BY expression (',' expression)*)?
    (ORDER BY sortItem (',' sortItem)*)?
    windowFrame?
    Every component is optional. Parsed PARTITION BY expressions are
    collected into localctx.partition.

    Returns the populated WindowSpecificationContext; on a
    RecognitionException the error is reported/recovered and the context is
    returned with .exception set.
    """
    localctx = SqlBaseParser.WindowSpecificationContext(self, self._ctx, self.state)
    self.enterRule(localctx, 50, self.RULE_windowSpecification)
    self._la = 0 # Token type
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 1221
        self._errHandler.sync(self)
        # Decision 150: does the spec start with a base window name?
        la_ = self._interp.adaptivePredict(self._input,150,self._ctx)
        if la_ == 1:
            self.state = 1220
            localctx.existingWindowName = self.identifier()


        self.state = 1233
        self._errHandler.sync(self)
        _la = self._input.LA(1)
        if _la==SqlBaseParser.PARTITION:
            # Optional PARTITION BY expression (',' expression)* clause;
            # each expression is appended to localctx.partition.
            self.state = 1223
            self.match(SqlBaseParser.PARTITION)
            self.state = 1224
            self.match(SqlBaseParser.BY)
            self.state = 1225
            localctx._expression = self.expression()
            localctx.partition.append(localctx._expression)
            self.state = 1230
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            while _la==SqlBaseParser.T__3:
                self.state = 1226
                self.match(SqlBaseParser.T__3)
                self.state = 1227
                localctx._expression = self.expression()
                localctx.partition.append(localctx._expression)
                self.state = 1232
                self._errHandler.sync(self)
                _la = self._input.LA(1)


        self.state = 1245
        self._errHandler.sync(self)
        _la = self._input.LA(1)
        if _la==SqlBaseParser.ORDER:
            # Optional ORDER BY sortItem (',' sortItem)* clause.
            self.state = 1235
            self.match(SqlBaseParser.ORDER)
            self.state = 1236
            self.match(SqlBaseParser.BY)
            self.state = 1237
            self.sortItem()
            self.state = 1242
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            while _la==SqlBaseParser.T__3:
                self.state = 1238
                self.match(SqlBaseParser.T__3)
                self.state = 1239
                self.sortItem()
                self.state = 1244
                self._errHandler.sync(self)
                _la = self._input.LA(1)


        self.state = 1248
        self._errHandler.sync(self)
        _la = self._input.LA(1)
        if _la==SqlBaseParser.GROUPS or _la==SqlBaseParser.MEASURES or _la==SqlBaseParser.RANGE or _la==SqlBaseParser.ROWS:
            # Optional frame clause, introduced by one of these keywords.
            self.state = 1247
            self.windowFrame()


    except RecognitionException as re:
        # Standard ANTLR recovery: record the error on the context and resync.
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
    class NamedQueryContext(ParserRuleContext):
        """Generated parse-tree node for the ``namedQuery`` rule
        (one WITH-clause entry: name, optional column aliases, AS, query).
        Accessor methods expose the child tokens/contexts; listener and
        visitor hooks are dispatched via ``hasattr`` so partial
        listeners/visitors are supported.
        """
        __slots__ = 'parser'

        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser
            self.name = None # IdentifierContext

        def AS(self):
            return self.getToken(SqlBaseParser.AS, 0)

        def query(self):
            return self.getTypedRuleContext(SqlBaseParser.QueryContext,0)

        def identifier(self):
            return self.getTypedRuleContext(SqlBaseParser.IdentifierContext,0)

        def columnAliases(self):
            return self.getTypedRuleContext(SqlBaseParser.ColumnAliasesContext,0)

        def getRuleIndex(self):
            return SqlBaseParser.RULE_namedQuery

        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterNamedQuery" ):
                listener.enterNamedQuery(self)

        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitNamedQuery" ):
                listener.exitNamedQuery(self)

        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitNamedQuery" ):
                return visitor.visitNamedQuery(self)
            else:
                return visitor.visitChildren(self)
    def namedQuery(self):
        """Parse the ``namedQuery`` rule (ANTLR-generated):
        ``identifier columnAliases? AS '(' query ')'`` — T__1/T__2 are the
        parenthesis tokens. Returns the populated ``NamedQueryContext``.
        """
        localctx = SqlBaseParser.NamedQueryContext(self, self._ctx, self.state)
        self.enterRule(localctx, 52, self.RULE_namedQuery)
        self._la = 0 # Token type
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 1250
            localctx.name = self.identifier()
            # Optional column-alias list, predicted by a '(' lookahead.
            self.state = 1252
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            if _la==SqlBaseParser.T__1:
                self.state = 1251
                self.columnAliases()

            self.state = 1254
            self.match(SqlBaseParser.AS)
            self.state = 1255
            self.match(SqlBaseParser.T__1)
            self.state = 1256
            self.query()
            self.state = 1257
            self.match(SqlBaseParser.T__2)
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class SetQuantifierContext(ParserRuleContext):
        """Generated parse-tree node for the ``setQuantifier`` rule
        (the DISTINCT / ALL keyword of a select or set operation)."""
        __slots__ = 'parser'

        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser

        def DISTINCT(self):
            return self.getToken(SqlBaseParser.DISTINCT, 0)

        def ALL(self):
            return self.getToken(SqlBaseParser.ALL, 0)

        def getRuleIndex(self):
            return SqlBaseParser.RULE_setQuantifier

        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterSetQuantifier" ):
                listener.enterSetQuantifier(self)

        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitSetQuantifier" ):
                listener.exitSetQuantifier(self)

        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitSetQuantifier" ):
                return visitor.visitSetQuantifier(self)
            else:
                return visitor.visitChildren(self)
    def setQuantifier(self):
        """Parse the ``setQuantifier`` rule (ANTLR-generated): a single
        ALL or DISTINCT token; any other token triggers inline recovery.
        """
        localctx = SqlBaseParser.SetQuantifierContext(self, self._ctx, self.state)
        self.enterRule(localctx, 54, self.RULE_setQuantifier)
        self._la = 0 # Token type
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 1259
            _la = self._input.LA(1)
            if not(_la==SqlBaseParser.ALL or _la==SqlBaseParser.DISTINCT):
                self._errHandler.recoverInline(self)
            else:
                self._errHandler.reportMatch(self)
                self.consume()
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class SelectItemContext(ParserRuleContext):
        """Generated base node for the ``selectItem`` rule. Labeled
        alternatives (SelectSingle / SelectAll) subclass this and copy
        state in via ``copyFrom``."""
        __slots__ = 'parser'

        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser

        def getRuleIndex(self):
            return SqlBaseParser.RULE_selectItem

        def copyFrom(self, ctx:ParserRuleContext):
            super().copyFrom(ctx)
    class SelectAllContext(SelectItemContext):
        """Generated node for the ``#selectAll`` labeled alternative of
        ``selectItem`` (``expr.*`` with optional aliases, or a bare ``*``)."""

        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.SelectItemContext
            super().__init__(parser)
            self.copyFrom(ctx)

        def primaryExpression(self):
            return self.getTypedRuleContext(SqlBaseParser.PrimaryExpressionContext,0)

        def ASTERISK(self):
            return self.getToken(SqlBaseParser.ASTERISK, 0)

        def AS(self):
            return self.getToken(SqlBaseParser.AS, 0)

        def columnAliases(self):
            return self.getTypedRuleContext(SqlBaseParser.ColumnAliasesContext,0)

        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterSelectAll" ):
                listener.enterSelectAll(self)

        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitSelectAll" ):
                listener.exitSelectAll(self)

        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitSelectAll" ):
                return visitor.visitSelectAll(self)
            else:
                return visitor.visitChildren(self)
    class SelectSingleContext(SelectItemContext):
        """Generated node for the ``#selectSingle`` labeled alternative of
        ``selectItem`` (a single expression with an optional AS alias)."""

        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.SelectItemContext
            super().__init__(parser)
            self.copyFrom(ctx)

        def expression(self):
            return self.getTypedRuleContext(SqlBaseParser.ExpressionContext,0)

        def identifier(self):
            return self.getTypedRuleContext(SqlBaseParser.IdentifierContext,0)

        def AS(self):
            return self.getToken(SqlBaseParser.AS, 0)

        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterSelectSingle" ):
                listener.enterSelectSingle(self)

        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitSelectSingle" ):
                listener.exitSelectSingle(self)

        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitSelectSingle" ):
                return visitor.visitSelectSingle(self)
            else:
                return visitor.visitChildren(self)
    def selectItem(self):
        """Parse the ``selectItem`` rule (ANTLR-generated). Decision 160
        picks one of three alternatives:

        1. ``expression (AS? identifier)?``          -> SelectSingleContext
        2. ``primaryExpression '.' '*' (AS columnAliases)?`` -> SelectAllContext
        3. ``'*'``                                   -> SelectAllContext
        """
        localctx = SqlBaseParser.SelectItemContext(self, self._ctx, self.state)
        self.enterRule(localctx, 56, self.RULE_selectItem)
        self._la = 0 # Token type
        try:
            self.state = 1276
            self._errHandler.sync(self)
            la_ = self._interp.adaptivePredict(self._input,160,self._ctx)
            if la_ == 1:
                localctx = SqlBaseParser.SelectSingleContext(self, localctx)
                self.enterOuterAlt(localctx, 1)
                self.state = 1261
                self.expression()
                # Optional alias; AS itself is optional inside the alias.
                self.state = 1266
                self._errHandler.sync(self)
                la_ = self._interp.adaptivePredict(self._input,158,self._ctx)
                if la_ == 1:
                    self.state = 1263
                    self._errHandler.sync(self)
                    _la = self._input.LA(1)
                    if _la==SqlBaseParser.AS:
                        self.state = 1262
                        self.match(SqlBaseParser.AS)

                    self.state = 1265
                    self.identifier()

            elif la_ == 2:
                localctx = SqlBaseParser.SelectAllContext(self, localctx)
                self.enterOuterAlt(localctx, 2)
                self.state = 1268
                self.primaryExpression(0)
                self.state = 1269
                self.match(SqlBaseParser.T__0)
                self.state = 1270
                self.match(SqlBaseParser.ASTERISK)
                self.state = 1273
                self._errHandler.sync(self)
                la_ = self._interp.adaptivePredict(self._input,159,self._ctx)
                if la_ == 1:
                    self.state = 1271
                    self.match(SqlBaseParser.AS)
                    self.state = 1272
                    self.columnAliases()

            elif la_ == 3:
                localctx = SqlBaseParser.SelectAllContext(self, localctx)
                self.enterOuterAlt(localctx, 3)
                self.state = 1275
                self.match(SqlBaseParser.ASTERISK)

        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class RelationContext(ParserRuleContext):
        """Generated base node for the left-recursive ``relation`` rule.
        Labeled alternatives (RelationDefault / JoinRelation) subclass
        this and copy state in via ``copyFrom``."""
        __slots__ = 'parser'

        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser

        def getRuleIndex(self):
            return SqlBaseParser.RULE_relation

        def copyFrom(self, ctx:ParserRuleContext):
            super().copyFrom(ctx)
    class RelationDefaultContext(RelationContext):
        """Generated node for the ``#relationDefault`` alternative of
        ``relation``: a plain sampled relation with no join."""

        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.RelationContext
            super().__init__(parser)
            self.copyFrom(ctx)

        def sampledRelation(self):
            return self.getTypedRuleContext(SqlBaseParser.SampledRelationContext,0)

        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterRelationDefault" ):
                listener.enterRelationDefault(self)

        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitRelationDefault" ):
                listener.exitRelationDefault(self)

        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitRelationDefault" ):
                return visitor.visitRelationDefault(self)
            else:
                return visitor.visitChildren(self)
    class JoinRelationContext(RelationContext):
        """Generated node for the ``#joinRelation`` alternative of
        ``relation``: CROSS JOIN, typed JOIN with criteria, or NATURAL
        JOIN. ``left``/``right``/``rightRelation`` are the labeled
        operands set by the parser during the left-recursion loop."""

        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.RelationContext
            super().__init__(parser)
            self.left = None # RelationContext
            self.right = None # SampledRelationContext
            self.rightRelation = None # RelationContext
            self.copyFrom(ctx)

        def relation(self, i:int=None):
            if i is None:
                return self.getTypedRuleContexts(SqlBaseParser.RelationContext)
            else:
                return self.getTypedRuleContext(SqlBaseParser.RelationContext,i)

        def CROSS(self):
            return self.getToken(SqlBaseParser.CROSS, 0)

        def JOIN(self):
            return self.getToken(SqlBaseParser.JOIN, 0)

        def joinType(self):
            return self.getTypedRuleContext(SqlBaseParser.JoinTypeContext,0)

        def joinCriteria(self):
            return self.getTypedRuleContext(SqlBaseParser.JoinCriteriaContext,0)

        def NATURAL(self):
            return self.getToken(SqlBaseParser.NATURAL, 0)

        def sampledRelation(self):
            return self.getTypedRuleContext(SqlBaseParser.SampledRelationContext,0)

        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterJoinRelation" ):
                listener.enterJoinRelation(self)

        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitJoinRelation" ):
                listener.exitJoinRelation(self)

        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitJoinRelation" ):
                return visitor.visitJoinRelation(self)
            else:
                return visitor.visitChildren(self)
    def relation(self, _p:int=0):
        """Parse the left-recursive ``relation`` rule (ANTLR-generated).

        Parses a ``sampledRelation`` seed, then loops (decision 162)
        folding join suffixes left-associatively into nested
        ``JoinRelationContext`` nodes. Each iteration is one of:
        ``CROSS JOIN sampledRelation``,
        ``joinType JOIN relation joinCriteria``, or
        ``NATURAL joinType JOIN sampledRelation``.

        ``_p`` is the precedence floor used by ``precpred`` — standard
        ANTLR left-recursion machinery; callers elsewhere pass 0.
        """
        _parentctx = self._ctx
        _parentState = self.state
        localctx = SqlBaseParser.RelationContext(self, self._ctx, _parentState)
        _prevctx = localctx
        _startState = 58
        self.enterRecursionRule(localctx, 58, self.RULE_relation, _p)
        try:
            self.enterOuterAlt(localctx, 1)
            # Seed alternative: a bare sampled relation.
            localctx = SqlBaseParser.RelationDefaultContext(self, localctx)
            self._ctx = localctx
            _prevctx = localctx

            self.state = 1279
            self.sampledRelation()

            self._ctx.stop = self._input.LT(-1)
            # Left-recursion loop: keep consuming join suffixes while the
            # adaptive predictor says one follows.
            self.state = 1299
            self._errHandler.sync(self)
            _alt = self._interp.adaptivePredict(self._input,162,self._ctx)
            while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
                if _alt==1:
                    if self._parseListeners is not None:
                        self.triggerExitRuleEvent()
                    _prevctx = localctx
                    # Previous result becomes the left operand of the join.
                    localctx = SqlBaseParser.JoinRelationContext(self, SqlBaseParser.RelationContext(self, _parentctx, _parentState))
                    localctx.left = _prevctx
                    self.pushNewRecursionContext(localctx, _startState, self.RULE_relation)
                    self.state = 1281
                    if not self.precpred(self._ctx, 2):
                        from antlr4.error.Errors import FailedPredicateException
                        raise FailedPredicateException(self, "self.precpred(self._ctx, 2)")
                    self.state = 1295
                    self._errHandler.sync(self)
                    token = self._input.LA(1)
                    if token in [SqlBaseParser.CROSS]:
                        self.state = 1282
                        self.match(SqlBaseParser.CROSS)
                        self.state = 1283
                        self.match(SqlBaseParser.JOIN)
                        self.state = 1284
                        localctx.right = self.sampledRelation()
                    elif token in [SqlBaseParser.FULL, SqlBaseParser.INNER, SqlBaseParser.JOIN, SqlBaseParser.LEFT, SqlBaseParser.RIGHT]:
                        self.state = 1285
                        self.joinType()
                        self.state = 1286
                        self.match(SqlBaseParser.JOIN)
                        self.state = 1287
                        localctx.rightRelation = self.relation(0)
                        self.state = 1288
                        self.joinCriteria()
                    elif token in [SqlBaseParser.NATURAL]:
                        self.state = 1290
                        self.match(SqlBaseParser.NATURAL)
                        self.state = 1291
                        self.joinType()
                        self.state = 1292
                        self.match(SqlBaseParser.JOIN)
                        self.state = 1293
                        localctx.right = self.sampledRelation()
                    else:
                        raise NoViableAltException(self)

                self.state = 1301
                self._errHandler.sync(self)
                _alt = self._interp.adaptivePredict(self._input,162,self._ctx)

        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.unrollRecursionContexts(_parentctx)
        return localctx
    class JoinTypeContext(ParserRuleContext):
        """Generated parse-tree node for the ``joinType`` rule
        (INNER / LEFT [OUTER] / RIGHT [OUTER] / FULL [OUTER])."""
        __slots__ = 'parser'

        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser

        def INNER(self):
            return self.getToken(SqlBaseParser.INNER, 0)

        def LEFT(self):
            return self.getToken(SqlBaseParser.LEFT, 0)

        def OUTER(self):
            return self.getToken(SqlBaseParser.OUTER, 0)

        def RIGHT(self):
            return self.getToken(SqlBaseParser.RIGHT, 0)

        def FULL(self):
            return self.getToken(SqlBaseParser.FULL, 0)

        def getRuleIndex(self):
            return SqlBaseParser.RULE_joinType

        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterJoinType" ):
                listener.enterJoinType(self)

        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitJoinType" ):
                listener.exitJoinType(self)

        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitJoinType" ):
                return visitor.visitJoinType(self)
            else:
                return visitor.visitChildren(self)
    def joinType(self):
        """Parse the ``joinType`` rule (ANTLR-generated). Switches on the
        next token: ``INNER?`` (when INNER or a following JOIN is next),
        ``LEFT OUTER?``, ``RIGHT OUTER?``, or ``FULL OUTER?``.
        """
        localctx = SqlBaseParser.JoinTypeContext(self, self._ctx, self.state)
        self.enterRule(localctx, 60, self.RULE_joinType)
        self._la = 0 # Token type
        try:
            self.state = 1317
            self._errHandler.sync(self)
            token = self._input.LA(1)
            if token in [SqlBaseParser.INNER, SqlBaseParser.JOIN]:
                # INNER is optional: a bare JOIN also selects this alt.
                self.enterOuterAlt(localctx, 1)
                self.state = 1303
                self._errHandler.sync(self)
                _la = self._input.LA(1)
                if _la==SqlBaseParser.INNER:
                    self.state = 1302
                    self.match(SqlBaseParser.INNER)

            elif token in [SqlBaseParser.LEFT]:
                self.enterOuterAlt(localctx, 2)
                self.state = 1305
                self.match(SqlBaseParser.LEFT)
                self.state = 1307
                self._errHandler.sync(self)
                _la = self._input.LA(1)
                if _la==SqlBaseParser.OUTER:
                    self.state = 1306
                    self.match(SqlBaseParser.OUTER)

            elif token in [SqlBaseParser.RIGHT]:
                self.enterOuterAlt(localctx, 3)
                self.state = 1309
                self.match(SqlBaseParser.RIGHT)
                self.state = 1311
                self._errHandler.sync(self)
                _la = self._input.LA(1)
                if _la==SqlBaseParser.OUTER:
                    self.state = 1310
                    self.match(SqlBaseParser.OUTER)

            elif token in [SqlBaseParser.FULL]:
                self.enterOuterAlt(localctx, 4)
                self.state = 1313
                self.match(SqlBaseParser.FULL)
                self.state = 1315
                self._errHandler.sync(self)
                _la = self._input.LA(1)
                if _la==SqlBaseParser.OUTER:
                    self.state = 1314
                    self.match(SqlBaseParser.OUTER)

            else:
                raise NoViableAltException(self)
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class JoinCriteriaContext(ParserRuleContext):
        """Generated parse-tree node for the ``joinCriteria`` rule:
        ``ON booleanExpression`` or ``USING '(' identifier (',' identifier)* ')'``."""
        __slots__ = 'parser'

        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser

        def ON(self):
            return self.getToken(SqlBaseParser.ON, 0)

        def booleanExpression(self):
            return self.getTypedRuleContext(SqlBaseParser.BooleanExpressionContext,0)

        def USING(self):
            return self.getToken(SqlBaseParser.USING, 0)

        def identifier(self, i:int=None):
            # i=None returns all identifier children; an index returns one.
            if i is None:
                return self.getTypedRuleContexts(SqlBaseParser.IdentifierContext)
            else:
                return self.getTypedRuleContext(SqlBaseParser.IdentifierContext,i)

        def getRuleIndex(self):
            return SqlBaseParser.RULE_joinCriteria

        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterJoinCriteria" ):
                listener.enterJoinCriteria(self)

        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitJoinCriteria" ):
                listener.exitJoinCriteria(self)

        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitJoinCriteria" ):
                return visitor.visitJoinCriteria(self)
            else:
                return visitor.visitChildren(self)
    def joinCriteria(self):
        """Parse the ``joinCriteria`` rule (ANTLR-generated):
        ``ON booleanExpression`` or
        ``USING '(' identifier (',' identifier)* ')'``.
        """
        localctx = SqlBaseParser.JoinCriteriaContext(self, self._ctx, self.state)
        self.enterRule(localctx, 62, self.RULE_joinCriteria)
        self._la = 0 # Token type
        try:
            self.state = 1333
            self._errHandler.sync(self)
            token = self._input.LA(1)
            if token in [SqlBaseParser.ON]:
                self.enterOuterAlt(localctx, 1)
                self.state = 1319
                self.match(SqlBaseParser.ON)
                self.state = 1320
                self.booleanExpression(0)
            elif token in [SqlBaseParser.USING]:
                self.enterOuterAlt(localctx, 2)
                self.state = 1321
                self.match(SqlBaseParser.USING)
                self.state = 1322
                self.match(SqlBaseParser.T__1)
                self.state = 1323
                self.identifier()
                # Comma-separated column list (T__3 = ',').
                self.state = 1328
                self._errHandler.sync(self)
                _la = self._input.LA(1)
                while _la==SqlBaseParser.T__3:
                    self.state = 1324
                    self.match(SqlBaseParser.T__3)
                    self.state = 1325
                    self.identifier()
                    self.state = 1330
                    self._errHandler.sync(self)
                    _la = self._input.LA(1)

                self.state = 1331
                self.match(SqlBaseParser.T__2)
            else:
                raise NoViableAltException(self)
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class SampledRelationContext(ParserRuleContext):
        """Generated parse-tree node for the ``sampledRelation`` rule:
        a pattern-recognition relation with an optional
        ``TABLESAMPLE sampleType '(' expression ')'`` suffix.
        ``percentage`` holds the labeled sample-size expression."""
        __slots__ = 'parser'

        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser
            self.percentage = None # ExpressionContext

        def patternRecognition(self):
            return self.getTypedRuleContext(SqlBaseParser.PatternRecognitionContext,0)

        def TABLESAMPLE(self):
            return self.getToken(SqlBaseParser.TABLESAMPLE, 0)

        def sampleType(self):
            return self.getTypedRuleContext(SqlBaseParser.SampleTypeContext,0)

        def expression(self):
            return self.getTypedRuleContext(SqlBaseParser.ExpressionContext,0)

        def getRuleIndex(self):
            return SqlBaseParser.RULE_sampledRelation

        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterSampledRelation" ):
                listener.enterSampledRelation(self)

        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitSampledRelation" ):
                listener.exitSampledRelation(self)

        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitSampledRelation" ):
                return visitor.visitSampledRelation(self)
            else:
                return visitor.visitChildren(self)
    def sampledRelation(self):
        """Parse the ``sampledRelation`` rule (ANTLR-generated):
        ``patternRecognition (TABLESAMPLE sampleType '(' expression ')')?``.
        The optional suffix is chosen by adaptive prediction (decision 170).
        """
        localctx = SqlBaseParser.SampledRelationContext(self, self._ctx, self.state)
        self.enterRule(localctx, 64, self.RULE_sampledRelation)
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 1335
            self.patternRecognition()
            self.state = 1342
            self._errHandler.sync(self)
            la_ = self._interp.adaptivePredict(self._input,170,self._ctx)
            if la_ == 1:
                self.state = 1336
                self.match(SqlBaseParser.TABLESAMPLE)
                self.state = 1337
                self.sampleType()
                self.state = 1338
                self.match(SqlBaseParser.T__1)
                self.state = 1339
                localctx.percentage = self.expression()
                self.state = 1340
                self.match(SqlBaseParser.T__2)

        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class SampleTypeContext(ParserRuleContext):
        """Generated parse-tree node for the ``sampleType`` rule
        (the BERNOULLI / SYSTEM keyword of a TABLESAMPLE clause)."""
        __slots__ = 'parser'

        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser

        def BERNOULLI(self):
            return self.getToken(SqlBaseParser.BERNOULLI, 0)

        def SYSTEM(self):
            return self.getToken(SqlBaseParser.SYSTEM, 0)

        def getRuleIndex(self):
            return SqlBaseParser.RULE_sampleType

        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterSampleType" ):
                listener.enterSampleType(self)

        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitSampleType" ):
                listener.exitSampleType(self)

        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitSampleType" ):
                return visitor.visitSampleType(self)
            else:
                return visitor.visitChildren(self)
    def sampleType(self):
        """Parse the ``sampleType`` rule (ANTLR-generated): a single
        BERNOULLI or SYSTEM token; anything else triggers inline recovery.
        """
        localctx = SqlBaseParser.SampleTypeContext(self, self._ctx, self.state)
        self.enterRule(localctx, 66, self.RULE_sampleType)
        self._la = 0 # Token type
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 1344
            _la = self._input.LA(1)
            if not(_la==SqlBaseParser.BERNOULLI or _la==SqlBaseParser.SYSTEM):
                self._errHandler.recoverInline(self)
            else:
                self._errHandler.reportMatch(self)
                self.consume()
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class ListAggOverflowBehaviorContext(ParserRuleContext):
        """Generated parse-tree node for the ``listAggOverflowBehavior``
        rule: ``ERROR`` or ``TRUNCATE string? listaggCountIndication``."""
        __slots__ = 'parser'

        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser

        def ERROR(self):
            return self.getToken(SqlBaseParser.ERROR, 0)

        def TRUNCATE(self):
            return self.getToken(SqlBaseParser.TRUNCATE, 0)

        def listaggCountIndication(self):
            return self.getTypedRuleContext(SqlBaseParser.ListaggCountIndicationContext,0)

        def string(self):
            return self.getTypedRuleContext(SqlBaseParser.StringContext,0)

        def getRuleIndex(self):
            return SqlBaseParser.RULE_listAggOverflowBehavior

        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterListAggOverflowBehavior" ):
                listener.enterListAggOverflowBehavior(self)

        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitListAggOverflowBehavior" ):
                listener.exitListAggOverflowBehavior(self)

        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitListAggOverflowBehavior" ):
                return visitor.visitListAggOverflowBehavior(self)
            else:
                return visitor.visitChildren(self)
    def listAggOverflowBehavior(self):
        """Parse the ``listAggOverflowBehavior`` rule (ANTLR-generated):
        ``ERROR`` or ``TRUNCATE string? listaggCountIndication`` — the
        optional filler string is recognized by a STRING/UNICODE_STRING
        lookahead.
        """
        localctx = SqlBaseParser.ListAggOverflowBehaviorContext(self, self._ctx, self.state)
        self.enterRule(localctx, 68, self.RULE_listAggOverflowBehavior)
        self._la = 0 # Token type
        try:
            self.state = 1352
            self._errHandler.sync(self)
            token = self._input.LA(1)
            if token in [SqlBaseParser.ERROR]:
                self.enterOuterAlt(localctx, 1)
                self.state = 1346
                self.match(SqlBaseParser.ERROR)
            elif token in [SqlBaseParser.TRUNCATE]:
                self.enterOuterAlt(localctx, 2)
                self.state = 1347
                self.match(SqlBaseParser.TRUNCATE)
                self.state = 1349
                self._errHandler.sync(self)
                _la = self._input.LA(1)
                if _la==SqlBaseParser.STRING or _la==SqlBaseParser.UNICODE_STRING:
                    self.state = 1348
                    self.string()

                self.state = 1351
                self.listaggCountIndication()
            else:
                raise NoViableAltException(self)
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class ListaggCountIndicationContext(ParserRuleContext):
        """Generated parse-tree node for the ``listaggCountIndication``
        rule: ``WITH COUNT`` or ``WITHOUT COUNT``."""
        __slots__ = 'parser'

        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser

        def WITH(self):
            return self.getToken(SqlBaseParser.WITH, 0)

        def COUNT(self):
            return self.getToken(SqlBaseParser.COUNT, 0)

        def WITHOUT(self):
            return self.getToken(SqlBaseParser.WITHOUT, 0)

        def getRuleIndex(self):
            return SqlBaseParser.RULE_listaggCountIndication

        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterListaggCountIndication" ):
                listener.enterListaggCountIndication(self)

        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitListaggCountIndication" ):
                listener.exitListaggCountIndication(self)

        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitListaggCountIndication" ):
                return visitor.visitListaggCountIndication(self)
            else:
                return visitor.visitChildren(self)
    def listaggCountIndication(self):
        """Parse the ``listaggCountIndication`` rule (ANTLR-generated):
        ``WITH COUNT`` or ``WITHOUT COUNT``, switched on the first token.
        """
        localctx = SqlBaseParser.ListaggCountIndicationContext(self, self._ctx, self.state)
        self.enterRule(localctx, 70, self.RULE_listaggCountIndication)
        try:
            self.state = 1358
            self._errHandler.sync(self)
            token = self._input.LA(1)
            if token in [SqlBaseParser.WITH]:
                self.enterOuterAlt(localctx, 1)
                self.state = 1354
                self.match(SqlBaseParser.WITH)
                self.state = 1355
                self.match(SqlBaseParser.COUNT)
            elif token in [SqlBaseParser.WITHOUT]:
                self.enterOuterAlt(localctx, 2)
                self.state = 1356
                self.match(SqlBaseParser.WITHOUT)
                self.state = 1357
                self.match(SqlBaseParser.COUNT)
            else:
                raise NoViableAltException(self)
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class PatternRecognitionContext(ParserRuleContext):
        """Generated parse-tree node for the ``patternRecognition`` rule:
        an aliased relation optionally followed by a row-pattern
        ``MATCH_RECOGNIZE ( ... )`` clause (PARTITION BY, ORDER BY,
        MEASURES, rows-per-match, AFTER MATCH skip, PATTERN, SUBSET,
        DEFINE) and an optional trailing alias.

        ``partition`` collects the labeled PARTITION BY expressions;
        ``_expression`` is the generator's scratch slot for the most
        recently parsed one.
        """
        __slots__ = 'parser'

        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser
            self._expression = None # ExpressionContext
            self.partition = list() # of ExpressionContexts

        def aliasedRelation(self):
            return self.getTypedRuleContext(SqlBaseParser.AliasedRelationContext,0)

        def MATCH_RECOGNIZE(self):
            return self.getToken(SqlBaseParser.MATCH_RECOGNIZE, 0)

        def PATTERN(self):
            return self.getToken(SqlBaseParser.PATTERN, 0)

        def rowPattern(self):
            return self.getTypedRuleContext(SqlBaseParser.RowPatternContext,0)

        def DEFINE(self):
            return self.getToken(SqlBaseParser.DEFINE, 0)

        def variableDefinition(self, i:int=None):
            if i is None:
                return self.getTypedRuleContexts(SqlBaseParser.VariableDefinitionContext)
            else:
                return self.getTypedRuleContext(SqlBaseParser.VariableDefinitionContext,i)

        def PARTITION(self):
            return self.getToken(SqlBaseParser.PARTITION, 0)

        def BY(self, i:int=None):
            # BY can occur twice (PARTITION BY and ORDER BY).
            if i is None:
                return self.getTokens(SqlBaseParser.BY)
            else:
                return self.getToken(SqlBaseParser.BY, i)

        def ORDER(self):
            return self.getToken(SqlBaseParser.ORDER, 0)

        def sortItem(self, i:int=None):
            if i is None:
                return self.getTypedRuleContexts(SqlBaseParser.SortItemContext)
            else:
                return self.getTypedRuleContext(SqlBaseParser.SortItemContext,i)

        def MEASURES(self):
            return self.getToken(SqlBaseParser.MEASURES, 0)

        def measureDefinition(self, i:int=None):
            if i is None:
                return self.getTypedRuleContexts(SqlBaseParser.MeasureDefinitionContext)
            else:
                return self.getTypedRuleContext(SqlBaseParser.MeasureDefinitionContext,i)

        def rowsPerMatch(self):
            return self.getTypedRuleContext(SqlBaseParser.RowsPerMatchContext,0)

        def AFTER(self):
            return self.getToken(SqlBaseParser.AFTER, 0)

        def MATCH(self):
            return self.getToken(SqlBaseParser.MATCH, 0)

        def skipTo(self):
            return self.getTypedRuleContext(SqlBaseParser.SkipToContext,0)

        def SUBSET(self):
            return self.getToken(SqlBaseParser.SUBSET, 0)

        def subsetDefinition(self, i:int=None):
            if i is None:
                return self.getTypedRuleContexts(SqlBaseParser.SubsetDefinitionContext)
            else:
                return self.getTypedRuleContext(SqlBaseParser.SubsetDefinitionContext,i)

        def identifier(self):
            return self.getTypedRuleContext(SqlBaseParser.IdentifierContext,0)

        def expression(self, i:int=None):
            if i is None:
                return self.getTypedRuleContexts(SqlBaseParser.ExpressionContext)
            else:
                return self.getTypedRuleContext(SqlBaseParser.ExpressionContext,i)

        def INITIAL(self):
            return self.getToken(SqlBaseParser.INITIAL, 0)

        def SEEK(self):
            return self.getToken(SqlBaseParser.SEEK, 0)

        def AS(self):
            return self.getToken(SqlBaseParser.AS, 0)

        def columnAliases(self):
            return self.getTypedRuleContext(SqlBaseParser.ColumnAliasesContext,0)

        def getRuleIndex(self):
            return SqlBaseParser.RULE_patternRecognition

        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterPatternRecognition" ):
                listener.enterPatternRecognition(self)

        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitPatternRecognition" ):
                listener.exitPatternRecognition(self)

        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitPatternRecognition" ):
                return visitor.visitPatternRecognition(self)
            else:
                return visitor.visitChildren(self)
    def patternRecognition(self):
        """Parse the ``patternRecognition`` rule (ANTLR-generated).

        Grammar, per the match calls below::

            aliasedRelation
              (MATCH_RECOGNIZE '('
                 (PARTITION BY expression (',' expression)*)?
                 (ORDER BY sortItem (',' sortItem)*)?
                 (MEASURES measureDefinition (',' measureDefinition)*)?
                 rowsPerMatch?
                 (AFTER MATCH skipTo)?
                 (INITIAL | SEEK)?
                 PATTERN '(' rowPattern ')'
                 (SUBSET subsetDefinition (',' subsetDefinition)*)?
                 DEFINE variableDefinition (',' variableDefinition)*
               ')'
               (AS? identifier columnAliases?)?  )?

        The entire MATCH_RECOGNIZE suffix is gated by adaptive prediction
        (decision 189). Returns the populated ``PatternRecognitionContext``.
        """
        localctx = SqlBaseParser.PatternRecognitionContext(self, self._ctx, self.state)
        self.enterRule(localctx, 72, self.RULE_patternRecognition)
        self._la = 0 # Token type
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 1360
            self.aliasedRelation()
            self.state = 1443
            self._errHandler.sync(self)
            la_ = self._interp.adaptivePredict(self._input,189,self._ctx)
            if la_ == 1:
                self.state = 1361
                self.match(SqlBaseParser.MATCH_RECOGNIZE)
                self.state = 1362
                self.match(SqlBaseParser.T__1)
                # Optional PARTITION BY expression list (T__3 = ',').
                self.state = 1373
                self._errHandler.sync(self)
                _la = self._input.LA(1)
                if _la==SqlBaseParser.PARTITION:
                    self.state = 1363
                    self.match(SqlBaseParser.PARTITION)
                    self.state = 1364
                    self.match(SqlBaseParser.BY)
                    self.state = 1365
                    localctx._expression = self.expression()
                    localctx.partition.append(localctx._expression)
                    self.state = 1370
                    self._errHandler.sync(self)
                    _la = self._input.LA(1)
                    while _la==SqlBaseParser.T__3:
                        self.state = 1366
                        self.match(SqlBaseParser.T__3)
                        self.state = 1367
                        localctx._expression = self.expression()
                        localctx.partition.append(localctx._expression)
                        self.state = 1372
                        self._errHandler.sync(self)
                        _la = self._input.LA(1)

                # Optional ORDER BY sort-item list.
                self.state = 1385
                self._errHandler.sync(self)
                _la = self._input.LA(1)
                if _la==SqlBaseParser.ORDER:
                    self.state = 1375
                    self.match(SqlBaseParser.ORDER)
                    self.state = 1376
                    self.match(SqlBaseParser.BY)
                    self.state = 1377
                    self.sortItem()
                    self.state = 1382
                    self._errHandler.sync(self)
                    _la = self._input.LA(1)
                    while _la==SqlBaseParser.T__3:
                        self.state = 1378
                        self.match(SqlBaseParser.T__3)
                        self.state = 1379
                        self.sortItem()
                        self.state = 1384
                        self._errHandler.sync(self)
                        _la = self._input.LA(1)

                # Optional MEASURES definition list.
                self.state = 1396
                self._errHandler.sync(self)
                _la = self._input.LA(1)
                if _la==SqlBaseParser.MEASURES:
                    self.state = 1387
                    self.match(SqlBaseParser.MEASURES)
                    self.state = 1388
                    self.measureDefinition()
                    self.state = 1393
                    self._errHandler.sync(self)
                    _la = self._input.LA(1)
                    while _la==SqlBaseParser.T__3:
                        self.state = 1389
                        self.match(SqlBaseParser.T__3)
                        self.state = 1390
                        self.measureDefinition()
                        self.state = 1395
                        self._errHandler.sync(self)
                        _la = self._input.LA(1)

                # Optional rows-per-match clause (starts with ONE or ALL).
                self.state = 1399
                self._errHandler.sync(self)
                _la = self._input.LA(1)
                if _la==SqlBaseParser.ALL or _la==SqlBaseParser.ONE:
                    self.state = 1398
                    self.rowsPerMatch()

                # Optional AFTER MATCH skip clause.
                self.state = 1404
                self._errHandler.sync(self)
                _la = self._input.LA(1)
                if _la==SqlBaseParser.AFTER:
                    self.state = 1401
                    self.match(SqlBaseParser.AFTER)
                    self.state = 1402
                    self.match(SqlBaseParser.MATCH)
                    self.state = 1403
                    self.skipTo()

                # Optional INITIAL/SEEK anchor-mode keyword (token set match).
                self.state = 1407
                self._errHandler.sync(self)
                _la = self._input.LA(1)
                if _la==SqlBaseParser.INITIAL or _la==SqlBaseParser.SEEK:
                    self.state = 1406
                    _la = self._input.LA(1)
                    if not(_la==SqlBaseParser.INITIAL or _la==SqlBaseParser.SEEK):
                        self._errHandler.recoverInline(self)
                    else:
                        self._errHandler.reportMatch(self)
                        self.consume()

                self.state = 1409
                self.match(SqlBaseParser.PATTERN)
                self.state = 1410
                self.match(SqlBaseParser.T__1)
                self.state = 1411
                self.rowPattern(0)
                self.state = 1412
                self.match(SqlBaseParser.T__2)
                # Optional SUBSET definition list.
                self.state = 1422
                self._errHandler.sync(self)
                _la = self._input.LA(1)
                if _la==SqlBaseParser.SUBSET:
                    self.state = 1413
                    self.match(SqlBaseParser.SUBSET)
                    self.state = 1414
                    self.subsetDefinition()
                    self.state = 1419
                    self._errHandler.sync(self)
                    _la = self._input.LA(1)
                    while _la==SqlBaseParser.T__3:
                        self.state = 1415
                        self.match(SqlBaseParser.T__3)
                        self.state = 1416
                        self.subsetDefinition()
                        self.state = 1421
                        self._errHandler.sync(self)
                        _la = self._input.LA(1)

                # Mandatory DEFINE list (at least one variable definition).
                self.state = 1424
                self.match(SqlBaseParser.DEFINE)
                self.state = 1425
                self.variableDefinition()
                self.state = 1430
                self._errHandler.sync(self)
                _la = self._input.LA(1)
                while _la==SqlBaseParser.T__3:
                    self.state = 1426
                    self.match(SqlBaseParser.T__3)
                    self.state = 1427
                    self.variableDefinition()
                    self.state = 1432
                    self._errHandler.sync(self)
                    _la = self._input.LA(1)

                self.state = 1433
                self.match(SqlBaseParser.T__2)
                # Optional trailing alias: AS? identifier columnAliases?
                # (decisions 188 and 187 guard the alias and its columns).
                self.state = 1441
                self._errHandler.sync(self)
                la_ = self._interp.adaptivePredict(self._input,188,self._ctx)
                if la_ == 1:
                    self.state = 1435
                    self._errHandler.sync(self)
                    _la = self._input.LA(1)
                    if _la==SqlBaseParser.AS:
                        self.state = 1434
                        self.match(SqlBaseParser.AS)

                    self.state = 1437
                    self.identifier()
                    self.state = 1439
                    self._errHandler.sync(self)
                    la_ = self._interp.adaptivePredict(self._input,187,self._ctx)
                    if la_ == 1:
                        self.state = 1438
                        self.columnAliases()

        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class MeasureDefinitionContext(ParserRuleContext):
        """Generated parse-tree node for the ``measureDefinition`` rule
        (one MEASURES entry: ``expression AS identifier``)."""
        __slots__ = 'parser'

        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser

        def expression(self):
            return self.getTypedRuleContext(SqlBaseParser.ExpressionContext,0)

        def AS(self):
            return self.getToken(SqlBaseParser.AS, 0)

        def identifier(self):
            return self.getTypedRuleContext(SqlBaseParser.IdentifierContext,0)

        def getRuleIndex(self):
            return SqlBaseParser.RULE_measureDefinition

        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterMeasureDefinition" ):
                listener.enterMeasureDefinition(self)

        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitMeasureDefinition" ):
                listener.exitMeasureDefinition(self)

        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitMeasureDefinition" ):
                return visitor.visitMeasureDefinition(self)
            else:
                return visitor.visitChildren(self)
    def measureDefinition(self):
        """Parse rule ``measureDefinition``: ``expression AS identifier``.

        Returns the populated :class:`MeasureDefinitionContext`. On a syntax
        error the exception is attached to the context and standard ANTLR
        recovery is performed instead of propagating.
        """
        localctx = SqlBaseParser.MeasureDefinitionContext(self, self._ctx, self.state)
        self.enterRule(localctx, 74, self.RULE_measureDefinition)
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 1445
            self.expression()  # the measure's value expression
            self.state = 1446
            self.match(SqlBaseParser.AS)
            self.state = 1447
            self.identifier()  # the measure's name
        except RecognitionException as re:
            # Record the error on the context and resynchronize the stream.
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
class RowsPerMatchContext(ParserRuleContext):
__slots__ = 'parser'
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def ONE(self):
return self.getToken(SqlBaseParser.ONE, 0)
def ROW(self):
return self.getToken(SqlBaseParser.ROW, 0)
def PER(self):
return self.getToken(SqlBaseParser.PER, 0)
def MATCH(self):
return self.getToken(SqlBaseParser.MATCH, 0)
def ALL(self):
return self.getToken(SqlBaseParser.ALL, 0)
def ROWS(self):
return self.getToken(SqlBaseParser.ROWS, 0)
def emptyMatchHandling(self):
return self.getTypedRuleContext(SqlBaseParser.EmptyMatchHandlingContext,0)
def getRuleIndex(self):
return SqlBaseParser.RULE_rowsPerMatch
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterRowsPerMatch" ):
listener.enterRowsPerMatch(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitRowsPerMatch" ):
listener.exitRowsPerMatch(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitRowsPerMatch" ):
return visitor.visitRowsPerMatch(self)
else:
return visitor.visitChildren(self)
    def rowsPerMatch(self):
        """Parse rule ``rowsPerMatch``.

        Two alternatives, selected by the lookahead token:
          * ``ONE ROW PER MATCH``
          * ``ALL ROWS PER MATCH emptyMatchHandling?``
        Raises ``NoViableAltException`` (caught and recovered below) when the
        next token is neither ONE nor ALL.
        """
        localctx = SqlBaseParser.RowsPerMatchContext(self, self._ctx, self.state)
        self.enterRule(localctx, 76, self.RULE_rowsPerMatch)
        self._la = 0 # Token type
        try:
            self.state = 1460
            self._errHandler.sync(self)
            token = self._input.LA(1)
            if token in [SqlBaseParser.ONE]:
                # Alternative 1: ONE ROW PER MATCH
                self.enterOuterAlt(localctx, 1)
                self.state = 1449
                self.match(SqlBaseParser.ONE)
                self.state = 1450
                self.match(SqlBaseParser.ROW)
                self.state = 1451
                self.match(SqlBaseParser.PER)
                self.state = 1452
                self.match(SqlBaseParser.MATCH)
            elif token in [SqlBaseParser.ALL]:
                # Alternative 2: ALL ROWS PER MATCH, optionally followed by
                # an empty-match handling clause.
                self.enterOuterAlt(localctx, 2)
                self.state = 1453
                self.match(SqlBaseParser.ALL)
                self.state = 1454
                self.match(SqlBaseParser.ROWS)
                self.state = 1455
                self.match(SqlBaseParser.PER)
                self.state = 1456
                self.match(SqlBaseParser.MATCH)
                self.state = 1458
                self._errHandler.sync(self)
                _la = self._input.LA(1)
                # OMIT / SHOW / WITH all start an emptyMatchHandling clause.
                if _la==SqlBaseParser.OMIT or _la==SqlBaseParser.SHOW or _la==SqlBaseParser.WITH:
                    self.state = 1457
                    self.emptyMatchHandling()
            else:
                raise NoViableAltException(self)
        except RecognitionException as re:
            # Record the error on the context and resynchronize the stream.
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
class EmptyMatchHandlingContext(ParserRuleContext):
__slots__ = 'parser'
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def SHOW(self):
return self.getToken(SqlBaseParser.SHOW, 0)
def EMPTY(self):
return self.getToken(SqlBaseParser.EMPTY, 0)
def MATCHES(self):
return self.getToken(SqlBaseParser.MATCHES, 0)
def OMIT(self):
return self.getToken(SqlBaseParser.OMIT, 0)
def WITH(self):
return self.getToken(SqlBaseParser.WITH, 0)
def UNMATCHED(self):
return self.getToken(SqlBaseParser.UNMATCHED, 0)
def ROWS(self):
return self.getToken(SqlBaseParser.ROWS, 0)
def getRuleIndex(self):
return SqlBaseParser.RULE_emptyMatchHandling
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterEmptyMatchHandling" ):
listener.enterEmptyMatchHandling(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitEmptyMatchHandling" ):
listener.exitEmptyMatchHandling(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitEmptyMatchHandling" ):
return visitor.visitEmptyMatchHandling(self)
else:
return visitor.visitChildren(self)
    def emptyMatchHandling(self):
        """Parse rule ``emptyMatchHandling``.

        Three alternatives, selected by the lookahead token:
          * ``SHOW EMPTY MATCHES``
          * ``OMIT EMPTY MATCHES``
          * ``WITH UNMATCHED ROWS``
        """
        localctx = SqlBaseParser.EmptyMatchHandlingContext(self, self._ctx, self.state)
        self.enterRule(localctx, 78, self.RULE_emptyMatchHandling)
        try:
            self.state = 1471
            self._errHandler.sync(self)
            token = self._input.LA(1)
            if token in [SqlBaseParser.SHOW]:
                # SHOW EMPTY MATCHES
                self.enterOuterAlt(localctx, 1)
                self.state = 1462
                self.match(SqlBaseParser.SHOW)
                self.state = 1463
                self.match(SqlBaseParser.EMPTY)
                self.state = 1464
                self.match(SqlBaseParser.MATCHES)
            elif token in [SqlBaseParser.OMIT]:
                # OMIT EMPTY MATCHES
                self.enterOuterAlt(localctx, 2)
                self.state = 1465
                self.match(SqlBaseParser.OMIT)
                self.state = 1466
                self.match(SqlBaseParser.EMPTY)
                self.state = 1467
                self.match(SqlBaseParser.MATCHES)
            elif token in [SqlBaseParser.WITH]:
                # WITH UNMATCHED ROWS
                self.enterOuterAlt(localctx, 3)
                self.state = 1468
                self.match(SqlBaseParser.WITH)
                self.state = 1469
                self.match(SqlBaseParser.UNMATCHED)
                self.state = 1470
                self.match(SqlBaseParser.ROWS)
            else:
                raise NoViableAltException(self)
        except RecognitionException as re:
            # Record the error on the context and resynchronize the stream.
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
class SkipToContext(ParserRuleContext):
__slots__ = 'parser'
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def TO(self):
return self.getToken(SqlBaseParser.TO, 0)
def NEXT(self):
return self.getToken(SqlBaseParser.NEXT, 0)
def ROW(self):
return self.getToken(SqlBaseParser.ROW, 0)
def PAST(self):
return self.getToken(SqlBaseParser.PAST, 0)
def LAST(self):
return self.getToken(SqlBaseParser.LAST, 0)
def FIRST(self):
return self.getToken(SqlBaseParser.FIRST, 0)
def identifier(self):
return self.getTypedRuleContext(SqlBaseParser.IdentifierContext,0)
def getRuleIndex(self):
return SqlBaseParser.RULE_skipTo
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterSkipTo" ):
listener.enterSkipTo(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitSkipTo" ):
listener.exitSkipTo(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitSkipTo" ):
return visitor.visitSkipTo(self)
else:
return visitor.visitChildren(self)
    def skipTo(self):
        """Parse rule ``skipTo``.

        Five alternatives, disambiguated by adaptive prediction (decision 193).
        Each starts with the anonymous literal T__4 — presumably the grammar's
        ``SKIP`` keyword, TODO confirm against the grammar file:
          1. T__4 TO NEXT ROW
          2. T__4 PAST LAST ROW
          3. T__4 TO FIRST identifier
          4. T__4 TO LAST identifier
          5. T__4 TO identifier
        """
        localctx = SqlBaseParser.SkipToContext(self, self._ctx, self.state)
        self.enterRule(localctx, 80, self.RULE_skipTo)
        try:
            self.state = 1492
            self._errHandler.sync(self)
            la_ = self._interp.adaptivePredict(self._input,193,self._ctx)
            if la_ == 1:
                # ... TO NEXT ROW
                self.enterOuterAlt(localctx, 1)
                self.state = 1473
                self.match(SqlBaseParser.T__4)
                self.state = 1474
                self.match(SqlBaseParser.TO)
                self.state = 1475
                self.match(SqlBaseParser.NEXT)
                self.state = 1476
                self.match(SqlBaseParser.ROW)
            elif la_ == 2:
                # ... PAST LAST ROW
                self.enterOuterAlt(localctx, 2)
                self.state = 1477
                self.match(SqlBaseParser.T__4)
                self.state = 1478
                self.match(SqlBaseParser.PAST)
                self.state = 1479
                self.match(SqlBaseParser.LAST)
                self.state = 1480
                self.match(SqlBaseParser.ROW)
            elif la_ == 3:
                # ... TO FIRST <pattern variable>
                self.enterOuterAlt(localctx, 3)
                self.state = 1481
                self.match(SqlBaseParser.T__4)
                self.state = 1482
                self.match(SqlBaseParser.TO)
                self.state = 1483
                self.match(SqlBaseParser.FIRST)
                self.state = 1484
                self.identifier()
            elif la_ == 4:
                # ... TO LAST <pattern variable>
                self.enterOuterAlt(localctx, 4)
                self.state = 1485
                self.match(SqlBaseParser.T__4)
                self.state = 1486
                self.match(SqlBaseParser.TO)
                self.state = 1487
                self.match(SqlBaseParser.LAST)
                self.state = 1488
                self.identifier()
            elif la_ == 5:
                # ... TO <pattern variable>
                self.enterOuterAlt(localctx, 5)
                self.state = 1489
                self.match(SqlBaseParser.T__4)
                self.state = 1490
                self.match(SqlBaseParser.TO)
                self.state = 1491
                self.identifier()
        except RecognitionException as re:
            # Record the error on the context and resynchronize the stream.
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
class SubsetDefinitionContext(ParserRuleContext):
__slots__ = 'parser'
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
self.name = None # IdentifierContext
self._identifier = None # IdentifierContext
self.union = list() # of IdentifierContexts
def EQ(self):
return self.getToken(SqlBaseParser.EQ, 0)
def identifier(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(SqlBaseParser.IdentifierContext)
else:
return self.getTypedRuleContext(SqlBaseParser.IdentifierContext,i)
def getRuleIndex(self):
return SqlBaseParser.RULE_subsetDefinition
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterSubsetDefinition" ):
listener.enterSubsetDefinition(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitSubsetDefinition" ):
listener.exitSubsetDefinition(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitSubsetDefinition" ):
return visitor.visitSubsetDefinition(self)
else:
return visitor.visitChildren(self)
    def subsetDefinition(self):
        """Parse rule ``subsetDefinition``: ``name EQ T__1 identifier (T__3 identifier)* T__2``.

        T__1/T__2/T__3 are anonymous literals — presumably '(' / ')' / ','
        (TODO confirm against the grammar). The subset name is stored in
        ``localctx.name`` and each union member is appended to
        ``localctx.union``.
        """
        localctx = SqlBaseParser.SubsetDefinitionContext(self, self._ctx, self.state)
        self.enterRule(localctx, 82, self.RULE_subsetDefinition)
        self._la = 0 # Token type
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 1494
            localctx.name = self.identifier()  # subset variable name
            self.state = 1495
            self.match(SqlBaseParser.EQ)
            self.state = 1496
            self.match(SqlBaseParser.T__1)
            self.state = 1497
            localctx._identifier = self.identifier()
            localctx.union.append(localctx._identifier)
            self.state = 1502
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            # Consume the remaining separator-delimited member identifiers.
            while _la==SqlBaseParser.T__3:
                self.state = 1498
                self.match(SqlBaseParser.T__3)
                self.state = 1499
                localctx._identifier = self.identifier()
                localctx.union.append(localctx._identifier)
                self.state = 1504
                self._errHandler.sync(self)
                _la = self._input.LA(1)
            self.state = 1505
            self.match(SqlBaseParser.T__2)
        except RecognitionException as re:
            # Record the error on the context and resynchronize the stream.
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
class VariableDefinitionContext(ParserRuleContext):
__slots__ = 'parser'
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def identifier(self):
return self.getTypedRuleContext(SqlBaseParser.IdentifierContext,0)
def AS(self):
return self.getToken(SqlBaseParser.AS, 0)
def expression(self):
return self.getTypedRuleContext(SqlBaseParser.ExpressionContext,0)
def getRuleIndex(self):
return SqlBaseParser.RULE_variableDefinition
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterVariableDefinition" ):
listener.enterVariableDefinition(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitVariableDefinition" ):
listener.exitVariableDefinition(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitVariableDefinition" ):
return visitor.visitVariableDefinition(self)
else:
return visitor.visitChildren(self)
    def variableDefinition(self):
        """Parse rule ``variableDefinition``: ``identifier AS expression``."""
        localctx = SqlBaseParser.VariableDefinitionContext(self, self._ctx, self.state)
        self.enterRule(localctx, 84, self.RULE_variableDefinition)
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 1507
            self.identifier()  # variable name
            self.state = 1508
            self.match(SqlBaseParser.AS)
            self.state = 1509
            self.expression()  # defining condition
        except RecognitionException as re:
            # Record the error on the context and resynchronize the stream.
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
class AliasedRelationContext(ParserRuleContext):
__slots__ = 'parser'
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def relationPrimary(self):
return self.getTypedRuleContext(SqlBaseParser.RelationPrimaryContext,0)
def identifier(self):
return self.getTypedRuleContext(SqlBaseParser.IdentifierContext,0)
def AS(self):
return self.getToken(SqlBaseParser.AS, 0)
def columnAliases(self):
return self.getTypedRuleContext(SqlBaseParser.ColumnAliasesContext,0)
def getRuleIndex(self):
return SqlBaseParser.RULE_aliasedRelation
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterAliasedRelation" ):
listener.enterAliasedRelation(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitAliasedRelation" ):
listener.exitAliasedRelation(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitAliasedRelation" ):
return visitor.visitAliasedRelation(self)
else:
return visitor.visitChildren(self)
    def aliasedRelation(self):
        """Parse rule ``aliasedRelation``: ``relationPrimary (AS? identifier columnAliases?)?``.

        The whole alias clause is optional; adaptive prediction (decision 197)
        decides whether one follows the primary relation.
        """
        localctx = SqlBaseParser.AliasedRelationContext(self, self._ctx, self.state)
        self.enterRule(localctx, 86, self.RULE_aliasedRelation)
        self._la = 0 # Token type
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 1511
            self.relationPrimary()
            self.state = 1519
            self._errHandler.sync(self)
            la_ = self._interp.adaptivePredict(self._input,197,self._ctx)
            if la_ == 1:
                # Alias clause present.
                self.state = 1513
                self._errHandler.sync(self)
                _la = self._input.LA(1)
                if _la==SqlBaseParser.AS:
                    # The AS keyword itself is optional.
                    self.state = 1512
                    self.match(SqlBaseParser.AS)
                self.state = 1515
                self.identifier()  # alias name
                self.state = 1517
                self._errHandler.sync(self)
                la_ = self._interp.adaptivePredict(self._input,196,self._ctx)
                if la_ == 1:
                    # Optional column-alias list.
                    self.state = 1516
                    self.columnAliases()
        except RecognitionException as re:
            # Record the error on the context and resynchronize the stream.
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
class ColumnAliasesContext(ParserRuleContext):
__slots__ = 'parser'
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def identifier(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(SqlBaseParser.IdentifierContext)
else:
return self.getTypedRuleContext(SqlBaseParser.IdentifierContext,i)
def getRuleIndex(self):
return SqlBaseParser.RULE_columnAliases
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterColumnAliases" ):
listener.enterColumnAliases(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitColumnAliases" ):
listener.exitColumnAliases(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitColumnAliases" ):
return visitor.visitColumnAliases(self)
else:
return visitor.visitChildren(self)
    def columnAliases(self):
        """Parse rule ``columnAliases``: ``T__1 identifier (T__3 identifier)* T__2``.

        T__1/T__2/T__3 are anonymous literals — presumably '(' / ')' / ','
        (TODO confirm against the grammar).
        """
        localctx = SqlBaseParser.ColumnAliasesContext(self, self._ctx, self.state)
        self.enterRule(localctx, 88, self.RULE_columnAliases)
        self._la = 0 # Token type
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 1521
            self.match(SqlBaseParser.T__1)
            self.state = 1522
            self.identifier()  # first alias
            self.state = 1527
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            # Remaining separator-delimited aliases.
            while _la==SqlBaseParser.T__3:
                self.state = 1523
                self.match(SqlBaseParser.T__3)
                self.state = 1524
                self.identifier()
                self.state = 1529
                self._errHandler.sync(self)
                _la = self._input.LA(1)
            self.state = 1530
            self.match(SqlBaseParser.T__2)
        except RecognitionException as re:
            # Record the error on the context and resynchronize the stream.
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
class RelationPrimaryContext(ParserRuleContext):
__slots__ = 'parser'
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def getRuleIndex(self):
return SqlBaseParser.RULE_relationPrimary
def copyFrom(self, ctx:ParserRuleContext):
super().copyFrom(ctx)
class SubqueryRelationContext(RelationPrimaryContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.RelationPrimaryContext
super().__init__(parser)
self.copyFrom(ctx)
def query(self):
return self.getTypedRuleContext(SqlBaseParser.QueryContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterSubqueryRelation" ):
listener.enterSubqueryRelation(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitSubqueryRelation" ):
listener.exitSubqueryRelation(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitSubqueryRelation" ):
return visitor.visitSubqueryRelation(self)
else:
return visitor.visitChildren(self)
class ParenthesizedRelationContext(RelationPrimaryContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.RelationPrimaryContext
super().__init__(parser)
self.copyFrom(ctx)
def relation(self):
return self.getTypedRuleContext(SqlBaseParser.RelationContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterParenthesizedRelation" ):
listener.enterParenthesizedRelation(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitParenthesizedRelation" ):
listener.exitParenthesizedRelation(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitParenthesizedRelation" ):
return visitor.visitParenthesizedRelation(self)
else:
return visitor.visitChildren(self)
class UnnestContext(RelationPrimaryContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.RelationPrimaryContext
super().__init__(parser)
self.copyFrom(ctx)
def UNNEST(self):
return self.getToken(SqlBaseParser.UNNEST, 0)
def expression(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(SqlBaseParser.ExpressionContext)
else:
return self.getTypedRuleContext(SqlBaseParser.ExpressionContext,i)
def WITH(self):
return self.getToken(SqlBaseParser.WITH, 0)
def ORDINALITY(self):
return self.getToken(SqlBaseParser.ORDINALITY, 0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterUnnest" ):
listener.enterUnnest(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitUnnest" ):
listener.exitUnnest(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitUnnest" ):
return visitor.visitUnnest(self)
else:
return visitor.visitChildren(self)
class LateralContext(RelationPrimaryContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.RelationPrimaryContext
super().__init__(parser)
self.copyFrom(ctx)
def LATERAL(self):
return self.getToken(SqlBaseParser.LATERAL, 0)
def query(self):
return self.getTypedRuleContext(SqlBaseParser.QueryContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterLateral" ):
listener.enterLateral(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitLateral" ):
listener.exitLateral(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitLateral" ):
return visitor.visitLateral(self)
else:
return visitor.visitChildren(self)
class TableNameContext(RelationPrimaryContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.RelationPrimaryContext
super().__init__(parser)
self.copyFrom(ctx)
def qualifiedName(self):
return self.getTypedRuleContext(SqlBaseParser.QualifiedNameContext,0)
def queryPeriod(self):
return self.getTypedRuleContext(SqlBaseParser.QueryPeriodContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterTableName" ):
listener.enterTableName(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitTableName" ):
listener.exitTableName(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitTableName" ):
return visitor.visitTableName(self)
else:
return visitor.visitChildren(self)
    def relationPrimary(self):
        """Parse rule ``relationPrimary``.

        Five labelled alternatives, chosen by adaptive prediction
        (decision 202); the generic context is re-labelled to the matching
        subclass before parsing that alternative:
          1. TableName:            qualifiedName queryPeriod?
          2. SubqueryRelation:     T__1 query T__2
          3. Unnest:               UNNEST T__1 expression (T__3 expression)* T__2 (WITH ORDINALITY)?
          4. Lateral:              LATERAL T__1 query T__2
          5. ParenthesizedRelation: T__1 relation T__2
        (T__1/T__2/T__3 are anonymous literals — presumably '(' / ')' / ',',
        TODO confirm against the grammar.)
        """
        localctx = SqlBaseParser.RelationPrimaryContext(self, self._ctx, self.state)
        self.enterRule(localctx, 90, self.RULE_relationPrimary)
        self._la = 0 # Token type
        try:
            self.state = 1564
            self._errHandler.sync(self)
            la_ = self._interp.adaptivePredict(self._input,202,self._ctx)
            if la_ == 1:
                # Alternative 1: table reference.
                localctx = SqlBaseParser.TableNameContext(self, localctx)
                self.enterOuterAlt(localctx, 1)
                self.state = 1532
                self.qualifiedName()
                self.state = 1534
                self._errHandler.sync(self)
                la_ = self._interp.adaptivePredict(self._input,199,self._ctx)
                if la_ == 1:
                    # Optional temporal/version clause.
                    self.state = 1533
                    self.queryPeriod()
            elif la_ == 2:
                # Alternative 2: parenthesized subquery.
                localctx = SqlBaseParser.SubqueryRelationContext(self, localctx)
                self.enterOuterAlt(localctx, 2)
                self.state = 1536
                self.match(SqlBaseParser.T__1)
                self.state = 1537
                self.query()
                self.state = 1538
                self.match(SqlBaseParser.T__2)
            elif la_ == 3:
                # Alternative 3: UNNEST over one or more expressions.
                localctx = SqlBaseParser.UnnestContext(self, localctx)
                self.enterOuterAlt(localctx, 3)
                self.state = 1540
                self.match(SqlBaseParser.UNNEST)
                self.state = 1541
                self.match(SqlBaseParser.T__1)
                self.state = 1542
                self.expression()
                self.state = 1547
                self._errHandler.sync(self)
                _la = self._input.LA(1)
                # Remaining separator-delimited expressions.
                while _la==SqlBaseParser.T__3:
                    self.state = 1543
                    self.match(SqlBaseParser.T__3)
                    self.state = 1544
                    self.expression()
                    self.state = 1549
                    self._errHandler.sync(self)
                    _la = self._input.LA(1)
                self.state = 1550
                self.match(SqlBaseParser.T__2)
                self.state = 1553
                self._errHandler.sync(self)
                la_ = self._interp.adaptivePredict(self._input,201,self._ctx)
                if la_ == 1:
                    # Optional WITH ORDINALITY suffix.
                    self.state = 1551
                    self.match(SqlBaseParser.WITH)
                    self.state = 1552
                    self.match(SqlBaseParser.ORDINALITY)
            elif la_ == 4:
                # Alternative 4: LATERAL ( query ).
                localctx = SqlBaseParser.LateralContext(self, localctx)
                self.enterOuterAlt(localctx, 4)
                self.state = 1555
                self.match(SqlBaseParser.LATERAL)
                self.state = 1556
                self.match(SqlBaseParser.T__1)
                self.state = 1557
                self.query()
                self.state = 1558
                self.match(SqlBaseParser.T__2)
            elif la_ == 5:
                # Alternative 5: parenthesized relation.
                localctx = SqlBaseParser.ParenthesizedRelationContext(self, localctx)
                self.enterOuterAlt(localctx, 5)
                self.state = 1560
                self.match(SqlBaseParser.T__1)
                self.state = 1561
                self.relation(0)
                self.state = 1562
                self.match(SqlBaseParser.T__2)
        except RecognitionException as re:
            # Record the error on the context and resynchronize the stream.
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
class ExpressionContext(ParserRuleContext):
__slots__ = 'parser'
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def booleanExpression(self):
return self.getTypedRuleContext(SqlBaseParser.BooleanExpressionContext,0)
def getRuleIndex(self):
return SqlBaseParser.RULE_expression
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterExpression" ):
listener.enterExpression(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitExpression" ):
listener.exitExpression(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitExpression" ):
return visitor.visitExpression(self)
else:
return visitor.visitChildren(self)
    def expression(self):
        """Parse rule ``expression``: delegates to ``booleanExpression`` at precedence 0."""
        localctx = SqlBaseParser.ExpressionContext(self, self._ctx, self.state)
        self.enterRule(localctx, 92, self.RULE_expression)
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 1566
            self.booleanExpression(0)
        except RecognitionException as re:
            # Record the error on the context and resynchronize the stream.
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
class BooleanExpressionContext(ParserRuleContext):
__slots__ = 'parser'
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def getRuleIndex(self):
return SqlBaseParser.RULE_booleanExpression
def copyFrom(self, ctx:ParserRuleContext):
super().copyFrom(ctx)
class LogicalNotContext(BooleanExpressionContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.BooleanExpressionContext
super().__init__(parser)
self.copyFrom(ctx)
def NOT(self):
return self.getToken(SqlBaseParser.NOT, 0)
def booleanExpression(self):
return self.getTypedRuleContext(SqlBaseParser.BooleanExpressionContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterLogicalNot" ):
listener.enterLogicalNot(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitLogicalNot" ):
listener.exitLogicalNot(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitLogicalNot" ):
return visitor.visitLogicalNot(self)
else:
return visitor.visitChildren(self)
class PredicatedContext(BooleanExpressionContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.BooleanExpressionContext
super().__init__(parser)
self._valueExpression = None # ValueExpressionContext
self.copyFrom(ctx)
def valueExpression(self):
return self.getTypedRuleContext(SqlBaseParser.ValueExpressionContext,0)
def predicate(self):
return self.getTypedRuleContext(SqlBaseParser.PredicateContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterPredicated" ):
listener.enterPredicated(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitPredicated" ):
listener.exitPredicated(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitPredicated" ):
return visitor.visitPredicated(self)
else:
return visitor.visitChildren(self)
class And_Context(BooleanExpressionContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.BooleanExpressionContext
super().__init__(parser)
self.copyFrom(ctx)
def booleanExpression(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(SqlBaseParser.BooleanExpressionContext)
else:
return self.getTypedRuleContext(SqlBaseParser.BooleanExpressionContext,i)
def AND(self):
return self.getToken(SqlBaseParser.AND, 0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterAnd_" ):
listener.enterAnd_(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitAnd_" ):
listener.exitAnd_(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitAnd_" ):
return visitor.visitAnd_(self)
else:
return visitor.visitChildren(self)
class Or_Context(BooleanExpressionContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.BooleanExpressionContext
super().__init__(parser)
self.copyFrom(ctx)
def booleanExpression(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(SqlBaseParser.BooleanExpressionContext)
else:
return self.getTypedRuleContext(SqlBaseParser.BooleanExpressionContext,i)
def OR(self):
return self.getToken(SqlBaseParser.OR, 0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterOr_" ):
listener.enterOr_(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitOr_" ):
listener.exitOr_(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitOr_" ):
return visitor.visitOr_(self)
else:
return visitor.visitChildren(self)
    def booleanExpression(self, _p:int=0):
        """Parse the left-recursive rule ``booleanExpression``.

        Primary alternatives (chosen by the lookahead token):
          * Predicated: ``valueExpression predicate?``
          * LogicalNot: ``NOT booleanExpression``
        Then a precedence-climbing loop folds in left-recursive
        ``AND`` (precedence 2) and ``OR`` (precedence 1) operators while the
        incoming precedence ``_p`` allows it.
        """
        _parentctx = self._ctx
        _parentState = self.state
        localctx = SqlBaseParser.BooleanExpressionContext(self, self._ctx, _parentState)
        _prevctx = localctx
        _startState = 94
        self.enterRecursionRule(localctx, 94, self.RULE_booleanExpression, _p)
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 1575
            self._errHandler.sync(self)
            token = self._input.LA(1)
            # Any token that can start a value expression selects the
            # Predicated alternative (the large generated follow set below).
            if token in [SqlBaseParser.T__1, SqlBaseParser.ADD, SqlBaseParser.ADMIN, SqlBaseParser.AFTER, SqlBaseParser.ALL, SqlBaseParser.ANALYZE, SqlBaseParser.ANY, SqlBaseParser.ARRAY, SqlBaseParser.ASC, SqlBaseParser.AT, SqlBaseParser.AUTHORIZATION, SqlBaseParser.BERNOULLI, SqlBaseParser.CALL, SqlBaseParser.CASCADE, SqlBaseParser.CASE, SqlBaseParser.CAST, SqlBaseParser.CATALOGS, SqlBaseParser.COLUMN, SqlBaseParser.COLUMNS, SqlBaseParser.COMMENT, SqlBaseParser.COMMIT, SqlBaseParser.COMMITTED, SqlBaseParser.COUNT, SqlBaseParser.CURRENT, SqlBaseParser.CURRENT_CATALOG, SqlBaseParser.CURRENT_DATE, SqlBaseParser.CURRENT_PATH, SqlBaseParser.CURRENT_SCHEMA, SqlBaseParser.CURRENT_TIME, SqlBaseParser.CURRENT_TIMESTAMP, SqlBaseParser.CURRENT_USER, SqlBaseParser.DATA, SqlBaseParser.DATE, SqlBaseParser.DAY, SqlBaseParser.DEFINER, SqlBaseParser.DENY, SqlBaseParser.DESC, SqlBaseParser.DEFINE, SqlBaseParser.DISTRIBUTED, SqlBaseParser.DOUBLE, SqlBaseParser.EMPTY, SqlBaseParser.ERROR, SqlBaseParser.EXCLUDING, SqlBaseParser.EXISTS, SqlBaseParser.EXPLAIN, SqlBaseParser.EXTRACT, SqlBaseParser.FALSE, SqlBaseParser.FETCH, SqlBaseParser.FILTER, SqlBaseParser.FINAL, SqlBaseParser.FIRST, SqlBaseParser.FOLLOWING, SqlBaseParser.FORMAT, SqlBaseParser.FUNCTIONS, SqlBaseParser.GRANT, SqlBaseParser.GRANTED, SqlBaseParser.GRANTS, SqlBaseParser.GRAPHVIZ, SqlBaseParser.GROUPING, SqlBaseParser.GROUPS, SqlBaseParser.HOUR, SqlBaseParser.IF, SqlBaseParser.IGNORE, SqlBaseParser.INCLUDING, SqlBaseParser.INITIAL, SqlBaseParser.INPUT, SqlBaseParser.INTERVAL, SqlBaseParser.INVOKER, SqlBaseParser.IO, SqlBaseParser.ISOLATION, SqlBaseParser.JSON, SqlBaseParser.LAST, SqlBaseParser.LATERAL, SqlBaseParser.LEVEL, SqlBaseParser.LIMIT, SqlBaseParser.LISTAGG, SqlBaseParser.LOCAL, SqlBaseParser.LOCALTIME, SqlBaseParser.LOCALTIMESTAMP, SqlBaseParser.LOGICAL, SqlBaseParser.MAP, SqlBaseParser.MATCH, SqlBaseParser.MATCHED, SqlBaseParser.MATCHES, SqlBaseParser.MATCH_RECOGNIZE, SqlBaseParser.MATERIALIZED, SqlBaseParser.MEASURES, 
                SqlBaseParser.MERGE, SqlBaseParser.MINUTE, SqlBaseParser.MONTH, SqlBaseParser.NEXT, SqlBaseParser.NFC, SqlBaseParser.NFD, SqlBaseParser.NFKC, SqlBaseParser.NFKD, SqlBaseParser.NO, SqlBaseParser.NONE, SqlBaseParser.NORMALIZE, SqlBaseParser.NULL, SqlBaseParser.NULLIF, SqlBaseParser.NULLS, SqlBaseParser.OFFSET, SqlBaseParser.OMIT, SqlBaseParser.OF, SqlBaseParser.ONE, SqlBaseParser.ONLY, SqlBaseParser.OPTION, SqlBaseParser.ORDINALITY, SqlBaseParser.OUTPUT, SqlBaseParser.OVER, SqlBaseParser.OVERFLOW, SqlBaseParser.PARTITION, SqlBaseParser.PARTITIONS, SqlBaseParser.PAST, SqlBaseParser.PATH, SqlBaseParser.PATTERN, SqlBaseParser.PER, SqlBaseParser.PERMUTE, SqlBaseParser.POSITION, SqlBaseParser.PRECEDING, SqlBaseParser.PRECISION, SqlBaseParser.PRIVILEGES, SqlBaseParser.PROPERTIES, SqlBaseParser.RANGE, SqlBaseParser.READ, SqlBaseParser.REFRESH, SqlBaseParser.RENAME, SqlBaseParser.REPEATABLE, SqlBaseParser.REPLACE, SqlBaseParser.RESET, SqlBaseParser.RESPECT, SqlBaseParser.RESTRICT, SqlBaseParser.REVOKE, SqlBaseParser.ROLE, SqlBaseParser.ROLES, SqlBaseParser.ROLLBACK, SqlBaseParser.ROW, SqlBaseParser.ROWS, SqlBaseParser.RUNNING, SqlBaseParser.SCHEMA, SqlBaseParser.SCHEMAS, SqlBaseParser.SECOND, SqlBaseParser.SECURITY, SqlBaseParser.SEEK, SqlBaseParser.SERIALIZABLE, SqlBaseParser.SESSION, SqlBaseParser.SET, SqlBaseParser.SETS, SqlBaseParser.SHOW, SqlBaseParser.SOME, SqlBaseParser.START, SqlBaseParser.STATS, SqlBaseParser.SUBSET, SqlBaseParser.SUBSTRING, SqlBaseParser.SYSTEM, SqlBaseParser.TABLES, SqlBaseParser.TABLESAMPLE, SqlBaseParser.TEXT, SqlBaseParser.TIES, SqlBaseParser.TIME, SqlBaseParser.TIMESTAMP, SqlBaseParser.TO, SqlBaseParser.TRANSACTION, SqlBaseParser.TRUE, SqlBaseParser.TRUNCATE, SqlBaseParser.TRY_CAST, SqlBaseParser.TYPE, SqlBaseParser.UNBOUNDED, SqlBaseParser.UNCOMMITTED, SqlBaseParser.UNMATCHED, SqlBaseParser.UPDATE, SqlBaseParser.USE, SqlBaseParser.USER, SqlBaseParser.VALIDATE, SqlBaseParser.VERBOSE, SqlBaseParser.VERSION, SqlBaseParser.VIEW, 
                SqlBaseParser.WINDOW, SqlBaseParser.WITHIN, SqlBaseParser.WITHOUT, SqlBaseParser.WORK, SqlBaseParser.WRITE, SqlBaseParser.YEAR, SqlBaseParser.ZONE, SqlBaseParser.PLUS, SqlBaseParser.MINUS, SqlBaseParser.QUESTION_MARK, SqlBaseParser.STRING, SqlBaseParser.UNICODE_STRING, SqlBaseParser.BINARY_LITERAL, SqlBaseParser.INTEGER_VALUE, SqlBaseParser.DECIMAL_VALUE, SqlBaseParser.DOUBLE_VALUE, SqlBaseParser.IDENTIFIER, SqlBaseParser.DIGIT_IDENTIFIER, SqlBaseParser.QUOTED_IDENTIFIER, SqlBaseParser.BACKQUOTED_IDENTIFIER]:
                localctx = SqlBaseParser.PredicatedContext(self, localctx)
                self._ctx = localctx
                _prevctx = localctx
                self.state = 1569
                localctx._valueExpression = self.valueExpression(0)
                self.state = 1571
                self._errHandler.sync(self)
                la_ = self._interp.adaptivePredict(self._input,203,self._ctx)
                if la_ == 1:
                    # Optional trailing predicate (IS NULL, IN, BETWEEN, ...)
                    # applied to the value expression just parsed.
                    self.state = 1570
                    self.predicate(localctx._valueExpression)
            elif token in [SqlBaseParser.NOT]:
                # LogicalNot alternative: NOT booleanExpression.
                localctx = SqlBaseParser.LogicalNotContext(self, localctx)
                self._ctx = localctx
                _prevctx = localctx
                self.state = 1573
                self.match(SqlBaseParser.NOT)
                self.state = 1574
                self.booleanExpression(3)
            else:
                raise NoViableAltException(self)
            self._ctx.stop = self._input.LT(-1)
            # Left-recursive loop: keep absorbing AND/OR operators while
            # adaptive prediction (decision 206) says one follows.
            self.state = 1585
            self._errHandler.sync(self)
            _alt = self._interp.adaptivePredict(self._input,206,self._ctx)
            while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
                if _alt==1:
                    if self._parseListeners is not None:
                        self.triggerExitRuleEvent()
                    _prevctx = localctx
                    self.state = 1583
                    self._errHandler.sync(self)
                    la_ = self._interp.adaptivePredict(self._input,205,self._ctx)
                    if la_ == 1:
                        # AND binds tighter (precedence 2).
                        localctx = SqlBaseParser.And_Context(self, SqlBaseParser.BooleanExpressionContext(self, _parentctx, _parentState))
                        self.pushNewRecursionContext(localctx, _startState, self.RULE_booleanExpression)
                        self.state = 1577
                        if not self.precpred(self._ctx, 2):
                            from antlr4.error.Errors import (
                                FailedPredicateException,
                            )
                            raise FailedPredicateException(self, "self.precpred(self._ctx, 2)")
                        self.state = 1578
                        self.match(SqlBaseParser.AND)
                        self.state = 1579
                        self.booleanExpression(3)
                    elif la_ == 2:
                        # OR binds looser (precedence 1).
                        localctx = SqlBaseParser.Or_Context(self, SqlBaseParser.BooleanExpressionContext(self, _parentctx, _parentState))
                        self.pushNewRecursionContext(localctx, _startState, self.RULE_booleanExpression)
                        self.state = 1580
                        if not self.precpred(self._ctx, 1):
                            from antlr4.error.Errors import (
                                FailedPredicateException,
                            )
                            raise FailedPredicateException(self, "self.precpred(self._ctx, 1)")
                        self.state = 1581
                        self.match(SqlBaseParser.OR)
                        self.state = 1582
                        self.booleanExpression(2)
                self.state = 1587
                self._errHandler.sync(self)
                _alt = self._interp.adaptivePredict(self._input,206,self._ctx)
        except RecognitionException as re:
            # Record the error on the context and resynchronize the stream.
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.unrollRecursionContexts(_parentctx)
        return localctx
class PredicateContext(ParserRuleContext):
    """Base parse-tree context for the ``predicate`` rule.

    Concrete alternatives (Comparison, Between, InList, ...) subclass this
    context and take over its children via ``copyFrom``.
    """

    __slots__ = 'parser'

    def __init__(self, parser, parent: ParserRuleContext = None, invokingState: int = -1, value: ParserRuleContext = None):
        super().__init__(parent, invokingState)
        self.parser = parser
        # Left-hand operand the predicate applies to, supplied by the caller.
        self.value = None
        self.value = value

    def getRuleIndex(self):
        return SqlBaseParser.RULE_predicate

    def copyFrom(self, ctx: ParserRuleContext):
        # Carry over the rule-local `value` attribute along with the children.
        super().copyFrom(ctx)
        self.value = ctx.value
class ComparisonContext(PredicateContext):
    """Context for the ``Comparison`` labeled alternative of ``predicate``."""

    def __init__(self, parser, ctx: ParserRuleContext):  # ctx is a SqlBaseParser.PredicateContext
        super().__init__(parser)
        self.right = None  # ValueExpressionContext: right-hand operand
        self.copyFrom(ctx)

    def comparisonOperator(self):
        return self.getTypedRuleContext(SqlBaseParser.ComparisonOperatorContext, 0)

    def valueExpression(self):
        return self.getTypedRuleContext(SqlBaseParser.ValueExpressionContext, 0)

    def enterRule(self, listener: ParseTreeListener):
        if hasattr(listener, "enterComparison"):
            listener.enterComparison(self)

    def exitRule(self, listener: ParseTreeListener):
        if hasattr(listener, "exitComparison"):
            listener.exitComparison(self)

    def accept(self, visitor: ParseTreeVisitor):
        if hasattr(visitor, "visitComparison"):
            return visitor.visitComparison(self)
        return visitor.visitChildren(self)
class LikeContext(PredicateContext):
    """Context for the ``Like`` labeled alternative of ``predicate``."""

    def __init__(self, parser, ctx: ParserRuleContext):  # ctx is a SqlBaseParser.PredicateContext
        super().__init__(parser)
        self.pattern = None  # ValueExpressionContext: LIKE pattern
        self.escape = None   # ValueExpressionContext: optional ESCAPE expression
        self.copyFrom(ctx)

    def LIKE(self):
        return self.getToken(SqlBaseParser.LIKE, 0)

    def valueExpression(self, i: int = None):
        if i is None:
            return self.getTypedRuleContexts(SqlBaseParser.ValueExpressionContext)
        return self.getTypedRuleContext(SqlBaseParser.ValueExpressionContext, i)

    def NOT(self):
        return self.getToken(SqlBaseParser.NOT, 0)

    def ESCAPE(self):
        return self.getToken(SqlBaseParser.ESCAPE, 0)

    def enterRule(self, listener: ParseTreeListener):
        if hasattr(listener, "enterLike"):
            listener.enterLike(self)

    def exitRule(self, listener: ParseTreeListener):
        if hasattr(listener, "exitLike"):
            listener.exitLike(self)

    def accept(self, visitor: ParseTreeVisitor):
        if hasattr(visitor, "visitLike"):
            return visitor.visitLike(self)
        return visitor.visitChildren(self)
class InSubqueryContext(PredicateContext):
    """Context for the ``InSubquery`` labeled alternative of ``predicate``."""

    def __init__(self, parser, ctx: ParserRuleContext):  # ctx is a SqlBaseParser.PredicateContext
        super().__init__(parser)
        self.copyFrom(ctx)

    def IN(self):
        return self.getToken(SqlBaseParser.IN, 0)

    def query(self):
        return self.getTypedRuleContext(SqlBaseParser.QueryContext, 0)

    def NOT(self):
        return self.getToken(SqlBaseParser.NOT, 0)

    def enterRule(self, listener: ParseTreeListener):
        if hasattr(listener, "enterInSubquery"):
            listener.enterInSubquery(self)

    def exitRule(self, listener: ParseTreeListener):
        if hasattr(listener, "exitInSubquery"):
            listener.exitInSubquery(self)

    def accept(self, visitor: ParseTreeVisitor):
        if hasattr(visitor, "visitInSubquery"):
            return visitor.visitInSubquery(self)
        return visitor.visitChildren(self)
class DistinctFromContext(PredicateContext):
    """Context for the ``DistinctFrom`` labeled alternative of ``predicate``."""

    def __init__(self, parser, ctx: ParserRuleContext):  # ctx is a SqlBaseParser.PredicateContext
        super().__init__(parser)
        self.right = None  # ValueExpressionContext: right-hand operand
        self.copyFrom(ctx)

    def IS(self):
        return self.getToken(SqlBaseParser.IS, 0)

    def DISTINCT(self):
        return self.getToken(SqlBaseParser.DISTINCT, 0)

    def FROM(self):
        return self.getToken(SqlBaseParser.FROM, 0)

    def valueExpression(self):
        return self.getTypedRuleContext(SqlBaseParser.ValueExpressionContext, 0)

    def NOT(self):
        return self.getToken(SqlBaseParser.NOT, 0)

    def enterRule(self, listener: ParseTreeListener):
        if hasattr(listener, "enterDistinctFrom"):
            listener.enterDistinctFrom(self)

    def exitRule(self, listener: ParseTreeListener):
        if hasattr(listener, "exitDistinctFrom"):
            listener.exitDistinctFrom(self)

    def accept(self, visitor: ParseTreeVisitor):
        if hasattr(visitor, "visitDistinctFrom"):
            return visitor.visitDistinctFrom(self)
        return visitor.visitChildren(self)
class InListContext(PredicateContext):
    """Context for the ``InList`` labeled alternative of ``predicate``."""

    def __init__(self, parser, ctx: ParserRuleContext):  # ctx is a SqlBaseParser.PredicateContext
        super().__init__(parser)
        self.copyFrom(ctx)

    def IN(self):
        return self.getToken(SqlBaseParser.IN, 0)

    def expression(self, i: int = None):
        if i is None:
            return self.getTypedRuleContexts(SqlBaseParser.ExpressionContext)
        return self.getTypedRuleContext(SqlBaseParser.ExpressionContext, i)

    def NOT(self):
        return self.getToken(SqlBaseParser.NOT, 0)

    def enterRule(self, listener: ParseTreeListener):
        if hasattr(listener, "enterInList"):
            listener.enterInList(self)

    def exitRule(self, listener: ParseTreeListener):
        if hasattr(listener, "exitInList"):
            listener.exitInList(self)

    def accept(self, visitor: ParseTreeVisitor):
        if hasattr(visitor, "visitInList"):
            return visitor.visitInList(self)
        return visitor.visitChildren(self)
class NullPredicateContext(PredicateContext):
    """Context for the ``NullPredicate`` labeled alternative of ``predicate``."""

    def __init__(self, parser, ctx: ParserRuleContext):  # ctx is a SqlBaseParser.PredicateContext
        super().__init__(parser)
        self.copyFrom(ctx)

    def IS(self):
        return self.getToken(SqlBaseParser.IS, 0)

    def NULL(self):
        return self.getToken(SqlBaseParser.NULL, 0)

    def NOT(self):
        return self.getToken(SqlBaseParser.NOT, 0)

    def enterRule(self, listener: ParseTreeListener):
        if hasattr(listener, "enterNullPredicate"):
            listener.enterNullPredicate(self)

    def exitRule(self, listener: ParseTreeListener):
        if hasattr(listener, "exitNullPredicate"):
            listener.exitNullPredicate(self)

    def accept(self, visitor: ParseTreeVisitor):
        if hasattr(visitor, "visitNullPredicate"):
            return visitor.visitNullPredicate(self)
        return visitor.visitChildren(self)
class BetweenContext(PredicateContext):
    """Context for the ``Between`` labeled alternative of ``predicate``."""

    def __init__(self, parser, ctx: ParserRuleContext):  # ctx is a SqlBaseParser.PredicateContext
        super().__init__(parser)
        self.lower = None  # ValueExpressionContext: lower bound
        self.upper = None  # ValueExpressionContext: upper bound
        self.copyFrom(ctx)

    def BETWEEN(self):
        return self.getToken(SqlBaseParser.BETWEEN, 0)

    def AND(self):
        return self.getToken(SqlBaseParser.AND, 0)

    def valueExpression(self, i: int = None):
        if i is None:
            return self.getTypedRuleContexts(SqlBaseParser.ValueExpressionContext)
        return self.getTypedRuleContext(SqlBaseParser.ValueExpressionContext, i)

    def NOT(self):
        return self.getToken(SqlBaseParser.NOT, 0)

    def enterRule(self, listener: ParseTreeListener):
        if hasattr(listener, "enterBetween"):
            listener.enterBetween(self)

    def exitRule(self, listener: ParseTreeListener):
        if hasattr(listener, "exitBetween"):
            listener.exitBetween(self)

    def accept(self, visitor: ParseTreeVisitor):
        if hasattr(visitor, "visitBetween"):
            return visitor.visitBetween(self)
        return visitor.visitChildren(self)
class QuantifiedComparisonContext(PredicateContext):
    """Context for the ``QuantifiedComparison`` labeled alternative of ``predicate``."""

    def __init__(self, parser, ctx: ParserRuleContext):  # ctx is a SqlBaseParser.PredicateContext
        super().__init__(parser)
        self.copyFrom(ctx)

    def comparisonOperator(self):
        return self.getTypedRuleContext(SqlBaseParser.ComparisonOperatorContext, 0)

    def comparisonQuantifier(self):
        return self.getTypedRuleContext(SqlBaseParser.ComparisonQuantifierContext, 0)

    def query(self):
        return self.getTypedRuleContext(SqlBaseParser.QueryContext, 0)

    def enterRule(self, listener: ParseTreeListener):
        if hasattr(listener, "enterQuantifiedComparison"):
            listener.enterQuantifiedComparison(self)

    def exitRule(self, listener: ParseTreeListener):
        if hasattr(listener, "exitQuantifiedComparison"):
            listener.exitQuantifiedComparison(self)

    def accept(self, visitor: ParseTreeVisitor):
        if hasattr(visitor, "visitQuantifiedComparison"):
            return visitor.visitQuantifiedComparison(self)
        return visitor.visitChildren(self)
def predicate(self, value:ParserRuleContext):
    """Parse the ``predicate`` rule (generated by ANTLR — do not edit by hand).

    `value` is the already-parsed left-hand value expression; each labeled
    alternative below re-labels `localctx` with its concrete context class.
    The `la_` branch is chosen by the adaptive ATN prediction (decision 215).
    """
    localctx = SqlBaseParser.PredicateContext(self, self._ctx, self.state, value)
    self.enterRule(localctx, 96, self.RULE_predicate)
    self._la = 0 # Token type
    try:
        self.state = 1649
        self._errHandler.sync(self)
        la_ = self._interp.adaptivePredict(self._input,215,self._ctx)
        if la_ == 1:
            # Alternative 1: comparisonOperator valueExpression
            localctx = SqlBaseParser.ComparisonContext(self, localctx)
            self.enterOuterAlt(localctx, 1)
            self.state = 1588
            self.comparisonOperator()
            self.state = 1589
            localctx.right = self.valueExpression(0)

        elif la_ == 2:
            # Alternative 2: comparisonOperator comparisonQuantifier '(' query ')'
            localctx = SqlBaseParser.QuantifiedComparisonContext(self, localctx)
            self.enterOuterAlt(localctx, 2)
            self.state = 1591
            self.comparisonOperator()
            self.state = 1592
            self.comparisonQuantifier()
            self.state = 1593
            self.match(SqlBaseParser.T__1)
            self.state = 1594
            self.query()
            self.state = 1595
            self.match(SqlBaseParser.T__2)

        elif la_ == 3:
            # Alternative 3: NOT? BETWEEN lower=valueExpression AND upper=valueExpression
            localctx = SqlBaseParser.BetweenContext(self, localctx)
            self.enterOuterAlt(localctx, 3)
            self.state = 1598
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            if _la==SqlBaseParser.NOT:
                # Optional NOT prefix.
                self.state = 1597
                self.match(SqlBaseParser.NOT)

            self.state = 1600
            self.match(SqlBaseParser.BETWEEN)
            self.state = 1601
            localctx.lower = self.valueExpression(0)
            self.state = 1602
            self.match(SqlBaseParser.AND)
            self.state = 1603
            localctx.upper = self.valueExpression(0)

        elif la_ == 4:
            # Alternative 4: NOT? IN '(' expression (',' expression)* ')'
            localctx = SqlBaseParser.InListContext(self, localctx)
            self.enterOuterAlt(localctx, 4)
            self.state = 1606
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            if _la==SqlBaseParser.NOT:
                # Optional NOT prefix.
                self.state = 1605
                self.match(SqlBaseParser.NOT)

            self.state = 1608
            self.match(SqlBaseParser.IN)
            self.state = 1609
            self.match(SqlBaseParser.T__1)
            self.state = 1610
            self.expression()
            self.state = 1615
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            while _la==SqlBaseParser.T__3:
                # Further comma-separated expressions in the IN list.
                self.state = 1611
                self.match(SqlBaseParser.T__3)
                self.state = 1612
                self.expression()
                self.state = 1617
                self._errHandler.sync(self)
                _la = self._input.LA(1)

            self.state = 1618
            self.match(SqlBaseParser.T__2)

        elif la_ == 5:
            # Alternative 5: NOT? IN '(' query ')'
            localctx = SqlBaseParser.InSubqueryContext(self, localctx)
            self.enterOuterAlt(localctx, 5)
            self.state = 1621
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            if _la==SqlBaseParser.NOT:
                # Optional NOT prefix.
                self.state = 1620
                self.match(SqlBaseParser.NOT)

            self.state = 1623
            self.match(SqlBaseParser.IN)
            self.state = 1624
            self.match(SqlBaseParser.T__1)
            self.state = 1625
            self.query()
            self.state = 1626
            self.match(SqlBaseParser.T__2)

        elif la_ == 6:
            # Alternative 6: NOT? LIKE pattern=valueExpression (ESCAPE escape=valueExpression)?
            localctx = SqlBaseParser.LikeContext(self, localctx)
            self.enterOuterAlt(localctx, 6)
            self.state = 1629
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            if _la==SqlBaseParser.NOT:
                # Optional NOT prefix.
                self.state = 1628
                self.match(SqlBaseParser.NOT)

            self.state = 1631
            self.match(SqlBaseParser.LIKE)
            self.state = 1632
            localctx.pattern = self.valueExpression(0)
            self.state = 1635
            self._errHandler.sync(self)
            la_ = self._interp.adaptivePredict(self._input,212,self._ctx)
            if la_ == 1:
                # Optional ESCAPE clause (decision 212).
                self.state = 1633
                self.match(SqlBaseParser.ESCAPE)
                self.state = 1634
                localctx.escape = self.valueExpression(0)

        elif la_ == 7:
            # Alternative 7: IS NOT? NULL
            localctx = SqlBaseParser.NullPredicateContext(self, localctx)
            self.enterOuterAlt(localctx, 7)
            self.state = 1637
            self.match(SqlBaseParser.IS)
            self.state = 1639
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            if _la==SqlBaseParser.NOT:
                # Optional NOT between IS and NULL.
                self.state = 1638
                self.match(SqlBaseParser.NOT)

            self.state = 1641
            self.match(SqlBaseParser.NULL)

        elif la_ == 8:
            # Alternative 8: IS NOT? DISTINCT FROM valueExpression
            localctx = SqlBaseParser.DistinctFromContext(self, localctx)
            self.enterOuterAlt(localctx, 8)
            self.state = 1642
            self.match(SqlBaseParser.IS)
            self.state = 1644
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            if _la==SqlBaseParser.NOT:
                # Optional NOT between IS and DISTINCT.
                self.state = 1643
                self.match(SqlBaseParser.NOT)

            self.state = 1646
            self.match(SqlBaseParser.DISTINCT)
            self.state = 1647
            self.match(SqlBaseParser.FROM)
            self.state = 1648
            localctx.right = self.valueExpression(0)

    except RecognitionException as re:
        # Standard generated error handling: record, report, and resynchronize.
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class ValueExpressionContext(ParserRuleContext):
    """Base parse-tree context for the ``valueExpression`` rule.

    Labeled alternatives (ArithmeticBinary, Concatenation, ...) subclass
    this context and adopt its children via ``copyFrom``.
    """

    __slots__ = 'parser'

    def __init__(self, parser, parent: ParserRuleContext = None, invokingState: int = -1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def getRuleIndex(self):
        return SqlBaseParser.RULE_valueExpression

    def copyFrom(self, ctx: ParserRuleContext):
        super().copyFrom(ctx)
class ValueExpressionDefaultContext(ValueExpressionContext):
    """Context for the ``ValueExpressionDefault`` alternative of ``valueExpression``."""

    def __init__(self, parser, ctx: ParserRuleContext):  # ctx is a SqlBaseParser.ValueExpressionContext
        super().__init__(parser)
        self.copyFrom(ctx)

    def primaryExpression(self):
        return self.getTypedRuleContext(SqlBaseParser.PrimaryExpressionContext, 0)

    def enterRule(self, listener: ParseTreeListener):
        if hasattr(listener, "enterValueExpressionDefault"):
            listener.enterValueExpressionDefault(self)

    def exitRule(self, listener: ParseTreeListener):
        if hasattr(listener, "exitValueExpressionDefault"):
            listener.exitValueExpressionDefault(self)

    def accept(self, visitor: ParseTreeVisitor):
        if hasattr(visitor, "visitValueExpressionDefault"):
            return visitor.visitValueExpressionDefault(self)
        return visitor.visitChildren(self)
class ConcatenationContext(ValueExpressionContext):
    """Context for the ``Concatenation`` alternative of ``valueExpression``."""

    def __init__(self, parser, ctx: ParserRuleContext):  # ctx is a SqlBaseParser.ValueExpressionContext
        super().__init__(parser)
        self.left = None   # ValueExpressionContext: left operand
        self.right = None  # ValueExpressionContext: right operand
        self.copyFrom(ctx)

    def CONCAT(self):
        return self.getToken(SqlBaseParser.CONCAT, 0)

    def valueExpression(self, i: int = None):
        if i is None:
            return self.getTypedRuleContexts(SqlBaseParser.ValueExpressionContext)
        return self.getTypedRuleContext(SqlBaseParser.ValueExpressionContext, i)

    def enterRule(self, listener: ParseTreeListener):
        if hasattr(listener, "enterConcatenation"):
            listener.enterConcatenation(self)

    def exitRule(self, listener: ParseTreeListener):
        if hasattr(listener, "exitConcatenation"):
            listener.exitConcatenation(self)

    def accept(self, visitor: ParseTreeVisitor):
        if hasattr(visitor, "visitConcatenation"):
            return visitor.visitConcatenation(self)
        return visitor.visitChildren(self)
class ArithmeticBinaryContext(ValueExpressionContext):
    """Context for the ``ArithmeticBinary`` alternative of ``valueExpression``."""

    def __init__(self, parser, ctx: ParserRuleContext):  # ctx is a SqlBaseParser.ValueExpressionContext
        super().__init__(parser)
        self.left = None      # ValueExpressionContext: left operand
        self.operator = None  # Token: one of * / % + -
        self.right = None     # ValueExpressionContext: right operand
        self.copyFrom(ctx)

    def valueExpression(self, i: int = None):
        if i is None:
            return self.getTypedRuleContexts(SqlBaseParser.ValueExpressionContext)
        return self.getTypedRuleContext(SqlBaseParser.ValueExpressionContext, i)

    def ASTERISK(self):
        return self.getToken(SqlBaseParser.ASTERISK, 0)

    def SLASH(self):
        return self.getToken(SqlBaseParser.SLASH, 0)

    def PERCENT(self):
        return self.getToken(SqlBaseParser.PERCENT, 0)

    def PLUS(self):
        return self.getToken(SqlBaseParser.PLUS, 0)

    def MINUS(self):
        return self.getToken(SqlBaseParser.MINUS, 0)

    def enterRule(self, listener: ParseTreeListener):
        if hasattr(listener, "enterArithmeticBinary"):
            listener.enterArithmeticBinary(self)

    def exitRule(self, listener: ParseTreeListener):
        if hasattr(listener, "exitArithmeticBinary"):
            listener.exitArithmeticBinary(self)

    def accept(self, visitor: ParseTreeVisitor):
        if hasattr(visitor, "visitArithmeticBinary"):
            return visitor.visitArithmeticBinary(self)
        return visitor.visitChildren(self)
class ArithmeticUnaryContext(ValueExpressionContext):
    """Context for the ``ArithmeticUnary`` alternative of ``valueExpression``."""

    def __init__(self, parser, ctx: ParserRuleContext):  # ctx is a SqlBaseParser.ValueExpressionContext
        super().__init__(parser)
        self.operator = None  # Token: unary + or -
        self.copyFrom(ctx)

    def valueExpression(self):
        return self.getTypedRuleContext(SqlBaseParser.ValueExpressionContext, 0)

    def MINUS(self):
        return self.getToken(SqlBaseParser.MINUS, 0)

    def PLUS(self):
        return self.getToken(SqlBaseParser.PLUS, 0)

    def enterRule(self, listener: ParseTreeListener):
        if hasattr(listener, "enterArithmeticUnary"):
            listener.enterArithmeticUnary(self)

    def exitRule(self, listener: ParseTreeListener):
        if hasattr(listener, "exitArithmeticUnary"):
            listener.exitArithmeticUnary(self)

    def accept(self, visitor: ParseTreeVisitor):
        if hasattr(visitor, "visitArithmeticUnary"):
            return visitor.visitArithmeticUnary(self)
        return visitor.visitChildren(self)
class AtTimeZoneContext(ValueExpressionContext):
    """Context for the ``AtTimeZone`` alternative of ``valueExpression``."""

    def __init__(self, parser, ctx: ParserRuleContext):  # ctx is a SqlBaseParser.ValueExpressionContext
        super().__init__(parser)
        self.copyFrom(ctx)

    def valueExpression(self):
        return self.getTypedRuleContext(SqlBaseParser.ValueExpressionContext, 0)

    def AT(self):
        return self.getToken(SqlBaseParser.AT, 0)

    def timeZoneSpecifier(self):
        return self.getTypedRuleContext(SqlBaseParser.TimeZoneSpecifierContext, 0)

    def enterRule(self, listener: ParseTreeListener):
        if hasattr(listener, "enterAtTimeZone"):
            listener.enterAtTimeZone(self)

    def exitRule(self, listener: ParseTreeListener):
        if hasattr(listener, "exitAtTimeZone"):
            listener.exitAtTimeZone(self)

    def accept(self, visitor: ParseTreeVisitor):
        if hasattr(visitor, "visitAtTimeZone"):
            return visitor.visitAtTimeZone(self)
        return visitor.visitChildren(self)
def valueExpression(self, _p:int=0):
    """Parse the left-recursive ``valueExpression`` rule (generated by ANTLR).

    `_p` is the minimum operator precedence for this recursion level; the
    trailing while-loop repeatedly extends the tree with binary operators
    (* / %, + -, ||) and AT <timeZoneSpecifier> suffixes while the
    `precpred` predicates allow it (decisions 216-218).
    """
    _parentctx = self._ctx
    _parentState = self.state
    localctx = SqlBaseParser.ValueExpressionContext(self, self._ctx, _parentState)
    _prevctx = localctx
    _startState = 98
    self.enterRecursionRule(localctx, 98, self.RULE_valueExpression, _p)
    self._la = 0 # Token type
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 1655
        self._errHandler.sync(self)
        la_ = self._interp.adaptivePredict(self._input,216,self._ctx)
        if la_ == 1:
            # Base alternative: a primaryExpression.
            localctx = SqlBaseParser.ValueExpressionDefaultContext(self, localctx)
            self._ctx = localctx
            _prevctx = localctx

            self.state = 1652
            self.primaryExpression(0)

        elif la_ == 2:
            # Base alternative: unary +/- applied to a valueExpression.
            localctx = SqlBaseParser.ArithmeticUnaryContext(self, localctx)
            self._ctx = localctx
            _prevctx = localctx
            self.state = 1653
            localctx.operator = self._input.LT(1)
            _la = self._input.LA(1)
            if not(_la==SqlBaseParser.PLUS or _la==SqlBaseParser.MINUS):
                # Not a valid unary operator token: let the error handler recover.
                localctx.operator = self._errHandler.recoverInline(self)
            else:
                self._errHandler.reportMatch(self)
                self.consume()
            self.state = 1654
            self.valueExpression(4)

        self._ctx.stop = self._input.LT(-1)
        self.state = 1671
        self._errHandler.sync(self)
        _alt = self._interp.adaptivePredict(self._input,218,self._ctx)
        while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
            # Keep extending the expression with left-recursive suffixes.
            if _alt==1:
                if self._parseListeners is not None:
                    self.triggerExitRuleEvent()
                _prevctx = localctx
                self.state = 1669
                self._errHandler.sync(self)
                la_ = self._interp.adaptivePredict(self._input,217,self._ctx)
                if la_ == 1:
                    # Multiplicative operator: * / % (precedence 3).
                    localctx = SqlBaseParser.ArithmeticBinaryContext(self, SqlBaseParser.ValueExpressionContext(self, _parentctx, _parentState))
                    localctx.left = _prevctx
                    self.pushNewRecursionContext(localctx, _startState, self.RULE_valueExpression)
                    self.state = 1657
                    if not self.precpred(self._ctx, 3):
                        from antlr4.error.Errors import (
                            FailedPredicateException,
                        )
                        raise FailedPredicateException(self, "self.precpred(self._ctx, 3)")
                    self.state = 1658
                    localctx.operator = self._input.LT(1)
                    _la = self._input.LA(1)
                    if not(((((_la - 263)) & ~0x3f) == 0 and ((1 << (_la - 263)) & ((1 << (SqlBaseParser.ASTERISK - 263)) | (1 << (SqlBaseParser.SLASH - 263)) | (1 << (SqlBaseParser.PERCENT - 263)))) != 0)):
                        # Bitset membership test failed: recover inline.
                        localctx.operator = self._errHandler.recoverInline(self)
                    else:
                        self._errHandler.reportMatch(self)
                        self.consume()
                    self.state = 1659
                    localctx.right = self.valueExpression(4)

                elif la_ == 2:
                    # Additive operator: + - (precedence 2).
                    localctx = SqlBaseParser.ArithmeticBinaryContext(self, SqlBaseParser.ValueExpressionContext(self, _parentctx, _parentState))
                    localctx.left = _prevctx
                    self.pushNewRecursionContext(localctx, _startState, self.RULE_valueExpression)
                    self.state = 1660
                    if not self.precpred(self._ctx, 2):
                        from antlr4.error.Errors import (
                            FailedPredicateException,
                        )
                        raise FailedPredicateException(self, "self.precpred(self._ctx, 2)")
                    self.state = 1661
                    localctx.operator = self._input.LT(1)
                    _la = self._input.LA(1)
                    if not(_la==SqlBaseParser.PLUS or _la==SqlBaseParser.MINUS):
                        localctx.operator = self._errHandler.recoverInline(self)
                    else:
                        self._errHandler.reportMatch(self)
                        self.consume()
                    self.state = 1662
                    localctx.right = self.valueExpression(3)

                elif la_ == 3:
                    # Concatenation operator || (precedence 1).
                    localctx = SqlBaseParser.ConcatenationContext(self, SqlBaseParser.ValueExpressionContext(self, _parentctx, _parentState))
                    localctx.left = _prevctx
                    self.pushNewRecursionContext(localctx, _startState, self.RULE_valueExpression)
                    self.state = 1663
                    if not self.precpred(self._ctx, 1):
                        from antlr4.error.Errors import (
                            FailedPredicateException,
                        )
                        raise FailedPredicateException(self, "self.precpred(self._ctx, 1)")
                    self.state = 1664
                    self.match(SqlBaseParser.CONCAT)
                    self.state = 1665
                    localctx.right = self.valueExpression(2)

                elif la_ == 4:
                    # AT <timeZoneSpecifier> suffix (precedence 5).
                    localctx = SqlBaseParser.AtTimeZoneContext(self, SqlBaseParser.ValueExpressionContext(self, _parentctx, _parentState))
                    self.pushNewRecursionContext(localctx, _startState, self.RULE_valueExpression)
                    self.state = 1666
                    if not self.precpred(self._ctx, 5):
                        from antlr4.error.Errors import (
                            FailedPredicateException,
                        )
                        raise FailedPredicateException(self, "self.precpred(self._ctx, 5)")
                    self.state = 1667
                    self.match(SqlBaseParser.AT)
                    self.state = 1668
                    self.timeZoneSpecifier()

            self.state = 1673
            self._errHandler.sync(self)
            _alt = self._interp.adaptivePredict(self._input,218,self._ctx)

    except RecognitionException as re:
        # Standard generated error handling: record, report, and resynchronize.
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.unrollRecursionContexts(_parentctx)
    return localctx
class PrimaryExpressionContext(ParserRuleContext):
    """Base parse-tree context for the ``primaryExpression`` rule.

    Labeled alternatives (Dereference, Cast, SimpleCase, ...) subclass this
    context and adopt its children via ``copyFrom``.
    """

    __slots__ = 'parser'

    def __init__(self, parser, parent: ParserRuleContext = None, invokingState: int = -1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def getRuleIndex(self):
        return SqlBaseParser.RULE_primaryExpression

    def copyFrom(self, ctx: ParserRuleContext):
        super().copyFrom(ctx)
class DereferenceContext(PrimaryExpressionContext):
    """Context for the ``Dereference`` alternative of ``primaryExpression``."""

    def __init__(self, parser, ctx: ParserRuleContext):  # ctx is a SqlBaseParser.PrimaryExpressionContext
        super().__init__(parser)
        self.base = None       # PrimaryExpressionContext: expression being dereferenced
        self.fieldName = None  # IdentifierContext: field being accessed
        self.copyFrom(ctx)

    def primaryExpression(self):
        return self.getTypedRuleContext(SqlBaseParser.PrimaryExpressionContext, 0)

    def identifier(self):
        return self.getTypedRuleContext(SqlBaseParser.IdentifierContext, 0)

    def enterRule(self, listener: ParseTreeListener):
        if hasattr(listener, "enterDereference"):
            listener.enterDereference(self)

    def exitRule(self, listener: ParseTreeListener):
        if hasattr(listener, "exitDereference"):
            listener.exitDereference(self)

    def accept(self, visitor: ParseTreeVisitor):
        if hasattr(visitor, "visitDereference"):
            return visitor.visitDereference(self)
        return visitor.visitChildren(self)
class TypeConstructorContext(PrimaryExpressionContext):
    """Context for the ``TypeConstructor`` alternative of ``primaryExpression``."""

    def __init__(self, parser, ctx: ParserRuleContext):  # ctx is a SqlBaseParser.PrimaryExpressionContext
        super().__init__(parser)
        self.copyFrom(ctx)

    def identifier(self):
        return self.getTypedRuleContext(SqlBaseParser.IdentifierContext, 0)

    def string(self):
        return self.getTypedRuleContext(SqlBaseParser.StringContext, 0)

    def DOUBLE(self):
        return self.getToken(SqlBaseParser.DOUBLE, 0)

    def PRECISION(self):
        return self.getToken(SqlBaseParser.PRECISION, 0)

    def enterRule(self, listener: ParseTreeListener):
        if hasattr(listener, "enterTypeConstructor"):
            listener.enterTypeConstructor(self)

    def exitRule(self, listener: ParseTreeListener):
        if hasattr(listener, "exitTypeConstructor"):
            listener.exitTypeConstructor(self)

    def accept(self, visitor: ParseTreeVisitor):
        if hasattr(visitor, "visitTypeConstructor"):
            return visitor.visitTypeConstructor(self)
        return visitor.visitChildren(self)
class SpecialDateTimeFunctionContext(PrimaryExpressionContext):
    """Context for the ``SpecialDateTimeFunction`` alternative of ``primaryExpression``."""

    def __init__(self, parser, ctx: ParserRuleContext):  # ctx is a SqlBaseParser.PrimaryExpressionContext
        super().__init__(parser)
        self.name = None       # Token: the date/time function keyword
        self.precision = None  # Token: optional INTEGER_VALUE precision
        self.copyFrom(ctx)

    def CURRENT_DATE(self):
        return self.getToken(SqlBaseParser.CURRENT_DATE, 0)

    def CURRENT_TIME(self):
        return self.getToken(SqlBaseParser.CURRENT_TIME, 0)

    def INTEGER_VALUE(self):
        return self.getToken(SqlBaseParser.INTEGER_VALUE, 0)

    def CURRENT_TIMESTAMP(self):
        return self.getToken(SqlBaseParser.CURRENT_TIMESTAMP, 0)

    def LOCALTIME(self):
        return self.getToken(SqlBaseParser.LOCALTIME, 0)

    def LOCALTIMESTAMP(self):
        return self.getToken(SqlBaseParser.LOCALTIMESTAMP, 0)

    def enterRule(self, listener: ParseTreeListener):
        if hasattr(listener, "enterSpecialDateTimeFunction"):
            listener.enterSpecialDateTimeFunction(self)

    def exitRule(self, listener: ParseTreeListener):
        if hasattr(listener, "exitSpecialDateTimeFunction"):
            listener.exitSpecialDateTimeFunction(self)

    def accept(self, visitor: ParseTreeVisitor):
        if hasattr(visitor, "visitSpecialDateTimeFunction"):
            return visitor.visitSpecialDateTimeFunction(self)
        return visitor.visitChildren(self)
class SubstringContext(PrimaryExpressionContext):
    """Context for the ``Substring`` alternative of ``primaryExpression``."""

    def __init__(self, parser, ctx: ParserRuleContext):  # ctx is a SqlBaseParser.PrimaryExpressionContext
        super().__init__(parser)
        self.copyFrom(ctx)

    def SUBSTRING(self):
        return self.getToken(SqlBaseParser.SUBSTRING, 0)

    def valueExpression(self, i: int = None):
        if i is None:
            return self.getTypedRuleContexts(SqlBaseParser.ValueExpressionContext)
        return self.getTypedRuleContext(SqlBaseParser.ValueExpressionContext, i)

    def FROM(self):
        return self.getToken(SqlBaseParser.FROM, 0)

    def FOR(self):
        return self.getToken(SqlBaseParser.FOR, 0)

    def enterRule(self, listener: ParseTreeListener):
        if hasattr(listener, "enterSubstring"):
            listener.enterSubstring(self)

    def exitRule(self, listener: ParseTreeListener):
        if hasattr(listener, "exitSubstring"):
            listener.exitSubstring(self)

    def accept(self, visitor: ParseTreeVisitor):
        if hasattr(visitor, "visitSubstring"):
            return visitor.visitSubstring(self)
        return visitor.visitChildren(self)
class CastContext(PrimaryExpressionContext):
    """Context for the ``Cast`` alternative of ``primaryExpression``."""

    def __init__(self, parser, ctx: ParserRuleContext):  # ctx is a SqlBaseParser.PrimaryExpressionContext
        super().__init__(parser)
        self.copyFrom(ctx)

    def CAST(self):
        return self.getToken(SqlBaseParser.CAST, 0)

    def expression(self):
        return self.getTypedRuleContext(SqlBaseParser.ExpressionContext, 0)

    def AS(self):
        return self.getToken(SqlBaseParser.AS, 0)

    def type_(self):
        return self.getTypedRuleContext(SqlBaseParser.Type_Context, 0)

    def TRY_CAST(self):
        return self.getToken(SqlBaseParser.TRY_CAST, 0)

    def enterRule(self, listener: ParseTreeListener):
        if hasattr(listener, "enterCast"):
            listener.enterCast(self)

    def exitRule(self, listener: ParseTreeListener):
        if hasattr(listener, "exitCast"):
            listener.exitCast(self)

    def accept(self, visitor: ParseTreeVisitor):
        if hasattr(visitor, "visitCast"):
            return visitor.visitCast(self)
        return visitor.visitChildren(self)
class ParenthesizedExpressionContext(PrimaryExpressionContext):
    """Context for the ``ParenthesizedExpression`` alternative of ``primaryExpression``."""

    def __init__(self, parser, ctx: ParserRuleContext):  # ctx is a SqlBaseParser.PrimaryExpressionContext
        super().__init__(parser)
        self.copyFrom(ctx)

    def expression(self):
        return self.getTypedRuleContext(SqlBaseParser.ExpressionContext, 0)

    def enterRule(self, listener: ParseTreeListener):
        if hasattr(listener, "enterParenthesizedExpression"):
            listener.enterParenthesizedExpression(self)

    def exitRule(self, listener: ParseTreeListener):
        if hasattr(listener, "exitParenthesizedExpression"):
            listener.exitParenthesizedExpression(self)

    def accept(self, visitor: ParseTreeVisitor):
        if hasattr(visitor, "visitParenthesizedExpression"):
            return visitor.visitParenthesizedExpression(self)
        return visitor.visitChildren(self)
class ParameterContext(PrimaryExpressionContext):
    """Context for the ``Parameter`` alternative of ``primaryExpression``."""

    def __init__(self, parser, ctx: ParserRuleContext):  # ctx is a SqlBaseParser.PrimaryExpressionContext
        super().__init__(parser)
        self.copyFrom(ctx)

    def QUESTION_MARK(self):
        return self.getToken(SqlBaseParser.QUESTION_MARK, 0)

    def enterRule(self, listener: ParseTreeListener):
        if hasattr(listener, "enterParameter"):
            listener.enterParameter(self)

    def exitRule(self, listener: ParseTreeListener):
        if hasattr(listener, "exitParameter"):
            listener.exitParameter(self)

    def accept(self, visitor: ParseTreeVisitor):
        if hasattr(visitor, "visitParameter"):
            return visitor.visitParameter(self)
        return visitor.visitChildren(self)
class NormalizeContext(PrimaryExpressionContext):
    """Context for the ``Normalize`` alternative of ``primaryExpression``."""

    def __init__(self, parser, ctx: ParserRuleContext):  # ctx is a SqlBaseParser.PrimaryExpressionContext
        super().__init__(parser)
        self.copyFrom(ctx)

    def NORMALIZE(self):
        return self.getToken(SqlBaseParser.NORMALIZE, 0)

    def valueExpression(self):
        return self.getTypedRuleContext(SqlBaseParser.ValueExpressionContext, 0)

    def normalForm(self):
        return self.getTypedRuleContext(SqlBaseParser.NormalFormContext, 0)

    def enterRule(self, listener: ParseTreeListener):
        if hasattr(listener, "enterNormalize"):
            listener.enterNormalize(self)

    def exitRule(self, listener: ParseTreeListener):
        if hasattr(listener, "exitNormalize"):
            listener.exitNormalize(self)

    def accept(self, visitor: ParseTreeVisitor):
        if hasattr(visitor, "visitNormalize"):
            return visitor.visitNormalize(self)
        return visitor.visitChildren(self)
class IntervalLiteralContext(PrimaryExpressionContext):
    """Context for the ``IntervalLiteral`` alternative of ``primaryExpression``."""

    def __init__(self, parser, ctx: ParserRuleContext):  # ctx is a SqlBaseParser.PrimaryExpressionContext
        super().__init__(parser)
        self.copyFrom(ctx)

    def interval(self):
        return self.getTypedRuleContext(SqlBaseParser.IntervalContext, 0)

    def enterRule(self, listener: ParseTreeListener):
        if hasattr(listener, "enterIntervalLiteral"):
            listener.enterIntervalLiteral(self)

    def exitRule(self, listener: ParseTreeListener):
        if hasattr(listener, "exitIntervalLiteral"):
            listener.exitIntervalLiteral(self)

    def accept(self, visitor: ParseTreeVisitor):
        if hasattr(visitor, "visitIntervalLiteral"):
            return visitor.visitIntervalLiteral(self)
        return visitor.visitChildren(self)
class NumericLiteralContext(PrimaryExpressionContext):
    """Context for the ``NumericLiteral`` alternative of ``primaryExpression``."""

    def __init__(self, parser, ctx: ParserRuleContext):  # ctx is a SqlBaseParser.PrimaryExpressionContext
        super().__init__(parser)
        self.copyFrom(ctx)

    def number(self):
        return self.getTypedRuleContext(SqlBaseParser.NumberContext, 0)

    def enterRule(self, listener: ParseTreeListener):
        if hasattr(listener, "enterNumericLiteral"):
            listener.enterNumericLiteral(self)

    def exitRule(self, listener: ParseTreeListener):
        if hasattr(listener, "exitNumericLiteral"):
            listener.exitNumericLiteral(self)

    def accept(self, visitor: ParseTreeVisitor):
        if hasattr(visitor, "visitNumericLiteral"):
            return visitor.visitNumericLiteral(self)
        return visitor.visitChildren(self)
class BooleanLiteralContext(PrimaryExpressionContext):
    """Context for the ``BooleanLiteral`` alternative of ``primaryExpression``."""

    def __init__(self, parser, ctx: ParserRuleContext):  # ctx is a SqlBaseParser.PrimaryExpressionContext
        super().__init__(parser)
        self.copyFrom(ctx)

    def booleanValue(self):
        return self.getTypedRuleContext(SqlBaseParser.BooleanValueContext, 0)

    def enterRule(self, listener: ParseTreeListener):
        if hasattr(listener, "enterBooleanLiteral"):
            listener.enterBooleanLiteral(self)

    def exitRule(self, listener: ParseTreeListener):
        if hasattr(listener, "exitBooleanLiteral"):
            listener.exitBooleanLiteral(self)

    def accept(self, visitor: ParseTreeVisitor):
        if hasattr(visitor, "visitBooleanLiteral"):
            return visitor.visitBooleanLiteral(self)
        return visitor.visitChildren(self)
class SimpleCaseContext(PrimaryExpressionContext):
    """Context for the ``SimpleCase`` alternative of ``primaryExpression``."""

    def __init__(self, parser, ctx: ParserRuleContext):  # ctx is a SqlBaseParser.PrimaryExpressionContext
        super().__init__(parser)
        self.operand = None         # ExpressionContext: value being switched on
        self.elseExpression = None  # ExpressionContext: optional ELSE branch
        self.copyFrom(ctx)

    def CASE(self):
        return self.getToken(SqlBaseParser.CASE, 0)

    def END(self):
        return self.getToken(SqlBaseParser.END, 0)

    def expression(self, i: int = None):
        if i is None:
            return self.getTypedRuleContexts(SqlBaseParser.ExpressionContext)
        return self.getTypedRuleContext(SqlBaseParser.ExpressionContext, i)

    def whenClause(self, i: int = None):
        if i is None:
            return self.getTypedRuleContexts(SqlBaseParser.WhenClauseContext)
        return self.getTypedRuleContext(SqlBaseParser.WhenClauseContext, i)

    def ELSE(self):
        return self.getToken(SqlBaseParser.ELSE, 0)

    def enterRule(self, listener: ParseTreeListener):
        if hasattr(listener, "enterSimpleCase"):
            listener.enterSimpleCase(self)

    def exitRule(self, listener: ParseTreeListener):
        if hasattr(listener, "exitSimpleCase"):
            listener.exitSimpleCase(self)

    def accept(self, visitor: ParseTreeVisitor):
        if hasattr(visitor, "visitSimpleCase"):
            return visitor.visitSimpleCase(self)
        return visitor.visitChildren(self)
class ColumnReferenceContext(PrimaryExpressionContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.PrimaryExpressionContext
super().__init__(parser)
self.copyFrom(ctx)
def identifier(self):
return self.getTypedRuleContext(SqlBaseParser.IdentifierContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterColumnReference" ):
listener.enterColumnReference(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitColumnReference" ):
listener.exitColumnReference(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitColumnReference" ):
return visitor.visitColumnReference(self)
else:
return visitor.visitChildren(self)
class NullLiteralContext(PrimaryExpressionContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.PrimaryExpressionContext
super().__init__(parser)
self.copyFrom(ctx)
def NULL(self):
return self.getToken(SqlBaseParser.NULL, 0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterNullLiteral" ):
listener.enterNullLiteral(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitNullLiteral" ):
listener.exitNullLiteral(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitNullLiteral" ):
return visitor.visitNullLiteral(self)
else:
return visitor.visitChildren(self)
class RowConstructorContext(PrimaryExpressionContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.PrimaryExpressionContext
super().__init__(parser)
self.copyFrom(ctx)
def expression(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(SqlBaseParser.ExpressionContext)
else:
return self.getTypedRuleContext(SqlBaseParser.ExpressionContext,i)
def ROW(self):
return self.getToken(SqlBaseParser.ROW, 0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterRowConstructor" ):
listener.enterRowConstructor(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitRowConstructor" ):
listener.exitRowConstructor(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitRowConstructor" ):
return visitor.visitRowConstructor(self)
else:
return visitor.visitChildren(self)
class SubscriptContext(PrimaryExpressionContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.PrimaryExpressionContext
super().__init__(parser)
self.value = None # PrimaryExpressionContext
self.index = None # ValueExpressionContext
self.copyFrom(ctx)
def primaryExpression(self):
return self.getTypedRuleContext(SqlBaseParser.PrimaryExpressionContext,0)
def valueExpression(self):
return self.getTypedRuleContext(SqlBaseParser.ValueExpressionContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterSubscript" ):
listener.enterSubscript(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitSubscript" ):
listener.exitSubscript(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitSubscript" ):
return visitor.visitSubscript(self)
else:
return visitor.visitChildren(self)
class CurrentPathContext(PrimaryExpressionContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.PrimaryExpressionContext
super().__init__(parser)
self.name = None # Token
self.copyFrom(ctx)
def CURRENT_PATH(self):
return self.getToken(SqlBaseParser.CURRENT_PATH, 0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterCurrentPath" ):
listener.enterCurrentPath(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitCurrentPath" ):
listener.exitCurrentPath(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitCurrentPath" ):
return visitor.visitCurrentPath(self)
else:
return visitor.visitChildren(self)
class SubqueryExpressionContext(PrimaryExpressionContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.PrimaryExpressionContext
super().__init__(parser)
self.copyFrom(ctx)
def query(self):
return self.getTypedRuleContext(SqlBaseParser.QueryContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterSubqueryExpression" ):
listener.enterSubqueryExpression(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitSubqueryExpression" ):
listener.exitSubqueryExpression(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitSubqueryExpression" ):
return visitor.visitSubqueryExpression(self)
else:
return visitor.visitChildren(self)
class Lambda_Context(PrimaryExpressionContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.PrimaryExpressionContext
super().__init__(parser)
self.copyFrom(ctx)
def identifier(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(SqlBaseParser.IdentifierContext)
else:
return self.getTypedRuleContext(SqlBaseParser.IdentifierContext,i)
def expression(self):
return self.getTypedRuleContext(SqlBaseParser.ExpressionContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterLambda_" ):
listener.enterLambda_(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitLambda_" ):
listener.exitLambda_(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitLambda_" ):
return visitor.visitLambda_(self)
else:
return visitor.visitChildren(self)
class BinaryLiteralContext(PrimaryExpressionContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.PrimaryExpressionContext
super().__init__(parser)
self.copyFrom(ctx)
def BINARY_LITERAL(self):
return self.getToken(SqlBaseParser.BINARY_LITERAL, 0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterBinaryLiteral" ):
listener.enterBinaryLiteral(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitBinaryLiteral" ):
listener.exitBinaryLiteral(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitBinaryLiteral" ):
return visitor.visitBinaryLiteral(self)
else:
return visitor.visitChildren(self)
class CurrentUserContext(PrimaryExpressionContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.PrimaryExpressionContext
super().__init__(parser)
self.name = None # Token
self.copyFrom(ctx)
def CURRENT_USER(self):
return self.getToken(SqlBaseParser.CURRENT_USER, 0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterCurrentUser" ):
listener.enterCurrentUser(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitCurrentUser" ):
listener.exitCurrentUser(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitCurrentUser" ):
return visitor.visitCurrentUser(self)
else:
return visitor.visitChildren(self)
class MeasureContext(PrimaryExpressionContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.PrimaryExpressionContext
super().__init__(parser)
self.copyFrom(ctx)
def identifier(self):
return self.getTypedRuleContext(SqlBaseParser.IdentifierContext,0)
def over(self):
return self.getTypedRuleContext(SqlBaseParser.OverContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterMeasure" ):
listener.enterMeasure(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitMeasure" ):
listener.exitMeasure(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitMeasure" ):
return visitor.visitMeasure(self)
else:
return visitor.visitChildren(self)
class ExtractContext(PrimaryExpressionContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.PrimaryExpressionContext
super().__init__(parser)
self.copyFrom(ctx)
def EXTRACT(self):
return self.getToken(SqlBaseParser.EXTRACT, 0)
def identifier(self):
return self.getTypedRuleContext(SqlBaseParser.IdentifierContext,0)
def FROM(self):
return self.getToken(SqlBaseParser.FROM, 0)
def valueExpression(self):
return self.getTypedRuleContext(SqlBaseParser.ValueExpressionContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterExtract" ):
listener.enterExtract(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitExtract" ):
listener.exitExtract(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitExtract" ):
return visitor.visitExtract(self)
else:
return visitor.visitChildren(self)
class StringLiteralContext(PrimaryExpressionContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.PrimaryExpressionContext
super().__init__(parser)
self.copyFrom(ctx)
def string(self):
return self.getTypedRuleContext(SqlBaseParser.StringContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterStringLiteral" ):
listener.enterStringLiteral(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitStringLiteral" ):
listener.exitStringLiteral(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitStringLiteral" ):
return visitor.visitStringLiteral(self)
else:
return visitor.visitChildren(self)
class ArrayConstructorContext(PrimaryExpressionContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.PrimaryExpressionContext
super().__init__(parser)
self.copyFrom(ctx)
def ARRAY(self):
return self.getToken(SqlBaseParser.ARRAY, 0)
def expression(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(SqlBaseParser.ExpressionContext)
else:
return self.getTypedRuleContext(SqlBaseParser.ExpressionContext,i)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterArrayConstructor" ):
listener.enterArrayConstructor(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitArrayConstructor" ):
listener.exitArrayConstructor(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitArrayConstructor" ):
return visitor.visitArrayConstructor(self)
else:
return visitor.visitChildren(self)
class FunctionCallContext(PrimaryExpressionContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.PrimaryExpressionContext
super().__init__(parser)
self.label = None # IdentifierContext
self.copyFrom(ctx)
def qualifiedName(self):
return self.getTypedRuleContext(SqlBaseParser.QualifiedNameContext,0)
def ASTERISK(self):
return self.getToken(SqlBaseParser.ASTERISK, 0)
def processingMode(self):
return self.getTypedRuleContext(SqlBaseParser.ProcessingModeContext,0)
def filter_(self):
return self.getTypedRuleContext(SqlBaseParser.Filter_Context,0)
def over(self):
return self.getTypedRuleContext(SqlBaseParser.OverContext,0)
def identifier(self):
return self.getTypedRuleContext(SqlBaseParser.IdentifierContext,0)
def expression(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(SqlBaseParser.ExpressionContext)
else:
return self.getTypedRuleContext(SqlBaseParser.ExpressionContext,i)
def ORDER(self):
return self.getToken(SqlBaseParser.ORDER, 0)
def BY(self):
return self.getToken(SqlBaseParser.BY, 0)
def sortItem(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(SqlBaseParser.SortItemContext)
else:
return self.getTypedRuleContext(SqlBaseParser.SortItemContext,i)
def setQuantifier(self):
return self.getTypedRuleContext(SqlBaseParser.SetQuantifierContext,0)
def nullTreatment(self):
return self.getTypedRuleContext(SqlBaseParser.NullTreatmentContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterFunctionCall" ):
listener.enterFunctionCall(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitFunctionCall" ):
listener.exitFunctionCall(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitFunctionCall" ):
return visitor.visitFunctionCall(self)
else:
return visitor.visitChildren(self)
class CurrentSchemaContext(PrimaryExpressionContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.PrimaryExpressionContext
super().__init__(parser)
self.name = None # Token
self.copyFrom(ctx)
def CURRENT_SCHEMA(self):
return self.getToken(SqlBaseParser.CURRENT_SCHEMA, 0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterCurrentSchema" ):
listener.enterCurrentSchema(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitCurrentSchema" ):
listener.exitCurrentSchema(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitCurrentSchema" ):
return visitor.visitCurrentSchema(self)
else:
return visitor.visitChildren(self)
class ExistsContext(PrimaryExpressionContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.PrimaryExpressionContext
super().__init__(parser)
self.copyFrom(ctx)
def EXISTS(self):
return self.getToken(SqlBaseParser.EXISTS, 0)
def query(self):
return self.getTypedRuleContext(SqlBaseParser.QueryContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterExists" ):
listener.enterExists(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitExists" ):
listener.exitExists(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitExists" ):
return visitor.visitExists(self)
else:
return visitor.visitChildren(self)
class PositionContext(PrimaryExpressionContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.PrimaryExpressionContext
super().__init__(parser)
self.copyFrom(ctx)
def POSITION(self):
return self.getToken(SqlBaseParser.POSITION, 0)
def valueExpression(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(SqlBaseParser.ValueExpressionContext)
else:
return self.getTypedRuleContext(SqlBaseParser.ValueExpressionContext,i)
def IN(self):
return self.getToken(SqlBaseParser.IN, 0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterPosition" ):
listener.enterPosition(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitPosition" ):
listener.exitPosition(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitPosition" ):
return visitor.visitPosition(self)
else:
return visitor.visitChildren(self)
class ListaggContext(PrimaryExpressionContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.PrimaryExpressionContext
super().__init__(parser)
self.name = None # Token
self.copyFrom(ctx)
def expression(self):
return self.getTypedRuleContext(SqlBaseParser.ExpressionContext,0)
def LISTAGG(self):
return self.getToken(SqlBaseParser.LISTAGG, 0)
def WITHIN(self):
return self.getToken(SqlBaseParser.WITHIN, 0)
def GROUP(self):
return self.getToken(SqlBaseParser.GROUP, 0)
def ORDER(self):
return self.getToken(SqlBaseParser.ORDER, 0)
def BY(self):
return self.getToken(SqlBaseParser.BY, 0)
def sortItem(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(SqlBaseParser.SortItemContext)
else:
return self.getTypedRuleContext(SqlBaseParser.SortItemContext,i)
def setQuantifier(self):
return self.getTypedRuleContext(SqlBaseParser.SetQuantifierContext,0)
def string(self):
return self.getTypedRuleContext(SqlBaseParser.StringContext,0)
def ON(self):
return self.getToken(SqlBaseParser.ON, 0)
def OVERFLOW(self):
return self.getToken(SqlBaseParser.OVERFLOW, 0)
def listAggOverflowBehavior(self):
return self.getTypedRuleContext(SqlBaseParser.ListAggOverflowBehaviorContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterListagg" ):
listener.enterListagg(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitListagg" ):
listener.exitListagg(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitListagg" ):
return visitor.visitListagg(self)
else:
return visitor.visitChildren(self)
class SearchedCaseContext(PrimaryExpressionContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.PrimaryExpressionContext
super().__init__(parser)
self.elseExpression = None # ExpressionContext
self.copyFrom(ctx)
def CASE(self):
return self.getToken(SqlBaseParser.CASE, 0)
def END(self):
return self.getToken(SqlBaseParser.END, 0)
def whenClause(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(SqlBaseParser.WhenClauseContext)
else:
return self.getTypedRuleContext(SqlBaseParser.WhenClauseContext,i)
def ELSE(self):
return self.getToken(SqlBaseParser.ELSE, 0)
def expression(self):
return self.getTypedRuleContext(SqlBaseParser.ExpressionContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterSearchedCase" ):
listener.enterSearchedCase(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitSearchedCase" ):
listener.exitSearchedCase(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitSearchedCase" ):
return visitor.visitSearchedCase(self)
else:
return visitor.visitChildren(self)
class CurrentCatalogContext(PrimaryExpressionContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.PrimaryExpressionContext
super().__init__(parser)
self.name = None # Token
self.copyFrom(ctx)
def CURRENT_CATALOG(self):
return self.getToken(SqlBaseParser.CURRENT_CATALOG, 0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterCurrentCatalog" ):
listener.enterCurrentCatalog(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitCurrentCatalog" ):
listener.exitCurrentCatalog(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitCurrentCatalog" ):
return visitor.visitCurrentCatalog(self)
else:
return visitor.visitChildren(self)
class GroupingOperationContext(PrimaryExpressionContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.PrimaryExpressionContext
super().__init__(parser)
self.copyFrom(ctx)
def GROUPING(self):
return self.getToken(SqlBaseParser.GROUPING, 0)
def qualifiedName(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(SqlBaseParser.QualifiedNameContext)
else:
return self.getTypedRuleContext(SqlBaseParser.QualifiedNameContext,i)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterGroupingOperation" ):
listener.enterGroupingOperation(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitGroupingOperation" ):
listener.exitGroupingOperation(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitGroupingOperation" ):
return visitor.visitGroupingOperation(self)
else:
return visitor.visitChildren(self)
def primaryExpression(self, _p:int=0):
_parentctx = self._ctx
_parentState = self.state
localctx = SqlBaseParser.PrimaryExpressionContext(self, self._ctx, _parentState)
_prevctx = localctx
_startState = 100
self.enterRecursionRule(localctx, 100, self.RULE_primaryExpression, _p)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 1962
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,254,self._ctx)
if la_ == 1:
localctx = SqlBaseParser.NullLiteralContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 1675
self.match(SqlBaseParser.NULL)
elif la_ == 2:
localctx = SqlBaseParser.IntervalLiteralContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 1676
self.interval()
elif la_ == 3:
localctx = SqlBaseParser.TypeConstructorContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 1677
self.identifier()
self.state = 1678
self.string()
elif la_ == 4:
localctx = SqlBaseParser.TypeConstructorContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 1680
self.match(SqlBaseParser.DOUBLE)
self.state = 1681
self.match(SqlBaseParser.PRECISION)
self.state = 1682
self.string()
elif la_ == 5:
localctx = SqlBaseParser.NumericLiteralContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 1683
self.number()
elif la_ == 6:
localctx = SqlBaseParser.BooleanLiteralContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 1684
self.booleanValue()
elif la_ == 7:
localctx = SqlBaseParser.StringLiteralContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 1685
self.string()
elif la_ == 8:
localctx = SqlBaseParser.BinaryLiteralContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 1686
self.match(SqlBaseParser.BINARY_LITERAL)
elif la_ == 9:
localctx = SqlBaseParser.ParameterContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 1687
self.match(SqlBaseParser.QUESTION_MARK)
elif la_ == 10:
localctx = SqlBaseParser.PositionContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 1688
self.match(SqlBaseParser.POSITION)
self.state = 1689
self.match(SqlBaseParser.T__1)
self.state = 1690
self.valueExpression(0)
self.state = 1691
self.match(SqlBaseParser.IN)
self.state = 1692
self.valueExpression(0)
self.state = 1693
self.match(SqlBaseParser.T__2)
elif la_ == 11:
localctx = SqlBaseParser.RowConstructorContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 1695
self.match(SqlBaseParser.T__1)
self.state = 1696
self.expression()
self.state = 1699
self._errHandler.sync(self)
_la = self._input.LA(1)
while True:
self.state = 1697
self.match(SqlBaseParser.T__3)
self.state = 1698
self.expression()
self.state = 1701
self._errHandler.sync(self)
_la = self._input.LA(1)
if not (_la==SqlBaseParser.T__3):
break
self.state = 1703
self.match(SqlBaseParser.T__2)
elif la_ == 12:
localctx = SqlBaseParser.RowConstructorContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 1705
self.match(SqlBaseParser.ROW)
self.state = 1706
self.match(SqlBaseParser.T__1)
self.state = 1707
self.expression()
self.state = 1712
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la==SqlBaseParser.T__3:
self.state = 1708
self.match(SqlBaseParser.T__3)
self.state = 1709
self.expression()
self.state = 1714
self._errHandler.sync(self)
_la = self._input.LA(1)
self.state = 1715
self.match(SqlBaseParser.T__2)
elif la_ == 13:
localctx = SqlBaseParser.ListaggContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 1717
localctx.name = self.match(SqlBaseParser.LISTAGG)
self.state = 1718
self.match(SqlBaseParser.T__1)
self.state = 1720
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,221,self._ctx)
if la_ == 1:
self.state = 1719
self.setQuantifier()
self.state = 1722
self.expression()
self.state = 1725
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==SqlBaseParser.T__3:
self.state = 1723
self.match(SqlBaseParser.T__3)
self.state = 1724
self.string()
self.state = 1730
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==SqlBaseParser.ON:
self.state = 1727
self.match(SqlBaseParser.ON)
self.state = 1728
self.match(SqlBaseParser.OVERFLOW)
self.state = 1729
self.listAggOverflowBehavior()
self.state = 1732
self.match(SqlBaseParser.T__2)
self.state = 1733
self.match(SqlBaseParser.WITHIN)
self.state = 1734
self.match(SqlBaseParser.GROUP)
self.state = 1735
self.match(SqlBaseParser.T__1)
self.state = 1736
self.match(SqlBaseParser.ORDER)
self.state = 1737
self.match(SqlBaseParser.BY)
self.state = 1738
self.sortItem()
self.state = 1743
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la==SqlBaseParser.T__3:
self.state = 1739
self.match(SqlBaseParser.T__3)
self.state = 1740
self.sortItem()
self.state = 1745
self._errHandler.sync(self)
_la = self._input.LA(1)
self.state = 1746
self.match(SqlBaseParser.T__2)
elif la_ == 14:
localctx = SqlBaseParser.FunctionCallContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 1749
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,225,self._ctx)
if la_ == 1:
self.state = 1748
self.processingMode()
self.state = 1751
self.qualifiedName()
self.state = 1752
self.match(SqlBaseParser.T__1)
self.state = 1756
self._errHandler.sync(self)
_la = self._input.LA(1)
if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << SqlBaseParser.ADD) | (1 << SqlBaseParser.ADMIN) | (1 << SqlBaseParser.AFTER) | (1 << SqlBaseParser.ALL) | (1 << SqlBaseParser.ANALYZE) | (1 << SqlBaseParser.ANY) | (1 << SqlBaseParser.ARRAY) | (1 << SqlBaseParser.ASC) | (1 << SqlBaseParser.AT) | (1 << SqlBaseParser.AUTHORIZATION) | (1 << SqlBaseParser.BERNOULLI) | (1 << SqlBaseParser.CALL) | (1 << SqlBaseParser.CASCADE) | (1 << SqlBaseParser.CATALOGS) | (1 << SqlBaseParser.COLUMN) | (1 << SqlBaseParser.COLUMNS) | (1 << SqlBaseParser.COMMENT) | (1 << SqlBaseParser.COMMIT) | (1 << SqlBaseParser.COMMITTED) | (1 << SqlBaseParser.COUNT) | (1 << SqlBaseParser.CURRENT) | (1 << SqlBaseParser.DATA) | (1 << SqlBaseParser.DATE) | (1 << SqlBaseParser.DAY) | (1 << SqlBaseParser.DEFINER) | (1 << SqlBaseParser.DENY))) != 0) or ((((_la - 64)) & ~0x3f) == 0 and ((1 << (_la - 64)) & ((1 << (SqlBaseParser.DESC - 64)) | (1 << (SqlBaseParser.DEFINE - 64)) | (1 << (SqlBaseParser.DISTRIBUTED - 64)) | (1 << (SqlBaseParser.DOUBLE - 64)) | (1 << (SqlBaseParser.EMPTY - 64)) | (1 << (SqlBaseParser.ERROR - 64)) | (1 << (SqlBaseParser.EXCLUDING - 64)) | (1 << (SqlBaseParser.EXPLAIN - 64)) | (1 << (SqlBaseParser.FETCH - 64)) | (1 << (SqlBaseParser.FILTER - 64)) | (1 << (SqlBaseParser.FINAL - 64)) | (1 << (SqlBaseParser.FIRST - 64)) | (1 << (SqlBaseParser.FOLLOWING - 64)) | (1 << (SqlBaseParser.FORMAT - 64)) | (1 << (SqlBaseParser.FUNCTIONS - 64)) | (1 << (SqlBaseParser.GRANT - 64)) | (1 << (SqlBaseParser.GRANTED - 64)) | (1 << (SqlBaseParser.GRANTS - 64)) | (1 << (SqlBaseParser.GRAPHVIZ - 64)) | (1 << (SqlBaseParser.GROUPS - 64)) | (1 << (SqlBaseParser.HOUR - 64)) | (1 << (SqlBaseParser.IF - 64)) | (1 << (SqlBaseParser.IGNORE - 64)) | (1 << (SqlBaseParser.INCLUDING - 64)) | (1 << (SqlBaseParser.INITIAL - 64)) | (1 << (SqlBaseParser.INPUT - 64)) | (1 << (SqlBaseParser.INTERVAL - 64)) | (1 << (SqlBaseParser.INVOKER - 64)) | (1 << (SqlBaseParser.IO - 64)) | (1 << (SqlBaseParser.ISOLATION - 64)) | (1 
<< (SqlBaseParser.JSON - 64)) | (1 << (SqlBaseParser.LAST - 64)) | (1 << (SqlBaseParser.LATERAL - 64)) | (1 << (SqlBaseParser.LEVEL - 64)) | (1 << (SqlBaseParser.LIMIT - 64)) | (1 << (SqlBaseParser.LOCAL - 64)))) != 0) or ((((_la - 129)) & ~0x3f) == 0 and ((1 << (_la - 129)) & ((1 << (SqlBaseParser.LOGICAL - 129)) | (1 << (SqlBaseParser.MAP - 129)) | (1 << (SqlBaseParser.MATCH - 129)) | (1 << (SqlBaseParser.MATCHED - 129)) | (1 << (SqlBaseParser.MATCHES - 129)) | (1 << (SqlBaseParser.MATCH_RECOGNIZE - 129)) | (1 << (SqlBaseParser.MATERIALIZED - 129)) | (1 << (SqlBaseParser.MEASURES - 129)) | (1 << (SqlBaseParser.MERGE - 129)) | (1 << (SqlBaseParser.MINUTE - 129)) | (1 << (SqlBaseParser.MONTH - 129)) | (1 << (SqlBaseParser.NEXT - 129)) | (1 << (SqlBaseParser.NFC - 129)) | (1 << (SqlBaseParser.NFD - 129)) | (1 << (SqlBaseParser.NFKC - 129)) | (1 << (SqlBaseParser.NFKD - 129)) | (1 << (SqlBaseParser.NO - 129)) | (1 << (SqlBaseParser.NONE - 129)) | (1 << (SqlBaseParser.NULLIF - 129)) | (1 << (SqlBaseParser.NULLS - 129)) | (1 << (SqlBaseParser.OFFSET - 129)) | (1 << (SqlBaseParser.OMIT - 129)) | (1 << (SqlBaseParser.OF - 129)) | (1 << (SqlBaseParser.ONE - 129)) | (1 << (SqlBaseParser.ONLY - 129)) | (1 << (SqlBaseParser.OPTION - 129)) | (1 << (SqlBaseParser.ORDINALITY - 129)) | (1 << (SqlBaseParser.OUTPUT - 129)) | (1 << (SqlBaseParser.OVER - 129)) | (1 << (SqlBaseParser.OVERFLOW - 129)) | (1 << (SqlBaseParser.PARTITION - 129)) | (1 << (SqlBaseParser.PARTITIONS - 129)) | (1 << (SqlBaseParser.PAST - 129)) | (1 << (SqlBaseParser.PATH - 129)) | (1 << (SqlBaseParser.PATTERN - 129)) | (1 << (SqlBaseParser.PER - 129)) | (1 << (SqlBaseParser.PERMUTE - 129)) | (1 << (SqlBaseParser.POSITION - 129)) | (1 << (SqlBaseParser.PRECEDING - 129)) | (1 << (SqlBaseParser.PRECISION - 129)) | (1 << (SqlBaseParser.PRIVILEGES - 129)) | (1 << (SqlBaseParser.PROPERTIES - 129)) | (1 << (SqlBaseParser.RANGE - 129)) | (1 << (SqlBaseParser.READ - 129)) | (1 << (SqlBaseParser.REFRESH - 129)) | (1 << 
(SqlBaseParser.RENAME - 129)) | (1 << (SqlBaseParser.REPEATABLE - 129)) | (1 << (SqlBaseParser.REPLACE - 129)) | (1 << (SqlBaseParser.RESET - 129)) | (1 << (SqlBaseParser.RESPECT - 129)) | (1 << (SqlBaseParser.RESTRICT - 129)) | (1 << (SqlBaseParser.REVOKE - 129)) | (1 << (SqlBaseParser.ROLE - 129)))) != 0) or ((((_la - 193)) & ~0x3f) == 0 and ((1 << (_la - 193)) & ((1 << (SqlBaseParser.ROLES - 193)) | (1 << (SqlBaseParser.ROLLBACK - 193)) | (1 << (SqlBaseParser.ROW - 193)) | (1 << (SqlBaseParser.ROWS - 193)) | (1 << (SqlBaseParser.RUNNING - 193)) | (1 << (SqlBaseParser.SCHEMA - 193)) | (1 << (SqlBaseParser.SCHEMAS - 193)) | (1 << (SqlBaseParser.SECOND - 193)) | (1 << (SqlBaseParser.SECURITY - 193)) | (1 << (SqlBaseParser.SEEK - 193)) | (1 << (SqlBaseParser.SERIALIZABLE - 193)) | (1 << (SqlBaseParser.SESSION - 193)) | (1 << (SqlBaseParser.SET - 193)) | (1 << (SqlBaseParser.SETS - 193)) | (1 << (SqlBaseParser.SHOW - 193)) | (1 << (SqlBaseParser.SOME - 193)) | (1 << (SqlBaseParser.START - 193)) | (1 << (SqlBaseParser.STATS - 193)) | (1 << (SqlBaseParser.SUBSET - 193)) | (1 << (SqlBaseParser.SUBSTRING - 193)) | (1 << (SqlBaseParser.SYSTEM - 193)) | (1 << (SqlBaseParser.TABLES - 193)) | (1 << (SqlBaseParser.TABLESAMPLE - 193)) | (1 << (SqlBaseParser.TEXT - 193)) | (1 << (SqlBaseParser.TIES - 193)) | (1 << (SqlBaseParser.TIME - 193)) | (1 << (SqlBaseParser.TIMESTAMP - 193)) | (1 << (SqlBaseParser.TO - 193)) | (1 << (SqlBaseParser.TRANSACTION - 193)) | (1 << (SqlBaseParser.TRUNCATE - 193)) | (1 << (SqlBaseParser.TRY_CAST - 193)) | (1 << (SqlBaseParser.TYPE - 193)) | (1 << (SqlBaseParser.UNBOUNDED - 193)) | (1 << (SqlBaseParser.UNCOMMITTED - 193)) | (1 << (SqlBaseParser.UNMATCHED - 193)) | (1 << (SqlBaseParser.UPDATE - 193)) | (1 << (SqlBaseParser.USE - 193)) | (1 << (SqlBaseParser.USER - 193)) | (1 << (SqlBaseParser.VALIDATE - 193)) | (1 << (SqlBaseParser.VERBOSE - 193)) | (1 << (SqlBaseParser.VERSION - 193)) | (1 << (SqlBaseParser.VIEW - 193)) | (1 << 
(SqlBaseParser.WINDOW - 193)) | (1 << (SqlBaseParser.WITHIN - 193)) | (1 << (SqlBaseParser.WITHOUT - 193)) | (1 << (SqlBaseParser.WORK - 193)) | (1 << (SqlBaseParser.WRITE - 193)) | (1 << (SqlBaseParser.YEAR - 193)) | (1 << (SqlBaseParser.ZONE - 193)))) != 0) or ((((_la - 274)) & ~0x3f) == 0 and ((1 << (_la - 274)) & ((1 << (SqlBaseParser.IDENTIFIER - 274)) | (1 << (SqlBaseParser.DIGIT_IDENTIFIER - 274)) | (1 << (SqlBaseParser.QUOTED_IDENTIFIER - 274)) | (1 << (SqlBaseParser.BACKQUOTED_IDENTIFIER - 274)))) != 0):
self.state = 1753
localctx.label = self.identifier()
self.state = 1754
self.match(SqlBaseParser.T__0)
self.state = 1758
self.match(SqlBaseParser.ASTERISK)
self.state = 1759
self.match(SqlBaseParser.T__2)
self.state = 1761
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,227,self._ctx)
if la_ == 1:
self.state = 1760
self.filter_()
self.state = 1764
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,228,self._ctx)
if la_ == 1:
self.state = 1763
self.over()
elif la_ == 15:
localctx = SqlBaseParser.FunctionCallContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 1767
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,229,self._ctx)
if la_ == 1:
self.state = 1766
self.processingMode()
self.state = 1769
self.qualifiedName()
self.state = 1770
self.match(SqlBaseParser.T__1)
self.state = 1782
self._errHandler.sync(self)
_la = self._input.LA(1)
if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << SqlBaseParser.T__1) | (1 << SqlBaseParser.ADD) | (1 << SqlBaseParser.ADMIN) | (1 << SqlBaseParser.AFTER) | (1 << SqlBaseParser.ALL) | (1 << SqlBaseParser.ANALYZE) | (1 << SqlBaseParser.ANY) | (1 << SqlBaseParser.ARRAY) | (1 << SqlBaseParser.ASC) | (1 << SqlBaseParser.AT) | (1 << SqlBaseParser.AUTHORIZATION) | (1 << SqlBaseParser.BERNOULLI) | (1 << SqlBaseParser.CALL) | (1 << SqlBaseParser.CASCADE) | (1 << SqlBaseParser.CASE) | (1 << SqlBaseParser.CAST) | (1 << SqlBaseParser.CATALOGS) | (1 << SqlBaseParser.COLUMN) | (1 << SqlBaseParser.COLUMNS) | (1 << SqlBaseParser.COMMENT) | (1 << SqlBaseParser.COMMIT) | (1 << SqlBaseParser.COMMITTED) | (1 << SqlBaseParser.COUNT) | (1 << SqlBaseParser.CURRENT) | (1 << SqlBaseParser.CURRENT_CATALOG) | (1 << SqlBaseParser.CURRENT_DATE) | (1 << SqlBaseParser.CURRENT_PATH) | (1 << SqlBaseParser.CURRENT_SCHEMA) | (1 << SqlBaseParser.CURRENT_TIME) | (1 << SqlBaseParser.CURRENT_TIMESTAMP) | (1 << SqlBaseParser.CURRENT_USER) | (1 << SqlBaseParser.DATA) | (1 << SqlBaseParser.DATE) | (1 << SqlBaseParser.DAY) | (1 << SqlBaseParser.DEFINER) | (1 << SqlBaseParser.DENY))) != 0) or ((((_la - 64)) & ~0x3f) == 0 and ((1 << (_la - 64)) & ((1 << (SqlBaseParser.DESC - 64)) | (1 << (SqlBaseParser.DEFINE - 64)) | (1 << (SqlBaseParser.DISTINCT - 64)) | (1 << (SqlBaseParser.DISTRIBUTED - 64)) | (1 << (SqlBaseParser.DOUBLE - 64)) | (1 << (SqlBaseParser.EMPTY - 64)) | (1 << (SqlBaseParser.ERROR - 64)) | (1 << (SqlBaseParser.EXCLUDING - 64)) | (1 << (SqlBaseParser.EXISTS - 64)) | (1 << (SqlBaseParser.EXPLAIN - 64)) | (1 << (SqlBaseParser.EXTRACT - 64)) | (1 << (SqlBaseParser.FALSE - 64)) | (1 << (SqlBaseParser.FETCH - 64)) | (1 << (SqlBaseParser.FILTER - 64)) | (1 << (SqlBaseParser.FINAL - 64)) | (1 << (SqlBaseParser.FIRST - 64)) | (1 << (SqlBaseParser.FOLLOWING - 64)) | (1 << (SqlBaseParser.FORMAT - 64)) | (1 << (SqlBaseParser.FUNCTIONS - 64)) | (1 << (SqlBaseParser.GRANT - 64)) | (1 << 
(SqlBaseParser.GRANTED - 64)) | (1 << (SqlBaseParser.GRANTS - 64)) | (1 << (SqlBaseParser.GRAPHVIZ - 64)) | (1 << (SqlBaseParser.GROUPING - 64)) | (1 << (SqlBaseParser.GROUPS - 64)) | (1 << (SqlBaseParser.HOUR - 64)) | (1 << (SqlBaseParser.IF - 64)) | (1 << (SqlBaseParser.IGNORE - 64)) | (1 << (SqlBaseParser.INCLUDING - 64)) | (1 << (SqlBaseParser.INITIAL - 64)) | (1 << (SqlBaseParser.INPUT - 64)) | (1 << (SqlBaseParser.INTERVAL - 64)) | (1 << (SqlBaseParser.INVOKER - 64)) | (1 << (SqlBaseParser.IO - 64)) | (1 << (SqlBaseParser.ISOLATION - 64)) | (1 << (SqlBaseParser.JSON - 64)) | (1 << (SqlBaseParser.LAST - 64)) | (1 << (SqlBaseParser.LATERAL - 64)) | (1 << (SqlBaseParser.LEVEL - 64)) | (1 << (SqlBaseParser.LIMIT - 64)) | (1 << (SqlBaseParser.LISTAGG - 64)) | (1 << (SqlBaseParser.LOCAL - 64)) | (1 << (SqlBaseParser.LOCALTIME - 64)))) != 0) or ((((_la - 128)) & ~0x3f) == 0 and ((1 << (_la - 128)) & ((1 << (SqlBaseParser.LOCALTIMESTAMP - 128)) | (1 << (SqlBaseParser.LOGICAL - 128)) | (1 << (SqlBaseParser.MAP - 128)) | (1 << (SqlBaseParser.MATCH - 128)) | (1 << (SqlBaseParser.MATCHED - 128)) | (1 << (SqlBaseParser.MATCHES - 128)) | (1 << (SqlBaseParser.MATCH_RECOGNIZE - 128)) | (1 << (SqlBaseParser.MATERIALIZED - 128)) | (1 << (SqlBaseParser.MEASURES - 128)) | (1 << (SqlBaseParser.MERGE - 128)) | (1 << (SqlBaseParser.MINUTE - 128)) | (1 << (SqlBaseParser.MONTH - 128)) | (1 << (SqlBaseParser.NEXT - 128)) | (1 << (SqlBaseParser.NFC - 128)) | (1 << (SqlBaseParser.NFD - 128)) | (1 << (SqlBaseParser.NFKC - 128)) | (1 << (SqlBaseParser.NFKD - 128)) | (1 << (SqlBaseParser.NO - 128)) | (1 << (SqlBaseParser.NONE - 128)) | (1 << (SqlBaseParser.NORMALIZE - 128)) | (1 << (SqlBaseParser.NOT - 128)) | (1 << (SqlBaseParser.NULL - 128)) | (1 << (SqlBaseParser.NULLIF - 128)) | (1 << (SqlBaseParser.NULLS - 128)) | (1 << (SqlBaseParser.OFFSET - 128)) | (1 << (SqlBaseParser.OMIT - 128)) | (1 << (SqlBaseParser.OF - 128)) | (1 << (SqlBaseParser.ONE - 128)) | (1 << (SqlBaseParser.ONLY - 
128)) | (1 << (SqlBaseParser.OPTION - 128)) | (1 << (SqlBaseParser.ORDINALITY - 128)) | (1 << (SqlBaseParser.OUTPUT - 128)) | (1 << (SqlBaseParser.OVER - 128)) | (1 << (SqlBaseParser.OVERFLOW - 128)) | (1 << (SqlBaseParser.PARTITION - 128)) | (1 << (SqlBaseParser.PARTITIONS - 128)) | (1 << (SqlBaseParser.PAST - 128)) | (1 << (SqlBaseParser.PATH - 128)) | (1 << (SqlBaseParser.PATTERN - 128)) | (1 << (SqlBaseParser.PER - 128)) | (1 << (SqlBaseParser.PERMUTE - 128)) | (1 << (SqlBaseParser.POSITION - 128)) | (1 << (SqlBaseParser.PRECEDING - 128)) | (1 << (SqlBaseParser.PRECISION - 128)) | (1 << (SqlBaseParser.PRIVILEGES - 128)) | (1 << (SqlBaseParser.PROPERTIES - 128)) | (1 << (SqlBaseParser.RANGE - 128)) | (1 << (SqlBaseParser.READ - 128)) | (1 << (SqlBaseParser.REFRESH - 128)) | (1 << (SqlBaseParser.RENAME - 128)) | (1 << (SqlBaseParser.REPEATABLE - 128)) | (1 << (SqlBaseParser.REPLACE - 128)) | (1 << (SqlBaseParser.RESET - 128)) | (1 << (SqlBaseParser.RESPECT - 128)) | (1 << (SqlBaseParser.RESTRICT - 128)) | (1 << (SqlBaseParser.REVOKE - 128)))) != 0) or ((((_la - 192)) & ~0x3f) == 0 and ((1 << (_la - 192)) & ((1 << (SqlBaseParser.ROLE - 192)) | (1 << (SqlBaseParser.ROLES - 192)) | (1 << (SqlBaseParser.ROLLBACK - 192)) | (1 << (SqlBaseParser.ROW - 192)) | (1 << (SqlBaseParser.ROWS - 192)) | (1 << (SqlBaseParser.RUNNING - 192)) | (1 << (SqlBaseParser.SCHEMA - 192)) | (1 << (SqlBaseParser.SCHEMAS - 192)) | (1 << (SqlBaseParser.SECOND - 192)) | (1 << (SqlBaseParser.SECURITY - 192)) | (1 << (SqlBaseParser.SEEK - 192)) | (1 << (SqlBaseParser.SERIALIZABLE - 192)) | (1 << (SqlBaseParser.SESSION - 192)) | (1 << (SqlBaseParser.SET - 192)) | (1 << (SqlBaseParser.SETS - 192)) | (1 << (SqlBaseParser.SHOW - 192)) | (1 << (SqlBaseParser.SOME - 192)) | (1 << (SqlBaseParser.START - 192)) | (1 << (SqlBaseParser.STATS - 192)) | (1 << (SqlBaseParser.SUBSET - 192)) | (1 << (SqlBaseParser.SUBSTRING - 192)) | (1 << (SqlBaseParser.SYSTEM - 192)) | (1 << (SqlBaseParser.TABLES - 192)) | (1 
<< (SqlBaseParser.TABLESAMPLE - 192)) | (1 << (SqlBaseParser.TEXT - 192)) | (1 << (SqlBaseParser.TIES - 192)) | (1 << (SqlBaseParser.TIME - 192)) | (1 << (SqlBaseParser.TIMESTAMP - 192)) | (1 << (SqlBaseParser.TO - 192)) | (1 << (SqlBaseParser.TRANSACTION - 192)) | (1 << (SqlBaseParser.TRUE - 192)) | (1 << (SqlBaseParser.TRUNCATE - 192)) | (1 << (SqlBaseParser.TRY_CAST - 192)) | (1 << (SqlBaseParser.TYPE - 192)) | (1 << (SqlBaseParser.UNBOUNDED - 192)) | (1 << (SqlBaseParser.UNCOMMITTED - 192)) | (1 << (SqlBaseParser.UNMATCHED - 192)) | (1 << (SqlBaseParser.UPDATE - 192)) | (1 << (SqlBaseParser.USE - 192)) | (1 << (SqlBaseParser.USER - 192)) | (1 << (SqlBaseParser.VALIDATE - 192)) | (1 << (SqlBaseParser.VERBOSE - 192)) | (1 << (SqlBaseParser.VERSION - 192)) | (1 << (SqlBaseParser.VIEW - 192)) | (1 << (SqlBaseParser.WINDOW - 192)) | (1 << (SqlBaseParser.WITHIN - 192)) | (1 << (SqlBaseParser.WITHOUT - 192)) | (1 << (SqlBaseParser.WORK - 192)) | (1 << (SqlBaseParser.WRITE - 192)) | (1 << (SqlBaseParser.YEAR - 192)) | (1 << (SqlBaseParser.ZONE - 192)))) != 0) or ((((_la - 261)) & ~0x3f) == 0 and ((1 << (_la - 261)) & ((1 << (SqlBaseParser.PLUS - 261)) | (1 << (SqlBaseParser.MINUS - 261)) | (1 << (SqlBaseParser.QUESTION_MARK - 261)) | (1 << (SqlBaseParser.STRING - 261)) | (1 << (SqlBaseParser.UNICODE_STRING - 261)) | (1 << (SqlBaseParser.BINARY_LITERAL - 261)) | (1 << (SqlBaseParser.INTEGER_VALUE - 261)) | (1 << (SqlBaseParser.DECIMAL_VALUE - 261)) | (1 << (SqlBaseParser.DOUBLE_VALUE - 261)) | (1 << (SqlBaseParser.IDENTIFIER - 261)) | (1 << (SqlBaseParser.DIGIT_IDENTIFIER - 261)) | (1 << (SqlBaseParser.QUOTED_IDENTIFIER - 261)) | (1 << (SqlBaseParser.BACKQUOTED_IDENTIFIER - 261)))) != 0):
self.state = 1772
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,230,self._ctx)
if la_ == 1:
self.state = 1771
self.setQuantifier()
self.state = 1774
self.expression()
self.state = 1779
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la==SqlBaseParser.T__3:
self.state = 1775
self.match(SqlBaseParser.T__3)
self.state = 1776
self.expression()
self.state = 1781
self._errHandler.sync(self)
_la = self._input.LA(1)
self.state = 1794
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==SqlBaseParser.ORDER:
self.state = 1784
self.match(SqlBaseParser.ORDER)
self.state = 1785
self.match(SqlBaseParser.BY)
self.state = 1786
self.sortItem()
self.state = 1791
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la==SqlBaseParser.T__3:
self.state = 1787
self.match(SqlBaseParser.T__3)
self.state = 1788
self.sortItem()
self.state = 1793
self._errHandler.sync(self)
_la = self._input.LA(1)
self.state = 1796
self.match(SqlBaseParser.T__2)
self.state = 1798
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,235,self._ctx)
if la_ == 1:
self.state = 1797
self.filter_()
self.state = 1804
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,237,self._ctx)
if la_ == 1:
self.state = 1801
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==SqlBaseParser.IGNORE or _la==SqlBaseParser.RESPECT:
self.state = 1800
self.nullTreatment()
self.state = 1803
self.over()
elif la_ == 16:
localctx = SqlBaseParser.MeasureContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 1806
self.identifier()
self.state = 1807
self.over()
elif la_ == 17:
localctx = SqlBaseParser.Lambda_Context(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 1809
self.identifier()
self.state = 1810
self.match(SqlBaseParser.T__5)
self.state = 1811
self.expression()
elif la_ == 18:
localctx = SqlBaseParser.Lambda_Context(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 1813
self.match(SqlBaseParser.T__1)
self.state = 1822
self._errHandler.sync(self)
_la = self._input.LA(1)
if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << SqlBaseParser.ADD) | (1 << SqlBaseParser.ADMIN) | (1 << SqlBaseParser.AFTER) | (1 << SqlBaseParser.ALL) | (1 << SqlBaseParser.ANALYZE) | (1 << SqlBaseParser.ANY) | (1 << SqlBaseParser.ARRAY) | (1 << SqlBaseParser.ASC) | (1 << SqlBaseParser.AT) | (1 << SqlBaseParser.AUTHORIZATION) | (1 << SqlBaseParser.BERNOULLI) | (1 << SqlBaseParser.CALL) | (1 << SqlBaseParser.CASCADE) | (1 << SqlBaseParser.CATALOGS) | (1 << SqlBaseParser.COLUMN) | (1 << SqlBaseParser.COLUMNS) | (1 << SqlBaseParser.COMMENT) | (1 << SqlBaseParser.COMMIT) | (1 << SqlBaseParser.COMMITTED) | (1 << SqlBaseParser.COUNT) | (1 << SqlBaseParser.CURRENT) | (1 << SqlBaseParser.DATA) | (1 << SqlBaseParser.DATE) | (1 << SqlBaseParser.DAY) | (1 << SqlBaseParser.DEFINER) | (1 << SqlBaseParser.DENY))) != 0) or ((((_la - 64)) & ~0x3f) == 0 and ((1 << (_la - 64)) & ((1 << (SqlBaseParser.DESC - 64)) | (1 << (SqlBaseParser.DEFINE - 64)) | (1 << (SqlBaseParser.DISTRIBUTED - 64)) | (1 << (SqlBaseParser.DOUBLE - 64)) | (1 << (SqlBaseParser.EMPTY - 64)) | (1 << (SqlBaseParser.ERROR - 64)) | (1 << (SqlBaseParser.EXCLUDING - 64)) | (1 << (SqlBaseParser.EXPLAIN - 64)) | (1 << (SqlBaseParser.FETCH - 64)) | (1 << (SqlBaseParser.FILTER - 64)) | (1 << (SqlBaseParser.FINAL - 64)) | (1 << (SqlBaseParser.FIRST - 64)) | (1 << (SqlBaseParser.FOLLOWING - 64)) | (1 << (SqlBaseParser.FORMAT - 64)) | (1 << (SqlBaseParser.FUNCTIONS - 64)) | (1 << (SqlBaseParser.GRANT - 64)) | (1 << (SqlBaseParser.GRANTED - 64)) | (1 << (SqlBaseParser.GRANTS - 64)) | (1 << (SqlBaseParser.GRAPHVIZ - 64)) | (1 << (SqlBaseParser.GROUPS - 64)) | (1 << (SqlBaseParser.HOUR - 64)) | (1 << (SqlBaseParser.IF - 64)) | (1 << (SqlBaseParser.IGNORE - 64)) | (1 << (SqlBaseParser.INCLUDING - 64)) | (1 << (SqlBaseParser.INITIAL - 64)) | (1 << (SqlBaseParser.INPUT - 64)) | (1 << (SqlBaseParser.INTERVAL - 64)) | (1 << (SqlBaseParser.INVOKER - 64)) | (1 << (SqlBaseParser.IO - 64)) | (1 << (SqlBaseParser.ISOLATION - 64)) | (1 
<< (SqlBaseParser.JSON - 64)) | (1 << (SqlBaseParser.LAST - 64)) | (1 << (SqlBaseParser.LATERAL - 64)) | (1 << (SqlBaseParser.LEVEL - 64)) | (1 << (SqlBaseParser.LIMIT - 64)) | (1 << (SqlBaseParser.LOCAL - 64)))) != 0) or ((((_la - 129)) & ~0x3f) == 0 and ((1 << (_la - 129)) & ((1 << (SqlBaseParser.LOGICAL - 129)) | (1 << (SqlBaseParser.MAP - 129)) | (1 << (SqlBaseParser.MATCH - 129)) | (1 << (SqlBaseParser.MATCHED - 129)) | (1 << (SqlBaseParser.MATCHES - 129)) | (1 << (SqlBaseParser.MATCH_RECOGNIZE - 129)) | (1 << (SqlBaseParser.MATERIALIZED - 129)) | (1 << (SqlBaseParser.MEASURES - 129)) | (1 << (SqlBaseParser.MERGE - 129)) | (1 << (SqlBaseParser.MINUTE - 129)) | (1 << (SqlBaseParser.MONTH - 129)) | (1 << (SqlBaseParser.NEXT - 129)) | (1 << (SqlBaseParser.NFC - 129)) | (1 << (SqlBaseParser.NFD - 129)) | (1 << (SqlBaseParser.NFKC - 129)) | (1 << (SqlBaseParser.NFKD - 129)) | (1 << (SqlBaseParser.NO - 129)) | (1 << (SqlBaseParser.NONE - 129)) | (1 << (SqlBaseParser.NULLIF - 129)) | (1 << (SqlBaseParser.NULLS - 129)) | (1 << (SqlBaseParser.OFFSET - 129)) | (1 << (SqlBaseParser.OMIT - 129)) | (1 << (SqlBaseParser.OF - 129)) | (1 << (SqlBaseParser.ONE - 129)) | (1 << (SqlBaseParser.ONLY - 129)) | (1 << (SqlBaseParser.OPTION - 129)) | (1 << (SqlBaseParser.ORDINALITY - 129)) | (1 << (SqlBaseParser.OUTPUT - 129)) | (1 << (SqlBaseParser.OVER - 129)) | (1 << (SqlBaseParser.OVERFLOW - 129)) | (1 << (SqlBaseParser.PARTITION - 129)) | (1 << (SqlBaseParser.PARTITIONS - 129)) | (1 << (SqlBaseParser.PAST - 129)) | (1 << (SqlBaseParser.PATH - 129)) | (1 << (SqlBaseParser.PATTERN - 129)) | (1 << (SqlBaseParser.PER - 129)) | (1 << (SqlBaseParser.PERMUTE - 129)) | (1 << (SqlBaseParser.POSITION - 129)) | (1 << (SqlBaseParser.PRECEDING - 129)) | (1 << (SqlBaseParser.PRECISION - 129)) | (1 << (SqlBaseParser.PRIVILEGES - 129)) | (1 << (SqlBaseParser.PROPERTIES - 129)) | (1 << (SqlBaseParser.RANGE - 129)) | (1 << (SqlBaseParser.READ - 129)) | (1 << (SqlBaseParser.REFRESH - 129)) | (1 << 
(SqlBaseParser.RENAME - 129)) | (1 << (SqlBaseParser.REPEATABLE - 129)) | (1 << (SqlBaseParser.REPLACE - 129)) | (1 << (SqlBaseParser.RESET - 129)) | (1 << (SqlBaseParser.RESPECT - 129)) | (1 << (SqlBaseParser.RESTRICT - 129)) | (1 << (SqlBaseParser.REVOKE - 129)) | (1 << (SqlBaseParser.ROLE - 129)))) != 0) or ((((_la - 193)) & ~0x3f) == 0 and ((1 << (_la - 193)) & ((1 << (SqlBaseParser.ROLES - 193)) | (1 << (SqlBaseParser.ROLLBACK - 193)) | (1 << (SqlBaseParser.ROW - 193)) | (1 << (SqlBaseParser.ROWS - 193)) | (1 << (SqlBaseParser.RUNNING - 193)) | (1 << (SqlBaseParser.SCHEMA - 193)) | (1 << (SqlBaseParser.SCHEMAS - 193)) | (1 << (SqlBaseParser.SECOND - 193)) | (1 << (SqlBaseParser.SECURITY - 193)) | (1 << (SqlBaseParser.SEEK - 193)) | (1 << (SqlBaseParser.SERIALIZABLE - 193)) | (1 << (SqlBaseParser.SESSION - 193)) | (1 << (SqlBaseParser.SET - 193)) | (1 << (SqlBaseParser.SETS - 193)) | (1 << (SqlBaseParser.SHOW - 193)) | (1 << (SqlBaseParser.SOME - 193)) | (1 << (SqlBaseParser.START - 193)) | (1 << (SqlBaseParser.STATS - 193)) | (1 << (SqlBaseParser.SUBSET - 193)) | (1 << (SqlBaseParser.SUBSTRING - 193)) | (1 << (SqlBaseParser.SYSTEM - 193)) | (1 << (SqlBaseParser.TABLES - 193)) | (1 << (SqlBaseParser.TABLESAMPLE - 193)) | (1 << (SqlBaseParser.TEXT - 193)) | (1 << (SqlBaseParser.TIES - 193)) | (1 << (SqlBaseParser.TIME - 193)) | (1 << (SqlBaseParser.TIMESTAMP - 193)) | (1 << (SqlBaseParser.TO - 193)) | (1 << (SqlBaseParser.TRANSACTION - 193)) | (1 << (SqlBaseParser.TRUNCATE - 193)) | (1 << (SqlBaseParser.TRY_CAST - 193)) | (1 << (SqlBaseParser.TYPE - 193)) | (1 << (SqlBaseParser.UNBOUNDED - 193)) | (1 << (SqlBaseParser.UNCOMMITTED - 193)) | (1 << (SqlBaseParser.UNMATCHED - 193)) | (1 << (SqlBaseParser.UPDATE - 193)) | (1 << (SqlBaseParser.USE - 193)) | (1 << (SqlBaseParser.USER - 193)) | (1 << (SqlBaseParser.VALIDATE - 193)) | (1 << (SqlBaseParser.VERBOSE - 193)) | (1 << (SqlBaseParser.VERSION - 193)) | (1 << (SqlBaseParser.VIEW - 193)) | (1 << 
(SqlBaseParser.WINDOW - 193)) | (1 << (SqlBaseParser.WITHIN - 193)) | (1 << (SqlBaseParser.WITHOUT - 193)) | (1 << (SqlBaseParser.WORK - 193)) | (1 << (SqlBaseParser.WRITE - 193)) | (1 << (SqlBaseParser.YEAR - 193)) | (1 << (SqlBaseParser.ZONE - 193)))) != 0) or ((((_la - 274)) & ~0x3f) == 0 and ((1 << (_la - 274)) & ((1 << (SqlBaseParser.IDENTIFIER - 274)) | (1 << (SqlBaseParser.DIGIT_IDENTIFIER - 274)) | (1 << (SqlBaseParser.QUOTED_IDENTIFIER - 274)) | (1 << (SqlBaseParser.BACKQUOTED_IDENTIFIER - 274)))) != 0):
self.state = 1814
self.identifier()
self.state = 1819
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la==SqlBaseParser.T__3:
self.state = 1815
self.match(SqlBaseParser.T__3)
self.state = 1816
self.identifier()
self.state = 1821
self._errHandler.sync(self)
_la = self._input.LA(1)
self.state = 1824
self.match(SqlBaseParser.T__2)
self.state = 1825
self.match(SqlBaseParser.T__5)
self.state = 1826
self.expression()
elif la_ == 19:
localctx = SqlBaseParser.SubqueryExpressionContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 1827
self.match(SqlBaseParser.T__1)
self.state = 1828
self.query()
self.state = 1829
self.match(SqlBaseParser.T__2)
elif la_ == 20:
localctx = SqlBaseParser.ExistsContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 1831
self.match(SqlBaseParser.EXISTS)
self.state = 1832
self.match(SqlBaseParser.T__1)
self.state = 1833
self.query()
self.state = 1834
self.match(SqlBaseParser.T__2)
elif la_ == 21:
localctx = SqlBaseParser.SimpleCaseContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 1836
self.match(SqlBaseParser.CASE)
self.state = 1837
localctx.operand = self.expression()
self.state = 1839
self._errHandler.sync(self)
_la = self._input.LA(1)
while True:
self.state = 1838
self.whenClause()
self.state = 1841
self._errHandler.sync(self)
_la = self._input.LA(1)
if not (_la==SqlBaseParser.WHEN):
break
self.state = 1845
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==SqlBaseParser.ELSE:
self.state = 1843
self.match(SqlBaseParser.ELSE)
self.state = 1844
localctx.elseExpression = self.expression()
self.state = 1847
self.match(SqlBaseParser.END)
elif la_ == 22:
localctx = SqlBaseParser.SearchedCaseContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 1849
self.match(SqlBaseParser.CASE)
self.state = 1851
self._errHandler.sync(self)
_la = self._input.LA(1)
while True:
self.state = 1850
self.whenClause()
self.state = 1853
self._errHandler.sync(self)
_la = self._input.LA(1)
if not (_la==SqlBaseParser.WHEN):
break
self.state = 1857
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==SqlBaseParser.ELSE:
self.state = 1855
self.match(SqlBaseParser.ELSE)
self.state = 1856
localctx.elseExpression = self.expression()
self.state = 1859
self.match(SqlBaseParser.END)
elif la_ == 23:
localctx = SqlBaseParser.CastContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 1861
self.match(SqlBaseParser.CAST)
self.state = 1862
self.match(SqlBaseParser.T__1)
self.state = 1863
self.expression()
self.state = 1864
self.match(SqlBaseParser.AS)
self.state = 1865
self.type_(0)
self.state = 1866
self.match(SqlBaseParser.T__2)
elif la_ == 24:
localctx = SqlBaseParser.CastContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 1868
self.match(SqlBaseParser.TRY_CAST)
self.state = 1869
self.match(SqlBaseParser.T__1)
self.state = 1870
self.expression()
self.state = 1871
self.match(SqlBaseParser.AS)
self.state = 1872
self.type_(0)
self.state = 1873
self.match(SqlBaseParser.T__2)
elif la_ == 25:
localctx = SqlBaseParser.ArrayConstructorContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 1875
self.match(SqlBaseParser.ARRAY)
self.state = 1876
self.match(SqlBaseParser.T__6)
self.state = 1885
self._errHandler.sync(self)
_la = self._input.LA(1)
if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << SqlBaseParser.T__1) | (1 << SqlBaseParser.ADD) | (1 << SqlBaseParser.ADMIN) | (1 << SqlBaseParser.AFTER) | (1 << SqlBaseParser.ALL) | (1 << SqlBaseParser.ANALYZE) | (1 << SqlBaseParser.ANY) | (1 << SqlBaseParser.ARRAY) | (1 << SqlBaseParser.ASC) | (1 << SqlBaseParser.AT) | (1 << SqlBaseParser.AUTHORIZATION) | (1 << SqlBaseParser.BERNOULLI) | (1 << SqlBaseParser.CALL) | (1 << SqlBaseParser.CASCADE) | (1 << SqlBaseParser.CASE) | (1 << SqlBaseParser.CAST) | (1 << SqlBaseParser.CATALOGS) | (1 << SqlBaseParser.COLUMN) | (1 << SqlBaseParser.COLUMNS) | (1 << SqlBaseParser.COMMENT) | (1 << SqlBaseParser.COMMIT) | (1 << SqlBaseParser.COMMITTED) | (1 << SqlBaseParser.COUNT) | (1 << SqlBaseParser.CURRENT) | (1 << SqlBaseParser.CURRENT_CATALOG) | (1 << SqlBaseParser.CURRENT_DATE) | (1 << SqlBaseParser.CURRENT_PATH) | (1 << SqlBaseParser.CURRENT_SCHEMA) | (1 << SqlBaseParser.CURRENT_TIME) | (1 << SqlBaseParser.CURRENT_TIMESTAMP) | (1 << SqlBaseParser.CURRENT_USER) | (1 << SqlBaseParser.DATA) | (1 << SqlBaseParser.DATE) | (1 << SqlBaseParser.DAY) | (1 << SqlBaseParser.DEFINER) | (1 << SqlBaseParser.DENY))) != 0) or ((((_la - 64)) & ~0x3f) == 0 and ((1 << (_la - 64)) & ((1 << (SqlBaseParser.DESC - 64)) | (1 << (SqlBaseParser.DEFINE - 64)) | (1 << (SqlBaseParser.DISTRIBUTED - 64)) | (1 << (SqlBaseParser.DOUBLE - 64)) | (1 << (SqlBaseParser.EMPTY - 64)) | (1 << (SqlBaseParser.ERROR - 64)) | (1 << (SqlBaseParser.EXCLUDING - 64)) | (1 << (SqlBaseParser.EXISTS - 64)) | (1 << (SqlBaseParser.EXPLAIN - 64)) | (1 << (SqlBaseParser.EXTRACT - 64)) | (1 << (SqlBaseParser.FALSE - 64)) | (1 << (SqlBaseParser.FETCH - 64)) | (1 << (SqlBaseParser.FILTER - 64)) | (1 << (SqlBaseParser.FINAL - 64)) | (1 << (SqlBaseParser.FIRST - 64)) | (1 << (SqlBaseParser.FOLLOWING - 64)) | (1 << (SqlBaseParser.FORMAT - 64)) | (1 << (SqlBaseParser.FUNCTIONS - 64)) | (1 << (SqlBaseParser.GRANT - 64)) | (1 << (SqlBaseParser.GRANTED - 64)) | (1 << (SqlBaseParser.GRANTS 
- 64)) | (1 << (SqlBaseParser.GRAPHVIZ - 64)) | (1 << (SqlBaseParser.GROUPING - 64)) | (1 << (SqlBaseParser.GROUPS - 64)) | (1 << (SqlBaseParser.HOUR - 64)) | (1 << (SqlBaseParser.IF - 64)) | (1 << (SqlBaseParser.IGNORE - 64)) | (1 << (SqlBaseParser.INCLUDING - 64)) | (1 << (SqlBaseParser.INITIAL - 64)) | (1 << (SqlBaseParser.INPUT - 64)) | (1 << (SqlBaseParser.INTERVAL - 64)) | (1 << (SqlBaseParser.INVOKER - 64)) | (1 << (SqlBaseParser.IO - 64)) | (1 << (SqlBaseParser.ISOLATION - 64)) | (1 << (SqlBaseParser.JSON - 64)) | (1 << (SqlBaseParser.LAST - 64)) | (1 << (SqlBaseParser.LATERAL - 64)) | (1 << (SqlBaseParser.LEVEL - 64)) | (1 << (SqlBaseParser.LIMIT - 64)) | (1 << (SqlBaseParser.LISTAGG - 64)) | (1 << (SqlBaseParser.LOCAL - 64)) | (1 << (SqlBaseParser.LOCALTIME - 64)))) != 0) or ((((_la - 128)) & ~0x3f) == 0 and ((1 << (_la - 128)) & ((1 << (SqlBaseParser.LOCALTIMESTAMP - 128)) | (1 << (SqlBaseParser.LOGICAL - 128)) | (1 << (SqlBaseParser.MAP - 128)) | (1 << (SqlBaseParser.MATCH - 128)) | (1 << (SqlBaseParser.MATCHED - 128)) | (1 << (SqlBaseParser.MATCHES - 128)) | (1 << (SqlBaseParser.MATCH_RECOGNIZE - 128)) | (1 << (SqlBaseParser.MATERIALIZED - 128)) | (1 << (SqlBaseParser.MEASURES - 128)) | (1 << (SqlBaseParser.MERGE - 128)) | (1 << (SqlBaseParser.MINUTE - 128)) | (1 << (SqlBaseParser.MONTH - 128)) | (1 << (SqlBaseParser.NEXT - 128)) | (1 << (SqlBaseParser.NFC - 128)) | (1 << (SqlBaseParser.NFD - 128)) | (1 << (SqlBaseParser.NFKC - 128)) | (1 << (SqlBaseParser.NFKD - 128)) | (1 << (SqlBaseParser.NO - 128)) | (1 << (SqlBaseParser.NONE - 128)) | (1 << (SqlBaseParser.NORMALIZE - 128)) | (1 << (SqlBaseParser.NOT - 128)) | (1 << (SqlBaseParser.NULL - 128)) | (1 << (SqlBaseParser.NULLIF - 128)) | (1 << (SqlBaseParser.NULLS - 128)) | (1 << (SqlBaseParser.OFFSET - 128)) | (1 << (SqlBaseParser.OMIT - 128)) | (1 << (SqlBaseParser.OF - 128)) | (1 << (SqlBaseParser.ONE - 128)) | (1 << (SqlBaseParser.ONLY - 128)) | (1 << (SqlBaseParser.OPTION - 128)) | (1 << 
(SqlBaseParser.ORDINALITY - 128)) | (1 << (SqlBaseParser.OUTPUT - 128)) | (1 << (SqlBaseParser.OVER - 128)) | (1 << (SqlBaseParser.OVERFLOW - 128)) | (1 << (SqlBaseParser.PARTITION - 128)) | (1 << (SqlBaseParser.PARTITIONS - 128)) | (1 << (SqlBaseParser.PAST - 128)) | (1 << (SqlBaseParser.PATH - 128)) | (1 << (SqlBaseParser.PATTERN - 128)) | (1 << (SqlBaseParser.PER - 128)) | (1 << (SqlBaseParser.PERMUTE - 128)) | (1 << (SqlBaseParser.POSITION - 128)) | (1 << (SqlBaseParser.PRECEDING - 128)) | (1 << (SqlBaseParser.PRECISION - 128)) | (1 << (SqlBaseParser.PRIVILEGES - 128)) | (1 << (SqlBaseParser.PROPERTIES - 128)) | (1 << (SqlBaseParser.RANGE - 128)) | (1 << (SqlBaseParser.READ - 128)) | (1 << (SqlBaseParser.REFRESH - 128)) | (1 << (SqlBaseParser.RENAME - 128)) | (1 << (SqlBaseParser.REPEATABLE - 128)) | (1 << (SqlBaseParser.REPLACE - 128)) | (1 << (SqlBaseParser.RESET - 128)) | (1 << (SqlBaseParser.RESPECT - 128)) | (1 << (SqlBaseParser.RESTRICT - 128)) | (1 << (SqlBaseParser.REVOKE - 128)))) != 0) or ((((_la - 192)) & ~0x3f) == 0 and ((1 << (_la - 192)) & ((1 << (SqlBaseParser.ROLE - 192)) | (1 << (SqlBaseParser.ROLES - 192)) | (1 << (SqlBaseParser.ROLLBACK - 192)) | (1 << (SqlBaseParser.ROW - 192)) | (1 << (SqlBaseParser.ROWS - 192)) | (1 << (SqlBaseParser.RUNNING - 192)) | (1 << (SqlBaseParser.SCHEMA - 192)) | (1 << (SqlBaseParser.SCHEMAS - 192)) | (1 << (SqlBaseParser.SECOND - 192)) | (1 << (SqlBaseParser.SECURITY - 192)) | (1 << (SqlBaseParser.SEEK - 192)) | (1 << (SqlBaseParser.SERIALIZABLE - 192)) | (1 << (SqlBaseParser.SESSION - 192)) | (1 << (SqlBaseParser.SET - 192)) | (1 << (SqlBaseParser.SETS - 192)) | (1 << (SqlBaseParser.SHOW - 192)) | (1 << (SqlBaseParser.SOME - 192)) | (1 << (SqlBaseParser.START - 192)) | (1 << (SqlBaseParser.STATS - 192)) | (1 << (SqlBaseParser.SUBSET - 192)) | (1 << (SqlBaseParser.SUBSTRING - 192)) | (1 << (SqlBaseParser.SYSTEM - 192)) | (1 << (SqlBaseParser.TABLES - 192)) | (1 << (SqlBaseParser.TABLESAMPLE - 192)) | (1 << 
(SqlBaseParser.TEXT - 192)) | (1 << (SqlBaseParser.TIES - 192)) | (1 << (SqlBaseParser.TIME - 192)) | (1 << (SqlBaseParser.TIMESTAMP - 192)) | (1 << (SqlBaseParser.TO - 192)) | (1 << (SqlBaseParser.TRANSACTION - 192)) | (1 << (SqlBaseParser.TRUE - 192)) | (1 << (SqlBaseParser.TRUNCATE - 192)) | (1 << (SqlBaseParser.TRY_CAST - 192)) | (1 << (SqlBaseParser.TYPE - 192)) | (1 << (SqlBaseParser.UNBOUNDED - 192)) | (1 << (SqlBaseParser.UNCOMMITTED - 192)) | (1 << (SqlBaseParser.UNMATCHED - 192)) | (1 << (SqlBaseParser.UPDATE - 192)) | (1 << (SqlBaseParser.USE - 192)) | (1 << (SqlBaseParser.USER - 192)) | (1 << (SqlBaseParser.VALIDATE - 192)) | (1 << (SqlBaseParser.VERBOSE - 192)) | (1 << (SqlBaseParser.VERSION - 192)) | (1 << (SqlBaseParser.VIEW - 192)) | (1 << (SqlBaseParser.WINDOW - 192)) | (1 << (SqlBaseParser.WITHIN - 192)) | (1 << (SqlBaseParser.WITHOUT - 192)) | (1 << (SqlBaseParser.WORK - 192)) | (1 << (SqlBaseParser.WRITE - 192)) | (1 << (SqlBaseParser.YEAR - 192)) | (1 << (SqlBaseParser.ZONE - 192)))) != 0) or ((((_la - 261)) & ~0x3f) == 0 and ((1 << (_la - 261)) & ((1 << (SqlBaseParser.PLUS - 261)) | (1 << (SqlBaseParser.MINUS - 261)) | (1 << (SqlBaseParser.QUESTION_MARK - 261)) | (1 << (SqlBaseParser.STRING - 261)) | (1 << (SqlBaseParser.UNICODE_STRING - 261)) | (1 << (SqlBaseParser.BINARY_LITERAL - 261)) | (1 << (SqlBaseParser.INTEGER_VALUE - 261)) | (1 << (SqlBaseParser.DECIMAL_VALUE - 261)) | (1 << (SqlBaseParser.DOUBLE_VALUE - 261)) | (1 << (SqlBaseParser.IDENTIFIER - 261)) | (1 << (SqlBaseParser.DIGIT_IDENTIFIER - 261)) | (1 << (SqlBaseParser.QUOTED_IDENTIFIER - 261)) | (1 << (SqlBaseParser.BACKQUOTED_IDENTIFIER - 261)))) != 0):
self.state = 1877
self.expression()
self.state = 1882
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la==SqlBaseParser.T__3:
self.state = 1878
self.match(SqlBaseParser.T__3)
self.state = 1879
self.expression()
self.state = 1884
self._errHandler.sync(self)
_la = self._input.LA(1)
self.state = 1887
self.match(SqlBaseParser.T__7)
elif la_ == 26:
localctx = SqlBaseParser.ColumnReferenceContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 1888
self.identifier()
elif la_ == 27:
localctx = SqlBaseParser.SpecialDateTimeFunctionContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 1889
localctx.name = self.match(SqlBaseParser.CURRENT_DATE)
elif la_ == 28:
localctx = SqlBaseParser.SpecialDateTimeFunctionContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 1890
localctx.name = self.match(SqlBaseParser.CURRENT_TIME)
self.state = 1894
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,246,self._ctx)
if la_ == 1:
self.state = 1891
self.match(SqlBaseParser.T__1)
self.state = 1892
localctx.precision = self.match(SqlBaseParser.INTEGER_VALUE)
self.state = 1893
self.match(SqlBaseParser.T__2)
elif la_ == 29:
localctx = SqlBaseParser.SpecialDateTimeFunctionContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 1896
localctx.name = self.match(SqlBaseParser.CURRENT_TIMESTAMP)
self.state = 1900
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,247,self._ctx)
if la_ == 1:
self.state = 1897
self.match(SqlBaseParser.T__1)
self.state = 1898
localctx.precision = self.match(SqlBaseParser.INTEGER_VALUE)
self.state = 1899
self.match(SqlBaseParser.T__2)
elif la_ == 30:
localctx = SqlBaseParser.SpecialDateTimeFunctionContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 1902
localctx.name = self.match(SqlBaseParser.LOCALTIME)
self.state = 1906
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,248,self._ctx)
if la_ == 1:
self.state = 1903
self.match(SqlBaseParser.T__1)
self.state = 1904
localctx.precision = self.match(SqlBaseParser.INTEGER_VALUE)
self.state = 1905
self.match(SqlBaseParser.T__2)
elif la_ == 31:
localctx = SqlBaseParser.SpecialDateTimeFunctionContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 1908
localctx.name = self.match(SqlBaseParser.LOCALTIMESTAMP)
self.state = 1912
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,249,self._ctx)
if la_ == 1:
self.state = 1909
self.match(SqlBaseParser.T__1)
self.state = 1910
localctx.precision = self.match(SqlBaseParser.INTEGER_VALUE)
self.state = 1911
self.match(SqlBaseParser.T__2)
elif la_ == 32:
localctx = SqlBaseParser.CurrentUserContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 1914
localctx.name = self.match(SqlBaseParser.CURRENT_USER)
elif la_ == 33:
localctx = SqlBaseParser.CurrentCatalogContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 1915
localctx.name = self.match(SqlBaseParser.CURRENT_CATALOG)
elif la_ == 34:
localctx = SqlBaseParser.CurrentSchemaContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 1916
localctx.name = self.match(SqlBaseParser.CURRENT_SCHEMA)
elif la_ == 35:
localctx = SqlBaseParser.CurrentPathContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 1917
localctx.name = self.match(SqlBaseParser.CURRENT_PATH)
elif la_ == 36:
localctx = SqlBaseParser.SubstringContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 1918
self.match(SqlBaseParser.SUBSTRING)
self.state = 1919
self.match(SqlBaseParser.T__1)
self.state = 1920
self.valueExpression(0)
self.state = 1921
self.match(SqlBaseParser.FROM)
self.state = 1922
self.valueExpression(0)
self.state = 1925
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==SqlBaseParser.FOR:
self.state = 1923
self.match(SqlBaseParser.FOR)
self.state = 1924
self.valueExpression(0)
self.state = 1927
self.match(SqlBaseParser.T__2)
elif la_ == 37:
localctx = SqlBaseParser.NormalizeContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 1929
self.match(SqlBaseParser.NORMALIZE)
self.state = 1930
self.match(SqlBaseParser.T__1)
self.state = 1931
self.valueExpression(0)
self.state = 1934
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==SqlBaseParser.T__3:
self.state = 1932
self.match(SqlBaseParser.T__3)
self.state = 1933
self.normalForm()
self.state = 1936
self.match(SqlBaseParser.T__2)
elif la_ == 38:
localctx = SqlBaseParser.ExtractContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 1938
self.match(SqlBaseParser.EXTRACT)
self.state = 1939
self.match(SqlBaseParser.T__1)
self.state = 1940
self.identifier()
self.state = 1941
self.match(SqlBaseParser.FROM)
self.state = 1942
self.valueExpression(0)
self.state = 1943
self.match(SqlBaseParser.T__2)
elif la_ == 39:
localctx = SqlBaseParser.ParenthesizedExpressionContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 1945
self.match(SqlBaseParser.T__1)
self.state = 1946
self.expression()
self.state = 1947
self.match(SqlBaseParser.T__2)
elif la_ == 40:
localctx = SqlBaseParser.GroupingOperationContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 1949
self.match(SqlBaseParser.GROUPING)
self.state = 1950
self.match(SqlBaseParser.T__1)
self.state = 1959
self._errHandler.sync(self)
_la = self._input.LA(1)
if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << SqlBaseParser.ADD) | (1 << SqlBaseParser.ADMIN) | (1 << SqlBaseParser.AFTER) | (1 << SqlBaseParser.ALL) | (1 << SqlBaseParser.ANALYZE) | (1 << SqlBaseParser.ANY) | (1 << SqlBaseParser.ARRAY) | (1 << SqlBaseParser.ASC) | (1 << SqlBaseParser.AT) | (1 << SqlBaseParser.AUTHORIZATION) | (1 << SqlBaseParser.BERNOULLI) | (1 << SqlBaseParser.CALL) | (1 << SqlBaseParser.CASCADE) | (1 << SqlBaseParser.CATALOGS) | (1 << SqlBaseParser.COLUMN) | (1 << SqlBaseParser.COLUMNS) | (1 << SqlBaseParser.COMMENT) | (1 << SqlBaseParser.COMMIT) | (1 << SqlBaseParser.COMMITTED) | (1 << SqlBaseParser.COUNT) | (1 << SqlBaseParser.CURRENT) | (1 << SqlBaseParser.DATA) | (1 << SqlBaseParser.DATE) | (1 << SqlBaseParser.DAY) | (1 << SqlBaseParser.DEFINER) | (1 << SqlBaseParser.DENY))) != 0) or ((((_la - 64)) & ~0x3f) == 0 and ((1 << (_la - 64)) & ((1 << (SqlBaseParser.DESC - 64)) | (1 << (SqlBaseParser.DEFINE - 64)) | (1 << (SqlBaseParser.DISTRIBUTED - 64)) | (1 << (SqlBaseParser.DOUBLE - 64)) | (1 << (SqlBaseParser.EMPTY - 64)) | (1 << (SqlBaseParser.ERROR - 64)) | (1 << (SqlBaseParser.EXCLUDING - 64)) | (1 << (SqlBaseParser.EXPLAIN - 64)) | (1 << (SqlBaseParser.FETCH - 64)) | (1 << (SqlBaseParser.FILTER - 64)) | (1 << (SqlBaseParser.FINAL - 64)) | (1 << (SqlBaseParser.FIRST - 64)) | (1 << (SqlBaseParser.FOLLOWING - 64)) | (1 << (SqlBaseParser.FORMAT - 64)) | (1 << (SqlBaseParser.FUNCTIONS - 64)) | (1 << (SqlBaseParser.GRANT - 64)) | (1 << (SqlBaseParser.GRANTED - 64)) | (1 << (SqlBaseParser.GRANTS - 64)) | (1 << (SqlBaseParser.GRAPHVIZ - 64)) | (1 << (SqlBaseParser.GROUPS - 64)) | (1 << (SqlBaseParser.HOUR - 64)) | (1 << (SqlBaseParser.IF - 64)) | (1 << (SqlBaseParser.IGNORE - 64)) | (1 << (SqlBaseParser.INCLUDING - 64)) | (1 << (SqlBaseParser.INITIAL - 64)) | (1 << (SqlBaseParser.INPUT - 64)) | (1 << (SqlBaseParser.INTERVAL - 64)) | (1 << (SqlBaseParser.INVOKER - 64)) | (1 << (SqlBaseParser.IO - 64)) | (1 << (SqlBaseParser.ISOLATION - 64)) | (1 
<< (SqlBaseParser.JSON - 64)) | (1 << (SqlBaseParser.LAST - 64)) | (1 << (SqlBaseParser.LATERAL - 64)) | (1 << (SqlBaseParser.LEVEL - 64)) | (1 << (SqlBaseParser.LIMIT - 64)) | (1 << (SqlBaseParser.LOCAL - 64)))) != 0) or ((((_la - 129)) & ~0x3f) == 0 and ((1 << (_la - 129)) & ((1 << (SqlBaseParser.LOGICAL - 129)) | (1 << (SqlBaseParser.MAP - 129)) | (1 << (SqlBaseParser.MATCH - 129)) | (1 << (SqlBaseParser.MATCHED - 129)) | (1 << (SqlBaseParser.MATCHES - 129)) | (1 << (SqlBaseParser.MATCH_RECOGNIZE - 129)) | (1 << (SqlBaseParser.MATERIALIZED - 129)) | (1 << (SqlBaseParser.MEASURES - 129)) | (1 << (SqlBaseParser.MERGE - 129)) | (1 << (SqlBaseParser.MINUTE - 129)) | (1 << (SqlBaseParser.MONTH - 129)) | (1 << (SqlBaseParser.NEXT - 129)) | (1 << (SqlBaseParser.NFC - 129)) | (1 << (SqlBaseParser.NFD - 129)) | (1 << (SqlBaseParser.NFKC - 129)) | (1 << (SqlBaseParser.NFKD - 129)) | (1 << (SqlBaseParser.NO - 129)) | (1 << (SqlBaseParser.NONE - 129)) | (1 << (SqlBaseParser.NULLIF - 129)) | (1 << (SqlBaseParser.NULLS - 129)) | (1 << (SqlBaseParser.OFFSET - 129)) | (1 << (SqlBaseParser.OMIT - 129)) | (1 << (SqlBaseParser.OF - 129)) | (1 << (SqlBaseParser.ONE - 129)) | (1 << (SqlBaseParser.ONLY - 129)) | (1 << (SqlBaseParser.OPTION - 129)) | (1 << (SqlBaseParser.ORDINALITY - 129)) | (1 << (SqlBaseParser.OUTPUT - 129)) | (1 << (SqlBaseParser.OVER - 129)) | (1 << (SqlBaseParser.OVERFLOW - 129)) | (1 << (SqlBaseParser.PARTITION - 129)) | (1 << (SqlBaseParser.PARTITIONS - 129)) | (1 << (SqlBaseParser.PAST - 129)) | (1 << (SqlBaseParser.PATH - 129)) | (1 << (SqlBaseParser.PATTERN - 129)) | (1 << (SqlBaseParser.PER - 129)) | (1 << (SqlBaseParser.PERMUTE - 129)) | (1 << (SqlBaseParser.POSITION - 129)) | (1 << (SqlBaseParser.PRECEDING - 129)) | (1 << (SqlBaseParser.PRECISION - 129)) | (1 << (SqlBaseParser.PRIVILEGES - 129)) | (1 << (SqlBaseParser.PROPERTIES - 129)) | (1 << (SqlBaseParser.RANGE - 129)) | (1 << (SqlBaseParser.READ - 129)) | (1 << (SqlBaseParser.REFRESH - 129)) | (1 << 
(SqlBaseParser.RENAME - 129)) | (1 << (SqlBaseParser.REPEATABLE - 129)) | (1 << (SqlBaseParser.REPLACE - 129)) | (1 << (SqlBaseParser.RESET - 129)) | (1 << (SqlBaseParser.RESPECT - 129)) | (1 << (SqlBaseParser.RESTRICT - 129)) | (1 << (SqlBaseParser.REVOKE - 129)) | (1 << (SqlBaseParser.ROLE - 129)))) != 0) or ((((_la - 193)) & ~0x3f) == 0 and ((1 << (_la - 193)) & ((1 << (SqlBaseParser.ROLES - 193)) | (1 << (SqlBaseParser.ROLLBACK - 193)) | (1 << (SqlBaseParser.ROW - 193)) | (1 << (SqlBaseParser.ROWS - 193)) | (1 << (SqlBaseParser.RUNNING - 193)) | (1 << (SqlBaseParser.SCHEMA - 193)) | (1 << (SqlBaseParser.SCHEMAS - 193)) | (1 << (SqlBaseParser.SECOND - 193)) | (1 << (SqlBaseParser.SECURITY - 193)) | (1 << (SqlBaseParser.SEEK - 193)) | (1 << (SqlBaseParser.SERIALIZABLE - 193)) | (1 << (SqlBaseParser.SESSION - 193)) | (1 << (SqlBaseParser.SET - 193)) | (1 << (SqlBaseParser.SETS - 193)) | (1 << (SqlBaseParser.SHOW - 193)) | (1 << (SqlBaseParser.SOME - 193)) | (1 << (SqlBaseParser.START - 193)) | (1 << (SqlBaseParser.STATS - 193)) | (1 << (SqlBaseParser.SUBSET - 193)) | (1 << (SqlBaseParser.SUBSTRING - 193)) | (1 << (SqlBaseParser.SYSTEM - 193)) | (1 << (SqlBaseParser.TABLES - 193)) | (1 << (SqlBaseParser.TABLESAMPLE - 193)) | (1 << (SqlBaseParser.TEXT - 193)) | (1 << (SqlBaseParser.TIES - 193)) | (1 << (SqlBaseParser.TIME - 193)) | (1 << (SqlBaseParser.TIMESTAMP - 193)) | (1 << (SqlBaseParser.TO - 193)) | (1 << (SqlBaseParser.TRANSACTION - 193)) | (1 << (SqlBaseParser.TRUNCATE - 193)) | (1 << (SqlBaseParser.TRY_CAST - 193)) | (1 << (SqlBaseParser.TYPE - 193)) | (1 << (SqlBaseParser.UNBOUNDED - 193)) | (1 << (SqlBaseParser.UNCOMMITTED - 193)) | (1 << (SqlBaseParser.UNMATCHED - 193)) | (1 << (SqlBaseParser.UPDATE - 193)) | (1 << (SqlBaseParser.USE - 193)) | (1 << (SqlBaseParser.USER - 193)) | (1 << (SqlBaseParser.VALIDATE - 193)) | (1 << (SqlBaseParser.VERBOSE - 193)) | (1 << (SqlBaseParser.VERSION - 193)) | (1 << (SqlBaseParser.VIEW - 193)) | (1 << 
(SqlBaseParser.WINDOW - 193)) | (1 << (SqlBaseParser.WITHIN - 193)) | (1 << (SqlBaseParser.WITHOUT - 193)) | (1 << (SqlBaseParser.WORK - 193)) | (1 << (SqlBaseParser.WRITE - 193)) | (1 << (SqlBaseParser.YEAR - 193)) | (1 << (SqlBaseParser.ZONE - 193)))) != 0) or ((((_la - 274)) & ~0x3f) == 0 and ((1 << (_la - 274)) & ((1 << (SqlBaseParser.IDENTIFIER - 274)) | (1 << (SqlBaseParser.DIGIT_IDENTIFIER - 274)) | (1 << (SqlBaseParser.QUOTED_IDENTIFIER - 274)) | (1 << (SqlBaseParser.BACKQUOTED_IDENTIFIER - 274)))) != 0):
self.state = 1951
self.qualifiedName()
self.state = 1956
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la==SqlBaseParser.T__3:
self.state = 1952
self.match(SqlBaseParser.T__3)
self.state = 1953
self.qualifiedName()
self.state = 1958
self._errHandler.sync(self)
_la = self._input.LA(1)
self.state = 1961
self.match(SqlBaseParser.T__2)
self._ctx.stop = self._input.LT(-1)
self.state = 1974
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,256,self._ctx)
while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
if _alt==1:
if self._parseListeners is not None:
self.triggerExitRuleEvent()
_prevctx = localctx
self.state = 1972
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,255,self._ctx)
if la_ == 1:
localctx = SqlBaseParser.SubscriptContext(self, SqlBaseParser.PrimaryExpressionContext(self, _parentctx, _parentState))
localctx.value = _prevctx
self.pushNewRecursionContext(localctx, _startState, self.RULE_primaryExpression)
self.state = 1964
if not self.precpred(self._ctx, 17):
from antlr4.error.Errors import (
FailedPredicateException,
)
raise FailedPredicateException(self, "self.precpred(self._ctx, 17)")
self.state = 1965
self.match(SqlBaseParser.T__6)
self.state = 1966
localctx.index = self.valueExpression(0)
self.state = 1967
self.match(SqlBaseParser.T__7)
elif la_ == 2:
localctx = SqlBaseParser.DereferenceContext(self, SqlBaseParser.PrimaryExpressionContext(self, _parentctx, _parentState))
localctx.base = _prevctx
self.pushNewRecursionContext(localctx, _startState, self.RULE_primaryExpression)
self.state = 1969
if not self.precpred(self._ctx, 15):
from antlr4.error.Errors import (
FailedPredicateException,
)
raise FailedPredicateException(self, "self.precpred(self._ctx, 15)")
self.state = 1970
self.match(SqlBaseParser.T__0)
self.state = 1971
localctx.fieldName = self.identifier()
self.state = 1976
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,256,self._ctx)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.unrollRecursionContexts(_parentctx)
return localctx
class ProcessingModeContext(ParserRuleContext):
__slots__ = 'parser'
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def RUNNING(self):
return self.getToken(SqlBaseParser.RUNNING, 0)
def FINAL(self):
return self.getToken(SqlBaseParser.FINAL, 0)
def getRuleIndex(self):
return SqlBaseParser.RULE_processingMode
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterProcessingMode" ):
listener.enterProcessingMode(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitProcessingMode" ):
listener.exitProcessingMode(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitProcessingMode" ):
return visitor.visitProcessingMode(self)
else:
return visitor.visitChildren(self)
    def processingMode(self):
        """Parse the ``processingMode`` rule: a single RUNNING or FINAL keyword.

        Returns the populated ProcessingModeContext; on a recognition error the
        exception is stored on the context and standard ANTLR recovery runs.
        """
        localctx = SqlBaseParser.ProcessingModeContext(self, self._ctx, self.state)
        self.enterRule(localctx, 102, self.RULE_processingMode)
        self._la = 0 # Token type
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 1977
            _la = self._input.LA(1)
            # Accept exactly one of the two keywords; any other lookahead token
            # triggers single-token inline recovery.
            if not(_la==SqlBaseParser.FINAL or _la==SqlBaseParser.RUNNING):
                self._errHandler.recoverInline(self)
            else:
                self._errHandler.reportMatch(self)
                self.consume()
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
class NullTreatmentContext(ParserRuleContext):
__slots__ = 'parser'
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def IGNORE(self):
return self.getToken(SqlBaseParser.IGNORE, 0)
def NULLS(self):
return self.getToken(SqlBaseParser.NULLS, 0)
def RESPECT(self):
return self.getToken(SqlBaseParser.RESPECT, 0)
def getRuleIndex(self):
return SqlBaseParser.RULE_nullTreatment
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterNullTreatment" ):
listener.enterNullTreatment(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitNullTreatment" ):
listener.exitNullTreatment(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitNullTreatment" ):
return visitor.visitNullTreatment(self)
else:
return visitor.visitChildren(self)
    def nullTreatment(self):
        """Parse ``nullTreatment``: either ``IGNORE NULLS`` or ``RESPECT NULLS``.

        Dispatches on one token of lookahead; raises NoViableAltException (caught
        below as a RecognitionException) when neither keyword is present.
        """
        localctx = SqlBaseParser.NullTreatmentContext(self, self._ctx, self.state)
        self.enterRule(localctx, 104, self.RULE_nullTreatment)
        try:
            self.state = 1983
            self._errHandler.sync(self)
            token = self._input.LA(1)
            # Alternative 1: IGNORE NULLS
            if token in [SqlBaseParser.IGNORE]:
                self.enterOuterAlt(localctx, 1)
                self.state = 1979
                self.match(SqlBaseParser.IGNORE)
                self.state = 1980
                self.match(SqlBaseParser.NULLS)
            # Alternative 2: RESPECT NULLS
            elif token in [SqlBaseParser.RESPECT]:
                self.enterOuterAlt(localctx, 2)
                self.state = 1981
                self.match(SqlBaseParser.RESPECT)
                self.state = 1982
                self.match(SqlBaseParser.NULLS)
            else:
                raise NoViableAltException(self)
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class StringContext(ParserRuleContext):
        """Base parse-tree node for the ``string`` grammar rule.

        Concrete alternatives (BasicStringLiteralContext,
        UnicodeStringLiteralContext) subclass this and use ``copyFrom`` to
        adopt the children parsed into this placeholder node.
        """
        __slots__ = 'parser'
        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser
        def getRuleIndex(self):
            return SqlBaseParser.RULE_string
        def copyFrom(self, ctx:ParserRuleContext):
            super().copyFrom(ctx)
class UnicodeStringLiteralContext(StringContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.StringContext
super().__init__(parser)
self.copyFrom(ctx)
def UNICODE_STRING(self):
return self.getToken(SqlBaseParser.UNICODE_STRING, 0)
def UESCAPE(self):
return self.getToken(SqlBaseParser.UESCAPE, 0)
def STRING(self):
return self.getToken(SqlBaseParser.STRING, 0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterUnicodeStringLiteral" ):
listener.enterUnicodeStringLiteral(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitUnicodeStringLiteral" ):
listener.exitUnicodeStringLiteral(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitUnicodeStringLiteral" ):
return visitor.visitUnicodeStringLiteral(self)
else:
return visitor.visitChildren(self)
class BasicStringLiteralContext(StringContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.StringContext
super().__init__(parser)
self.copyFrom(ctx)
def STRING(self):
return self.getToken(SqlBaseParser.STRING, 0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterBasicStringLiteral" ):
listener.enterBasicStringLiteral(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitBasicStringLiteral" ):
listener.exitBasicStringLiteral(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitBasicStringLiteral" ):
return visitor.visitBasicStringLiteral(self)
else:
return visitor.visitChildren(self)
    def string(self):
        """Parse the ``string`` rule: a plain STRING or a UNICODE_STRING with an
        optional ``UESCAPE 'c'`` suffix.

        The generic StringContext is replaced by the matching labeled subclass
        once the alternative is known.
        """
        localctx = SqlBaseParser.StringContext(self, self._ctx, self.state)
        self.enterRule(localctx, 106, self.RULE_string)
        try:
            self.state = 1991
            self._errHandler.sync(self)
            token = self._input.LA(1)
            # Alternative 1: ordinary string literal.
            if token in [SqlBaseParser.STRING]:
                localctx = SqlBaseParser.BasicStringLiteralContext(self, localctx)
                self.enterOuterAlt(localctx, 1)
                self.state = 1985
                self.match(SqlBaseParser.STRING)
            # Alternative 2: unicode string literal, possibly with UESCAPE.
            elif token in [SqlBaseParser.UNICODE_STRING]:
                localctx = SqlBaseParser.UnicodeStringLiteralContext(self, localctx)
                self.enterOuterAlt(localctx, 2)
                self.state = 1986
                self.match(SqlBaseParser.UNICODE_STRING)
                self.state = 1989
                self._errHandler.sync(self)
                # Adaptive prediction decides whether the optional UESCAPE
                # clause is present (ATN decision 258).
                la_ = self._interp.adaptivePredict(self._input,258,self._ctx)
                if la_ == 1:
                    self.state = 1987
                    self.match(SqlBaseParser.UESCAPE)
                    self.state = 1988
                    self.match(SqlBaseParser.STRING)
            else:
                raise NoViableAltException(self)
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class TimeZoneSpecifierContext(ParserRuleContext):
        """Base parse-tree node for the ``timeZoneSpecifier`` grammar rule.

        Concrete alternatives (TimeZoneIntervalContext, TimeZoneStringContext)
        subclass this and use ``copyFrom`` to adopt its children.
        """
        __slots__ = 'parser'
        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser
        def getRuleIndex(self):
            return SqlBaseParser.RULE_timeZoneSpecifier
        def copyFrom(self, ctx:ParserRuleContext):
            super().copyFrom(ctx)
class TimeZoneIntervalContext(TimeZoneSpecifierContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.TimeZoneSpecifierContext
super().__init__(parser)
self.copyFrom(ctx)
def TIME(self):
return self.getToken(SqlBaseParser.TIME, 0)
def ZONE(self):
return self.getToken(SqlBaseParser.ZONE, 0)
def interval(self):
return self.getTypedRuleContext(SqlBaseParser.IntervalContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterTimeZoneInterval" ):
listener.enterTimeZoneInterval(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitTimeZoneInterval" ):
listener.exitTimeZoneInterval(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitTimeZoneInterval" ):
return visitor.visitTimeZoneInterval(self)
else:
return visitor.visitChildren(self)
class TimeZoneStringContext(TimeZoneSpecifierContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.TimeZoneSpecifierContext
super().__init__(parser)
self.copyFrom(ctx)
def TIME(self):
return self.getToken(SqlBaseParser.TIME, 0)
def ZONE(self):
return self.getToken(SqlBaseParser.ZONE, 0)
def string(self):
return self.getTypedRuleContext(SqlBaseParser.StringContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterTimeZoneString" ):
listener.enterTimeZoneString(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitTimeZoneString" ):
listener.exitTimeZoneString(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitTimeZoneString" ):
return visitor.visitTimeZoneString(self)
else:
return visitor.visitChildren(self)
    def timeZoneSpecifier(self):
        """Parse ``timeZoneSpecifier``: ``TIME ZONE`` followed by either an
        interval or a string literal.

        Both alternatives start with the same two keywords, so an adaptive
        prediction (ATN decision 260) chooses between them.
        """
        localctx = SqlBaseParser.TimeZoneSpecifierContext(self, self._ctx, self.state)
        self.enterRule(localctx, 108, self.RULE_timeZoneSpecifier)
        try:
            self.state = 1999
            self._errHandler.sync(self)
            la_ = self._interp.adaptivePredict(self._input,260,self._ctx)
            # Alternative 1: TIME ZONE interval
            if la_ == 1:
                localctx = SqlBaseParser.TimeZoneIntervalContext(self, localctx)
                self.enterOuterAlt(localctx, 1)
                self.state = 1993
                self.match(SqlBaseParser.TIME)
                self.state = 1994
                self.match(SqlBaseParser.ZONE)
                self.state = 1995
                self.interval()
            # Alternative 2: TIME ZONE string
            elif la_ == 2:
                localctx = SqlBaseParser.TimeZoneStringContext(self, localctx)
                self.enterOuterAlt(localctx, 2)
                self.state = 1996
                self.match(SqlBaseParser.TIME)
                self.state = 1997
                self.match(SqlBaseParser.ZONE)
                self.state = 1998
                self.string()
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
class ComparisonOperatorContext(ParserRuleContext):
__slots__ = 'parser'
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def EQ(self):
return self.getToken(SqlBaseParser.EQ, 0)
def NEQ(self):
return self.getToken(SqlBaseParser.NEQ, 0)
def LT(self):
return self.getToken(SqlBaseParser.LT, 0)
def LTE(self):
return self.getToken(SqlBaseParser.LTE, 0)
def GT(self):
return self.getToken(SqlBaseParser.GT, 0)
def GTE(self):
return self.getToken(SqlBaseParser.GTE, 0)
def getRuleIndex(self):
return SqlBaseParser.RULE_comparisonOperator
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterComparisonOperator" ):
listener.enterComparisonOperator(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitComparisonOperator" ):
listener.exitComparisonOperator(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitComparisonOperator" ):
return visitor.visitComparisonOperator(self)
else:
return visitor.visitChildren(self)
    def comparisonOperator(self):
        """Parse ``comparisonOperator``: one of the six relational operator tokens.

        Membership in the operator set is tested with the generated 64-bit
        bitmask over token types offset by 255.
        """
        localctx = SqlBaseParser.ComparisonOperatorContext(self, self._ctx, self.state)
        self.enterRule(localctx, 110, self.RULE_comparisonOperator)
        self._la = 0 # Token type
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 2001
            _la = self._input.LA(1)
            # Bit-set test: accept EQ, NEQ, LT, LTE, GT or GTE only.
            if not(((((_la - 255)) & ~0x3f) == 0 and ((1 << (_la - 255)) & ((1 << (SqlBaseParser.EQ - 255)) | (1 << (SqlBaseParser.NEQ - 255)) | (1 << (SqlBaseParser.LT - 255)) | (1 << (SqlBaseParser.LTE - 255)) | (1 << (SqlBaseParser.GT - 255)) | (1 << (SqlBaseParser.GTE - 255)))) != 0)):
                self._errHandler.recoverInline(self)
            else:
                self._errHandler.reportMatch(self)
                self.consume()
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
class ComparisonQuantifierContext(ParserRuleContext):
__slots__ = 'parser'
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def ALL(self):
return self.getToken(SqlBaseParser.ALL, 0)
def SOME(self):
return self.getToken(SqlBaseParser.SOME, 0)
def ANY(self):
return self.getToken(SqlBaseParser.ANY, 0)
def getRuleIndex(self):
return SqlBaseParser.RULE_comparisonQuantifier
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterComparisonQuantifier" ):
listener.enterComparisonQuantifier(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitComparisonQuantifier" ):
listener.exitComparisonQuantifier(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitComparisonQuantifier" ):
return visitor.visitComparisonQuantifier(self)
else:
return visitor.visitChildren(self)
    def comparisonQuantifier(self):
        """Parse ``comparisonQuantifier``: one of the keywords ALL, ANY or SOME."""
        localctx = SqlBaseParser.ComparisonQuantifierContext(self, self._ctx, self.state)
        self.enterRule(localctx, 112, self.RULE_comparisonQuantifier)
        self._la = 0 # Token type
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 2003
            _la = self._input.LA(1)
            # Accept exactly one of the three quantifier keywords.
            if not(_la==SqlBaseParser.ALL or _la==SqlBaseParser.ANY or _la==SqlBaseParser.SOME):
                self._errHandler.recoverInline(self)
            else:
                self._errHandler.reportMatch(self)
                self.consume()
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
class BooleanValueContext(ParserRuleContext):
__slots__ = 'parser'
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def TRUE(self):
return self.getToken(SqlBaseParser.TRUE, 0)
def FALSE(self):
return self.getToken(SqlBaseParser.FALSE, 0)
def getRuleIndex(self):
return SqlBaseParser.RULE_booleanValue
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterBooleanValue" ):
listener.enterBooleanValue(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitBooleanValue" ):
listener.exitBooleanValue(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitBooleanValue" ):
return visitor.visitBooleanValue(self)
else:
return visitor.visitChildren(self)
    def booleanValue(self):
        """Parse ``booleanValue``: the keyword TRUE or FALSE."""
        localctx = SqlBaseParser.BooleanValueContext(self, self._ctx, self.state)
        self.enterRule(localctx, 114, self.RULE_booleanValue)
        self._la = 0 # Token type
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 2005
            _la = self._input.LA(1)
            # Accept exactly one of the two boolean keywords.
            if not(_la==SqlBaseParser.FALSE or _la==SqlBaseParser.TRUE):
                self._errHandler.recoverInline(self)
            else:
                self._errHandler.reportMatch(self)
                self.consume()
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
class IntervalContext(ParserRuleContext):
__slots__ = 'parser'
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
self.sign = None # Token
self.from_ = None # IntervalFieldContext
self.to = None # IntervalFieldContext
def INTERVAL(self):
return self.getToken(SqlBaseParser.INTERVAL, 0)
def string(self):
return self.getTypedRuleContext(SqlBaseParser.StringContext,0)
def intervalField(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(SqlBaseParser.IntervalFieldContext)
else:
return self.getTypedRuleContext(SqlBaseParser.IntervalFieldContext,i)
def TO(self):
return self.getToken(SqlBaseParser.TO, 0)
def PLUS(self):
return self.getToken(SqlBaseParser.PLUS, 0)
def MINUS(self):
return self.getToken(SqlBaseParser.MINUS, 0)
def getRuleIndex(self):
return SqlBaseParser.RULE_interval
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterInterval" ):
listener.enterInterval(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitInterval" ):
listener.exitInterval(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitInterval" ):
return visitor.visitInterval(self)
else:
return visitor.visitChildren(self)
    def interval(self):
        """Parse the ``interval`` rule: ``INTERVAL (+|-)? string intervalField
        (TO intervalField)?``.

        The optional sign token is captured into ``localctx.sign``; the start
        and (optional) end fields go into ``localctx.from_`` / ``localctx.to``.
        """
        localctx = SqlBaseParser.IntervalContext(self, self._ctx, self.state)
        self.enterRule(localctx, 116, self.RULE_interval)
        self._la = 0 # Token type
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 2007
            self.match(SqlBaseParser.INTERVAL)
            self.state = 2009
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            # Optional sign: consume a PLUS or MINUS token if present.
            if _la==SqlBaseParser.PLUS or _la==SqlBaseParser.MINUS:
                self.state = 2008
                localctx.sign = self._input.LT(1)
                _la = self._input.LA(1)
                if not(_la==SqlBaseParser.PLUS or _la==SqlBaseParser.MINUS):
                    localctx.sign = self._errHandler.recoverInline(self)
                else:
                    self._errHandler.reportMatch(self)
                    self.consume()
            self.state = 2011
            self.string()
            self.state = 2012
            localctx.from_ = self.intervalField()
            self.state = 2015
            self._errHandler.sync(self)
            # Adaptive prediction (ATN decision 262) decides whether the
            # optional "TO intervalField" range suffix is present.
            la_ = self._interp.adaptivePredict(self._input,262,self._ctx)
            if la_ == 1:
                self.state = 2013
                self.match(SqlBaseParser.TO)
                self.state = 2014
                localctx.to = self.intervalField()
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
class IntervalFieldContext(ParserRuleContext):
__slots__ = 'parser'
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def YEAR(self):
return self.getToken(SqlBaseParser.YEAR, 0)
def MONTH(self):
return self.getToken(SqlBaseParser.MONTH, 0)
def DAY(self):
return self.getToken(SqlBaseParser.DAY, 0)
def HOUR(self):
return self.getToken(SqlBaseParser.HOUR, 0)
def MINUTE(self):
return self.getToken(SqlBaseParser.MINUTE, 0)
def SECOND(self):
return self.getToken(SqlBaseParser.SECOND, 0)
def getRuleIndex(self):
return SqlBaseParser.RULE_intervalField
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterIntervalField" ):
listener.enterIntervalField(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitIntervalField" ):
listener.exitIntervalField(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitIntervalField" ):
return visitor.visitIntervalField(self)
else:
return visitor.visitChildren(self)
    def intervalField(self):
        """Parse ``intervalField``: one of YEAR, MONTH, DAY, HOUR, MINUTE, SECOND.

        HOUR/MINUTE/MONTH are tested together via the generated bitmask (their
        token values fall in the same 64-token window starting at 101); DAY,
        SECOND and YEAR are tested individually.
        """
        localctx = SqlBaseParser.IntervalFieldContext(self, self._ctx, self.state)
        self.enterRule(localctx, 118, self.RULE_intervalField)
        self._la = 0 # Token type
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 2017
            _la = self._input.LA(1)
            if not(_la==SqlBaseParser.DAY or ((((_la - 101)) & ~0x3f) == 0 and ((1 << (_la - 101)) & ((1 << (SqlBaseParser.HOUR - 101)) | (1 << (SqlBaseParser.MINUTE - 101)) | (1 << (SqlBaseParser.MONTH - 101)))) != 0) or _la==SqlBaseParser.SECOND or _la==SqlBaseParser.YEAR):
                self._errHandler.recoverInline(self)
            else:
                self._errHandler.reportMatch(self)
                self.consume()
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
class NormalFormContext(ParserRuleContext):
__slots__ = 'parser'
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def NFD(self):
return self.getToken(SqlBaseParser.NFD, 0)
def NFC(self):
return self.getToken(SqlBaseParser.NFC, 0)
def NFKD(self):
return self.getToken(SqlBaseParser.NFKD, 0)
def NFKC(self):
return self.getToken(SqlBaseParser.NFKC, 0)
def getRuleIndex(self):
return SqlBaseParser.RULE_normalForm
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterNormalForm" ):
listener.enterNormalForm(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitNormalForm" ):
listener.exitNormalForm(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitNormalForm" ):
return visitor.visitNormalForm(self)
else:
return visitor.visitChildren(self)
    def normalForm(self):
        """Parse ``normalForm``: one of NFC, NFD, NFKC, NFKD.

        Membership is tested with the generated 64-bit bitmask over token
        types offset by 142.
        """
        localctx = SqlBaseParser.NormalFormContext(self, self._ctx, self.state)
        self.enterRule(localctx, 120, self.RULE_normalForm)
        self._la = 0 # Token type
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 2019
            _la = self._input.LA(1)
            if not(((((_la - 142)) & ~0x3f) == 0 and ((1 << (_la - 142)) & ((1 << (SqlBaseParser.NFC - 142)) | (1 << (SqlBaseParser.NFD - 142)) | (1 << (SqlBaseParser.NFKC - 142)) | (1 << (SqlBaseParser.NFKD - 142)))) != 0)):
                self._errHandler.recoverInline(self)
            else:
                self._errHandler.reportMatch(self)
                self.consume()
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
class Type_Context(ParserRuleContext):
__slots__ = 'parser'
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def getRuleIndex(self):
return SqlBaseParser.RULE_type_
def copyFrom(self, ctx:ParserRuleContext):
super().copyFrom(ctx)
class RowTypeContext(Type_Context):
def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.Type_Context
super().__init__(parser)
self.copyFrom(ctx)
def ROW(self):
return self.getToken(SqlBaseParser.ROW, 0)
def rowField(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(SqlBaseParser.RowFieldContext)
else:
return self.getTypedRuleContext(SqlBaseParser.RowFieldContext,i)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterRowType" ):
listener.enterRowType(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitRowType" ):
listener.exitRowType(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitRowType" ):
return visitor.visitRowType(self)
else:
return visitor.visitChildren(self)
class IntervalTypeContext(Type_Context):
def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.Type_Context
super().__init__(parser)
self.from_ = None # IntervalFieldContext
self.to = None # IntervalFieldContext
self.copyFrom(ctx)
def INTERVAL(self):
return self.getToken(SqlBaseParser.INTERVAL, 0)
def intervalField(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(SqlBaseParser.IntervalFieldContext)
else:
return self.getTypedRuleContext(SqlBaseParser.IntervalFieldContext,i)
def TO(self):
return self.getToken(SqlBaseParser.TO, 0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterIntervalType" ):
listener.enterIntervalType(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitIntervalType" ):
listener.exitIntervalType(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitIntervalType" ):
return visitor.visitIntervalType(self)
else:
return visitor.visitChildren(self)
class ArrayTypeContext(Type_Context):
def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.Type_Context
super().__init__(parser)
self.copyFrom(ctx)
def type_(self):
return self.getTypedRuleContext(SqlBaseParser.Type_Context,0)
def ARRAY(self):
return self.getToken(SqlBaseParser.ARRAY, 0)
def INTEGER_VALUE(self):
return self.getToken(SqlBaseParser.INTEGER_VALUE, 0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterArrayType" ):
listener.enterArrayType(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitArrayType" ):
listener.exitArrayType(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitArrayType" ):
return visitor.visitArrayType(self)
else:
return visitor.visitChildren(self)
class DoublePrecisionTypeContext(Type_Context):
def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.Type_Context
super().__init__(parser)
self.copyFrom(ctx)
def DOUBLE(self):
return self.getToken(SqlBaseParser.DOUBLE, 0)
def PRECISION(self):
return self.getToken(SqlBaseParser.PRECISION, 0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterDoublePrecisionType" ):
listener.enterDoublePrecisionType(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitDoublePrecisionType" ):
listener.exitDoublePrecisionType(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitDoublePrecisionType" ):
return visitor.visitDoublePrecisionType(self)
else:
return visitor.visitChildren(self)
class LegacyArrayTypeContext(Type_Context):
def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.Type_Context
super().__init__(parser)
self.copyFrom(ctx)
def ARRAY(self):
return self.getToken(SqlBaseParser.ARRAY, 0)
def LT(self):
return self.getToken(SqlBaseParser.LT, 0)
def type_(self):
return self.getTypedRuleContext(SqlBaseParser.Type_Context,0)
def GT(self):
return self.getToken(SqlBaseParser.GT, 0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterLegacyArrayType" ):
listener.enterLegacyArrayType(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitLegacyArrayType" ):
listener.exitLegacyArrayType(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitLegacyArrayType" ):
return visitor.visitLegacyArrayType(self)
else:
return visitor.visitChildren(self)
class GenericTypeContext(Type_Context):
def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.Type_Context
super().__init__(parser)
self.copyFrom(ctx)
def identifier(self):
return self.getTypedRuleContext(SqlBaseParser.IdentifierContext,0)
def typeParameter(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(SqlBaseParser.TypeParameterContext)
else:
return self.getTypedRuleContext(SqlBaseParser.TypeParameterContext,i)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterGenericType" ):
listener.enterGenericType(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitGenericType" ):
listener.exitGenericType(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitGenericType" ):
return visitor.visitGenericType(self)
else:
return visitor.visitChildren(self)
class DateTimeTypeContext(Type_Context):
def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.Type_Context
super().__init__(parser)
self.base = None # Token
self.precision = None # TypeParameterContext
self.copyFrom(ctx)
def TIMESTAMP(self):
return self.getToken(SqlBaseParser.TIMESTAMP, 0)
def WITHOUT(self):
return self.getToken(SqlBaseParser.WITHOUT, 0)
def TIME(self, i:int=None):
if i is None:
return self.getTokens(SqlBaseParser.TIME)
else:
return self.getToken(SqlBaseParser.TIME, i)
def ZONE(self):
return self.getToken(SqlBaseParser.ZONE, 0)
def typeParameter(self):
return self.getTypedRuleContext(SqlBaseParser.TypeParameterContext,0)
def WITH(self):
return self.getToken(SqlBaseParser.WITH, 0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterDateTimeType" ):
listener.enterDateTimeType(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitDateTimeType" ):
listener.exitDateTimeType(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitDateTimeType" ):
return visitor.visitDateTimeType(self)
else:
return visitor.visitChildren(self)
class LegacyMapTypeContext(Type_Context):
def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.Type_Context
super().__init__(parser)
self.keyType = None # Type_Context
self.valueType = None # Type_Context
self.copyFrom(ctx)
def MAP(self):
return self.getToken(SqlBaseParser.MAP, 0)
def LT(self):
return self.getToken(SqlBaseParser.LT, 0)
def GT(self):
return self.getToken(SqlBaseParser.GT, 0)
def type_(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(SqlBaseParser.Type_Context)
else:
return self.getTypedRuleContext(SqlBaseParser.Type_Context,i)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterLegacyMapType" ):
listener.enterLegacyMapType(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitLegacyMapType" ):
listener.exitLegacyMapType(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitLegacyMapType" ):
return visitor.visitLegacyMapType(self)
else:
return visitor.visitChildren(self)
    def type_(self, _p:int=0):
        """Parse the left-recursive ``type_`` rule.

        ``_p`` is the operator-precedence threshold passed by the ANTLR
        left-recursion machinery. Alternatives 1-10 parse a primary type
        (ROW, INTERVAL, date/time, DOUBLE PRECISION, legacy ARRAY/MAP, or a
        generic parameterized type); the trailing loop consumes any number of
        postfix ``ARRAY`` suffixes, each wrapping the result in an
        ArrayTypeContext. Returns the resulting Type_Context subclass.
        """
        _parentctx = self._ctx
        _parentState = self.state
        localctx = SqlBaseParser.Type_Context(self, self._ctx, _parentState)
        _prevctx = localctx
        _startState = 122
        self.enterRecursionRule(localctx, 122, self.RULE_type_, _p)
        self._la = 0 # Token type
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 2112
            self._errHandler.sync(self)
            # Decision 273 selects which primary-type alternative applies.
            la_ = self._interp.adaptivePredict(self._input,273,self._ctx)
            if la_ == 1:
                # ROW '(' rowField (',' rowField)* ')'
                localctx = SqlBaseParser.RowTypeContext(self, localctx)
                self._ctx = localctx
                _prevctx = localctx
                self.state = 2022
                self.match(SqlBaseParser.ROW)
                self.state = 2023
                self.match(SqlBaseParser.T__1)
                self.state = 2024
                self.rowField()
                self.state = 2029
                self._errHandler.sync(self)
                _la = self._input.LA(1)
                while _la==SqlBaseParser.T__3:
                    self.state = 2025
                    self.match(SqlBaseParser.T__3)
                    self.state = 2026
                    self.rowField()
                    self.state = 2031
                    self._errHandler.sync(self)
                    _la = self._input.LA(1)
                self.state = 2032
                self.match(SqlBaseParser.T__2)
            elif la_ == 2:
                # INTERVAL from_=intervalField (TO to=intervalField)?
                localctx = SqlBaseParser.IntervalTypeContext(self, localctx)
                self._ctx = localctx
                _prevctx = localctx
                self.state = 2034
                self.match(SqlBaseParser.INTERVAL)
                self.state = 2035
                localctx.from_ = self.intervalField()
                self.state = 2038
                self._errHandler.sync(self)
                la_ = self._interp.adaptivePredict(self._input,264,self._ctx)
                if la_ == 1:
                    self.state = 2036
                    self.match(SqlBaseParser.TO)
                    self.state = 2037
                    localctx.to = self.intervalField()
            elif la_ == 3:
                # TIMESTAMP ('(' precision ')')? (WITHOUT TIME ZONE)?
                localctx = SqlBaseParser.DateTimeTypeContext(self, localctx)
                self._ctx = localctx
                _prevctx = localctx
                self.state = 2040
                localctx.base = self.match(SqlBaseParser.TIMESTAMP)
                self.state = 2045
                self._errHandler.sync(self)
                la_ = self._interp.adaptivePredict(self._input,265,self._ctx)
                if la_ == 1:
                    self.state = 2041
                    self.match(SqlBaseParser.T__1)
                    self.state = 2042
                    localctx.precision = self.typeParameter()
                    self.state = 2043
                    self.match(SqlBaseParser.T__2)
                self.state = 2050
                self._errHandler.sync(self)
                la_ = self._interp.adaptivePredict(self._input,266,self._ctx)
                if la_ == 1:
                    self.state = 2047
                    self.match(SqlBaseParser.WITHOUT)
                    self.state = 2048
                    self.match(SqlBaseParser.TIME)
                    self.state = 2049
                    self.match(SqlBaseParser.ZONE)
            elif la_ == 4:
                # TIMESTAMP ('(' precision ')')? WITH TIME ZONE
                localctx = SqlBaseParser.DateTimeTypeContext(self, localctx)
                self._ctx = localctx
                _prevctx = localctx
                self.state = 2052
                localctx.base = self.match(SqlBaseParser.TIMESTAMP)
                self.state = 2057
                self._errHandler.sync(self)
                _la = self._input.LA(1)
                if _la==SqlBaseParser.T__1:
                    self.state = 2053
                    self.match(SqlBaseParser.T__1)
                    self.state = 2054
                    localctx.precision = self.typeParameter()
                    self.state = 2055
                    self.match(SqlBaseParser.T__2)
                self.state = 2059
                self.match(SqlBaseParser.WITH)
                self.state = 2060
                self.match(SqlBaseParser.TIME)
                self.state = 2061
                self.match(SqlBaseParser.ZONE)
            elif la_ == 5:
                # TIME ('(' precision ')')? (WITHOUT TIME ZONE)?
                localctx = SqlBaseParser.DateTimeTypeContext(self, localctx)
                self._ctx = localctx
                _prevctx = localctx
                self.state = 2062
                localctx.base = self.match(SqlBaseParser.TIME)
                self.state = 2067
                self._errHandler.sync(self)
                la_ = self._interp.adaptivePredict(self._input,268,self._ctx)
                if la_ == 1:
                    self.state = 2063
                    self.match(SqlBaseParser.T__1)
                    self.state = 2064
                    localctx.precision = self.typeParameter()
                    self.state = 2065
                    self.match(SqlBaseParser.T__2)
                self.state = 2072
                self._errHandler.sync(self)
                la_ = self._interp.adaptivePredict(self._input,269,self._ctx)
                if la_ == 1:
                    self.state = 2069
                    self.match(SqlBaseParser.WITHOUT)
                    self.state = 2070
                    self.match(SqlBaseParser.TIME)
                    self.state = 2071
                    self.match(SqlBaseParser.ZONE)
            elif la_ == 6:
                # TIME ('(' precision ')')? WITH TIME ZONE
                localctx = SqlBaseParser.DateTimeTypeContext(self, localctx)
                self._ctx = localctx
                _prevctx = localctx
                self.state = 2074
                localctx.base = self.match(SqlBaseParser.TIME)
                self.state = 2079
                self._errHandler.sync(self)
                _la = self._input.LA(1)
                if _la==SqlBaseParser.T__1:
                    self.state = 2075
                    self.match(SqlBaseParser.T__1)
                    self.state = 2076
                    localctx.precision = self.typeParameter()
                    self.state = 2077
                    self.match(SqlBaseParser.T__2)
                self.state = 2081
                self.match(SqlBaseParser.WITH)
                self.state = 2082
                self.match(SqlBaseParser.TIME)
                self.state = 2083
                self.match(SqlBaseParser.ZONE)
            elif la_ == 7:
                # DOUBLE PRECISION
                localctx = SqlBaseParser.DoublePrecisionTypeContext(self, localctx)
                self._ctx = localctx
                _prevctx = localctx
                self.state = 2084
                self.match(SqlBaseParser.DOUBLE)
                self.state = 2085
                self.match(SqlBaseParser.PRECISION)
            elif la_ == 8:
                # Legacy ARRAY '<' type_ '>'
                localctx = SqlBaseParser.LegacyArrayTypeContext(self, localctx)
                self._ctx = localctx
                _prevctx = localctx
                self.state = 2086
                self.match(SqlBaseParser.ARRAY)
                self.state = 2087
                self.match(SqlBaseParser.LT)
                self.state = 2088
                self.type_(0)
                self.state = 2089
                self.match(SqlBaseParser.GT)
            elif la_ == 9:
                # Legacy MAP '<' keyType ',' valueType '>'
                localctx = SqlBaseParser.LegacyMapTypeContext(self, localctx)
                self._ctx = localctx
                _prevctx = localctx
                self.state = 2091
                self.match(SqlBaseParser.MAP)
                self.state = 2092
                self.match(SqlBaseParser.LT)
                self.state = 2093
                localctx.keyType = self.type_(0)
                self.state = 2094
                self.match(SqlBaseParser.T__3)
                self.state = 2095
                localctx.valueType = self.type_(0)
                self.state = 2096
                self.match(SqlBaseParser.GT)
            elif la_ == 10:
                # identifier ('(' typeParameter (',' typeParameter)* ')')?
                localctx = SqlBaseParser.GenericTypeContext(self, localctx)
                self._ctx = localctx
                _prevctx = localctx
                self.state = 2098
                self.identifier()
                self.state = 2110
                self._errHandler.sync(self)
                la_ = self._interp.adaptivePredict(self._input,272,self._ctx)
                if la_ == 1:
                    self.state = 2099
                    self.match(SqlBaseParser.T__1)
                    self.state = 2100
                    self.typeParameter()
                    self.state = 2105
                    self._errHandler.sync(self)
                    _la = self._input.LA(1)
                    while _la==SqlBaseParser.T__3:
                        self.state = 2101
                        self.match(SqlBaseParser.T__3)
                        self.state = 2102
                        self.typeParameter()
                        self.state = 2107
                        self._errHandler.sync(self)
                        _la = self._input.LA(1)
                    self.state = 2108
                    self.match(SqlBaseParser.T__2)
            self._ctx.stop = self._input.LT(-1)
            self.state = 2123
            self._errHandler.sync(self)
            # Left-recursion loop: each iteration wraps the parsed type in a
            # postfix ARRAY ('[' INTEGER_VALUE ']')? suffix.
            _alt = self._interp.adaptivePredict(self._input,275,self._ctx)
            while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
                if _alt==1:
                    if self._parseListeners is not None:
                        self.triggerExitRuleEvent()
                    _prevctx = localctx
                    localctx = SqlBaseParser.ArrayTypeContext(self, SqlBaseParser.Type_Context(self, _parentctx, _parentState))
                    self.pushNewRecursionContext(localctx, _startState, self.RULE_type_)
                    self.state = 2114
                    if not self.precpred(self._ctx, 2):
                        from antlr4.error.Errors import FailedPredicateException
                        raise FailedPredicateException(self, "self.precpred(self._ctx, 2)")
                    self.state = 2115
                    self.match(SqlBaseParser.ARRAY)
                    self.state = 2119
                    self._errHandler.sync(self)
                    la_ = self._interp.adaptivePredict(self._input,274,self._ctx)
                    if la_ == 1:
                        self.state = 2116
                        self.match(SqlBaseParser.T__6)
                        self.state = 2117
                        self.match(SqlBaseParser.INTEGER_VALUE)
                        self.state = 2118
                        self.match(SqlBaseParser.T__7)
                self.state = 2125
                self._errHandler.sync(self)
                _alt = self._interp.adaptivePredict(self._input,275,self._ctx)
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.unrollRecursionContexts(_parentctx)
        return localctx
class RowFieldContext(ParserRuleContext):
__slots__ = 'parser'
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def type_(self):
return self.getTypedRuleContext(SqlBaseParser.Type_Context,0)
def identifier(self):
return self.getTypedRuleContext(SqlBaseParser.IdentifierContext,0)
def getRuleIndex(self):
return SqlBaseParser.RULE_rowField
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterRowField" ):
listener.enterRowField(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitRowField" ):
listener.exitRowField(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitRowField" ):
return visitor.visitRowField(self)
else:
return visitor.visitChildren(self)
    def rowField(self):
        """Parse one ``rowField``: either a bare ``type_`` or ``identifier type_``."""
        localctx = SqlBaseParser.RowFieldContext(self, self._ctx, self.state)
        self.enterRule(localctx, 124, self.RULE_rowField)
        try:
            self.state = 2130
            self._errHandler.sync(self)
            # Decision 276 distinguishes an anonymous field from a named one.
            la_ = self._interp.adaptivePredict(self._input,276,self._ctx)
            if la_ == 1:
                self.enterOuterAlt(localctx, 1)
                self.state = 2126
                self.type_(0)
            elif la_ == 2:
                self.enterOuterAlt(localctx, 2)
                self.state = 2127
                self.identifier()
                self.state = 2128
                self.type_(0)
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
class TypeParameterContext(ParserRuleContext):
__slots__ = 'parser'
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def INTEGER_VALUE(self):
return self.getToken(SqlBaseParser.INTEGER_VALUE, 0)
def type_(self):
return self.getTypedRuleContext(SqlBaseParser.Type_Context,0)
def getRuleIndex(self):
return SqlBaseParser.RULE_typeParameter
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterTypeParameter" ):
listener.enterTypeParameter(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitTypeParameter" ):
listener.exitTypeParameter(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitTypeParameter" ):
return visitor.visitTypeParameter(self)
else:
return visitor.visitChildren(self)
    def typeParameter(self):
        """Parse one ``typeParameter``: an INTEGER_VALUE or a nested ``type_``.

        The second alternative is gated on the (large) set of tokens that can
        begin a type, including all non-reserved keywords usable as identifiers.
        """
        localctx = SqlBaseParser.TypeParameterContext(self, self._ctx, self.state)
        self.enterRule(localctx, 126, self.RULE_typeParameter)
        try:
            self.state = 2134
            self._errHandler.sync(self)
            token = self._input.LA(1)
            if token in [SqlBaseParser.INTEGER_VALUE]:
                self.enterOuterAlt(localctx, 1)
                self.state = 2132
                self.match(SqlBaseParser.INTEGER_VALUE)
            elif token in [SqlBaseParser.ADD, SqlBaseParser.ADMIN, SqlBaseParser.AFTER, SqlBaseParser.ALL, SqlBaseParser.ANALYZE, SqlBaseParser.ANY, SqlBaseParser.ARRAY, SqlBaseParser.ASC, SqlBaseParser.AT, SqlBaseParser.AUTHORIZATION, SqlBaseParser.BERNOULLI, SqlBaseParser.CALL, SqlBaseParser.CASCADE, SqlBaseParser.CATALOGS, SqlBaseParser.COLUMN, SqlBaseParser.COLUMNS, SqlBaseParser.COMMENT, SqlBaseParser.COMMIT, SqlBaseParser.COMMITTED, SqlBaseParser.COUNT, SqlBaseParser.CURRENT, SqlBaseParser.DATA, SqlBaseParser.DATE, SqlBaseParser.DAY, SqlBaseParser.DEFINER, SqlBaseParser.DENY, SqlBaseParser.DESC, SqlBaseParser.DEFINE, SqlBaseParser.DISTRIBUTED, SqlBaseParser.DOUBLE, SqlBaseParser.EMPTY, SqlBaseParser.ERROR, SqlBaseParser.EXCLUDING, SqlBaseParser.EXPLAIN, SqlBaseParser.FETCH, SqlBaseParser.FILTER, SqlBaseParser.FINAL, SqlBaseParser.FIRST, SqlBaseParser.FOLLOWING, SqlBaseParser.FORMAT, SqlBaseParser.FUNCTIONS, SqlBaseParser.GRANT, SqlBaseParser.GRANTED, SqlBaseParser.GRANTS, SqlBaseParser.GRAPHVIZ, SqlBaseParser.GROUPS, SqlBaseParser.HOUR, SqlBaseParser.IF, SqlBaseParser.IGNORE, SqlBaseParser.INCLUDING, SqlBaseParser.INITIAL, SqlBaseParser.INPUT, SqlBaseParser.INTERVAL, SqlBaseParser.INVOKER, SqlBaseParser.IO, SqlBaseParser.ISOLATION, SqlBaseParser.JSON, SqlBaseParser.LAST, SqlBaseParser.LATERAL, SqlBaseParser.LEVEL, SqlBaseParser.LIMIT, SqlBaseParser.LOCAL, SqlBaseParser.LOGICAL, SqlBaseParser.MAP, SqlBaseParser.MATCH, SqlBaseParser.MATCHED, SqlBaseParser.MATCHES, SqlBaseParser.MATCH_RECOGNIZE, SqlBaseParser.MATERIALIZED, SqlBaseParser.MEASURES, SqlBaseParser.MERGE, SqlBaseParser.MINUTE, SqlBaseParser.MONTH, SqlBaseParser.NEXT, SqlBaseParser.NFC, SqlBaseParser.NFD, SqlBaseParser.NFKC, SqlBaseParser.NFKD, SqlBaseParser.NO, SqlBaseParser.NONE, SqlBaseParser.NULLIF, SqlBaseParser.NULLS, SqlBaseParser.OFFSET, SqlBaseParser.OMIT, SqlBaseParser.OF, SqlBaseParser.ONE, SqlBaseParser.ONLY, SqlBaseParser.OPTION, SqlBaseParser.ORDINALITY, SqlBaseParser.OUTPUT, SqlBaseParser.OVER, 
            SqlBaseParser.OVERFLOW, SqlBaseParser.PARTITION, SqlBaseParser.PARTITIONS, SqlBaseParser.PAST, SqlBaseParser.PATH, SqlBaseParser.PATTERN, SqlBaseParser.PER, SqlBaseParser.PERMUTE, SqlBaseParser.POSITION, SqlBaseParser.PRECEDING, SqlBaseParser.PRECISION, SqlBaseParser.PRIVILEGES, SqlBaseParser.PROPERTIES, SqlBaseParser.RANGE, SqlBaseParser.READ, SqlBaseParser.REFRESH, SqlBaseParser.RENAME, SqlBaseParser.REPEATABLE, SqlBaseParser.REPLACE, SqlBaseParser.RESET, SqlBaseParser.RESPECT, SqlBaseParser.RESTRICT, SqlBaseParser.REVOKE, SqlBaseParser.ROLE, SqlBaseParser.ROLES, SqlBaseParser.ROLLBACK, SqlBaseParser.ROW, SqlBaseParser.ROWS, SqlBaseParser.RUNNING, SqlBaseParser.SCHEMA, SqlBaseParser.SCHEMAS, SqlBaseParser.SECOND, SqlBaseParser.SECURITY, SqlBaseParser.SEEK, SqlBaseParser.SERIALIZABLE, SqlBaseParser.SESSION, SqlBaseParser.SET, SqlBaseParser.SETS, SqlBaseParser.SHOW, SqlBaseParser.SOME, SqlBaseParser.START, SqlBaseParser.STATS, SqlBaseParser.SUBSET, SqlBaseParser.SUBSTRING, SqlBaseParser.SYSTEM, SqlBaseParser.TABLES, SqlBaseParser.TABLESAMPLE, SqlBaseParser.TEXT, SqlBaseParser.TIES, SqlBaseParser.TIME, SqlBaseParser.TIMESTAMP, SqlBaseParser.TO, SqlBaseParser.TRANSACTION, SqlBaseParser.TRUNCATE, SqlBaseParser.TRY_CAST, SqlBaseParser.TYPE, SqlBaseParser.UNBOUNDED, SqlBaseParser.UNCOMMITTED, SqlBaseParser.UNMATCHED, SqlBaseParser.UPDATE, SqlBaseParser.USE, SqlBaseParser.USER, SqlBaseParser.VALIDATE, SqlBaseParser.VERBOSE, SqlBaseParser.VERSION, SqlBaseParser.VIEW, SqlBaseParser.WINDOW, SqlBaseParser.WITHIN, SqlBaseParser.WITHOUT, SqlBaseParser.WORK, SqlBaseParser.WRITE, SqlBaseParser.YEAR, SqlBaseParser.ZONE, SqlBaseParser.IDENTIFIER, SqlBaseParser.DIGIT_IDENTIFIER, SqlBaseParser.QUOTED_IDENTIFIER, SqlBaseParser.BACKQUOTED_IDENTIFIER]:
                self.enterOuterAlt(localctx, 2)
                self.state = 2133
                self.type_(0)
            else:
                raise NoViableAltException(self)
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
class WhenClauseContext(ParserRuleContext):
__slots__ = 'parser'
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
self.condition = None # ExpressionContext
self.result = None # ExpressionContext
def WHEN(self):
return self.getToken(SqlBaseParser.WHEN, 0)
def THEN(self):
return self.getToken(SqlBaseParser.THEN, 0)
def expression(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(SqlBaseParser.ExpressionContext)
else:
return self.getTypedRuleContext(SqlBaseParser.ExpressionContext,i)
def getRuleIndex(self):
return SqlBaseParser.RULE_whenClause
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterWhenClause" ):
listener.enterWhenClause(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitWhenClause" ):
listener.exitWhenClause(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitWhenClause" ):
return visitor.visitWhenClause(self)
else:
return visitor.visitChildren(self)
    def whenClause(self):
        """Parse one CASE arm: ``WHEN condition THEN result``."""
        localctx = SqlBaseParser.WhenClauseContext(self, self._ctx, self.state)
        self.enterRule(localctx, 128, self.RULE_whenClause)
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 2136
            self.match(SqlBaseParser.WHEN)
            self.state = 2137
            localctx.condition = self.expression()
            self.state = 2138
            self.match(SqlBaseParser.THEN)
            self.state = 2139
            localctx.result = self.expression()
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
class Filter_Context(ParserRuleContext):
__slots__ = 'parser'
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def FILTER(self):
return self.getToken(SqlBaseParser.FILTER, 0)
def WHERE(self):
return self.getToken(SqlBaseParser.WHERE, 0)
def booleanExpression(self):
return self.getTypedRuleContext(SqlBaseParser.BooleanExpressionContext,0)
def getRuleIndex(self):
return SqlBaseParser.RULE_filter_
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterFilter_" ):
listener.enterFilter_(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitFilter_" ):
listener.exitFilter_(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitFilter_" ):
return visitor.visitFilter_(self)
else:
return visitor.visitChildren(self)
    def filter_(self):
        """Parse an aggregate filter: ``FILTER '(' WHERE booleanExpression ')'``."""
        localctx = SqlBaseParser.Filter_Context(self, self._ctx, self.state)
        self.enterRule(localctx, 130, self.RULE_filter_)
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 2141
            self.match(SqlBaseParser.FILTER)
            self.state = 2142
            self.match(SqlBaseParser.T__1)
            self.state = 2143
            self.match(SqlBaseParser.WHERE)
            self.state = 2144
            self.booleanExpression(0)
            self.state = 2145
            self.match(SqlBaseParser.T__2)
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
class MergeCaseContext(ParserRuleContext):
__slots__ = 'parser'
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def getRuleIndex(self):
return SqlBaseParser.RULE_mergeCase
def copyFrom(self, ctx:ParserRuleContext):
super().copyFrom(ctx)
class MergeInsertContext(MergeCaseContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.MergeCaseContext
super().__init__(parser)
self.condition = None # ExpressionContext
self._identifier = None # IdentifierContext
self.targets = list() # of IdentifierContexts
self._expression = None # ExpressionContext
self.values = list() # of ExpressionContexts
self.copyFrom(ctx)
def WHEN(self):
return self.getToken(SqlBaseParser.WHEN, 0)
def NOT(self):
return self.getToken(SqlBaseParser.NOT, 0)
def MATCHED(self):
return self.getToken(SqlBaseParser.MATCHED, 0)
def THEN(self):
return self.getToken(SqlBaseParser.THEN, 0)
def INSERT(self):
return self.getToken(SqlBaseParser.INSERT, 0)
def VALUES(self):
return self.getToken(SqlBaseParser.VALUES, 0)
def expression(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(SqlBaseParser.ExpressionContext)
else:
return self.getTypedRuleContext(SqlBaseParser.ExpressionContext,i)
def AND(self):
return self.getToken(SqlBaseParser.AND, 0)
def identifier(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(SqlBaseParser.IdentifierContext)
else:
return self.getTypedRuleContext(SqlBaseParser.IdentifierContext,i)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterMergeInsert" ):
listener.enterMergeInsert(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitMergeInsert" ):
listener.exitMergeInsert(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitMergeInsert" ):
return visitor.visitMergeInsert(self)
else:
return visitor.visitChildren(self)
class MergeUpdateContext(MergeCaseContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.MergeCaseContext
super().__init__(parser)
self.condition = None # ExpressionContext
self._identifier = None # IdentifierContext
self.targets = list() # of IdentifierContexts
self._expression = None # ExpressionContext
self.values = list() # of ExpressionContexts
self.copyFrom(ctx)
def WHEN(self):
return self.getToken(SqlBaseParser.WHEN, 0)
def MATCHED(self):
return self.getToken(SqlBaseParser.MATCHED, 0)
def THEN(self):
return self.getToken(SqlBaseParser.THEN, 0)
def UPDATE(self):
return self.getToken(SqlBaseParser.UPDATE, 0)
def SET(self):
return self.getToken(SqlBaseParser.SET, 0)
def EQ(self, i:int=None):
if i is None:
return self.getTokens(SqlBaseParser.EQ)
else:
return self.getToken(SqlBaseParser.EQ, i)
def identifier(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(SqlBaseParser.IdentifierContext)
else:
return self.getTypedRuleContext(SqlBaseParser.IdentifierContext,i)
def expression(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(SqlBaseParser.ExpressionContext)
else:
return self.getTypedRuleContext(SqlBaseParser.ExpressionContext,i)
def AND(self):
return self.getToken(SqlBaseParser.AND, 0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterMergeUpdate" ):
listener.enterMergeUpdate(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitMergeUpdate" ):
listener.exitMergeUpdate(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitMergeUpdate" ):
return visitor.visitMergeUpdate(self)
else:
return visitor.visitChildren(self)
class MergeDeleteContext(MergeCaseContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.MergeCaseContext
super().__init__(parser)
self.condition = None # ExpressionContext
self.copyFrom(ctx)
def WHEN(self):
return self.getToken(SqlBaseParser.WHEN, 0)
def MATCHED(self):
return self.getToken(SqlBaseParser.MATCHED, 0)
def THEN(self):
return self.getToken(SqlBaseParser.THEN, 0)
def DELETE(self):
return self.getToken(SqlBaseParser.DELETE, 0)
def AND(self):
return self.getToken(SqlBaseParser.AND, 0)
def expression(self):
return self.getTypedRuleContext(SqlBaseParser.ExpressionContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterMergeDelete" ):
listener.enterMergeDelete(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitMergeDelete" ):
listener.exitMergeDelete(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitMergeDelete" ):
return visitor.visitMergeDelete(self)
else:
return visitor.visitChildren(self)
    def mergeCase(self):
        """Parse one MERGE action clause.

        Alternatives (selected by decision 285):
          1. WHEN MATCHED (AND cond)? THEN UPDATE SET id = expr (, id = expr)*
          2. WHEN MATCHED (AND cond)? THEN DELETE
          3. WHEN NOT MATCHED (AND cond)? THEN INSERT ('(' id (, id)* ')')?
             VALUES '(' expr (, expr)* ')'
        """
        localctx = SqlBaseParser.MergeCaseContext(self, self._ctx, self.state)
        self.enterRule(localctx, 132, self.RULE_mergeCase)
        self._la = 0 # Token type
        try:
            self.state = 2211
            self._errHandler.sync(self)
            la_ = self._interp.adaptivePredict(self._input,285,self._ctx)
            if la_ == 1:
                # WHEN MATCHED ... THEN UPDATE SET assignment list
                localctx = SqlBaseParser.MergeUpdateContext(self, localctx)
                self.enterOuterAlt(localctx, 1)
                self.state = 2147
                self.match(SqlBaseParser.WHEN)
                self.state = 2148
                self.match(SqlBaseParser.MATCHED)
                self.state = 2151
                self._errHandler.sync(self)
                _la = self._input.LA(1)
                if _la==SqlBaseParser.AND:
                    self.state = 2149
                    self.match(SqlBaseParser.AND)
                    self.state = 2150
                    localctx.condition = self.expression()
                self.state = 2153
                self.match(SqlBaseParser.THEN)
                self.state = 2154
                self.match(SqlBaseParser.UPDATE)
                self.state = 2155
                self.match(SqlBaseParser.SET)
                self.state = 2156
                localctx._identifier = self.identifier()
                localctx.targets.append(localctx._identifier)
                self.state = 2157
                self.match(SqlBaseParser.EQ)
                self.state = 2158
                localctx._expression = self.expression()
                localctx.values.append(localctx._expression)
                self.state = 2166
                self._errHandler.sync(self)
                _la = self._input.LA(1)
                while _la==SqlBaseParser.T__3:
                    self.state = 2159
                    self.match(SqlBaseParser.T__3)
                    self.state = 2160
                    localctx._identifier = self.identifier()
                    localctx.targets.append(localctx._identifier)
                    self.state = 2161
                    self.match(SqlBaseParser.EQ)
                    self.state = 2162
                    localctx._expression = self.expression()
                    localctx.values.append(localctx._expression)
                    self.state = 2168
                    self._errHandler.sync(self)
                    _la = self._input.LA(1)
            elif la_ == 2:
                # WHEN MATCHED ... THEN DELETE
                localctx = SqlBaseParser.MergeDeleteContext(self, localctx)
                self.enterOuterAlt(localctx, 2)
                self.state = 2169
                self.match(SqlBaseParser.WHEN)
                self.state = 2170
                self.match(SqlBaseParser.MATCHED)
                self.state = 2173
                self._errHandler.sync(self)
                _la = self._input.LA(1)
                if _la==SqlBaseParser.AND:
                    self.state = 2171
                    self.match(SqlBaseParser.AND)
                    self.state = 2172
                    localctx.condition = self.expression()
                self.state = 2175
                self.match(SqlBaseParser.THEN)
                self.state = 2176
                self.match(SqlBaseParser.DELETE)
            elif la_ == 3:
                # WHEN NOT MATCHED ... THEN INSERT (cols)? VALUES (...)
                localctx = SqlBaseParser.MergeInsertContext(self, localctx)
                self.enterOuterAlt(localctx, 3)
                self.state = 2177
                self.match(SqlBaseParser.WHEN)
                self.state = 2178
                self.match(SqlBaseParser.NOT)
                self.state = 2179
                self.match(SqlBaseParser.MATCHED)
                self.state = 2182
                self._errHandler.sync(self)
                _la = self._input.LA(1)
                if _la==SqlBaseParser.AND:
                    self.state = 2180
                    self.match(SqlBaseParser.AND)
                    self.state = 2181
                    localctx.condition = self.expression()
                self.state = 2184
                self.match(SqlBaseParser.THEN)
                self.state = 2185
                self.match(SqlBaseParser.INSERT)
                self.state = 2197
                self._errHandler.sync(self)
                _la = self._input.LA(1)
                if _la==SqlBaseParser.T__1:
                    # Optional explicit target-column list.
                    self.state = 2186
                    self.match(SqlBaseParser.T__1)
                    self.state = 2187
                    localctx._identifier = self.identifier()
                    localctx.targets.append(localctx._identifier)
                    self.state = 2192
                    self._errHandler.sync(self)
                    _la = self._input.LA(1)
                    while _la==SqlBaseParser.T__3:
                        self.state = 2188
                        self.match(SqlBaseParser.T__3)
                        self.state = 2189
                        localctx._identifier = self.identifier()
                        localctx.targets.append(localctx._identifier)
                        self.state = 2194
                        self._errHandler.sync(self)
                        _la = self._input.LA(1)
                    self.state = 2195
                    self.match(SqlBaseParser.T__2)
                self.state = 2199
                self.match(SqlBaseParser.VALUES)
                self.state = 2200
                self.match(SqlBaseParser.T__1)
                self.state = 2201
                localctx._expression = self.expression()
                localctx.values.append(localctx._expression)
                self.state = 2206
                self._errHandler.sync(self)
                _la = self._input.LA(1)
                while _la==SqlBaseParser.T__3:
                    self.state = 2202
                    self.match(SqlBaseParser.T__3)
                    self.state = 2203
                    localctx._expression = self.expression()
                    localctx.values.append(localctx._expression)
                    self.state = 2208
                    self._errHandler.sync(self)
                    _la = self._input.LA(1)
                self.state = 2209
                self.match(SqlBaseParser.T__2)
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
class OverContext(ParserRuleContext):
__slots__ = 'parser'
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
self.windowName = None # IdentifierContext
def OVER(self):
return self.getToken(SqlBaseParser.OVER, 0)
def windowSpecification(self):
return self.getTypedRuleContext(SqlBaseParser.WindowSpecificationContext,0)
def identifier(self):
return self.getTypedRuleContext(SqlBaseParser.IdentifierContext,0)
def getRuleIndex(self):
return SqlBaseParser.RULE_over
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterOver" ):
listener.enterOver(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitOver" ):
listener.exitOver(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitOver" ):
return visitor.visitOver(self)
else:
return visitor.visitChildren(self)
    def over(self):
        """Parse rule 'over': OVER ( windowName | '(' windowSpecification ')' ).

        The long token list below is the ANTLR-generated FIRST set of the
        'identifier' rule (non-reserved keywords plus identifier token
        types) used to decide between a named-window reference and an
        inline window specification.  T__1/T__2 are anonymous literal
        tokens — presumably '(' and ')'; confirm against the .g4 grammar.
        """
        localctx = SqlBaseParser.OverContext(self, self._ctx, self.state)
        self.enterRule(localctx, 134, self.RULE_over)
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 2213
            self.match(SqlBaseParser.OVER)
            self.state = 2219
            self._errHandler.sync(self)
            token = self._input.LA(1)
            if token in [SqlBaseParser.ADD, SqlBaseParser.ADMIN, SqlBaseParser.AFTER, SqlBaseParser.ALL, SqlBaseParser.ANALYZE, SqlBaseParser.ANY, SqlBaseParser.ARRAY, SqlBaseParser.ASC, SqlBaseParser.AT, SqlBaseParser.AUTHORIZATION, SqlBaseParser.BERNOULLI, SqlBaseParser.CALL, SqlBaseParser.CASCADE, SqlBaseParser.CATALOGS, SqlBaseParser.COLUMN, SqlBaseParser.COLUMNS, SqlBaseParser.COMMENT, SqlBaseParser.COMMIT, SqlBaseParser.COMMITTED, SqlBaseParser.COUNT, SqlBaseParser.CURRENT, SqlBaseParser.DATA, SqlBaseParser.DATE, SqlBaseParser.DAY, SqlBaseParser.DEFINER, SqlBaseParser.DENY, SqlBaseParser.DESC, SqlBaseParser.DEFINE, SqlBaseParser.DISTRIBUTED, SqlBaseParser.DOUBLE, SqlBaseParser.EMPTY, SqlBaseParser.ERROR, SqlBaseParser.EXCLUDING, SqlBaseParser.EXPLAIN, SqlBaseParser.FETCH, SqlBaseParser.FILTER, SqlBaseParser.FINAL, SqlBaseParser.FIRST, SqlBaseParser.FOLLOWING, SqlBaseParser.FORMAT, SqlBaseParser.FUNCTIONS, SqlBaseParser.GRANT, SqlBaseParser.GRANTED, SqlBaseParser.GRANTS, SqlBaseParser.GRAPHVIZ, SqlBaseParser.GROUPS, SqlBaseParser.HOUR, SqlBaseParser.IF, SqlBaseParser.IGNORE, SqlBaseParser.INCLUDING, SqlBaseParser.INITIAL, SqlBaseParser.INPUT, SqlBaseParser.INTERVAL, SqlBaseParser.INVOKER, SqlBaseParser.IO, SqlBaseParser.ISOLATION, SqlBaseParser.JSON, SqlBaseParser.LAST, SqlBaseParser.LATERAL, SqlBaseParser.LEVEL, SqlBaseParser.LIMIT, SqlBaseParser.LOCAL, SqlBaseParser.LOGICAL, SqlBaseParser.MAP, SqlBaseParser.MATCH, SqlBaseParser.MATCHED, SqlBaseParser.MATCHES, SqlBaseParser.MATCH_RECOGNIZE, SqlBaseParser.MATERIALIZED, SqlBaseParser.MEASURES, SqlBaseParser.MERGE, SqlBaseParser.MINUTE, SqlBaseParser.MONTH, SqlBaseParser.NEXT, SqlBaseParser.NFC, SqlBaseParser.NFD, SqlBaseParser.NFKC, SqlBaseParser.NFKD, SqlBaseParser.NO, SqlBaseParser.NONE, SqlBaseParser.NULLIF, SqlBaseParser.NULLS, SqlBaseParser.OFFSET, SqlBaseParser.OMIT, SqlBaseParser.OF, SqlBaseParser.ONE, SqlBaseParser.ONLY, SqlBaseParser.OPTION, SqlBaseParser.ORDINALITY, SqlBaseParser.OUTPUT, SqlBaseParser.OVER, 
            SqlBaseParser.OVERFLOW, SqlBaseParser.PARTITION, SqlBaseParser.PARTITIONS, SqlBaseParser.PAST, SqlBaseParser.PATH, SqlBaseParser.PATTERN, SqlBaseParser.PER, SqlBaseParser.PERMUTE, SqlBaseParser.POSITION, SqlBaseParser.PRECEDING, SqlBaseParser.PRECISION, SqlBaseParser.PRIVILEGES, SqlBaseParser.PROPERTIES, SqlBaseParser.RANGE, SqlBaseParser.READ, SqlBaseParser.REFRESH, SqlBaseParser.RENAME, SqlBaseParser.REPEATABLE, SqlBaseParser.REPLACE, SqlBaseParser.RESET, SqlBaseParser.RESPECT, SqlBaseParser.RESTRICT, SqlBaseParser.REVOKE, SqlBaseParser.ROLE, SqlBaseParser.ROLES, SqlBaseParser.ROLLBACK, SqlBaseParser.ROW, SqlBaseParser.ROWS, SqlBaseParser.RUNNING, SqlBaseParser.SCHEMA, SqlBaseParser.SCHEMAS, SqlBaseParser.SECOND, SqlBaseParser.SECURITY, SqlBaseParser.SEEK, SqlBaseParser.SERIALIZABLE, SqlBaseParser.SESSION, SqlBaseParser.SET, SqlBaseParser.SETS, SqlBaseParser.SHOW, SqlBaseParser.SOME, SqlBaseParser.START, SqlBaseParser.STATS, SqlBaseParser.SUBSET, SqlBaseParser.SUBSTRING, SqlBaseParser.SYSTEM, SqlBaseParser.TABLES, SqlBaseParser.TABLESAMPLE, SqlBaseParser.TEXT, SqlBaseParser.TIES, SqlBaseParser.TIME, SqlBaseParser.TIMESTAMP, SqlBaseParser.TO, SqlBaseParser.TRANSACTION, SqlBaseParser.TRUNCATE, SqlBaseParser.TRY_CAST, SqlBaseParser.TYPE, SqlBaseParser.UNBOUNDED, SqlBaseParser.UNCOMMITTED, SqlBaseParser.UNMATCHED, SqlBaseParser.UPDATE, SqlBaseParser.USE, SqlBaseParser.USER, SqlBaseParser.VALIDATE, SqlBaseParser.VERBOSE, SqlBaseParser.VERSION, SqlBaseParser.VIEW, SqlBaseParser.WINDOW, SqlBaseParser.WITHIN, SqlBaseParser.WITHOUT, SqlBaseParser.WORK, SqlBaseParser.WRITE, SqlBaseParser.YEAR, SqlBaseParser.ZONE, SqlBaseParser.IDENTIFIER, SqlBaseParser.DIGIT_IDENTIFIER, SqlBaseParser.QUOTED_IDENTIFIER, SqlBaseParser.BACKQUOTED_IDENTIFIER]:
                # OVER windowName — named window reference
                self.state = 2214
                localctx.windowName = self.identifier()
            elif token in [SqlBaseParser.T__1]:
                # OVER '(' windowSpecification ')' — inline specification
                self.state = 2215
                self.match(SqlBaseParser.T__1)
                self.state = 2216
                self.windowSpecification()
                self.state = 2217
                self.match(SqlBaseParser.T__2)
            else:
                raise NoViableAltException(self)
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class WindowFrameContext(ParserRuleContext):
        """Parse-tree node for the 'windowFrame' rule.

        Covers a window frame extent plus the optional MATCH_RECOGNIZE-style
        row-pattern clauses (MEASURES, AFTER MATCH, INITIAL/SEEK, PATTERN,
        SUBSET, DEFINE) — see the windowFrame() parse method below.
        ANTLR-generated context class."""
        __slots__ = 'parser'
        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser
        def frameExtent(self):
            return self.getTypedRuleContext(SqlBaseParser.FrameExtentContext,0)
        def MEASURES(self):
            return self.getToken(SqlBaseParser.MEASURES, 0)
        def measureDefinition(self, i:int=None):
            # i is None -> all measure children; otherwise the i-th one
            if i is None:
                return self.getTypedRuleContexts(SqlBaseParser.MeasureDefinitionContext)
            else:
                return self.getTypedRuleContext(SqlBaseParser.MeasureDefinitionContext,i)
        def AFTER(self):
            return self.getToken(SqlBaseParser.AFTER, 0)
        def MATCH(self):
            return self.getToken(SqlBaseParser.MATCH, 0)
        def skipTo(self):
            return self.getTypedRuleContext(SqlBaseParser.SkipToContext,0)
        def PATTERN(self):
            return self.getToken(SqlBaseParser.PATTERN, 0)
        def rowPattern(self):
            return self.getTypedRuleContext(SqlBaseParser.RowPatternContext,0)
        def SUBSET(self):
            return self.getToken(SqlBaseParser.SUBSET, 0)
        def subsetDefinition(self, i:int=None):
            if i is None:
                return self.getTypedRuleContexts(SqlBaseParser.SubsetDefinitionContext)
            else:
                return self.getTypedRuleContext(SqlBaseParser.SubsetDefinitionContext,i)
        def DEFINE(self):
            return self.getToken(SqlBaseParser.DEFINE, 0)
        def variableDefinition(self, i:int=None):
            if i is None:
                return self.getTypedRuleContexts(SqlBaseParser.VariableDefinitionContext)
            else:
                return self.getTypedRuleContext(SqlBaseParser.VariableDefinitionContext,i)
        def INITIAL(self):
            return self.getToken(SqlBaseParser.INITIAL, 0)
        def SEEK(self):
            return self.getToken(SqlBaseParser.SEEK, 0)
        def getRuleIndex(self):
            return SqlBaseParser.RULE_windowFrame
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterWindowFrame" ):
                listener.enterWindowFrame(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitWindowFrame" ):
                listener.exitWindowFrame(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitWindowFrame" ):
                return visitor.visitWindowFrame(self)
            else:
                return visitor.visitChildren(self)
    def windowFrame(self):
        """Parse rule 'windowFrame':

            (MEASURES measureDefinition (',' measureDefinition)*)?
            frameExtent
            (AFTER MATCH skipTo)?
            (INITIAL | SEEK)?
            (PATTERN '(' rowPattern ')')?
            (SUBSET subsetDefinition (',' subsetDefinition)*)?
            (DEFINE variableDefinition (',' variableDefinition)*)?

        T__3 is an anonymous literal token — presumably ','; confirm in the
        .g4 grammar.  ANTLR-generated; state numbers index into the ATN.
        """
        localctx = SqlBaseParser.WindowFrameContext(self, self._ctx, self.state)
        self.enterRule(localctx, 136, self.RULE_windowFrame)
        self._la = 0 # Token type
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 2230
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            if _la==SqlBaseParser.MEASURES:
                # optional MEASURES list
                self.state = 2221
                self.match(SqlBaseParser.MEASURES)
                self.state = 2222
                self.measureDefinition()
                self.state = 2227
                self._errHandler.sync(self)
                _la = self._input.LA(1)
                while _la==SqlBaseParser.T__3:
                    self.state = 2223
                    self.match(SqlBaseParser.T__3)
                    self.state = 2224
                    self.measureDefinition()
                    self.state = 2229
                    self._errHandler.sync(self)
                    _la = self._input.LA(1)
            # mandatory frame extent (RANGE/ROWS/GROUPS ...)
            self.state = 2232
            self.frameExtent()
            self.state = 2236
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            if _la==SqlBaseParser.AFTER:
                # optional AFTER MATCH skipTo
                self.state = 2233
                self.match(SqlBaseParser.AFTER)
                self.state = 2234
                self.match(SqlBaseParser.MATCH)
                self.state = 2235
                self.skipTo()
            self.state = 2239
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            if _la==SqlBaseParser.INITIAL or _la==SqlBaseParser.SEEK:
                # optional INITIAL | SEEK mode keyword
                self.state = 2238
                _la = self._input.LA(1)
                if not(_la==SqlBaseParser.INITIAL or _la==SqlBaseParser.SEEK):
                    self._errHandler.recoverInline(self)
                else:
                    self._errHandler.reportMatch(self)
                    self.consume()
            self.state = 2246
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            if _la==SqlBaseParser.PATTERN:
                # optional PATTERN '(' rowPattern ')'
                self.state = 2241
                self.match(SqlBaseParser.PATTERN)
                self.state = 2242
                self.match(SqlBaseParser.T__1)
                self.state = 2243
                self.rowPattern(0)
                self.state = 2244
                self.match(SqlBaseParser.T__2)
            self.state = 2257
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            if _la==SqlBaseParser.SUBSET:
                # optional SUBSET list
                self.state = 2248
                self.match(SqlBaseParser.SUBSET)
                self.state = 2249
                self.subsetDefinition()
                self.state = 2254
                self._errHandler.sync(self)
                _la = self._input.LA(1)
                while _la==SqlBaseParser.T__3:
                    self.state = 2250
                    self.match(SqlBaseParser.T__3)
                    self.state = 2251
                    self.subsetDefinition()
                    self.state = 2256
                    self._errHandler.sync(self)
                    _la = self._input.LA(1)
            self.state = 2268
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            if _la==SqlBaseParser.DEFINE:
                # optional DEFINE list
                self.state = 2259
                self.match(SqlBaseParser.DEFINE)
                self.state = 2260
                self.variableDefinition()
                self.state = 2265
                self._errHandler.sync(self)
                _la = self._input.LA(1)
                while _la==SqlBaseParser.T__3:
                    self.state = 2261
                    self.match(SqlBaseParser.T__3)
                    self.state = 2262
                    self.variableDefinition()
                    self.state = 2267
                    self._errHandler.sync(self)
                    _la = self._input.LA(1)
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class FrameExtentContext(ParserRuleContext):
        """Parse-tree node for the 'frameExtent' rule: a RANGE/ROWS/GROUPS
        frame with one bound, or a BETWEEN start AND end pair of bounds.
        ANTLR-generated context class."""
        __slots__ = 'parser'
        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser
            self.frameType = None # Token: RANGE, ROWS or GROUPS
            self.start = None # FrameBoundContext
            self.end = None # FrameBoundContext; None unless BETWEEN form was used
        def RANGE(self):
            return self.getToken(SqlBaseParser.RANGE, 0)
        def frameBound(self, i:int=None):
            if i is None:
                return self.getTypedRuleContexts(SqlBaseParser.FrameBoundContext)
            else:
                return self.getTypedRuleContext(SqlBaseParser.FrameBoundContext,i)
        def ROWS(self):
            return self.getToken(SqlBaseParser.ROWS, 0)
        def GROUPS(self):
            return self.getToken(SqlBaseParser.GROUPS, 0)
        def BETWEEN(self):
            return self.getToken(SqlBaseParser.BETWEEN, 0)
        def AND(self):
            return self.getToken(SqlBaseParser.AND, 0)
        def getRuleIndex(self):
            return SqlBaseParser.RULE_frameExtent
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterFrameExtent" ):
                listener.enterFrameExtent(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitFrameExtent" ):
                listener.exitFrameExtent(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitFrameExtent" ):
                return visitor.visitFrameExtent(self)
            else:
                return visitor.visitChildren(self)
    def frameExtent(self):
        """Parse rule 'frameExtent' — six alternatives chosen by adaptive
        prediction (decision 296):

            1-3: (RANGE | ROWS | GROUPS) frameBound
            4-6: (RANGE | ROWS | GROUPS) BETWEEN frameBound AND frameBound

        frameType, start and end are recorded on the context for listeners
        and visitors.  ANTLR-generated.
        """
        localctx = SqlBaseParser.FrameExtentContext(self, self._ctx, self.state)
        self.enterRule(localctx, 138, self.RULE_frameExtent)
        try:
            self.state = 2294
            self._errHandler.sync(self)
            la_ = self._interp.adaptivePredict(self._input,296,self._ctx)
            if la_ == 1:
                self.enterOuterAlt(localctx, 1)
                self.state = 2270
                localctx.frameType = self.match(SqlBaseParser.RANGE)
                self.state = 2271
                localctx.start = self.frameBound()
            elif la_ == 2:
                self.enterOuterAlt(localctx, 2)
                self.state = 2272
                localctx.frameType = self.match(SqlBaseParser.ROWS)
                self.state = 2273
                localctx.start = self.frameBound()
            elif la_ == 3:
                self.enterOuterAlt(localctx, 3)
                self.state = 2274
                localctx.frameType = self.match(SqlBaseParser.GROUPS)
                self.state = 2275
                localctx.start = self.frameBound()
            elif la_ == 4:
                self.enterOuterAlt(localctx, 4)
                self.state = 2276
                localctx.frameType = self.match(SqlBaseParser.RANGE)
                self.state = 2277
                self.match(SqlBaseParser.BETWEEN)
                self.state = 2278
                localctx.start = self.frameBound()
                self.state = 2279
                self.match(SqlBaseParser.AND)
                self.state = 2280
                localctx.end = self.frameBound()
            elif la_ == 5:
                self.enterOuterAlt(localctx, 5)
                self.state = 2282
                localctx.frameType = self.match(SqlBaseParser.ROWS)
                self.state = 2283
                self.match(SqlBaseParser.BETWEEN)
                self.state = 2284
                localctx.start = self.frameBound()
                self.state = 2285
                self.match(SqlBaseParser.AND)
                self.state = 2286
                localctx.end = self.frameBound()
            elif la_ == 6:
                self.enterOuterAlt(localctx, 6)
                self.state = 2288
                localctx.frameType = self.match(SqlBaseParser.GROUPS)
                self.state = 2289
                self.match(SqlBaseParser.BETWEEN)
                self.state = 2290
                localctx.start = self.frameBound()
                self.state = 2291
                self.match(SqlBaseParser.AND)
                self.state = 2292
                localctx.end = self.frameBound()
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class FrameBoundContext(ParserRuleContext):
        """Base context for the 'frameBound' rule.  Concrete parses are one
        of the labeled alternatives UnboundedFrameContext,
        CurrentRowBoundContext or BoundedFrameContext (copyFrom re-labels
        this node as one of them)."""
        __slots__ = 'parser'
        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser
        def getRuleIndex(self):
            return SqlBaseParser.RULE_frameBound
        def copyFrom(self, ctx:ParserRuleContext):
            super().copyFrom(ctx)
    class BoundedFrameContext(FrameBoundContext):
        """Labeled alternative 'boundedFrame': expression (PRECEDING | FOLLOWING)."""
        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.FrameBoundContext
            super().__init__(parser)
            self.boundType = None # Token: PRECEDING or FOLLOWING
            self.copyFrom(ctx)
        def expression(self):
            return self.getTypedRuleContext(SqlBaseParser.ExpressionContext,0)
        def PRECEDING(self):
            return self.getToken(SqlBaseParser.PRECEDING, 0)
        def FOLLOWING(self):
            return self.getToken(SqlBaseParser.FOLLOWING, 0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterBoundedFrame" ):
                listener.enterBoundedFrame(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitBoundedFrame" ):
                listener.exitBoundedFrame(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitBoundedFrame" ):
                return visitor.visitBoundedFrame(self)
            else:
                return visitor.visitChildren(self)
    class UnboundedFrameContext(FrameBoundContext):
        """Labeled alternative 'unboundedFrame': UNBOUNDED (PRECEDING | FOLLOWING)."""
        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.FrameBoundContext
            super().__init__(parser)
            self.boundType = None # Token: PRECEDING or FOLLOWING
            self.copyFrom(ctx)
        def UNBOUNDED(self):
            return self.getToken(SqlBaseParser.UNBOUNDED, 0)
        def PRECEDING(self):
            return self.getToken(SqlBaseParser.PRECEDING, 0)
        def FOLLOWING(self):
            return self.getToken(SqlBaseParser.FOLLOWING, 0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterUnboundedFrame" ):
                listener.enterUnboundedFrame(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitUnboundedFrame" ):
                listener.exitUnboundedFrame(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitUnboundedFrame" ):
                return visitor.visitUnboundedFrame(self)
            else:
                return visitor.visitChildren(self)
    class CurrentRowBoundContext(FrameBoundContext):
        """Labeled alternative 'currentRowBound': CURRENT ROW."""
        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.FrameBoundContext
            super().__init__(parser)
            self.copyFrom(ctx)
        def CURRENT(self):
            return self.getToken(SqlBaseParser.CURRENT, 0)
        def ROW(self):
            return self.getToken(SqlBaseParser.ROW, 0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterCurrentRowBound" ):
                listener.enterCurrentRowBound(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitCurrentRowBound" ):
                listener.exitCurrentRowBound(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitCurrentRowBound" ):
                return visitor.visitCurrentRowBound(self)
            else:
                return visitor.visitChildren(self)
    def frameBound(self):
        """Parse rule 'frameBound' — four alternatives (decision 297):

            1: UNBOUNDED PRECEDING   -> UnboundedFrameContext
            2: UNBOUNDED FOLLOWING   -> UnboundedFrameContext
            3: CURRENT ROW           -> CurrentRowBoundContext
            4: expression (PRECEDING | FOLLOWING) -> BoundedFrameContext

        ANTLR-generated.
        """
        localctx = SqlBaseParser.FrameBoundContext(self, self._ctx, self.state)
        self.enterRule(localctx, 140, self.RULE_frameBound)
        self._la = 0 # Token type
        try:
            self.state = 2305
            self._errHandler.sync(self)
            la_ = self._interp.adaptivePredict(self._input,297,self._ctx)
            if la_ == 1:
                localctx = SqlBaseParser.UnboundedFrameContext(self, localctx)
                self.enterOuterAlt(localctx, 1)
                self.state = 2296
                self.match(SqlBaseParser.UNBOUNDED)
                self.state = 2297
                localctx.boundType = self.match(SqlBaseParser.PRECEDING)
            elif la_ == 2:
                localctx = SqlBaseParser.UnboundedFrameContext(self, localctx)
                self.enterOuterAlt(localctx, 2)
                self.state = 2298
                self.match(SqlBaseParser.UNBOUNDED)
                self.state = 2299
                localctx.boundType = self.match(SqlBaseParser.FOLLOWING)
            elif la_ == 3:
                localctx = SqlBaseParser.CurrentRowBoundContext(self, localctx)
                self.enterOuterAlt(localctx, 3)
                self.state = 2300
                self.match(SqlBaseParser.CURRENT)
                self.state = 2301
                self.match(SqlBaseParser.ROW)
            elif la_ == 4:
                localctx = SqlBaseParser.BoundedFrameContext(self, localctx)
                self.enterOuterAlt(localctx, 4)
                self.state = 2302
                self.expression()
                self.state = 2303
                localctx.boundType = self._input.LT(1)
                _la = self._input.LA(1)
                if not(_la==SqlBaseParser.FOLLOWING or _la==SqlBaseParser.PRECEDING):
                    # unexpected token after the bound expression: inline recovery
                    localctx.boundType = self._errHandler.recoverInline(self)
                else:
                    self._errHandler.reportMatch(self)
                    self.consume()
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class RowPatternContext(ParserRuleContext):
        """Base context for the left-recursive 'rowPattern' rule (row-pattern
        regular expressions).  Re-labeled via copyFrom as
        QuantifiedPrimaryContext, PatternConcatenationContext or
        PatternAlternationContext."""
        __slots__ = 'parser'
        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser
        def getRuleIndex(self):
            return SqlBaseParser.RULE_rowPattern
        def copyFrom(self, ctx:ParserRuleContext):
            super().copyFrom(ctx)
    class QuantifiedPrimaryContext(RowPatternContext):
        """Labeled alternative 'quantifiedPrimary': patternPrimary patternQuantifier?."""
        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.RowPatternContext
            super().__init__(parser)
            self.copyFrom(ctx)
        def patternPrimary(self):
            return self.getTypedRuleContext(SqlBaseParser.PatternPrimaryContext,0)
        def patternQuantifier(self):
            return self.getTypedRuleContext(SqlBaseParser.PatternQuantifierContext,0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterQuantifiedPrimary" ):
                listener.enterQuantifiedPrimary(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitQuantifiedPrimary" ):
                listener.exitQuantifiedPrimary(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitQuantifiedPrimary" ):
                return visitor.visitQuantifiedPrimary(self)
            else:
                return visitor.visitChildren(self)
    class PatternConcatenationContext(RowPatternContext):
        """Labeled alternative 'patternConcatenation': rowPattern rowPattern (juxtaposition)."""
        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.RowPatternContext
            super().__init__(parser)
            self.copyFrom(ctx)
        def rowPattern(self, i:int=None):
            if i is None:
                return self.getTypedRuleContexts(SqlBaseParser.RowPatternContext)
            else:
                return self.getTypedRuleContext(SqlBaseParser.RowPatternContext,i)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterPatternConcatenation" ):
                listener.enterPatternConcatenation(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitPatternConcatenation" ):
                listener.exitPatternConcatenation(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitPatternConcatenation" ):
                return visitor.visitPatternConcatenation(self)
            else:
                return visitor.visitChildren(self)
    class PatternAlternationContext(RowPatternContext):
        """Labeled alternative 'patternAlternation': rowPattern T__8 rowPattern
        (T__8 is an anonymous literal — presumably '|'; confirm in grammar)."""
        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.RowPatternContext
            super().__init__(parser)
            self.copyFrom(ctx)
        def rowPattern(self, i:int=None):
            if i is None:
                return self.getTypedRuleContexts(SqlBaseParser.RowPatternContext)
            else:
                return self.getTypedRuleContext(SqlBaseParser.RowPatternContext,i)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterPatternAlternation" ):
                listener.enterPatternAlternation(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitPatternAlternation" ):
                listener.exitPatternAlternation(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitPatternAlternation" ):
                return visitor.visitPatternAlternation(self)
            else:
                return visitor.visitChildren(self)
    def rowPattern(self, _p:int=0):
        """Parse the left-recursive rule 'rowPattern'.

        Base alternative: patternPrimary patternQuantifier?  (QuantifiedPrimary).
        Left-recursive suffixes, handled with ANTLR's precedence-climbing
        machinery (_p is the minimum precedence):
          precedence 2: concatenation (juxtaposition) — binds tighter
          precedence 1: T__8 alternation (presumably '|'; confirm in grammar)
        """
        _parentctx = self._ctx
        _parentState = self.state
        localctx = SqlBaseParser.RowPatternContext(self, self._ctx, _parentState)
        _prevctx = localctx
        _startState = 142
        self.enterRecursionRule(localctx, 142, self.RULE_rowPattern, _p)
        try:
            self.enterOuterAlt(localctx, 1)
            localctx = SqlBaseParser.QuantifiedPrimaryContext(self, localctx)
            self._ctx = localctx
            _prevctx = localctx
            self.state = 2308
            self.patternPrimary()
            self.state = 2310
            self._errHandler.sync(self)
            la_ = self._interp.adaptivePredict(self._input,298,self._ctx)
            if la_ == 1:
                # optional quantifier on the primary
                self.state = 2309
                self.patternQuantifier()
            self._ctx.stop = self._input.LT(-1)
            self.state = 2319
            self._errHandler.sync(self)
            _alt = self._interp.adaptivePredict(self._input,300,self._ctx)
            while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
                # consume left-recursive suffixes while prediction says to continue
                if _alt==1:
                    if self._parseListeners is not None:
                        self.triggerExitRuleEvent()
                    _prevctx = localctx
                    self.state = 2317
                    self._errHandler.sync(self)
                    la_ = self._interp.adaptivePredict(self._input,299,self._ctx)
                    if la_ == 1:
                        # concatenation: rowPattern rowPattern
                        localctx = SqlBaseParser.PatternConcatenationContext(self, SqlBaseParser.RowPatternContext(self, _parentctx, _parentState))
                        self.pushNewRecursionContext(localctx, _startState, self.RULE_rowPattern)
                        self.state = 2312
                        if not self.precpred(self._ctx, 2):
                            from antlr4.error.Errors import (
                                FailedPredicateException,
                            )
                            raise FailedPredicateException(self, "self.precpred(self._ctx, 2)")
                        self.state = 2313
                        self.rowPattern(3)
                    elif la_ == 2:
                        # alternation: rowPattern '|' rowPattern
                        localctx = SqlBaseParser.PatternAlternationContext(self, SqlBaseParser.RowPatternContext(self, _parentctx, _parentState))
                        self.pushNewRecursionContext(localctx, _startState, self.RULE_rowPattern)
                        self.state = 2314
                        if not self.precpred(self._ctx, 1):
                            from antlr4.error.Errors import (
                                FailedPredicateException,
                            )
                            raise FailedPredicateException(self, "self.precpred(self._ctx, 1)")
                        self.state = 2315
                        self.match(SqlBaseParser.T__8)
                        self.state = 2316
                        self.rowPattern(2)
                self.state = 2321
                self._errHandler.sync(self)
                _alt = self._interp.adaptivePredict(self._input,300,self._ctx)
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.unrollRecursionContexts(_parentctx)
        return localctx
    class PatternPrimaryContext(ParserRuleContext):
        """Base context for the 'patternPrimary' rule.  Re-labeled via
        copyFrom as one of: PatternVariable, EmptyPattern,
        PatternPermutation, GroupedPattern, PartitionStartAnchor,
        PartitionEndAnchor or ExcludedPattern."""
        __slots__ = 'parser'
        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser
        def getRuleIndex(self):
            return SqlBaseParser.RULE_patternPrimary
        def copyFrom(self, ctx:ParserRuleContext):
            super().copyFrom(ctx)
    class PatternPermutationContext(PatternPrimaryContext):
        """Labeled alternative 'patternPermutation': PERMUTE '(' rowPattern (',' rowPattern)* ')'."""
        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.PatternPrimaryContext
            super().__init__(parser)
            self.copyFrom(ctx)
        def PERMUTE(self):
            return self.getToken(SqlBaseParser.PERMUTE, 0)
        def rowPattern(self, i:int=None):
            if i is None:
                return self.getTypedRuleContexts(SqlBaseParser.RowPatternContext)
            else:
                return self.getTypedRuleContext(SqlBaseParser.RowPatternContext,i)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterPatternPermutation" ):
                listener.enterPatternPermutation(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitPatternPermutation" ):
                listener.exitPatternPermutation(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitPatternPermutation" ):
                return visitor.visitPatternPermutation(self)
            else:
                return visitor.visitChildren(self)
    class PartitionEndAnchorContext(PatternPrimaryContext):
        """Labeled alternative 'partitionEndAnchor': a single anonymous literal
        token (T__10 — presumably '$'; confirm in grammar).  No children."""
        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.PatternPrimaryContext
            super().__init__(parser)
            self.copyFrom(ctx)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterPartitionEndAnchor" ):
                listener.enterPartitionEndAnchor(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitPartitionEndAnchor" ):
                listener.exitPartitionEndAnchor(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitPartitionEndAnchor" ):
                return visitor.visitPartitionEndAnchor(self)
            else:
                return visitor.visitChildren(self)
    class PatternVariableContext(PatternPrimaryContext):
        """Labeled alternative 'patternVariable': a bare identifier naming a row-pattern variable."""
        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.PatternPrimaryContext
            super().__init__(parser)
            self.copyFrom(ctx)
        def identifier(self):
            return self.getTypedRuleContext(SqlBaseParser.IdentifierContext,0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterPatternVariable" ):
                listener.enterPatternVariable(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitPatternVariable" ):
                listener.exitPatternVariable(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitPatternVariable" ):
                return visitor.visitPatternVariable(self)
            else:
                return visitor.visitChildren(self)
    class ExcludedPatternContext(PatternPrimaryContext):
        """Labeled alternative 'excludedPattern': rowPattern wrapped in the
        exclusion delimiters T__11/T__12 (presumably '{-' and '-}'; confirm
        in grammar)."""
        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.PatternPrimaryContext
            super().__init__(parser)
            self.copyFrom(ctx)
        def rowPattern(self):
            return self.getTypedRuleContext(SqlBaseParser.RowPatternContext,0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterExcludedPattern" ):
                listener.enterExcludedPattern(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitExcludedPattern" ):
                listener.exitExcludedPattern(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitExcludedPattern" ):
                return visitor.visitExcludedPattern(self)
            else:
                return visitor.visitChildren(self)
    class PartitionStartAnchorContext(PatternPrimaryContext):
        """Labeled alternative 'partitionStartAnchor': a single anonymous literal
        token (T__9 — presumably '^'; confirm in grammar).  No children."""
        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.PatternPrimaryContext
            super().__init__(parser)
            self.copyFrom(ctx)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterPartitionStartAnchor" ):
                listener.enterPartitionStartAnchor(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitPartitionStartAnchor" ):
                listener.exitPartitionStartAnchor(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitPartitionStartAnchor" ):
                return visitor.visitPartitionStartAnchor(self)
            else:
                return visitor.visitChildren(self)
    class EmptyPatternContext(PatternPrimaryContext):
        """Labeled alternative 'emptyPattern': '(' ')' — matches an empty pattern."""
        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.PatternPrimaryContext
            super().__init__(parser)
            self.copyFrom(ctx)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterEmptyPattern" ):
                listener.enterEmptyPattern(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitEmptyPattern" ):
                listener.exitEmptyPattern(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitEmptyPattern" ):
                return visitor.visitEmptyPattern(self)
            else:
                return visitor.visitChildren(self)
    class GroupedPatternContext(PatternPrimaryContext):
        """Labeled alternative 'groupedPattern': '(' rowPattern ')'."""
        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.PatternPrimaryContext
            super().__init__(parser)
            self.copyFrom(ctx)
        def rowPattern(self):
            return self.getTypedRuleContext(SqlBaseParser.RowPatternContext,0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterGroupedPattern" ):
                listener.enterGroupedPattern(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitGroupedPattern" ):
                listener.exitGroupedPattern(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitGroupedPattern" ):
                return visitor.visitGroupedPattern(self)
            else:
                return visitor.visitChildren(self)
    def patternPrimary(self):
        """Parse rule 'patternPrimary' — seven alternatives (decision 302):

            1: identifier                         -> PatternVariable
            2: '(' ')'                            -> EmptyPattern
            3: PERMUTE '(' rowPattern (',' rowPattern)* ')' -> PatternPermutation
            4: '(' rowPattern ')'                 -> GroupedPattern
            5: T__9  (anchor; presumably '^')     -> PartitionStartAnchor
            6: T__10 (anchor; presumably '$')     -> PartitionEndAnchor
            7: T__11 rowPattern T__12 (presumably '{-' ... '-}') -> ExcludedPattern

        Anonymous T__ token meanings should be confirmed against the .g4
        grammar.  ANTLR-generated.
        """
        localctx = SqlBaseParser.PatternPrimaryContext(self, self._ctx, self.state)
        self.enterRule(localctx, 144, self.RULE_patternPrimary)
        self._la = 0 # Token type
        try:
            self.state = 2347
            self._errHandler.sync(self)
            la_ = self._interp.adaptivePredict(self._input,302,self._ctx)
            if la_ == 1:
                localctx = SqlBaseParser.PatternVariableContext(self, localctx)
                self.enterOuterAlt(localctx, 1)
                self.state = 2322
                self.identifier()
            elif la_ == 2:
                localctx = SqlBaseParser.EmptyPatternContext(self, localctx)
                self.enterOuterAlt(localctx, 2)
                self.state = 2323
                self.match(SqlBaseParser.T__1)
                self.state = 2324
                self.match(SqlBaseParser.T__2)
            elif la_ == 3:
                localctx = SqlBaseParser.PatternPermutationContext(self, localctx)
                self.enterOuterAlt(localctx, 3)
                self.state = 2325
                self.match(SqlBaseParser.PERMUTE)
                self.state = 2326
                self.match(SqlBaseParser.T__1)
                self.state = 2327
                self.rowPattern(0)
                self.state = 2332
                self._errHandler.sync(self)
                _la = self._input.LA(1)
                while _la==SqlBaseParser.T__3:
                    # additional ','-separated row patterns
                    self.state = 2328
                    self.match(SqlBaseParser.T__3)
                    self.state = 2329
                    self.rowPattern(0)
                    self.state = 2334
                    self._errHandler.sync(self)
                    _la = self._input.LA(1)
                self.state = 2335
                self.match(SqlBaseParser.T__2)
            elif la_ == 4:
                localctx = SqlBaseParser.GroupedPatternContext(self, localctx)
                self.enterOuterAlt(localctx, 4)
                self.state = 2337
                self.match(SqlBaseParser.T__1)
                self.state = 2338
                self.rowPattern(0)
                self.state = 2339
                self.match(SqlBaseParser.T__2)
            elif la_ == 5:
                localctx = SqlBaseParser.PartitionStartAnchorContext(self, localctx)
                self.enterOuterAlt(localctx, 5)
                self.state = 2341
                self.match(SqlBaseParser.T__9)
            elif la_ == 6:
                localctx = SqlBaseParser.PartitionEndAnchorContext(self, localctx)
                self.enterOuterAlt(localctx, 6)
                self.state = 2342
                self.match(SqlBaseParser.T__10)
            elif la_ == 7:
                localctx = SqlBaseParser.ExcludedPatternContext(self, localctx)
                self.enterOuterAlt(localctx, 7)
                self.state = 2343
                self.match(SqlBaseParser.T__11)
                self.state = 2344
                self.rowPattern(0)
                self.state = 2345
                self.match(SqlBaseParser.T__12)
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class PatternQuantifierContext(ParserRuleContext):
        """Base context for the 'patternQuantifier' rule (regex-style
        quantifiers on row patterns).  Re-labeled via copyFrom as
        ZeroOrMoreQuantifier, OneOrMoreQuantifier, ZeroOrOneQuantifier or
        RangeQuantifier."""
        __slots__ = 'parser'
        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser
        def getRuleIndex(self):
            return SqlBaseParser.RULE_patternQuantifier
        def copyFrom(self, ctx:ParserRuleContext):
            super().copyFrom(ctx)
    class ZeroOrMoreQuantifierContext(PatternQuantifierContext):
        """Labeled alternative 'zeroOrMoreQuantifier': '*' with optional '?'
        (reluctant marker)."""
        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.PatternQuantifierContext
            super().__init__(parser)
            self.reluctant = None # Token: the trailing '?' if present
            self.copyFrom(ctx)
        def ASTERISK(self):
            return self.getToken(SqlBaseParser.ASTERISK, 0)
        def QUESTION_MARK(self):
            return self.getToken(SqlBaseParser.QUESTION_MARK, 0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterZeroOrMoreQuantifier" ):
                listener.enterZeroOrMoreQuantifier(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitZeroOrMoreQuantifier" ):
                listener.exitZeroOrMoreQuantifier(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitZeroOrMoreQuantifier" ):
                return visitor.visitZeroOrMoreQuantifier(self)
            else:
                return visitor.visitChildren(self)
    class OneOrMoreQuantifierContext(PatternQuantifierContext):
        """Labeled alternative 'oneOrMoreQuantifier': '+' with optional '?'
        (reluctant marker)."""
        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.PatternQuantifierContext
            super().__init__(parser)
            self.reluctant = None # Token: the trailing '?' if present
            self.copyFrom(ctx)
        def PLUS(self):
            return self.getToken(SqlBaseParser.PLUS, 0)
        def QUESTION_MARK(self):
            return self.getToken(SqlBaseParser.QUESTION_MARK, 0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterOneOrMoreQuantifier" ):
                listener.enterOneOrMoreQuantifier(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitOneOrMoreQuantifier" ):
                listener.exitOneOrMoreQuantifier(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitOneOrMoreQuantifier" ):
                return visitor.visitOneOrMoreQuantifier(self)
            else:
                return visitor.visitChildren(self)
    class ZeroOrOneQuantifierContext(PatternQuantifierContext):
        """Labeled alternative 'zeroOrOneQuantifier': '?' with optional second
        '?' (reluctant marker) — hence the multi-token accessor."""
        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.PatternQuantifierContext
            super().__init__(parser)
            self.reluctant = None # Token: the second '?' if present
            self.copyFrom(ctx)
        def QUESTION_MARK(self, i:int=None):
            if i is None:
                return self.getTokens(SqlBaseParser.QUESTION_MARK)
            else:
                return self.getToken(SqlBaseParser.QUESTION_MARK, i)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterZeroOrOneQuantifier" ):
                listener.enterZeroOrOneQuantifier(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitZeroOrOneQuantifier" ):
                listener.exitZeroOrOneQuantifier(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitZeroOrOneQuantifier" ):
                return visitor.visitZeroOrOneQuantifier(self)
            else:
                return visitor.visitChildren(self)
    class RangeQuantifierContext(PatternQuantifierContext):
        """Generated context for the bounded-range labeled alternative of rule
        `patternQuantifier` (e.g. `{n}`, `{n,}`, `{,m}`, `{n,m}`, optionally reluctant)."""
        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.PatternQuantifierContext
            super().__init__(parser)
            self.exactly = None # Token: INTEGER_VALUE of an exact `{n}` repetition
            self.reluctant = None # Token: trailing '?' when the quantifier is reluctant
            self.atLeast = None # Token: lower bound INTEGER_VALUE in `{n,m}` form
            self.atMost = None # Token: upper bound INTEGER_VALUE in `{n,m}` form
            self.copyFrom(ctx)
        # One or two INTEGER_VALUE terminals may occur; i selects which one.
        def INTEGER_VALUE(self, i:int=None):
            if i is None:
                return self.getTokens(SqlBaseParser.INTEGER_VALUE)
            else:
                return self.getToken(SqlBaseParser.INTEGER_VALUE, i)
        def QUESTION_MARK(self):
            return self.getToken(SqlBaseParser.QUESTION_MARK, 0)
        # Listener/visitor dispatch uses hasattr so partial listeners/visitors work.
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterRangeQuantifier" ):
                listener.enterRangeQuantifier(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitRangeQuantifier" ):
                listener.exitRangeQuantifier(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitRangeQuantifier" ):
                return visitor.visitRangeQuantifier(self)
            else:
                return visitor.visitChildren(self)
    def patternQuantifier(self):
        """Parse rule `patternQuantifier` (rule index 146): a row-pattern quantifier
        `*`, `+`, `?`, `{n}`, or `{n,m}`, each with an optional reluctant `?` suffix.

        NOTE: generated by ANTLR — the `self.state = ...` numbers and the
        adaptivePredict decision indices (303-306, 309, 310) are ATN coordinates;
        do not edit by hand.
        """
        localctx = SqlBaseParser.PatternQuantifierContext(self, self._ctx, self.state)
        self.enterRule(localctx, 146, self.RULE_patternQuantifier)
        self._la = 0 # Token type
        try:
            self.state = 2379
            self._errHandler.sync(self)
            # Decision 310 selects among the five quantifier alternatives.
            la_ = self._interp.adaptivePredict(self._input,310,self._ctx)
            if la_ == 1:
                # Alternative 1: `*` (zero-or-more), optional reluctant '?'.
                localctx = SqlBaseParser.ZeroOrMoreQuantifierContext(self, localctx)
                self.enterOuterAlt(localctx, 1)
                self.state = 2349
                self.match(SqlBaseParser.ASTERISK)
                self.state = 2351
                self._errHandler.sync(self)
                la_ = self._interp.adaptivePredict(self._input,303,self._ctx)
                if la_ == 1:
                    self.state = 2350
                    localctx.reluctant = self.match(SqlBaseParser.QUESTION_MARK)
            elif la_ == 2:
                # Alternative 2: `+` (one-or-more), optional reluctant '?'.
                localctx = SqlBaseParser.OneOrMoreQuantifierContext(self, localctx)
                self.enterOuterAlt(localctx, 2)
                self.state = 2353
                self.match(SqlBaseParser.PLUS)
                self.state = 2355
                self._errHandler.sync(self)
                la_ = self._interp.adaptivePredict(self._input,304,self._ctx)
                if la_ == 1:
                    self.state = 2354
                    localctx.reluctant = self.match(SqlBaseParser.QUESTION_MARK)
            elif la_ == 3:
                # Alternative 3: `?` (zero-or-one), optional reluctant second '?'.
                localctx = SqlBaseParser.ZeroOrOneQuantifierContext(self, localctx)
                self.enterOuterAlt(localctx, 3)
                self.state = 2357
                self.match(SqlBaseParser.QUESTION_MARK)
                self.state = 2359
                self._errHandler.sync(self)
                la_ = self._interp.adaptivePredict(self._input,305,self._ctx)
                if la_ == 1:
                    self.state = 2358
                    localctx.reluctant = self.match(SqlBaseParser.QUESTION_MARK)
            elif la_ == 4:
                # Alternative 4: exact repetition — T__13/T__14 are the implicit
                # literal delimiter tokens from the grammar (presumably '{' and '}').
                localctx = SqlBaseParser.RangeQuantifierContext(self, localctx)
                self.enterOuterAlt(localctx, 4)
                self.state = 2361
                self.match(SqlBaseParser.T__13)
                self.state = 2362
                localctx.exactly = self.match(SqlBaseParser.INTEGER_VALUE)
                self.state = 2363
                self.match(SqlBaseParser.T__14)
                self.state = 2365
                self._errHandler.sync(self)
                la_ = self._interp.adaptivePredict(self._input,306,self._ctx)
                if la_ == 1:
                    self.state = 2364
                    localctx.reluctant = self.match(SqlBaseParser.QUESTION_MARK)
            elif la_ == 5:
                # Alternative 5: bounded range with optional lower and upper bounds,
                # separated by the implicit T__3 token (presumably ',').
                localctx = SqlBaseParser.RangeQuantifierContext(self, localctx)
                self.enterOuterAlt(localctx, 5)
                self.state = 2367
                self.match(SqlBaseParser.T__13)
                self.state = 2369
                self._errHandler.sync(self)
                _la = self._input.LA(1)
                if _la==SqlBaseParser.INTEGER_VALUE:
                    self.state = 2368
                    localctx.atLeast = self.match(SqlBaseParser.INTEGER_VALUE)
                self.state = 2371
                self.match(SqlBaseParser.T__3)
                self.state = 2373
                self._errHandler.sync(self)
                _la = self._input.LA(1)
                if _la==SqlBaseParser.INTEGER_VALUE:
                    self.state = 2372
                    localctx.atMost = self.match(SqlBaseParser.INTEGER_VALUE)
                self.state = 2375
                self.match(SqlBaseParser.T__14)
                self.state = 2377
                self._errHandler.sync(self)
                la_ = self._interp.adaptivePredict(self._input,309,self._ctx)
                if la_ == 1:
                    self.state = 2376
                    localctx.reluctant = self.match(SqlBaseParser.QUESTION_MARK)
        except RecognitionException as re:
            # Standard generated recovery: record, report, and resynchronize.
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class UpdateAssignmentContext(ParserRuleContext):
        """Generated context for rule `updateAssignment`: `identifier EQ expression`
        (a single SET clause item of an UPDATE statement)."""
        __slots__ = 'parser'
        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser
        # Child accessors for the rule's subtrees and terminals.
        def identifier(self):
            return self.getTypedRuleContext(SqlBaseParser.IdentifierContext,0)
        def EQ(self):
            return self.getToken(SqlBaseParser.EQ, 0)
        def expression(self):
            return self.getTypedRuleContext(SqlBaseParser.ExpressionContext,0)
        def getRuleIndex(self):
            return SqlBaseParser.RULE_updateAssignment
        # Listener/visitor dispatch uses hasattr so partial listeners/visitors work.
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterUpdateAssignment" ):
                listener.enterUpdateAssignment(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitUpdateAssignment" ):
                listener.exitUpdateAssignment(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitUpdateAssignment" ):
                return visitor.visitUpdateAssignment(self)
            else:
                return visitor.visitChildren(self)
    def updateAssignment(self):
        """Parse rule `updateAssignment` (rule index 148): `identifier '=' expression`.

        NOTE: generated by ANTLR — `self.state` numbers are ATN coordinates.
        """
        localctx = SqlBaseParser.UpdateAssignmentContext(self, self._ctx, self.state)
        self.enterRule(localctx, 148, self.RULE_updateAssignment)
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 2381
            self.identifier()
            self.state = 2382
            self.match(SqlBaseParser.EQ)
            self.state = 2383
            self.expression()
        except RecognitionException as re:
            # Standard generated recovery: record, report, and resynchronize.
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class ExplainOptionContext(ParserRuleContext):
        """Generated base context for rule `explainOption`; concrete parses use the
        labeled-alternative subclasses `ExplainFormatContext` / `ExplainTypeContext`."""
        __slots__ = 'parser'
        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser
        def getRuleIndex(self):
            return SqlBaseParser.RULE_explainOption
        def copyFrom(self, ctx:ParserRuleContext):
            super().copyFrom(ctx)
    class ExplainFormatContext(ExplainOptionContext):
        """Labeled alternative `FORMAT value=(TEXT | GRAPHVIZ | JSON)`."""
        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.ExplainOptionContext
            super().__init__(parser)
            self.value = None # Token: the chosen format keyword (TEXT/GRAPHVIZ/JSON)
            self.copyFrom(ctx)
        def FORMAT(self):
            return self.getToken(SqlBaseParser.FORMAT, 0)
        def TEXT(self):
            return self.getToken(SqlBaseParser.TEXT, 0)
        def GRAPHVIZ(self):
            return self.getToken(SqlBaseParser.GRAPHVIZ, 0)
        def JSON(self):
            return self.getToken(SqlBaseParser.JSON, 0)
        # Listener/visitor dispatch uses hasattr so partial listeners/visitors work.
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterExplainFormat" ):
                listener.enterExplainFormat(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitExplainFormat" ):
                listener.exitExplainFormat(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitExplainFormat" ):
                return visitor.visitExplainFormat(self)
            else:
                return visitor.visitChildren(self)
    class ExplainTypeContext(ExplainOptionContext):
        """Labeled alternative `TYPE value=(LOGICAL | DISTRIBUTED | VALIDATE | IO)`."""
        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.ExplainOptionContext
            super().__init__(parser)
            self.value = None # Token: the chosen explain-type keyword
            self.copyFrom(ctx)
        def TYPE(self):
            return self.getToken(SqlBaseParser.TYPE, 0)
        def LOGICAL(self):
            return self.getToken(SqlBaseParser.LOGICAL, 0)
        def DISTRIBUTED(self):
            return self.getToken(SqlBaseParser.DISTRIBUTED, 0)
        def VALIDATE(self):
            return self.getToken(SqlBaseParser.VALIDATE, 0)
        def IO(self):
            return self.getToken(SqlBaseParser.IO, 0)
        # Listener/visitor dispatch uses hasattr so partial listeners/visitors work.
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterExplainType" ):
                listener.enterExplainType(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitExplainType" ):
                listener.exitExplainType(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitExplainType" ):
                return visitor.visitExplainType(self)
            else:
                return visitor.visitChildren(self)
    def explainOption(self):
        """Parse rule `explainOption` (rule index 150): either
        `FORMAT (TEXT|GRAPHVIZ|JSON)` or `TYPE (LOGICAL|DISTRIBUTED|VALIDATE|IO)`.

        NOTE: generated by ANTLR — `self.state` numbers and the token-set bitmask
        are derived from the ATN/token vocabulary; do not edit by hand.
        """
        localctx = SqlBaseParser.ExplainOptionContext(self, self._ctx, self.state)
        self.enterRule(localctx, 150, self.RULE_explainOption)
        self._la = 0 # Token type
        try:
            self.state = 2389
            self._errHandler.sync(self)
            # One token of lookahead distinguishes the two alternatives.
            token = self._input.LA(1)
            if token in [SqlBaseParser.FORMAT]:
                localctx = SqlBaseParser.ExplainFormatContext(self, localctx)
                self.enterOuterAlt(localctx, 1)
                self.state = 2385
                self.match(SqlBaseParser.FORMAT)
                self.state = 2386
                # Match one of the allowed format keywords; otherwise recover inline.
                localctx.value = self._input.LT(1)
                _la = self._input.LA(1)
                if not(_la==SqlBaseParser.GRAPHVIZ or _la==SqlBaseParser.JSON or _la==SqlBaseParser.TEXT):
                    localctx.value = self._errHandler.recoverInline(self)
                else:
                    self._errHandler.reportMatch(self)
                    self.consume()
            elif token in [SqlBaseParser.TYPE]:
                localctx = SqlBaseParser.ExplainTypeContext(self, localctx)
                self.enterOuterAlt(localctx, 2)
                self.state = 2387
                self.match(SqlBaseParser.TYPE)
                self.state = 2388
                # Generated bitmask set-membership test over token types 68..131
                # (DISTRIBUTED/IO/LOGICAL), plus VALIDATE checked separately.
                localctx.value = self._input.LT(1)
                _la = self._input.LA(1)
                if not(((((_la - 68)) & ~0x3f) == 0 and ((1 << (_la - 68)) & ((1 << (SqlBaseParser.DISTRIBUTED - 68)) | (1 << (SqlBaseParser.IO - 68)) | (1 << (SqlBaseParser.LOGICAL - 68)))) != 0) or _la==SqlBaseParser.VALIDATE):
                    localctx.value = self._errHandler.recoverInline(self)
                else:
                    self._errHandler.reportMatch(self)
                    self.consume()
            else:
                raise NoViableAltException(self)
        except RecognitionException as re:
            # Standard generated recovery: record, report, and resynchronize.
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class TransactionModeContext(ParserRuleContext):
        """Generated base context for rule `transactionMode`; concrete parses use
        `IsolationLevelContext` or `TransactionAccessModeContext`."""
        __slots__ = 'parser'
        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser
        def getRuleIndex(self):
            return SqlBaseParser.RULE_transactionMode
        def copyFrom(self, ctx:ParserRuleContext):
            super().copyFrom(ctx)
    class TransactionAccessModeContext(TransactionModeContext):
        """Labeled alternative `READ accessMode=(ONLY | WRITE)`."""
        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.TransactionModeContext
            super().__init__(parser)
            self.accessMode = None # Token: ONLY or WRITE
            self.copyFrom(ctx)
        def READ(self):
            return self.getToken(SqlBaseParser.READ, 0)
        def ONLY(self):
            return self.getToken(SqlBaseParser.ONLY, 0)
        def WRITE(self):
            return self.getToken(SqlBaseParser.WRITE, 0)
        # Listener/visitor dispatch uses hasattr so partial listeners/visitors work.
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterTransactionAccessMode" ):
                listener.enterTransactionAccessMode(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitTransactionAccessMode" ):
                listener.exitTransactionAccessMode(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitTransactionAccessMode" ):
                return visitor.visitTransactionAccessMode(self)
            else:
                return visitor.visitChildren(self)
    class IsolationLevelContext(TransactionModeContext):
        """Labeled alternative `ISOLATION LEVEL levelOfIsolation`."""
        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.TransactionModeContext
            super().__init__(parser)
            self.copyFrom(ctx)
        def ISOLATION(self):
            return self.getToken(SqlBaseParser.ISOLATION, 0)
        def LEVEL(self):
            return self.getToken(SqlBaseParser.LEVEL, 0)
        def levelOfIsolation(self):
            return self.getTypedRuleContext(SqlBaseParser.LevelOfIsolationContext,0)
        # Listener/visitor dispatch uses hasattr so partial listeners/visitors work.
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterIsolationLevel" ):
                listener.enterIsolationLevel(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitIsolationLevel" ):
                listener.exitIsolationLevel(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitIsolationLevel" ):
                return visitor.visitIsolationLevel(self)
            else:
                return visitor.visitChildren(self)
    def transactionMode(self):
        """Parse rule `transactionMode` (rule index 152): either
        `ISOLATION LEVEL levelOfIsolation` or `READ (ONLY | WRITE)`.

        NOTE: generated by ANTLR — `self.state` numbers are ATN coordinates.
        """
        localctx = SqlBaseParser.TransactionModeContext(self, self._ctx, self.state)
        self.enterRule(localctx, 152, self.RULE_transactionMode)
        self._la = 0 # Token type
        try:
            self.state = 2396
            self._errHandler.sync(self)
            # One token of lookahead distinguishes the two alternatives.
            token = self._input.LA(1)
            if token in [SqlBaseParser.ISOLATION]:
                localctx = SqlBaseParser.IsolationLevelContext(self, localctx)
                self.enterOuterAlt(localctx, 1)
                self.state = 2391
                self.match(SqlBaseParser.ISOLATION)
                self.state = 2392
                self.match(SqlBaseParser.LEVEL)
                self.state = 2393
                self.levelOfIsolation()
            elif token in [SqlBaseParser.READ]:
                localctx = SqlBaseParser.TransactionAccessModeContext(self, localctx)
                self.enterOuterAlt(localctx, 2)
                self.state = 2394
                self.match(SqlBaseParser.READ)
                self.state = 2395
                # Match ONLY or WRITE; otherwise recover inline.
                localctx.accessMode = self._input.LT(1)
                _la = self._input.LA(1)
                if not(_la==SqlBaseParser.ONLY or _la==SqlBaseParser.WRITE):
                    localctx.accessMode = self._errHandler.recoverInline(self)
                else:
                    self._errHandler.reportMatch(self)
                    self.consume()
            else:
                raise NoViableAltException(self)
        except RecognitionException as re:
            # Standard generated recovery: record, report, and resynchronize.
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class LevelOfIsolationContext(ParserRuleContext):
        """Generated base context for rule `levelOfIsolation`; concrete parses use
        the four labeled-alternative subclasses below (one per SQL isolation level)."""
        __slots__ = 'parser'
        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser
        def getRuleIndex(self):
            return SqlBaseParser.RULE_levelOfIsolation
        def copyFrom(self, ctx:ParserRuleContext):
            super().copyFrom(ctx)
    class ReadUncommittedContext(LevelOfIsolationContext):
        """Labeled alternative `READ UNCOMMITTED`."""
        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.LevelOfIsolationContext
            super().__init__(parser)
            self.copyFrom(ctx)
        def READ(self):
            return self.getToken(SqlBaseParser.READ, 0)
        def UNCOMMITTED(self):
            return self.getToken(SqlBaseParser.UNCOMMITTED, 0)
        # Listener/visitor dispatch uses hasattr so partial listeners/visitors work.
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterReadUncommitted" ):
                listener.enterReadUncommitted(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitReadUncommitted" ):
                listener.exitReadUncommitted(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitReadUncommitted" ):
                return visitor.visitReadUncommitted(self)
            else:
                return visitor.visitChildren(self)
    class SerializableContext(LevelOfIsolationContext):
        """Labeled alternative `SERIALIZABLE`."""
        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.LevelOfIsolationContext
            super().__init__(parser)
            self.copyFrom(ctx)
        def SERIALIZABLE(self):
            return self.getToken(SqlBaseParser.SERIALIZABLE, 0)
        # Listener/visitor dispatch uses hasattr so partial listeners/visitors work.
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterSerializable" ):
                listener.enterSerializable(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitSerializable" ):
                listener.exitSerializable(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitSerializable" ):
                return visitor.visitSerializable(self)
            else:
                return visitor.visitChildren(self)
    class ReadCommittedContext(LevelOfIsolationContext):
        """Labeled alternative `READ COMMITTED`."""
        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.LevelOfIsolationContext
            super().__init__(parser)
            self.copyFrom(ctx)
        def READ(self):
            return self.getToken(SqlBaseParser.READ, 0)
        def COMMITTED(self):
            return self.getToken(SqlBaseParser.COMMITTED, 0)
        # Listener/visitor dispatch uses hasattr so partial listeners/visitors work.
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterReadCommitted" ):
                listener.enterReadCommitted(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitReadCommitted" ):
                listener.exitReadCommitted(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitReadCommitted" ):
                return visitor.visitReadCommitted(self)
            else:
                return visitor.visitChildren(self)
    class RepeatableReadContext(LevelOfIsolationContext):
        """Labeled alternative `REPEATABLE READ`."""
        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.LevelOfIsolationContext
            super().__init__(parser)
            self.copyFrom(ctx)
        def REPEATABLE(self):
            return self.getToken(SqlBaseParser.REPEATABLE, 0)
        def READ(self):
            return self.getToken(SqlBaseParser.READ, 0)
        # Listener/visitor dispatch uses hasattr so partial listeners/visitors work.
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterRepeatableRead" ):
                listener.enterRepeatableRead(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitRepeatableRead" ):
                listener.exitRepeatableRead(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitRepeatableRead" ):
                return visitor.visitRepeatableRead(self)
            else:
                return visitor.visitChildren(self)
    def levelOfIsolation(self):
        """Parse rule `levelOfIsolation` (rule index 154): one of the four SQL
        isolation levels (READ UNCOMMITTED / READ COMMITTED / REPEATABLE READ /
        SERIALIZABLE).

        NOTE: generated by ANTLR — `self.state` numbers and decision index 313
        are ATN coordinates; do not edit by hand.
        """
        localctx = SqlBaseParser.LevelOfIsolationContext(self, self._ctx, self.state)
        self.enterRule(localctx, 154, self.RULE_levelOfIsolation)
        try:
            self.state = 2405
            self._errHandler.sync(self)
            # Decision 313 disambiguates the two READ-prefixed alternatives.
            la_ = self._interp.adaptivePredict(self._input,313,self._ctx)
            if la_ == 1:
                localctx = SqlBaseParser.ReadUncommittedContext(self, localctx)
                self.enterOuterAlt(localctx, 1)
                self.state = 2398
                self.match(SqlBaseParser.READ)
                self.state = 2399
                self.match(SqlBaseParser.UNCOMMITTED)
            elif la_ == 2:
                localctx = SqlBaseParser.ReadCommittedContext(self, localctx)
                self.enterOuterAlt(localctx, 2)
                self.state = 2400
                self.match(SqlBaseParser.READ)
                self.state = 2401
                self.match(SqlBaseParser.COMMITTED)
            elif la_ == 3:
                localctx = SqlBaseParser.RepeatableReadContext(self, localctx)
                self.enterOuterAlt(localctx, 3)
                self.state = 2402
                self.match(SqlBaseParser.REPEATABLE)
                self.state = 2403
                self.match(SqlBaseParser.READ)
            elif la_ == 4:
                localctx = SqlBaseParser.SerializableContext(self, localctx)
                self.enterOuterAlt(localctx, 4)
                self.state = 2404
                self.match(SqlBaseParser.SERIALIZABLE)
        except RecognitionException as re:
            # Standard generated recovery: record, report, and resynchronize.
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class CallArgumentContext(ParserRuleContext):
        """Generated base context for rule `callArgument` (a CALL procedure
        argument); concrete parses use `PositionalArgumentContext` or
        `NamedArgumentContext`."""
        __slots__ = 'parser'
        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser
        def getRuleIndex(self):
            return SqlBaseParser.RULE_callArgument
        def copyFrom(self, ctx:ParserRuleContext):
            super().copyFrom(ctx)
    class PositionalArgumentContext(CallArgumentContext):
        """Labeled alternative: a bare `expression` passed by position."""
        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.CallArgumentContext
            super().__init__(parser)
            self.copyFrom(ctx)
        def expression(self):
            return self.getTypedRuleContext(SqlBaseParser.ExpressionContext,0)
        # Listener/visitor dispatch uses hasattr so partial listeners/visitors work.
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterPositionalArgument" ):
                listener.enterPositionalArgument(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitPositionalArgument" ):
                listener.exitPositionalArgument(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitPositionalArgument" ):
                return visitor.visitPositionalArgument(self)
            else:
                return visitor.visitChildren(self)
    class NamedArgumentContext(CallArgumentContext):
        """Labeled alternative: `identifier => expression` (argument by name)."""
        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.CallArgumentContext
            super().__init__(parser)
            self.copyFrom(ctx)
        def identifier(self):
            return self.getTypedRuleContext(SqlBaseParser.IdentifierContext,0)
        def expression(self):
            return self.getTypedRuleContext(SqlBaseParser.ExpressionContext,0)
        # Listener/visitor dispatch uses hasattr so partial listeners/visitors work.
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterNamedArgument" ):
                listener.enterNamedArgument(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitNamedArgument" ):
                listener.exitNamedArgument(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitNamedArgument" ):
                return visitor.visitNamedArgument(self)
            else:
                return visitor.visitChildren(self)
    def callArgument(self):
        """Parse rule `callArgument` (rule index 156): a positional `expression`
        or a named `identifier <sep> expression` argument, where T__15 is the
        implicit name/value separator token from the grammar (presumably '=>').

        NOTE: generated by ANTLR — `self.state` numbers and decision index 314
        are ATN coordinates; do not edit by hand.
        """
        localctx = SqlBaseParser.CallArgumentContext(self, self._ctx, self.state)
        self.enterRule(localctx, 156, self.RULE_callArgument)
        try:
            self.state = 2412
            self._errHandler.sync(self)
            la_ = self._interp.adaptivePredict(self._input,314,self._ctx)
            if la_ == 1:
                localctx = SqlBaseParser.PositionalArgumentContext(self, localctx)
                self.enterOuterAlt(localctx, 1)
                self.state = 2407
                self.expression()
            elif la_ == 2:
                localctx = SqlBaseParser.NamedArgumentContext(self, localctx)
                self.enterOuterAlt(localctx, 2)
                self.state = 2408
                self.identifier()
                self.state = 2409
                self.match(SqlBaseParser.T__15)
                self.state = 2410
                self.expression()
        except RecognitionException as re:
            # Standard generated recovery: record, report, and resynchronize.
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class PathElementContext(ParserRuleContext):
        """Generated base context for rule `pathElement` (one element of a SQL
        path); concrete parses use `QualifiedArgumentContext` or
        `UnqualifiedArgumentContext`."""
        __slots__ = 'parser'
        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser
        def getRuleIndex(self):
            return SqlBaseParser.RULE_pathElement
        def copyFrom(self, ctx:ParserRuleContext):
            super().copyFrom(ctx)
    class QualifiedArgumentContext(PathElementContext):
        """Labeled alternative: two dot-separated identifiers (catalog.schema)."""
        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.PathElementContext
            super().__init__(parser)
            self.copyFrom(ctx)
        # Two identifier subtrees; i selects which one.
        def identifier(self, i:int=None):
            if i is None:
                return self.getTypedRuleContexts(SqlBaseParser.IdentifierContext)
            else:
                return self.getTypedRuleContext(SqlBaseParser.IdentifierContext,i)
        # Listener/visitor dispatch uses hasattr so partial listeners/visitors work.
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterQualifiedArgument" ):
                listener.enterQualifiedArgument(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitQualifiedArgument" ):
                listener.exitQualifiedArgument(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitQualifiedArgument" ):
                return visitor.visitQualifiedArgument(self)
            else:
                return visitor.visitChildren(self)
    class UnqualifiedArgumentContext(PathElementContext):
        """Labeled alternative: a single bare identifier."""
        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.PathElementContext
            super().__init__(parser)
            self.copyFrom(ctx)
        def identifier(self):
            return self.getTypedRuleContext(SqlBaseParser.IdentifierContext,0)
        # Listener/visitor dispatch uses hasattr so partial listeners/visitors work.
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterUnqualifiedArgument" ):
                listener.enterUnqualifiedArgument(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitUnqualifiedArgument" ):
                listener.exitUnqualifiedArgument(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitUnqualifiedArgument" ):
                return visitor.visitUnqualifiedArgument(self)
            else:
                return visitor.visitChildren(self)
    def pathElement(self):
        """Parse rule `pathElement` (rule index 158): `identifier <dot> identifier`
        (qualified) or a single `identifier` (unqualified), where T__0 is the
        implicit separator token from the grammar (presumably '.').

        NOTE: generated by ANTLR — `self.state` numbers and decision index 315
        are ATN coordinates; do not edit by hand.
        """
        localctx = SqlBaseParser.PathElementContext(self, self._ctx, self.state)
        self.enterRule(localctx, 158, self.RULE_pathElement)
        try:
            self.state = 2419
            self._errHandler.sync(self)
            la_ = self._interp.adaptivePredict(self._input,315,self._ctx)
            if la_ == 1:
                localctx = SqlBaseParser.QualifiedArgumentContext(self, localctx)
                self.enterOuterAlt(localctx, 1)
                self.state = 2414
                self.identifier()
                self.state = 2415
                self.match(SqlBaseParser.T__0)
                self.state = 2416
                self.identifier()
            elif la_ == 2:
                localctx = SqlBaseParser.UnqualifiedArgumentContext(self, localctx)
                self.enterOuterAlt(localctx, 2)
                self.state = 2418
                self.identifier()
        except RecognitionException as re:
            # Standard generated recovery: record, report, and resynchronize.
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class PathSpecificationContext(ParserRuleContext):
        """Generated context for rule `pathSpecification`: one or more
        `pathElement` children separated by the implicit T__3 token."""
        __slots__ = 'parser'
        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser
        # Multiple pathElement subtrees; i selects which one.
        def pathElement(self, i:int=None):
            if i is None:
                return self.getTypedRuleContexts(SqlBaseParser.PathElementContext)
            else:
                return self.getTypedRuleContext(SqlBaseParser.PathElementContext,i)
        def getRuleIndex(self):
            return SqlBaseParser.RULE_pathSpecification
        # Listener/visitor dispatch uses hasattr so partial listeners/visitors work.
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterPathSpecification" ):
                listener.enterPathSpecification(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitPathSpecification" ):
                listener.exitPathSpecification(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitPathSpecification" ):
                return visitor.visitPathSpecification(self)
            else:
                return visitor.visitChildren(self)
    def pathSpecification(self):
        """Parse rule `pathSpecification` (rule index 160): a non-empty list of
        `pathElement` separated by the implicit T__3 token (presumably ',').

        NOTE: generated by ANTLR — `self.state` numbers are ATN coordinates.
        """
        localctx = SqlBaseParser.PathSpecificationContext(self, self._ctx, self.state)
        self.enterRule(localctx, 160, self.RULE_pathSpecification)
        self._la = 0 # Token type
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 2421
            self.pathElement()
            self.state = 2426
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            # Consume `<sep> pathElement` pairs while a separator follows.
            while _la==SqlBaseParser.T__3:
                self.state = 2422
                self.match(SqlBaseParser.T__3)
                self.state = 2423
                self.pathElement()
                self.state = 2428
                self._errHandler.sync(self)
                _la = self._input.LA(1)
        except RecognitionException as re:
            # Standard generated recovery: record, report, and resynchronize.
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class PrivilegeContext(ParserRuleContext):
        """Generated context for rule `privilege`: exactly one of the keywords
        CREATE / SELECT / DELETE / INSERT / UPDATE."""
        __slots__ = 'parser'
        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser
        # Token accessors — only the matched one returns a terminal node.
        def CREATE(self):
            return self.getToken(SqlBaseParser.CREATE, 0)
        def SELECT(self):
            return self.getToken(SqlBaseParser.SELECT, 0)
        def DELETE(self):
            return self.getToken(SqlBaseParser.DELETE, 0)
        def INSERT(self):
            return self.getToken(SqlBaseParser.INSERT, 0)
        def UPDATE(self):
            return self.getToken(SqlBaseParser.UPDATE, 0)
        def getRuleIndex(self):
            return SqlBaseParser.RULE_privilege
        # Listener/visitor dispatch uses hasattr so partial listeners/visitors work.
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterPrivilege" ):
                listener.enterPrivilege(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitPrivilege" ):
                listener.exitPrivilege(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitPrivilege" ):
                return visitor.visitPrivilege(self)
            else:
                return visitor.visitChildren(self)
    def privilege(self):
        """Parse rule `privilege` (rule index 162): a single privilege keyword
        (CREATE | DELETE | INSERT | SELECT | UPDATE).

        NOTE: generated by ANTLR — `self.state` numbers are ATN coordinates.
        """
        localctx = SqlBaseParser.PrivilegeContext(self, self._ctx, self.state)
        self.enterRule(localctx, 162, self.RULE_privilege)
        self._la = 0 # Token type
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 2429
            # Match one keyword from the allowed set; otherwise recover inline.
            _la = self._input.LA(1)
            if not(_la==SqlBaseParser.CREATE or _la==SqlBaseParser.DELETE or _la==SqlBaseParser.INSERT or _la==SqlBaseParser.SELECT or _la==SqlBaseParser.UPDATE):
                self._errHandler.recoverInline(self)
            else:
                self._errHandler.reportMatch(self)
                self.consume()
        except RecognitionException as re:
            # Standard generated recovery: record, report, and resynchronize.
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class QualifiedNameContext(ParserRuleContext):
        """Generated context for rule `qualifiedName`: one or more identifiers
        separated by the implicit T__0 token."""
        __slots__ = 'parser'
        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser
        # Multiple identifier subtrees; i selects which one.
        def identifier(self, i:int=None):
            if i is None:
                return self.getTypedRuleContexts(SqlBaseParser.IdentifierContext)
            else:
                return self.getTypedRuleContext(SqlBaseParser.IdentifierContext,i)
        def getRuleIndex(self):
            return SqlBaseParser.RULE_qualifiedName
        # Listener/visitor dispatch uses hasattr so partial listeners/visitors work.
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterQualifiedName" ):
                listener.enterQualifiedName(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitQualifiedName" ):
                listener.exitQualifiedName(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitQualifiedName" ):
                return visitor.visitQualifiedName(self)
            else:
                return visitor.visitChildren(self)
    def qualifiedName(self):
        """Parse rule `qualifiedName` (rule index 164): `identifier (<dot> identifier)*`,
        where T__0 is the implicit separator token from the grammar (presumably '.').

        NOTE: generated by ANTLR — `self.state` numbers and decision index 317
        are ATN coordinates; do not edit by hand.
        """
        localctx = SqlBaseParser.QualifiedNameContext(self, self._ctx, self.state)
        self.enterRule(localctx, 164, self.RULE_qualifiedName)
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 2431
            self.identifier()
            self.state = 2436
            self._errHandler.sync(self)
            # Adaptive prediction (decision 317) decides whether another
            # `<dot> identifier` pair belongs to this qualified name.
            _alt = self._interp.adaptivePredict(self._input,317,self._ctx)
            while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
                if _alt==1:
                    self.state = 2432
                    self.match(SqlBaseParser.T__0)
                    self.state = 2433
                    self.identifier()
                self.state = 2438
                self._errHandler.sync(self)
                _alt = self._interp.adaptivePredict(self._input,317,self._ctx)
        except RecognitionException as re:
            # Standard generated recovery: record, report, and resynchronize.
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class QueryPeriodContext(ParserRuleContext):
        """Generated context for rule `queryPeriod`:
        `FOR rangeType AS OF end=valueExpression` (temporal/time-travel query clause)."""
        __slots__ = 'parser'
        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser
            self.end = None # ValueExpressionContext: the AS OF point expression
        # Child accessors for the rule's terminals and subtrees.
        def FOR(self):
            return self.getToken(SqlBaseParser.FOR, 0)
        def rangeType(self):
            return self.getTypedRuleContext(SqlBaseParser.RangeTypeContext,0)
        def AS(self):
            return self.getToken(SqlBaseParser.AS, 0)
        def OF(self):
            return self.getToken(SqlBaseParser.OF, 0)
        def valueExpression(self):
            return self.getTypedRuleContext(SqlBaseParser.ValueExpressionContext,0)
        def getRuleIndex(self):
            return SqlBaseParser.RULE_queryPeriod
        # Listener/visitor dispatch uses hasattr so partial listeners/visitors work.
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterQueryPeriod" ):
                listener.enterQueryPeriod(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitQueryPeriod" ):
                listener.exitQueryPeriod(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitQueryPeriod" ):
                return visitor.visitQueryPeriod(self)
            else:
                return visitor.visitChildren(self)
    def queryPeriod(self):
        """Parse rule `queryPeriod` (rule index 166):
        `FOR rangeType AS OF end=valueExpression`.

        NOTE: generated by ANTLR — `self.state` numbers are ATN coordinates.
        """
        localctx = SqlBaseParser.QueryPeriodContext(self, self._ctx, self.state)
        self.enterRule(localctx, 166, self.RULE_queryPeriod)
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 2439
            self.match(SqlBaseParser.FOR)
            self.state = 2440
            self.rangeType()
            self.state = 2441
            self.match(SqlBaseParser.AS)
            self.state = 2442
            self.match(SqlBaseParser.OF)
            self.state = 2443
            # valueExpression is left-recursive; 0 is the starting precedence.
            localctx.end = self.valueExpression(0)
        except RecognitionException as re:
            # Standard generated recovery: record, report, and resynchronize.
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class RangeTypeContext(ParserRuleContext):
        """Generated context for rule `rangeType`: the keyword TIMESTAMP or VERSION
        (the dimension of a `queryPeriod` AS OF clause)."""
        __slots__ = 'parser'
        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser
        # Token accessors — only the matched one returns a terminal node.
        def TIMESTAMP(self):
            return self.getToken(SqlBaseParser.TIMESTAMP, 0)
        def VERSION(self):
            return self.getToken(SqlBaseParser.VERSION, 0)
        def getRuleIndex(self):
            return SqlBaseParser.RULE_rangeType
        # Listener/visitor dispatch uses hasattr so partial listeners/visitors work.
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterRangeType" ):
                listener.enterRangeType(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitRangeType" ):
                listener.exitRangeType(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitRangeType" ):
                return visitor.visitRangeType(self)
            else:
                return visitor.visitChildren(self)
    def rangeType(self):
        """Parse rule `rangeType` (rule index 168): the keyword TIMESTAMP or VERSION.

        NOTE: generated by ANTLR — `self.state` numbers are ATN coordinates.
        """
        localctx = SqlBaseParser.RangeTypeContext(self, self._ctx, self.state)
        self.enterRule(localctx, 168, self.RULE_rangeType)
        self._la = 0 # Token type
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 2445
            # Match one keyword from the allowed set; otherwise recover inline.
            _la = self._input.LA(1)
            if not(_la==SqlBaseParser.TIMESTAMP or _la==SqlBaseParser.VERSION):
                self._errHandler.recoverInline(self)
            else:
                self._errHandler.reportMatch(self)
                self.consume()
        except RecognitionException as re:
            # Standard generated recovery: record, report, and resynchronize.
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class GrantorContext(ParserRuleContext):
        """Generated base context for rule `grantor` (GRANTED BY clause subject);
        concrete parses use the three labeled-alternative subclasses below."""
        __slots__ = 'parser'
        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser
        def getRuleIndex(self):
            return SqlBaseParser.RULE_grantor
        def copyFrom(self, ctx:ParserRuleContext):
            super().copyFrom(ctx)
    class CurrentUserGrantorContext(GrantorContext):
        """Labeled alternative `CURRENT_USER`."""
        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.GrantorContext
            super().__init__(parser)
            self.copyFrom(ctx)
        def CURRENT_USER(self):
            return self.getToken(SqlBaseParser.CURRENT_USER, 0)
        # Listener/visitor dispatch uses hasattr so partial listeners/visitors work.
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterCurrentUserGrantor" ):
                listener.enterCurrentUserGrantor(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitCurrentUserGrantor" ):
                listener.exitCurrentUserGrantor(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitCurrentUserGrantor" ):
                return visitor.visitCurrentUserGrantor(self)
            else:
                return visitor.visitChildren(self)
    class SpecifiedPrincipalContext(GrantorContext):
        """Labeled alternative: an explicitly named `principal`."""
        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.GrantorContext
            super().__init__(parser)
            self.copyFrom(ctx)
        def principal(self):
            return self.getTypedRuleContext(SqlBaseParser.PrincipalContext,0)
        # Listener/visitor dispatch uses hasattr so partial listeners/visitors work.
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterSpecifiedPrincipal" ):
                listener.enterSpecifiedPrincipal(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitSpecifiedPrincipal" ):
                listener.exitSpecifiedPrincipal(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitSpecifiedPrincipal" ):
                return visitor.visitSpecifiedPrincipal(self)
            else:
                return visitor.visitChildren(self)
    class CurrentRoleGrantorContext(GrantorContext):
        """Labeled alternative `CURRENT_ROLE`."""
        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.GrantorContext
            super().__init__(parser)
            self.copyFrom(ctx)
        def CURRENT_ROLE(self):
            return self.getToken(SqlBaseParser.CURRENT_ROLE, 0)
        # Listener/visitor dispatch uses hasattr so partial listeners/visitors work.
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterCurrentRoleGrantor" ):
                listener.enterCurrentRoleGrantor(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitCurrentRoleGrantor" ):
                listener.exitCurrentRoleGrantor(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitCurrentRoleGrantor" ):
                return visitor.visitCurrentRoleGrantor(self)
            else:
                return visitor.visitChildren(self)
    def grantor(self):
        """Parse the ``grantor`` rule.

        Dispatches on the lookahead token into one of three labeled
        alternatives: an explicit ``principal`` (any non-reserved word or
        identifier token), the CURRENT_USER keyword, or the CURRENT_ROLE
        keyword. Returns the matched GrantorContext subclass instance.
        """
        localctx = SqlBaseParser.GrantorContext(self, self._ctx, self.state)
        self.enterRule(localctx, 170, self.RULE_grantor)
        try:
            self.state = 2450
            self._errHandler.sync(self)
            token = self._input.LA(1)
            # Alternative 1: any token that can begin a principal
            # (non-reserved keywords and the identifier token kinds).
            if token in [SqlBaseParser.ADD, SqlBaseParser.ADMIN, SqlBaseParser.AFTER, SqlBaseParser.ALL, SqlBaseParser.ANALYZE, SqlBaseParser.ANY, SqlBaseParser.ARRAY, SqlBaseParser.ASC, SqlBaseParser.AT, SqlBaseParser.AUTHORIZATION, SqlBaseParser.BERNOULLI, SqlBaseParser.CALL, SqlBaseParser.CASCADE, SqlBaseParser.CATALOGS, SqlBaseParser.COLUMN, SqlBaseParser.COLUMNS, SqlBaseParser.COMMENT, SqlBaseParser.COMMIT, SqlBaseParser.COMMITTED, SqlBaseParser.COUNT, SqlBaseParser.CURRENT, SqlBaseParser.DATA, SqlBaseParser.DATE, SqlBaseParser.DAY, SqlBaseParser.DEFINER, SqlBaseParser.DENY, SqlBaseParser.DESC, SqlBaseParser.DEFINE, SqlBaseParser.DISTRIBUTED, SqlBaseParser.DOUBLE, SqlBaseParser.EMPTY, SqlBaseParser.ERROR, SqlBaseParser.EXCLUDING, SqlBaseParser.EXPLAIN, SqlBaseParser.FETCH, SqlBaseParser.FILTER, SqlBaseParser.FINAL, SqlBaseParser.FIRST, SqlBaseParser.FOLLOWING, SqlBaseParser.FORMAT, SqlBaseParser.FUNCTIONS, SqlBaseParser.GRANT, SqlBaseParser.GRANTED, SqlBaseParser.GRANTS, SqlBaseParser.GRAPHVIZ, SqlBaseParser.GROUPS, SqlBaseParser.HOUR, SqlBaseParser.IF, SqlBaseParser.IGNORE, SqlBaseParser.INCLUDING, SqlBaseParser.INITIAL, SqlBaseParser.INPUT, SqlBaseParser.INTERVAL, SqlBaseParser.INVOKER, SqlBaseParser.IO, SqlBaseParser.ISOLATION, SqlBaseParser.JSON, SqlBaseParser.LAST, SqlBaseParser.LATERAL, SqlBaseParser.LEVEL, SqlBaseParser.LIMIT, SqlBaseParser.LOCAL, SqlBaseParser.LOGICAL, SqlBaseParser.MAP, SqlBaseParser.MATCH, SqlBaseParser.MATCHED, SqlBaseParser.MATCHES, SqlBaseParser.MATCH_RECOGNIZE, SqlBaseParser.MATERIALIZED, SqlBaseParser.MEASURES, SqlBaseParser.MERGE, SqlBaseParser.MINUTE, SqlBaseParser.MONTH, SqlBaseParser.NEXT, SqlBaseParser.NFC, SqlBaseParser.NFD, SqlBaseParser.NFKC, SqlBaseParser.NFKD, SqlBaseParser.NO, SqlBaseParser.NONE, SqlBaseParser.NULLIF, SqlBaseParser.NULLS, SqlBaseParser.OFFSET, SqlBaseParser.OMIT, SqlBaseParser.OF, SqlBaseParser.ONE, SqlBaseParser.ONLY, SqlBaseParser.OPTION, SqlBaseParser.ORDINALITY, SqlBaseParser.OUTPUT, SqlBaseParser.OVER, 
            SqlBaseParser.OVERFLOW, SqlBaseParser.PARTITION, SqlBaseParser.PARTITIONS, SqlBaseParser.PAST, SqlBaseParser.PATH, SqlBaseParser.PATTERN, SqlBaseParser.PER, SqlBaseParser.PERMUTE, SqlBaseParser.POSITION, SqlBaseParser.PRECEDING, SqlBaseParser.PRECISION, SqlBaseParser.PRIVILEGES, SqlBaseParser.PROPERTIES, SqlBaseParser.RANGE, SqlBaseParser.READ, SqlBaseParser.REFRESH, SqlBaseParser.RENAME, SqlBaseParser.REPEATABLE, SqlBaseParser.REPLACE, SqlBaseParser.RESET, SqlBaseParser.RESPECT, SqlBaseParser.RESTRICT, SqlBaseParser.REVOKE, SqlBaseParser.ROLE, SqlBaseParser.ROLES, SqlBaseParser.ROLLBACK, SqlBaseParser.ROW, SqlBaseParser.ROWS, SqlBaseParser.RUNNING, SqlBaseParser.SCHEMA, SqlBaseParser.SCHEMAS, SqlBaseParser.SECOND, SqlBaseParser.SECURITY, SqlBaseParser.SEEK, SqlBaseParser.SERIALIZABLE, SqlBaseParser.SESSION, SqlBaseParser.SET, SqlBaseParser.SETS, SqlBaseParser.SHOW, SqlBaseParser.SOME, SqlBaseParser.START, SqlBaseParser.STATS, SqlBaseParser.SUBSET, SqlBaseParser.SUBSTRING, SqlBaseParser.SYSTEM, SqlBaseParser.TABLES, SqlBaseParser.TABLESAMPLE, SqlBaseParser.TEXT, SqlBaseParser.TIES, SqlBaseParser.TIME, SqlBaseParser.TIMESTAMP, SqlBaseParser.TO, SqlBaseParser.TRANSACTION, SqlBaseParser.TRUNCATE, SqlBaseParser.TRY_CAST, SqlBaseParser.TYPE, SqlBaseParser.UNBOUNDED, SqlBaseParser.UNCOMMITTED, SqlBaseParser.UNMATCHED, SqlBaseParser.UPDATE, SqlBaseParser.USE, SqlBaseParser.USER, SqlBaseParser.VALIDATE, SqlBaseParser.VERBOSE, SqlBaseParser.VERSION, SqlBaseParser.VIEW, SqlBaseParser.WINDOW, SqlBaseParser.WITHIN, SqlBaseParser.WITHOUT, SqlBaseParser.WORK, SqlBaseParser.WRITE, SqlBaseParser.YEAR, SqlBaseParser.ZONE, SqlBaseParser.IDENTIFIER, SqlBaseParser.DIGIT_IDENTIFIER, SqlBaseParser.QUOTED_IDENTIFIER, SqlBaseParser.BACKQUOTED_IDENTIFIER]:
                localctx = SqlBaseParser.SpecifiedPrincipalContext(self, localctx)
                self.enterOuterAlt(localctx, 1)
                self.state = 2447
                self.principal()
            # Alternative 2: CURRENT_USER keyword.
            elif token in [SqlBaseParser.CURRENT_USER]:
                localctx = SqlBaseParser.CurrentUserGrantorContext(self, localctx)
                self.enterOuterAlt(localctx, 2)
                self.state = 2448
                self.match(SqlBaseParser.CURRENT_USER)
            # Alternative 3: CURRENT_ROLE keyword.
            elif token in [SqlBaseParser.CURRENT_ROLE]:
                localctx = SqlBaseParser.CurrentRoleGrantorContext(self, localctx)
                self.enterOuterAlt(localctx, 3)
                self.state = 2449
                self.match(SqlBaseParser.CURRENT_ROLE)
            else:
                raise NoViableAltException(self)
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class PrincipalContext(ParserRuleContext):
        """Parse-tree node for the ``principal`` rule.

        Base context; the labeled alternatives subclass it
        (UnspecifiedPrincipalContext, UserPrincipalContext,
        RolePrincipalContext) via :meth:`copyFrom`.
        """
        __slots__ = 'parser'
        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser
        def getRuleIndex(self):
            # Rule index used by the ATN machinery.
            return SqlBaseParser.RULE_principal
        def copyFrom(self, ctx:ParserRuleContext):
            # Copy parse state from the generic context into a labeled one.
            super().copyFrom(ctx)
class UnspecifiedPrincipalContext(PrincipalContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.PrincipalContext
super().__init__(parser)
self.copyFrom(ctx)
def identifier(self):
return self.getTypedRuleContext(SqlBaseParser.IdentifierContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterUnspecifiedPrincipal" ):
listener.enterUnspecifiedPrincipal(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitUnspecifiedPrincipal" ):
listener.exitUnspecifiedPrincipal(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitUnspecifiedPrincipal" ):
return visitor.visitUnspecifiedPrincipal(self)
else:
return visitor.visitChildren(self)
class UserPrincipalContext(PrincipalContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.PrincipalContext
super().__init__(parser)
self.copyFrom(ctx)
def USER(self):
return self.getToken(SqlBaseParser.USER, 0)
def identifier(self):
return self.getTypedRuleContext(SqlBaseParser.IdentifierContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterUserPrincipal" ):
listener.enterUserPrincipal(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitUserPrincipal" ):
listener.exitUserPrincipal(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitUserPrincipal" ):
return visitor.visitUserPrincipal(self)
else:
return visitor.visitChildren(self)
class RolePrincipalContext(PrincipalContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.PrincipalContext
super().__init__(parser)
self.copyFrom(ctx)
def ROLE(self):
return self.getToken(SqlBaseParser.ROLE, 0)
def identifier(self):
return self.getTypedRuleContext(SqlBaseParser.IdentifierContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterRolePrincipal" ):
listener.enterRolePrincipal(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitRolePrincipal" ):
listener.exitRolePrincipal(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitRolePrincipal" ):
return visitor.visitRolePrincipal(self)
else:
return visitor.visitChildren(self)
    def principal(self):
        """Parse the ``principal`` rule.

        Uses adaptive prediction to choose between a bare identifier,
        ``USER identifier``, or ``ROLE identifier`` and returns the matched
        PrincipalContext subclass instance.
        """
        localctx = SqlBaseParser.PrincipalContext(self, self._ctx, self.state)
        self.enterRule(localctx, 172, self.RULE_principal)
        try:
            self.state = 2457
            self._errHandler.sync(self)
            # Full ATN prediction is needed here: USER/ROLE are also valid
            # identifiers, so one token of lookahead is not enough.
            la_ = self._interp.adaptivePredict(self._input,319,self._ctx)
            if la_ == 1:
                # Alternative 1: bare identifier.
                localctx = SqlBaseParser.UnspecifiedPrincipalContext(self, localctx)
                self.enterOuterAlt(localctx, 1)
                self.state = 2452
                self.identifier()
            elif la_ == 2:
                # Alternative 2: USER identifier.
                localctx = SqlBaseParser.UserPrincipalContext(self, localctx)
                self.enterOuterAlt(localctx, 2)
                self.state = 2453
                self.match(SqlBaseParser.USER)
                self.state = 2454
                self.identifier()
            elif la_ == 3:
                # Alternative 3: ROLE identifier.
                localctx = SqlBaseParser.RolePrincipalContext(self, localctx)
                self.enterOuterAlt(localctx, 3)
                self.state = 2455
                self.match(SqlBaseParser.ROLE)
                self.state = 2456
                self.identifier()
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
class RolesContext(ParserRuleContext):
__slots__ = 'parser'
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def identifier(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(SqlBaseParser.IdentifierContext)
else:
return self.getTypedRuleContext(SqlBaseParser.IdentifierContext,i)
def getRuleIndex(self):
return SqlBaseParser.RULE_roles
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterRoles" ):
listener.enterRoles(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitRoles" ):
listener.exitRoles(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitRoles" ):
return visitor.visitRoles(self)
else:
return visitor.visitChildren(self)
    def roles(self):
        """Parse the ``roles`` rule.

        Matches one identifier followed by zero or more comma-separated
        identifiers and returns the resulting RolesContext.
        """
        localctx = SqlBaseParser.RolesContext(self, self._ctx, self.state)
        self.enterRule(localctx, 174, self.RULE_roles)
        self._la = 0 # Token type
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 2459
            self.identifier()
            self.state = 2464
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            # T__3 is the comma separator; loop while more list items follow.
            while _la==SqlBaseParser.T__3:
                self.state = 2460
                self.match(SqlBaseParser.T__3)
                self.state = 2461
                self.identifier()
                self.state = 2466
                self._errHandler.sync(self)
                _la = self._input.LA(1)
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class IdentifierContext(ParserRuleContext):
        """Parse-tree node for the ``identifier`` rule.

        Base context; the labeled alternatives subclass it
        (UnquotedIdentifierContext, QuotedIdentifierContext,
        BackQuotedIdentifierContext, DigitIdentifierContext) via
        :meth:`copyFrom`.
        """
        __slots__ = 'parser'
        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser
        def getRuleIndex(self):
            # Rule index used by the ATN machinery.
            return SqlBaseParser.RULE_identifier
        def copyFrom(self, ctx:ParserRuleContext):
            # Copy parse state from the generic context into a labeled one.
            super().copyFrom(ctx)
class BackQuotedIdentifierContext(IdentifierContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.IdentifierContext
super().__init__(parser)
self.copyFrom(ctx)
def BACKQUOTED_IDENTIFIER(self):
return self.getToken(SqlBaseParser.BACKQUOTED_IDENTIFIER, 0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterBackQuotedIdentifier" ):
listener.enterBackQuotedIdentifier(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitBackQuotedIdentifier" ):
listener.exitBackQuotedIdentifier(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitBackQuotedIdentifier" ):
return visitor.visitBackQuotedIdentifier(self)
else:
return visitor.visitChildren(self)
class QuotedIdentifierContext(IdentifierContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.IdentifierContext
super().__init__(parser)
self.copyFrom(ctx)
def QUOTED_IDENTIFIER(self):
return self.getToken(SqlBaseParser.QUOTED_IDENTIFIER, 0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterQuotedIdentifier" ):
listener.enterQuotedIdentifier(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitQuotedIdentifier" ):
listener.exitQuotedIdentifier(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitQuotedIdentifier" ):
return visitor.visitQuotedIdentifier(self)
else:
return visitor.visitChildren(self)
class DigitIdentifierContext(IdentifierContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.IdentifierContext
super().__init__(parser)
self.copyFrom(ctx)
def DIGIT_IDENTIFIER(self):
return self.getToken(SqlBaseParser.DIGIT_IDENTIFIER, 0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterDigitIdentifier" ):
listener.enterDigitIdentifier(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitDigitIdentifier" ):
listener.exitDigitIdentifier(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitDigitIdentifier" ):
return visitor.visitDigitIdentifier(self)
else:
return visitor.visitChildren(self)
class UnquotedIdentifierContext(IdentifierContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.IdentifierContext
super().__init__(parser)
self.copyFrom(ctx)
def IDENTIFIER(self):
return self.getToken(SqlBaseParser.IDENTIFIER, 0)
def nonReserved(self):
return self.getTypedRuleContext(SqlBaseParser.NonReservedContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterUnquotedIdentifier" ):
listener.enterUnquotedIdentifier(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitUnquotedIdentifier" ):
listener.exitUnquotedIdentifier(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitUnquotedIdentifier" ):
return visitor.visitUnquotedIdentifier(self)
else:
return visitor.visitChildren(self)
    def identifier(self):
        """Parse the ``identifier`` rule.

        Dispatches on the lookahead token to one of five alternatives:
        a plain IDENTIFIER, a quoted identifier, a non-reserved keyword
        used as an identifier, a backquoted identifier, or a digit-leading
        identifier. Returns the matched IdentifierContext subclass.
        """
        localctx = SqlBaseParser.IdentifierContext(self, self._ctx, self.state)
        self.enterRule(localctx, 176, self.RULE_identifier)
        try:
            self.state = 2472
            self._errHandler.sync(self)
            token = self._input.LA(1)
            # Alternative 1: plain identifier token.
            if token in [SqlBaseParser.IDENTIFIER]:
                localctx = SqlBaseParser.UnquotedIdentifierContext(self, localctx)
                self.enterOuterAlt(localctx, 1)
                self.state = 2467
                self.match(SqlBaseParser.IDENTIFIER)
            # Alternative 2: double-quoted identifier token.
            elif token in [SqlBaseParser.QUOTED_IDENTIFIER]:
                localctx = SqlBaseParser.QuotedIdentifierContext(self, localctx)
                self.enterOuterAlt(localctx, 2)
                self.state = 2468
                self.match(SqlBaseParser.QUOTED_IDENTIFIER)
            # Alternative 3: any non-reserved keyword used as an identifier.
            elif token in [SqlBaseParser.ADD, SqlBaseParser.ADMIN, SqlBaseParser.AFTER, SqlBaseParser.ALL, SqlBaseParser.ANALYZE, SqlBaseParser.ANY, SqlBaseParser.ARRAY, SqlBaseParser.ASC, SqlBaseParser.AT, SqlBaseParser.AUTHORIZATION, SqlBaseParser.BERNOULLI, SqlBaseParser.CALL, SqlBaseParser.CASCADE, SqlBaseParser.CATALOGS, SqlBaseParser.COLUMN, SqlBaseParser.COLUMNS, SqlBaseParser.COMMENT, SqlBaseParser.COMMIT, SqlBaseParser.COMMITTED, SqlBaseParser.COUNT, SqlBaseParser.CURRENT, SqlBaseParser.DATA, SqlBaseParser.DATE, SqlBaseParser.DAY, SqlBaseParser.DEFINER, SqlBaseParser.DENY, SqlBaseParser.DESC, SqlBaseParser.DEFINE, SqlBaseParser.DISTRIBUTED, SqlBaseParser.DOUBLE, SqlBaseParser.EMPTY, SqlBaseParser.ERROR, SqlBaseParser.EXCLUDING, SqlBaseParser.EXPLAIN, SqlBaseParser.FETCH, SqlBaseParser.FILTER, SqlBaseParser.FINAL, SqlBaseParser.FIRST, SqlBaseParser.FOLLOWING, SqlBaseParser.FORMAT, SqlBaseParser.FUNCTIONS, SqlBaseParser.GRANT, SqlBaseParser.GRANTED, SqlBaseParser.GRANTS, SqlBaseParser.GRAPHVIZ, SqlBaseParser.GROUPS, SqlBaseParser.HOUR, SqlBaseParser.IF, SqlBaseParser.IGNORE, SqlBaseParser.INCLUDING, SqlBaseParser.INITIAL, SqlBaseParser.INPUT, SqlBaseParser.INTERVAL, SqlBaseParser.INVOKER, SqlBaseParser.IO, SqlBaseParser.ISOLATION, SqlBaseParser.JSON, SqlBaseParser.LAST, SqlBaseParser.LATERAL, SqlBaseParser.LEVEL, SqlBaseParser.LIMIT, SqlBaseParser.LOCAL, SqlBaseParser.LOGICAL, SqlBaseParser.MAP, SqlBaseParser.MATCH, SqlBaseParser.MATCHED, SqlBaseParser.MATCHES, SqlBaseParser.MATCH_RECOGNIZE, SqlBaseParser.MATERIALIZED, SqlBaseParser.MEASURES, SqlBaseParser.MERGE, SqlBaseParser.MINUTE, SqlBaseParser.MONTH, SqlBaseParser.NEXT, SqlBaseParser.NFC, SqlBaseParser.NFD, SqlBaseParser.NFKC, SqlBaseParser.NFKD, SqlBaseParser.NO, SqlBaseParser.NONE, SqlBaseParser.NULLIF, SqlBaseParser.NULLS, SqlBaseParser.OFFSET, SqlBaseParser.OMIT, SqlBaseParser.OF, SqlBaseParser.ONE, SqlBaseParser.ONLY, SqlBaseParser.OPTION, SqlBaseParser.ORDINALITY, SqlBaseParser.OUTPUT, SqlBaseParser.OVER, 
            SqlBaseParser.OVERFLOW, SqlBaseParser.PARTITION, SqlBaseParser.PARTITIONS, SqlBaseParser.PAST, SqlBaseParser.PATH, SqlBaseParser.PATTERN, SqlBaseParser.PER, SqlBaseParser.PERMUTE, SqlBaseParser.POSITION, SqlBaseParser.PRECEDING, SqlBaseParser.PRECISION, SqlBaseParser.PRIVILEGES, SqlBaseParser.PROPERTIES, SqlBaseParser.RANGE, SqlBaseParser.READ, SqlBaseParser.REFRESH, SqlBaseParser.RENAME, SqlBaseParser.REPEATABLE, SqlBaseParser.REPLACE, SqlBaseParser.RESET, SqlBaseParser.RESPECT, SqlBaseParser.RESTRICT, SqlBaseParser.REVOKE, SqlBaseParser.ROLE, SqlBaseParser.ROLES, SqlBaseParser.ROLLBACK, SqlBaseParser.ROW, SqlBaseParser.ROWS, SqlBaseParser.RUNNING, SqlBaseParser.SCHEMA, SqlBaseParser.SCHEMAS, SqlBaseParser.SECOND, SqlBaseParser.SECURITY, SqlBaseParser.SEEK, SqlBaseParser.SERIALIZABLE, SqlBaseParser.SESSION, SqlBaseParser.SET, SqlBaseParser.SETS, SqlBaseParser.SHOW, SqlBaseParser.SOME, SqlBaseParser.START, SqlBaseParser.STATS, SqlBaseParser.SUBSET, SqlBaseParser.SUBSTRING, SqlBaseParser.SYSTEM, SqlBaseParser.TABLES, SqlBaseParser.TABLESAMPLE, SqlBaseParser.TEXT, SqlBaseParser.TIES, SqlBaseParser.TIME, SqlBaseParser.TIMESTAMP, SqlBaseParser.TO, SqlBaseParser.TRANSACTION, SqlBaseParser.TRUNCATE, SqlBaseParser.TRY_CAST, SqlBaseParser.TYPE, SqlBaseParser.UNBOUNDED, SqlBaseParser.UNCOMMITTED, SqlBaseParser.UNMATCHED, SqlBaseParser.UPDATE, SqlBaseParser.USE, SqlBaseParser.USER, SqlBaseParser.VALIDATE, SqlBaseParser.VERBOSE, SqlBaseParser.VERSION, SqlBaseParser.VIEW, SqlBaseParser.WINDOW, SqlBaseParser.WITHIN, SqlBaseParser.WITHOUT, SqlBaseParser.WORK, SqlBaseParser.WRITE, SqlBaseParser.YEAR, SqlBaseParser.ZONE]:
                localctx = SqlBaseParser.UnquotedIdentifierContext(self, localctx)
                self.enterOuterAlt(localctx, 3)
                self.state = 2469
                self.nonReserved()
            # Alternative 4: backquoted identifier token.
            elif token in [SqlBaseParser.BACKQUOTED_IDENTIFIER]:
                localctx = SqlBaseParser.BackQuotedIdentifierContext(self, localctx)
                self.enterOuterAlt(localctx, 4)
                self.state = 2470
                self.match(SqlBaseParser.BACKQUOTED_IDENTIFIER)
            # Alternative 5: digit-leading identifier token.
            elif token in [SqlBaseParser.DIGIT_IDENTIFIER]:
                localctx = SqlBaseParser.DigitIdentifierContext(self, localctx)
                self.enterOuterAlt(localctx, 5)
                self.state = 2471
                self.match(SqlBaseParser.DIGIT_IDENTIFIER)
            else:
                raise NoViableAltException(self)
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class NumberContext(ParserRuleContext):
        """Parse-tree node for the ``number`` rule.

        Base context; the labeled alternatives subclass it
        (DecimalLiteralContext, DoubleLiteralContext,
        IntegerLiteralContext) via :meth:`copyFrom`.
        """
        __slots__ = 'parser'
        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser
        def getRuleIndex(self):
            # Rule index used by the ATN machinery.
            return SqlBaseParser.RULE_number
        def copyFrom(self, ctx:ParserRuleContext):
            # Copy parse state from the generic context into a labeled one.
            super().copyFrom(ctx)
class DecimalLiteralContext(NumberContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.NumberContext
super().__init__(parser)
self.copyFrom(ctx)
def DECIMAL_VALUE(self):
return self.getToken(SqlBaseParser.DECIMAL_VALUE, 0)
def MINUS(self):
return self.getToken(SqlBaseParser.MINUS, 0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterDecimalLiteral" ):
listener.enterDecimalLiteral(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitDecimalLiteral" ):
listener.exitDecimalLiteral(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitDecimalLiteral" ):
return visitor.visitDecimalLiteral(self)
else:
return visitor.visitChildren(self)
class DoubleLiteralContext(NumberContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.NumberContext
super().__init__(parser)
self.copyFrom(ctx)
def DOUBLE_VALUE(self):
return self.getToken(SqlBaseParser.DOUBLE_VALUE, 0)
def MINUS(self):
return self.getToken(SqlBaseParser.MINUS, 0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterDoubleLiteral" ):
listener.enterDoubleLiteral(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitDoubleLiteral" ):
listener.exitDoubleLiteral(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitDoubleLiteral" ):
return visitor.visitDoubleLiteral(self)
else:
return visitor.visitChildren(self)
class IntegerLiteralContext(NumberContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.NumberContext
super().__init__(parser)
self.copyFrom(ctx)
def INTEGER_VALUE(self):
return self.getToken(SqlBaseParser.INTEGER_VALUE, 0)
def MINUS(self):
return self.getToken(SqlBaseParser.MINUS, 0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterIntegerLiteral" ):
listener.enterIntegerLiteral(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitIntegerLiteral" ):
listener.exitIntegerLiteral(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitIntegerLiteral" ):
return visitor.visitIntegerLiteral(self)
else:
return visitor.visitChildren(self)
    def number(self):
        """Parse the ``number`` rule.

        Uses adaptive prediction to choose between a decimal, double, or
        integer literal, each optionally preceded by a MINUS sign, and
        returns the matched NumberContext subclass instance.
        """
        localctx = SqlBaseParser.NumberContext(self, self._ctx, self.state)
        self.enterRule(localctx, 178, self.RULE_number)
        self._la = 0 # Token type
        try:
            self.state = 2486
            self._errHandler.sync(self)
            # Prediction decides the literal kind from the token after the
            # optional leading MINUS.
            la_ = self._interp.adaptivePredict(self._input,325,self._ctx)
            if la_ == 1:
                # Alternative 1: MINUS? DECIMAL_VALUE
                localctx = SqlBaseParser.DecimalLiteralContext(self, localctx)
                self.enterOuterAlt(localctx, 1)
                self.state = 2475
                self._errHandler.sync(self)
                _la = self._input.LA(1)
                if _la==SqlBaseParser.MINUS:
                    self.state = 2474
                    self.match(SqlBaseParser.MINUS)
                self.state = 2477
                self.match(SqlBaseParser.DECIMAL_VALUE)
            elif la_ == 2:
                # Alternative 2: MINUS? DOUBLE_VALUE
                localctx = SqlBaseParser.DoubleLiteralContext(self, localctx)
                self.enterOuterAlt(localctx, 2)
                self.state = 2479
                self._errHandler.sync(self)
                _la = self._input.LA(1)
                if _la==SqlBaseParser.MINUS:
                    self.state = 2478
                    self.match(SqlBaseParser.MINUS)
                self.state = 2481
                self.match(SqlBaseParser.DOUBLE_VALUE)
            elif la_ == 3:
                # Alternative 3: MINUS? INTEGER_VALUE
                localctx = SqlBaseParser.IntegerLiteralContext(self, localctx)
                self.enterOuterAlt(localctx, 3)
                self.state = 2483
                self._errHandler.sync(self)
                _la = self._input.LA(1)
                if _la==SqlBaseParser.MINUS:
                    self.state = 2482
                    self.match(SqlBaseParser.MINUS)
                self.state = 2485
                self.match(SqlBaseParser.INTEGER_VALUE)
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
class NonReservedContext(ParserRuleContext):
__slots__ = 'parser'
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def ADD(self):
return self.getToken(SqlBaseParser.ADD, 0)
def ADMIN(self):
return self.getToken(SqlBaseParser.ADMIN, 0)
def AFTER(self):
return self.getToken(SqlBaseParser.AFTER, 0)
def ALL(self):
return self.getToken(SqlBaseParser.ALL, 0)
def ANALYZE(self):
return self.getToken(SqlBaseParser.ANALYZE, 0)
def ANY(self):
return self.getToken(SqlBaseParser.ANY, 0)
def ARRAY(self):
return self.getToken(SqlBaseParser.ARRAY, 0)
def ASC(self):
return self.getToken(SqlBaseParser.ASC, 0)
def AT(self):
return self.getToken(SqlBaseParser.AT, 0)
def AUTHORIZATION(self):
return self.getToken(SqlBaseParser.AUTHORIZATION, 0)
def BERNOULLI(self):
return self.getToken(SqlBaseParser.BERNOULLI, 0)
def CALL(self):
return self.getToken(SqlBaseParser.CALL, 0)
def CASCADE(self):
return self.getToken(SqlBaseParser.CASCADE, 0)
def CATALOGS(self):
return self.getToken(SqlBaseParser.CATALOGS, 0)
def COLUMN(self):
return self.getToken(SqlBaseParser.COLUMN, 0)
def COLUMNS(self):
return self.getToken(SqlBaseParser.COLUMNS, 0)
def COMMENT(self):
return self.getToken(SqlBaseParser.COMMENT, 0)
def COMMIT(self):
return self.getToken(SqlBaseParser.COMMIT, 0)
def COMMITTED(self):
return self.getToken(SqlBaseParser.COMMITTED, 0)
def COUNT(self):
return self.getToken(SqlBaseParser.COUNT, 0)
def CURRENT(self):
return self.getToken(SqlBaseParser.CURRENT, 0)
def DATA(self):
return self.getToken(SqlBaseParser.DATA, 0)
def DATE(self):
return self.getToken(SqlBaseParser.DATE, 0)
def DAY(self):
return self.getToken(SqlBaseParser.DAY, 0)
def DEFINE(self):
return self.getToken(SqlBaseParser.DEFINE, 0)
def DEFINER(self):
return self.getToken(SqlBaseParser.DEFINER, 0)
def DESC(self):
return self.getToken(SqlBaseParser.DESC, 0)
def DISTRIBUTED(self):
return self.getToken(SqlBaseParser.DISTRIBUTED, 0)
def DOUBLE(self):
return self.getToken(SqlBaseParser.DOUBLE, 0)
def EMPTY(self):
return self.getToken(SqlBaseParser.EMPTY, 0)
def ERROR(self):
return self.getToken(SqlBaseParser.ERROR, 0)
def EXCLUDING(self):
return self.getToken(SqlBaseParser.EXCLUDING, 0)
def EXPLAIN(self):
return self.getToken(SqlBaseParser.EXPLAIN, 0)
def FETCH(self):
return self.getToken(SqlBaseParser.FETCH, 0)
def FILTER(self):
return self.getToken(SqlBaseParser.FILTER, 0)
def FINAL(self):
return self.getToken(SqlBaseParser.FINAL, 0)
def FIRST(self):
return self.getToken(SqlBaseParser.FIRST, 0)
def FOLLOWING(self):
return self.getToken(SqlBaseParser.FOLLOWING, 0)
def FORMAT(self):
return self.getToken(SqlBaseParser.FORMAT, 0)
def FUNCTIONS(self):
return self.getToken(SqlBaseParser.FUNCTIONS, 0)
def GRANT(self):
return self.getToken(SqlBaseParser.GRANT, 0)
def DENY(self):
return self.getToken(SqlBaseParser.DENY, 0)
def GRANTED(self):
return self.getToken(SqlBaseParser.GRANTED, 0)
def GRANTS(self):
return self.getToken(SqlBaseParser.GRANTS, 0)
def GRAPHVIZ(self):
return self.getToken(SqlBaseParser.GRAPHVIZ, 0)
def GROUPS(self):
return self.getToken(SqlBaseParser.GROUPS, 0)
def HOUR(self):
return self.getToken(SqlBaseParser.HOUR, 0)
def IF(self):
return self.getToken(SqlBaseParser.IF, 0)
def IGNORE(self):
return self.getToken(SqlBaseParser.IGNORE, 0)
def INCLUDING(self):
return self.getToken(SqlBaseParser.INCLUDING, 0)
def INITIAL(self):
return self.getToken(SqlBaseParser.INITIAL, 0)
def INPUT(self):
return self.getToken(SqlBaseParser.INPUT, 0)
def INTERVAL(self):
return self.getToken(SqlBaseParser.INTERVAL, 0)
def INVOKER(self):
return self.getToken(SqlBaseParser.INVOKER, 0)
def IO(self):
return self.getToken(SqlBaseParser.IO, 0)
def ISOLATION(self):
return self.getToken(SqlBaseParser.ISOLATION, 0)
def JSON(self):
return self.getToken(SqlBaseParser.JSON, 0)
def LAST(self):
return self.getToken(SqlBaseParser.LAST, 0)
def LATERAL(self):
return self.getToken(SqlBaseParser.LATERAL, 0)
def LEVEL(self):
return self.getToken(SqlBaseParser.LEVEL, 0)
def LIMIT(self):
return self.getToken(SqlBaseParser.LIMIT, 0)
def LOCAL(self):
return self.getToken(SqlBaseParser.LOCAL, 0)
def LOGICAL(self):
return self.getToken(SqlBaseParser.LOGICAL, 0)
def MAP(self):
return self.getToken(SqlBaseParser.MAP, 0)
def MATCH(self):
return self.getToken(SqlBaseParser.MATCH, 0)
def MATCHED(self):
return self.getToken(SqlBaseParser.MATCHED, 0)
def MATCHES(self):
return self.getToken(SqlBaseParser.MATCHES, 0)
def MATCH_RECOGNIZE(self):
return self.getToken(SqlBaseParser.MATCH_RECOGNIZE, 0)
def MATERIALIZED(self):
return self.getToken(SqlBaseParser.MATERIALIZED, 0)
def MEASURES(self):
return self.getToken(SqlBaseParser.MEASURES, 0)
def MERGE(self):
return self.getToken(SqlBaseParser.MERGE, 0)
def MINUTE(self):
return self.getToken(SqlBaseParser.MINUTE, 0)
def MONTH(self):
return self.getToken(SqlBaseParser.MONTH, 0)
def NEXT(self):
return self.getToken(SqlBaseParser.NEXT, 0)
def NFC(self):
return self.getToken(SqlBaseParser.NFC, 0)
def NFD(self):
return self.getToken(SqlBaseParser.NFD, 0)
def NFKC(self):
return self.getToken(SqlBaseParser.NFKC, 0)
def NFKD(self):
return self.getToken(SqlBaseParser.NFKD, 0)
def NO(self):
return self.getToken(SqlBaseParser.NO, 0)
def NONE(self):
return self.getToken(SqlBaseParser.NONE, 0)
def NULLIF(self):
return self.getToken(SqlBaseParser.NULLIF, 0)
def NULLS(self):
return self.getToken(SqlBaseParser.NULLS, 0)
def OF(self):
return self.getToken(SqlBaseParser.OF, 0)
def OFFSET(self):
return self.getToken(SqlBaseParser.OFFSET, 0)
def OMIT(self):
return self.getToken(SqlBaseParser.OMIT, 0)
def ONE(self):
return self.getToken(SqlBaseParser.ONE, 0)
def ONLY(self):
return self.getToken(SqlBaseParser.ONLY, 0)
def OPTION(self):
return self.getToken(SqlBaseParser.OPTION, 0)
def ORDINALITY(self):
return self.getToken(SqlBaseParser.ORDINALITY, 0)
def OUTPUT(self):
return self.getToken(SqlBaseParser.OUTPUT, 0)
def OVER(self):
return self.getToken(SqlBaseParser.OVER, 0)
def OVERFLOW(self):
return self.getToken(SqlBaseParser.OVERFLOW, 0)
def PARTITION(self):
return self.getToken(SqlBaseParser.PARTITION, 0)
def PARTITIONS(self):
return self.getToken(SqlBaseParser.PARTITIONS, 0)
def PAST(self):
return self.getToken(SqlBaseParser.PAST, 0)
def PATH(self):
return self.getToken(SqlBaseParser.PATH, 0)
def PATTERN(self):
return self.getToken(SqlBaseParser.PATTERN, 0)
def PER(self):
return self.getToken(SqlBaseParser.PER, 0)
def PERMUTE(self):
return self.getToken(SqlBaseParser.PERMUTE, 0)
def POSITION(self):
return self.getToken(SqlBaseParser.POSITION, 0)
def PRECEDING(self):
return self.getToken(SqlBaseParser.PRECEDING, 0)
def PRECISION(self):
return self.getToken(SqlBaseParser.PRECISION, 0)
def PRIVILEGES(self):
return self.getToken(SqlBaseParser.PRIVILEGES, 0)
# --- Generated token-accessor methods (ANTLR) for the NonReservedContext ---
# Each accessor delegates to self.getToken(<TOKEN_TYPE>, 0), i.e. it looks up
# the first terminal node of that token type inside this rule context.  The
# nonReserved rule matches exactly one keyword token, so at most one of these
# accessors yields a node for any given parse.
def PROPERTIES(self):
    return self.getToken(SqlBaseParser.PROPERTIES, 0)
def RANGE(self):
    return self.getToken(SqlBaseParser.RANGE, 0)
def READ(self):
    return self.getToken(SqlBaseParser.READ, 0)
def REFRESH(self):
    return self.getToken(SqlBaseParser.REFRESH, 0)
def RENAME(self):
    return self.getToken(SqlBaseParser.RENAME, 0)
def REPEATABLE(self):
    return self.getToken(SqlBaseParser.REPEATABLE, 0)
def REPLACE(self):
    return self.getToken(SqlBaseParser.REPLACE, 0)
def RESET(self):
    return self.getToken(SqlBaseParser.RESET, 0)
def RESPECT(self):
    return self.getToken(SqlBaseParser.RESPECT, 0)
def RESTRICT(self):
    return self.getToken(SqlBaseParser.RESTRICT, 0)
def REVOKE(self):
    return self.getToken(SqlBaseParser.REVOKE, 0)
def ROLE(self):
    return self.getToken(SqlBaseParser.ROLE, 0)
def ROLES(self):
    return self.getToken(SqlBaseParser.ROLES, 0)
def ROLLBACK(self):
    return self.getToken(SqlBaseParser.ROLLBACK, 0)
def ROW(self):
    return self.getToken(SqlBaseParser.ROW, 0)
def ROWS(self):
    return self.getToken(SqlBaseParser.ROWS, 0)
def RUNNING(self):
    return self.getToken(SqlBaseParser.RUNNING, 0)
def SCHEMA(self):
    return self.getToken(SqlBaseParser.SCHEMA, 0)
def SCHEMAS(self):
    return self.getToken(SqlBaseParser.SCHEMAS, 0)
def SECOND(self):
    return self.getToken(SqlBaseParser.SECOND, 0)
def SECURITY(self):
    return self.getToken(SqlBaseParser.SECURITY, 0)
def SEEK(self):
    return self.getToken(SqlBaseParser.SEEK, 0)
def SERIALIZABLE(self):
    return self.getToken(SqlBaseParser.SERIALIZABLE, 0)
def SESSION(self):
    return self.getToken(SqlBaseParser.SESSION, 0)
def SET(self):
    return self.getToken(SqlBaseParser.SET, 0)
def SETS(self):
    return self.getToken(SqlBaseParser.SETS, 0)
def SHOW(self):
    return self.getToken(SqlBaseParser.SHOW, 0)
def SOME(self):
    return self.getToken(SqlBaseParser.SOME, 0)
def START(self):
    return self.getToken(SqlBaseParser.START, 0)
def STATS(self):
    return self.getToken(SqlBaseParser.STATS, 0)
def SUBSET(self):
    return self.getToken(SqlBaseParser.SUBSET, 0)
def SUBSTRING(self):
    return self.getToken(SqlBaseParser.SUBSTRING, 0)
def SYSTEM(self):
    return self.getToken(SqlBaseParser.SYSTEM, 0)
def TABLES(self):
    return self.getToken(SqlBaseParser.TABLES, 0)
def TABLESAMPLE(self):
    return self.getToken(SqlBaseParser.TABLESAMPLE, 0)
def TEXT(self):
    return self.getToken(SqlBaseParser.TEXT, 0)
def TIES(self):
    return self.getToken(SqlBaseParser.TIES, 0)
def TIME(self):
    return self.getToken(SqlBaseParser.TIME, 0)
def TIMESTAMP(self):
    return self.getToken(SqlBaseParser.TIMESTAMP, 0)
def TO(self):
    return self.getToken(SqlBaseParser.TO, 0)
def TRANSACTION(self):
    return self.getToken(SqlBaseParser.TRANSACTION, 0)
def TRUNCATE(self):
    return self.getToken(SqlBaseParser.TRUNCATE, 0)
def TRY_CAST(self):
    return self.getToken(SqlBaseParser.TRY_CAST, 0)
def TYPE(self):
    return self.getToken(SqlBaseParser.TYPE, 0)
def UNBOUNDED(self):
    return self.getToken(SqlBaseParser.UNBOUNDED, 0)
def UNCOMMITTED(self):
    return self.getToken(SqlBaseParser.UNCOMMITTED, 0)
def UNMATCHED(self):
    return self.getToken(SqlBaseParser.UNMATCHED, 0)
def UPDATE(self):
    return self.getToken(SqlBaseParser.UPDATE, 0)
def USE(self):
    return self.getToken(SqlBaseParser.USE, 0)
def USER(self):
    return self.getToken(SqlBaseParser.USER, 0)
def VALIDATE(self):
    return self.getToken(SqlBaseParser.VALIDATE, 0)
def VERBOSE(self):
    return self.getToken(SqlBaseParser.VERBOSE, 0)
def VERSION(self):
    return self.getToken(SqlBaseParser.VERSION, 0)
def VIEW(self):
    return self.getToken(SqlBaseParser.VIEW, 0)
def WINDOW(self):
    return self.getToken(SqlBaseParser.WINDOW, 0)
def WITHIN(self):
    return self.getToken(SqlBaseParser.WITHIN, 0)
def WITHOUT(self):
    return self.getToken(SqlBaseParser.WITHOUT, 0)
def WORK(self):
    return self.getToken(SqlBaseParser.WORK, 0)
def WRITE(self):
    return self.getToken(SqlBaseParser.WRITE, 0)
def YEAR(self):
    return self.getToken(SqlBaseParser.YEAR, 0)
def ZONE(self):
    return self.getToken(SqlBaseParser.ZONE, 0)
def getRuleIndex(self):
    # Identifies this context object as an instance of the nonReserved rule.
    return SqlBaseParser.RULE_nonReserved
def enterRule(self, listener:ParseTreeListener):
    """Notify *listener* that the parser is entering a nonReserved context.

    The callback is optional: listeners that do not define
    ``enterNonReserved`` are silently skipped.
    """
    callback = getattr(listener, "enterNonReserved", None)
    if callback is not None:
        callback(self)
def exitRule(self, listener:ParseTreeListener):
    """Notify *listener* that the parser is leaving a nonReserved context.

    The callback is optional: listeners that do not define
    ``exitNonReserved`` are silently skipped.
    """
    callback = getattr(listener, "exitNonReserved", None)
    if callback is not None:
        callback(self)
def accept(self, visitor:ParseTreeVisitor):
    """Visitor dispatch for the nonReserved rule.

    Calls ``visitor.visitNonReserved`` when the visitor implements it;
    otherwise falls back to the generic ``visitChildren`` traversal.
    """
    if not hasattr(visitor, "visitNonReserved"):
        return visitor.visitChildren(self)
    return visitor.visitNonReserved(self)
def nonReserved(self):
    """Parse the ``nonReserved`` grammar rule.

    Matches exactly one keyword token that the grammar allows to be used
    as an identifier.  Auto-generated by ANTLR: the membership test below
    packs the allowed token types into 64-bit words and checks the
    lookahead token against each word with shift/mask arithmetic.
    """
    localctx = SqlBaseParser.NonReservedContext(self, self._ctx, self.state)
    self.enterRule(localctx, 180, self.RULE_nonReserved)
    self._la = 0 # Token type
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 2488
        # Peek one token ahead; consume it only if it is in the
        # non-reserved keyword set, otherwise run single-token recovery.
        _la = self._input.LA(1)
        if not((((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << SqlBaseParser.ADD) | (1 << SqlBaseParser.ADMIN) | (1 << SqlBaseParser.AFTER) | (1 << SqlBaseParser.ALL) | (1 << SqlBaseParser.ANALYZE) | (1 << SqlBaseParser.ANY) | (1 << SqlBaseParser.ARRAY) | (1 << SqlBaseParser.ASC) | (1 << SqlBaseParser.AT) | (1 << SqlBaseParser.AUTHORIZATION) | (1 << SqlBaseParser.BERNOULLI) | (1 << SqlBaseParser.CALL) | (1 << SqlBaseParser.CASCADE) | (1 << SqlBaseParser.CATALOGS) | (1 << SqlBaseParser.COLUMN) | (1 << SqlBaseParser.COLUMNS) | (1 << SqlBaseParser.COMMENT) | (1 << SqlBaseParser.COMMIT) | (1 << SqlBaseParser.COMMITTED) | (1 << SqlBaseParser.COUNT) | (1 << SqlBaseParser.CURRENT) | (1 << SqlBaseParser.DATA) | (1 << SqlBaseParser.DATE) | (1 << SqlBaseParser.DAY) | (1 << SqlBaseParser.DEFINER) | (1 << SqlBaseParser.DENY))) != 0) or ((((_la - 64)) & ~0x3f) == 0 and ((1 << (_la - 64)) & ((1 << (SqlBaseParser.DESC - 64)) | (1 << (SqlBaseParser.DEFINE - 64)) | (1 << (SqlBaseParser.DISTRIBUTED - 64)) | (1 << (SqlBaseParser.DOUBLE - 64)) | (1 << (SqlBaseParser.EMPTY - 64)) | (1 << (SqlBaseParser.ERROR - 64)) | (1 << (SqlBaseParser.EXCLUDING - 64)) | (1 << (SqlBaseParser.EXPLAIN - 64)) | (1 << (SqlBaseParser.FETCH - 64)) | (1 << (SqlBaseParser.FILTER - 64)) | (1 << (SqlBaseParser.FINAL - 64)) | (1 << (SqlBaseParser.FIRST - 64)) | (1 << (SqlBaseParser.FOLLOWING - 64)) | (1 << (SqlBaseParser.FORMAT - 64)) | (1 << (SqlBaseParser.FUNCTIONS - 64)) | (1 << (SqlBaseParser.GRANT - 64)) | (1 << (SqlBaseParser.GRANTED - 64)) | (1 << (SqlBaseParser.GRANTS - 64)) | (1 << (SqlBaseParser.GRAPHVIZ - 64)) | (1 << (SqlBaseParser.GROUPS - 64)) | (1 << (SqlBaseParser.HOUR - 64)) | (1 << (SqlBaseParser.IF - 64)) | (1 << (SqlBaseParser.IGNORE - 64)) | (1 << (SqlBaseParser.INCLUDING - 64)) | (1 << (SqlBaseParser.INITIAL - 64)) | (1 << (SqlBaseParser.INPUT - 64)) | (1 << (SqlBaseParser.INTERVAL - 64)) | (1 << (SqlBaseParser.INVOKER - 64)) | (1 << (SqlBaseParser.IO - 64)) | (1 << (SqlBaseParser.ISOLATION - 64)) 
        | (1 << (SqlBaseParser.JSON - 64)) | (1 << (SqlBaseParser.LAST - 64)) | (1 << (SqlBaseParser.LATERAL - 64)) | (1 << (SqlBaseParser.LEVEL - 64)) | (1 << (SqlBaseParser.LIMIT - 64)) | (1 << (SqlBaseParser.LOCAL - 64)))) != 0) or ((((_la - 129)) & ~0x3f) == 0 and ((1 << (_la - 129)) & ((1 << (SqlBaseParser.LOGICAL - 129)) | (1 << (SqlBaseParser.MAP - 129)) | (1 << (SqlBaseParser.MATCH - 129)) | (1 << (SqlBaseParser.MATCHED - 129)) | (1 << (SqlBaseParser.MATCHES - 129)) | (1 << (SqlBaseParser.MATCH_RECOGNIZE - 129)) | (1 << (SqlBaseParser.MATERIALIZED - 129)) | (1 << (SqlBaseParser.MEASURES - 129)) | (1 << (SqlBaseParser.MERGE - 129)) | (1 << (SqlBaseParser.MINUTE - 129)) | (1 << (SqlBaseParser.MONTH - 129)) | (1 << (SqlBaseParser.NEXT - 129)) | (1 << (SqlBaseParser.NFC - 129)) | (1 << (SqlBaseParser.NFD - 129)) | (1 << (SqlBaseParser.NFKC - 129)) | (1 << (SqlBaseParser.NFKD - 129)) | (1 << (SqlBaseParser.NO - 129)) | (1 << (SqlBaseParser.NONE - 129)) | (1 << (SqlBaseParser.NULLIF - 129)) | (1 << (SqlBaseParser.NULLS - 129)) | (1 << (SqlBaseParser.OFFSET - 129)) | (1 << (SqlBaseParser.OMIT - 129)) | (1 << (SqlBaseParser.OF - 129)) | (1 << (SqlBaseParser.ONE - 129)) | (1 << (SqlBaseParser.ONLY - 129)) | (1 << (SqlBaseParser.OPTION - 129)) | (1 << (SqlBaseParser.ORDINALITY - 129)) | (1 << (SqlBaseParser.OUTPUT - 129)) | (1 << (SqlBaseParser.OVER - 129)) | (1 << (SqlBaseParser.OVERFLOW - 129)) | (1 << (SqlBaseParser.PARTITION - 129)) | (1 << (SqlBaseParser.PARTITIONS - 129)) | (1 << (SqlBaseParser.PAST - 129)) | (1 << (SqlBaseParser.PATH - 129)) | (1 << (SqlBaseParser.PATTERN - 129)) | (1 << (SqlBaseParser.PER - 129)) | (1 << (SqlBaseParser.PERMUTE - 129)) | (1 << (SqlBaseParser.POSITION - 129)) | (1 << (SqlBaseParser.PRECEDING - 129)) | (1 << (SqlBaseParser.PRECISION - 129)) | (1 << (SqlBaseParser.PRIVILEGES - 129)) | (1 << (SqlBaseParser.PROPERTIES - 129)) | (1 << (SqlBaseParser.RANGE - 129)) | (1 << (SqlBaseParser.READ - 129)) | (1 << (SqlBaseParser.REFRESH - 129)) | 
        (1 << (SqlBaseParser.RENAME - 129)) | (1 << (SqlBaseParser.REPEATABLE - 129)) | (1 << (SqlBaseParser.REPLACE - 129)) | (1 << (SqlBaseParser.RESET - 129)) | (1 << (SqlBaseParser.RESPECT - 129)) | (1 << (SqlBaseParser.RESTRICT - 129)) | (1 << (SqlBaseParser.REVOKE - 129)) | (1 << (SqlBaseParser.ROLE - 129)))) != 0) or ((((_la - 193)) & ~0x3f) == 0 and ((1 << (_la - 193)) & ((1 << (SqlBaseParser.ROLES - 193)) | (1 << (SqlBaseParser.ROLLBACK - 193)) | (1 << (SqlBaseParser.ROW - 193)) | (1 << (SqlBaseParser.ROWS - 193)) | (1 << (SqlBaseParser.RUNNING - 193)) | (1 << (SqlBaseParser.SCHEMA - 193)) | (1 << (SqlBaseParser.SCHEMAS - 193)) | (1 << (SqlBaseParser.SECOND - 193)) | (1 << (SqlBaseParser.SECURITY - 193)) | (1 << (SqlBaseParser.SEEK - 193)) | (1 << (SqlBaseParser.SERIALIZABLE - 193)) | (1 << (SqlBaseParser.SESSION - 193)) | (1 << (SqlBaseParser.SET - 193)) | (1 << (SqlBaseParser.SETS - 193)) | (1 << (SqlBaseParser.SHOW - 193)) | (1 << (SqlBaseParser.SOME - 193)) | (1 << (SqlBaseParser.START - 193)) | (1 << (SqlBaseParser.STATS - 193)) | (1 << (SqlBaseParser.SUBSET - 193)) | (1 << (SqlBaseParser.SUBSTRING - 193)) | (1 << (SqlBaseParser.SYSTEM - 193)) | (1 << (SqlBaseParser.TABLES - 193)) | (1 << (SqlBaseParser.TABLESAMPLE - 193)) | (1 << (SqlBaseParser.TEXT - 193)) | (1 << (SqlBaseParser.TIES - 193)) | (1 << (SqlBaseParser.TIME - 193)) | (1 << (SqlBaseParser.TIMESTAMP - 193)) | (1 << (SqlBaseParser.TO - 193)) | (1 << (SqlBaseParser.TRANSACTION - 193)) | (1 << (SqlBaseParser.TRUNCATE - 193)) | (1 << (SqlBaseParser.TRY_CAST - 193)) | (1 << (SqlBaseParser.TYPE - 193)) | (1 << (SqlBaseParser.UNBOUNDED - 193)) | (1 << (SqlBaseParser.UNCOMMITTED - 193)) | (1 << (SqlBaseParser.UNMATCHED - 193)) | (1 << (SqlBaseParser.UPDATE - 193)) | (1 << (SqlBaseParser.USE - 193)) | (1 << (SqlBaseParser.USER - 193)) | (1 << (SqlBaseParser.VALIDATE - 193)) | (1 << (SqlBaseParser.VERBOSE - 193)) | (1 << (SqlBaseParser.VERSION - 193)) | (1 << (SqlBaseParser.VIEW - 193)) | (1 << 
        (SqlBaseParser.WINDOW - 193)) | (1 << (SqlBaseParser.WITHIN - 193)) | (1 << (SqlBaseParser.WITHOUT - 193)) | (1 << (SqlBaseParser.WORK - 193)) | (1 << (SqlBaseParser.WRITE - 193)) | (1 << (SqlBaseParser.YEAR - 193)) | (1 << (SqlBaseParser.ZONE - 193)))) != 0)):
            self._errHandler.recoverInline(self)
        else:
            self._errHandler.reportMatch(self)
            self.consume()
    except RecognitionException as re:
        # Standard ANTLR recovery: record the error on the context and resync.
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
def sempred(self, localctx:RuleContext, ruleIndex:int, predIndex:int):
    """Dispatch a semantic (precedence) predicate to its rule-specific handler.

    :param localctx:  context of the rule whose predicate is being evaluated
    :param ruleIndex: index of the grammar rule owning the predicate
    :param predIndex: index of the predicate within that rule
    :raises Exception: if ``ruleIndex`` has no registered predicate handler
    """
    # Build the dispatch table lazily, once per parser instance.  (The
    # original generated code used ``== None`` and re-assigned every entry
    # on each call; ``is None`` plus a one-shot build is equivalent and
    # avoids the redundant work on this hot path.)
    if self._predicates is None:
        self._predicates = {
            17: self.queryTerm_sempred,
            29: self.relation_sempred,
            47: self.booleanExpression_sempred,
            49: self.valueExpression_sempred,
            50: self.primaryExpression_sempred,
            61: self.type__sempred,
            71: self.rowPattern_sempred,
        }
    pred = self._predicates.get(ruleIndex, None)
    if pred is None:
        raise Exception("No predicate with index:" + str(ruleIndex))
    else:
        return pred(localctx, predIndex)
def queryTerm_sempred(self, localctx:QueryTermContext, predIndex:int):
    """Precedence predicates for the queryTerm rule.

    Returns the result of ``precpred`` for known predicate indices and
    falls through (returning None) for any other index.
    """
    precedence_by_index = {0: 2, 1: 1}
    required = precedence_by_index.get(predIndex)
    if required is not None:
        return self.precpred(self._ctx, required)
def relation_sempred(self, localctx:RelationContext, predIndex:int):
    """Precedence predicate for the relation rule (only index 2 is defined)."""
    if predIndex != 2:
        return None
    return self.precpred(self._ctx, 2)
def booleanExpression_sempred(self, localctx:BooleanExpressionContext, predIndex:int):
    """Precedence predicates for the booleanExpression rule.

    Returns ``precpred`` for known indices (3 and 4); None otherwise.
    """
    precedence_by_index = {3: 2, 4: 1}
    required = precedence_by_index.get(predIndex)
    if required is not None:
        return self.precpred(self._ctx, required)
def valueExpression_sempred(self, localctx:ValueExpressionContext, predIndex:int):
    """Precedence predicates for the valueExpression rule.

    Maps predicate indices 5-8 to their required precedence levels and
    evaluates ``precpred``; any other index falls through to None.
    """
    precedence_by_index = {5: 3, 6: 2, 7: 1, 8: 5}
    required = precedence_by_index.get(predIndex)
    if required is not None:
        return self.precpred(self._ctx, required)
def primaryExpression_sempred(self, localctx:PrimaryExpressionContext, predIndex:int):
    """Precedence predicates for the primaryExpression rule.

    Index 9 requires precedence 17, index 10 requires 15; other indices
    fall through to None.
    """
    precedence_by_index = {9: 17, 10: 15}
    required = precedence_by_index.get(predIndex)
    if required is not None:
        return self.precpred(self._ctx, required)
def type__sempred(self, localctx:Type_Context, predIndex:int):
    """Precedence predicate for the type_ rule (only index 11 is defined)."""
    if predIndex != 11:
        return None
    return self.precpred(self._ctx, 2)
def rowPattern_sempred(self, localctx:RowPatternContext, predIndex:int):
    """Precedence predicates for the rowPattern rule.

    Index 12 requires precedence 2, index 13 requires 1; other indices
    fall through to None.
    """
    precedence_by_index = {12: 2, 13: 1}
    required = precedence_by_index.get(predIndex)
    if required is not None:
        return self.precpred(self._ctx, required)
| 44.783838 | 7,703 | 0.587619 |
import sys
from io import StringIO
from antlr4 import *
if sys.version_info[1] > 5:
from typing import TextIO
else:
from typing.io import TextIO
def serializedATN():
with StringIO() as buf:
buf.write("\3\u608b\ua72a\u8133\ub9ed\u417c\u3be7\u7786\u5964\3\u011c")
buf.write("\u09bd\4\2\t\2\4\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6\4\7\t\7")
buf.write("\4\b\t\b\4\t\t\t\4\n\t\n\4\13\t\13\4\f\t\f\4\r\t\r\4\16")
buf.write("\t\16\4\17\t\17\4\20\t\20\4\21\t\21\4\22\t\22\4\23\t\23")
buf.write("\4\24\t\24\4\25\t\25\4\26\t\26\4\27\t\27\4\30\t\30\4\31")
buf.write("\t\31\4\32\t\32\4\33\t\33\4\34\t\34\4\35\t\35\4\36\t\36")
buf.write("\4\37\t\37\4 \t \4!\t!\4\"\t\"\4#\t#\4$\t$\4%\t%\4&\t")
buf.write("&\4\'\t\'\4(\t(\4)\t)\4*\t*\4+\t+\4,\t,\4-\t-\4.\t.\4")
buf.write("/\t/\4\60\t\60\4\61\t\61\4\62\t\62\4\63\t\63\4\64\t\64")
buf.write("\4\65\t\65\4\66\t\66\4\67\t\67\48\t8\49\t9\4:\t:\4;\t")
buf.write(";\4<\t<\4=\t=\4>\t>\4?\t?\4@\t@\4A\tA\4B\tB\4C\tC\4D\t")
buf.write("D\4E\tE\4F\tF\4G\tG\4H\tH\4I\tI\4J\tJ\4K\tK\4L\tL\4M\t")
buf.write("M\4N\tN\4O\tO\4P\tP\4Q\tQ\4R\tR\4S\tS\4T\tT\4U\tU\4V\t")
buf.write("V\4W\tW\4X\tX\4Y\tY\4Z\tZ\4[\t[\4\\\t\\\3\2\3\2\3\2\3")
buf.write("\3\3\3\3\3\3\4\3\4\3\4\3\5\3\5\3\5\3\6\3\6\3\6\3\7\3\7")
buf.write("\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\5\7\u00d5")
buf.write("\n\7\3\7\3\7\3\7\5\7\u00da\n\7\3\7\3\7\5\7\u00de\n\7\3")
buf.write("\7\3\7\3\7\3\7\5\7\u00e4\n\7\3\7\3\7\5\7\u00e8\n\7\3\7")
buf.write("\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3")
buf.write("\7\3\7\3\7\3\7\3\7\5\7\u00fd\n\7\3\7\3\7\5\7\u0101\n\7")
buf.write("\3\7\3\7\5\7\u0105\n\7\3\7\3\7\5\7\u0109\n\7\3\7\3\7\3")
buf.write("\7\3\7\3\7\3\7\5\7\u0111\n\7\3\7\3\7\5\7\u0115\n\7\3\7")
buf.write("\5\7\u0118\n\7\3\7\3\7\3\7\3\7\3\7\5\7\u011f\n\7\3\7\3")
buf.write("\7\3\7\3\7\3\7\7\7\u0126\n\7\f\7\16\7\u0129\13\7\3\7\3")
buf.write("\7\3\7\5\7\u012e\n\7\3\7\3\7\5\7\u0132\n\7\3\7\3\7\3\7")
buf.write("\3\7\5\7\u0138\n\7\3\7\3\7\3\7\3\7\3\7\5\7\u013f\n\7\3")
buf.write("\7\3\7\3\7\3\7\3\7\3\7\3\7\5\7\u0148\n\7\3\7\3\7\3\7\3")
buf.write("\7\3\7\3\7\3\7\3\7\3\7\3\7\5\7\u0154\n\7\3\7\3\7\3\7\3")
buf.write("\7\3\7\3\7\3\7\5\7\u015d\n\7\3\7\3\7\3\7\3\7\5\7\u0163")
buf.write("\n\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\5\7\u016e\n\7")
buf.write("\3\7\3\7\3\7\3\7\3\7\3\7\5\7\u0176\n\7\3\7\3\7\3\7\3\7")
buf.write("\3\7\3\7\5\7\u017e\n\7\3\7\3\7\3\7\3\7\3\7\5\7\u0185\n")
buf.write("\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\5\7\u018f\n\7\3\7\3")
buf.write("\7\3\7\3\7\3\7\5\7\u0196\n\7\3\7\3\7\3\7\3\7\3\7\3\7\3")
buf.write("\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7")
buf.write("\3\7\3\7\3\7\3\7\3\7\7\7\u01b1\n\7\f\7\16\7\u01b4\13\7")
buf.write("\5\7\u01b6\n\7\3\7\5\7\u01b9\n\7\3\7\3\7\5\7\u01bd\n\7")
buf.write("\3\7\3\7\3\7\3\7\5\7\u01c3\n\7\3\7\3\7\3\7\5\7\u01c8\n")
buf.write("\7\3\7\3\7\3\7\3\7\3\7\5\7\u01cf\n\7\3\7\3\7\3\7\5\7\u01d4")
buf.write("\n\7\3\7\3\7\5\7\u01d8\n\7\3\7\3\7\3\7\3\7\3\7\3\7\5\7")
buf.write("\u01e0\n\7\3\7\3\7\3\7\3\7\5\7\u01e6\n\7\3\7\3\7\5\7\u01ea")
buf.write("\n\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\5")
buf.write("\7\u01f8\n\7\3\7\3\7\3\7\3\7\3\7\3\7\5\7\u0200\n\7\3\7")
buf.write("\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\5\7\u020b\n\7\3\7\3\7")
buf.write("\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3")
buf.write("\7\3\7\3\7\3\7\3\7\3\7\7\7\u0222\n\7\f\7\16\7\u0225\13")
buf.write("\7\5\7\u0227\n\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\5\7\u0231")
buf.write("\n\7\3\7\3\7\5\7\u0235\n\7\3\7\3\7\3\7\3\7\3\7\5\7\u023c")
buf.write("\n\7\3\7\3\7\3\7\3\7\3\7\3\7\7\7\u0244\n\7\f\7\16\7\u0247")
buf.write("\13\7\3\7\3\7\3\7\5\7\u024c\n\7\3\7\3\7\3\7\5\7\u0251")
buf.write("\n\7\3\7\3\7\5\7\u0255\n\7\3\7\3\7\3\7\3\7\5\7\u025b\n")
buf.write("\7\3\7\3\7\3\7\3\7\3\7\7\7\u0262\n\7\f\7\16\7\u0265\13")
buf.write("\7\3\7\3\7\3\7\5\7\u026a\n\7\3\7\3\7\5\7\u026e\n\7\3\7")
buf.write("\3\7\3\7\3\7\3\7\5\7\u0275\n\7\3\7\3\7\5\7\u0279\n\7\3")
buf.write("\7\3\7\3\7\3\7\7\7\u027f\n\7\f\7\16\7\u0282\13\7\3\7\3")
buf.write("\7\5\7\u0286\n\7\3\7\3\7\5\7\u028a\n\7\3\7\3\7\3\7\3\7")
buf.write("\3\7\3\7\5\7\u0292\n\7\3\7\3\7\3\7\3\7\7\7\u0298\n\7\f")
buf.write("\7\16\7\u029b\13\7\3\7\3\7\5\7\u029f\n\7\3\7\3\7\5\7\u02a3")
buf.write("\n\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\5\7\u02ad\n\7\3\7")
buf.write("\3\7\3\7\7\7\u02b2\n\7\f\7\16\7\u02b5\13\7\3\7\3\7\5\7")
buf.write("\u02b9\n\7\3\7\3\7\5\7\u02bd\n\7\3\7\3\7\3\7\3\7\3\7\3")
buf.write("\7\3\7\3\7\5\7\u02c7\n\7\3\7\5\7\u02ca\n\7\3\7\3\7\3\7")
buf.write("\3\7\3\7\7\7\u02d1\n\7\f\7\16\7\u02d4\13\7\3\7\3\7\5\7")
buf.write("\u02d8\n\7\3\7\3\7\3\7\3\7\5\7\u02de\n\7\3\7\3\7\3\7\3")
buf.write("\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7")
buf.write("\3\7\3\7\3\7\3\7\3\7\5\7\u02f6\n\7\3\7\3\7\3\7\3\7\5\7")
buf.write("\u02fc\n\7\5\7\u02fe\n\7\3\7\3\7\3\7\3\7\5\7\u0304\n\7")
buf.write("\3\7\3\7\3\7\3\7\5\7\u030a\n\7\5\7\u030c\n\7\3\7\3\7\3")
buf.write("\7\3\7\3\7\3\7\5\7\u0314\n\7\5\7\u0316\n\7\3\7\3\7\3\7")
buf.write("\3\7\5\7\u031c\n\7\3\7\3\7\3\7\3\7\5\7\u0322\n\7\5\7\u0324")
buf.write("\n\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3")
buf.write("\7\5\7\u0333\n\7\3\7\3\7\3\7\5\7\u0338\n\7\3\7\3\7\3\7")
buf.write("\3\7\3\7\5\7\u033f\n\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7")
buf.write("\3\7\3\7\5\7\u034b\n\7\5\7\u034d\n\7\3\7\3\7\3\7\3\7\3")
buf.write("\7\3\7\5\7\u0355\n\7\5\7\u0357\n\7\3\7\3\7\3\7\3\7\3\7")
buf.write("\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\7\7\u0367\n\7\f\7")
buf.write("\16\7\u036a\13\7\5\7\u036c\n\7\3\7\3\7\5\7\u0370\n\7\3")
buf.write("\7\3\7\5\7\u0374\n\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3")
buf.write("\7\3\7\3\7\3\7\3\7\3\7\7\7\u0384\n\7\f\7\16\7\u0387\13")
buf.write("\7\5\7\u0389\n\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3")
buf.write("\7\3\7\3\7\3\7\3\7\5\7\u0399\n\7\3\7\3\7\3\7\3\7\3\7\3")
buf.write("\7\7\7\u03a1\n\7\f\7\16\7\u03a4\13\7\3\7\3\7\5\7\u03a8")
buf.write("\n\7\3\7\3\7\3\7\3\7\5\7\u03ae\n\7\3\7\5\7\u03b1\n\7\3")
buf.write("\7\3\7\3\7\3\7\3\7\6\7\u03b8\n\7\r\7\16\7\u03b9\5\7\u03bc")
buf.write("\n\7\3\b\5\b\u03bf\n\b\3\b\3\b\3\t\3\t\5\t\u03c5\n\t\3")
buf.write("\t\3\t\3\t\7\t\u03ca\n\t\f\t\16\t\u03cd\13\t\3\n\3\n\5")
buf.write("\n\u03d1\n\n\3\13\3\13\3\13\3\13\5\13\u03d7\n\13\3\13")
buf.write("\3\13\5\13\u03db\n\13\3\13\3\13\5\13\u03df\n\13\3\f\3")
buf.write("\f\3\f\3\f\5\f\u03e5\n\f\3\r\3\r\3\r\3\r\3\16\3\16\3\16")
buf.write("\7\16\u03ee\n\16\f\16\16\16\u03f1\13\16\3\17\3\17\3\17")
buf.write("\3\17\3\20\3\20\3\20\3\20\3\20\3\20\7\20\u03fd\n\20\f")
buf.write("\20\16\20\u0400\13\20\5\20\u0402\n\20\3\20\3\20\3\20\5")
buf.write("\20\u0407\n\20\5\20\u0409\n\20\3\20\3\20\3\20\3\20\3\20")
buf.write("\5\20\u0410\n\20\3\20\3\20\3\20\3\20\5\20\u0416\n\20\5")
buf.write("\20\u0418\n\20\3\21\3\21\5\21\u041c\n\21\3\22\3\22\3\23")
buf.write("\3\23\3\23\3\23\3\23\3\23\5\23\u0426\n\23\3\23\3\23\3")
buf.write("\23\3\23\5\23\u042c\n\23\3\23\7\23\u042f\n\23\f\23\16")
buf.write("\23\u0432\13\23\3\24\3\24\3\24\3\24\3\24\3\24\3\24\7\24")
buf.write("\u043b\n\24\f\24\16\24\u043e\13\24\3\24\3\24\3\24\3\24")
buf.write("\5\24\u0444\n\24\3\25\3\25\5\25\u0448\n\25\3\25\3\25\5")
buf.write("\25\u044c\n\25\3\26\3\26\5\26\u0450\n\26\3\26\3\26\3\26")
buf.write("\7\26\u0455\n\26\f\26\16\26\u0458\13\26\3\26\3\26\3\26")
buf.write("\3\26\7\26\u045e\n\26\f\26\16\26\u0461\13\26\5\26\u0463")
buf.write("\n\26\3\26\3\26\5\26\u0467\n\26\3\26\3\26\3\26\5\26\u046c")
buf.write("\n\26\3\26\3\26\5\26\u0470\n\26\3\26\3\26\3\26\3\26\7")
buf.write("\26\u0476\n\26\f\26\16\26\u0479\13\26\5\26\u047b\n\26")
buf.write("\3\27\5\27\u047e\n\27\3\27\3\27\3\27\7\27\u0483\n\27\f")
buf.write("\27\16\27\u0486\13\27\3\30\3\30\3\30\3\30\3\30\3\30\7")
buf.write("\30\u048e\n\30\f\30\16\30\u0491\13\30\5\30\u0493\n\30")
buf.write("\3\30\3\30\3\30\3\30\3\30\3\30\7\30\u049b\n\30\f\30\16")
buf.write("\30\u049e\13\30\5\30\u04a0\n\30\3\30\3\30\3\30\3\30\3")
buf.write("\30\3\30\3\30\7\30\u04a9\n\30\f\30\16\30\u04ac\13\30\3")
buf.write("\30\3\30\5\30\u04b0\n\30\3\31\3\31\3\31\3\31\7\31\u04b6")
buf.write("\n\31\f\31\16\31\u04b9\13\31\5\31\u04bb\n\31\3\31\3\31")
buf.write("\5\31\u04bf\n\31\3\32\3\32\3\32\3\32\3\32\3\32\3\33\5")
buf.write("\33\u04c8\n\33\3\33\3\33\3\33\3\33\3\33\7\33\u04cf\n\33")
buf.write("\f\33\16\33\u04d2\13\33\5\33\u04d4\n\33\3\33\3\33\3\33")
buf.write("\3\33\3\33\7\33\u04db\n\33\f\33\16\33\u04de\13\33\5\33")
buf.write("\u04e0\n\33\3\33\5\33\u04e3\n\33\3\34\3\34\5\34\u04e7")
buf.write("\n\34\3\34\3\34\3\34\3\34\3\34\3\35\3\35\3\36\3\36\5\36")
buf.write("\u04f2\n\36\3\36\5\36\u04f5\n\36\3\36\3\36\3\36\3\36\3")
buf.write("\36\5\36\u04fc\n\36\3\36\5\36\u04ff\n\36\3\37\3\37\3\37")
buf.write("\3\37\3\37\3\37\3\37\3\37\3\37\3\37\3\37\3\37\3\37\3\37")
buf.write("\3\37\3\37\3\37\5\37\u0512\n\37\7\37\u0514\n\37\f\37\16")
buf.write("\37\u0517\13\37\3 \5 \u051a\n \3 \3 \5 \u051e\n \3 \3")
buf.write(" \5 \u0522\n \3 \3 \5 \u0526\n \5 \u0528\n \3!\3!\3!\3")
buf.write("!\3!\3!\3!\7!\u0531\n!\f!\16!\u0534\13!\3!\3!\5!\u0538")
buf.write("\n!\3\"\3\"\3\"\3\"\3\"\3\"\3\"\5\"\u0541\n\"\3 buf.write("$\3$\3$\5$\u0548\n$\3$\5$\u054b\n$\3%\3%\3%\3%\5%\u0551")
buf.write("\n%\3&\3&\3&\3&\3&\3&\3&\3&\7&\u055b\n&\f&\16&\u055e\13")
buf.write("&\5&\u0560\n&\3&\3&\3&\3&\3&\7&\u0567\n&\f&\16&\u056a")
buf.write("\13&\5&\u056c\n&\3&\3&\3&\3&\7&\u0572\n&\f&\16&\u0575")
buf.write("\13&\5&\u0577\n&\3&\5&\u057a\n&\3&\3&\3&\5&\u057f\n&\3")
buf.write("&\5&\u0582\n&\3&\3&\3&\3&\3&\3&\3&\3&\7&\u058c\n&\f&\16")
buf.write("&\u058f\13&\5&\u0591\n&\3&\3&\3&\3&\7&\u0597\n&\f&\16")
buf.write("&\u059a\13&\3&\3&\5&\u059e\n&\3&\3&\5&\u05a2\n&\5&\u05a4")
buf.write("\n&\5&\u05a6\n&\3\'\3\'\3\'\3\'\3(\3(\3(\3(\3(\3(\3(\3")
buf.write("(\3(\5(\u05b5\n(\5(\u05b7\n(\3)\3)\3)\3)\3)\3)\3)\3)\3")
buf.write(")\5)\u05c2\n)\3*\3*\3*\3*\3*\3*\3*\3*\3*\3*\3*\3*\3*\3")
buf.write("*\3*\3*\3*\3*\3*\5*\u05d7\n*\3+\3+\3+\3+\3+\3+\7+\u05df")
buf.write("\n+\f+\16+\u05e2\13+\3+\3+\3,\3,\3,\3,\3-\3-\5-\u05ec")
buf.write("\n-\3-\3-\5-\u05f0\n-\5-\u05f2\n-\3.\3.\3.\3.\7.\u05f8")
buf.write("\n.\f.\16.\u05fb\13.\3.\3.\3/\3/\5/\u0601\n/\3/\3/\3/")
buf.write("\3/\3/\3/\3/\3/\3/\7/\u060c\n/\f/\16/\u060f\13/\3/\3/")
buf.write("\3/\5/\u0614\n/\3/\3/\3/\3/\3/\3/\3/\3/\3/\5/\u061f\n")
buf.write("/\3\60\3\60\3\61\3\61\3\61\5\61\u0626\n\61\3\61\3\61\5")
buf.write("\61\u062a\n\61\3\61\3\61\3\61\3\61\3\61\3\61\7\61\u0632")
buf.write("\n\61\f\61\16\61\u0635\13\61\3\62\3\62\3\62\3\62\3\62")
buf.write("\3\62\3\62\3\62\3\62\3\62\5\62\u0641\n\62\3\62\3\62\3")
buf.write("\62\3\62\3\62\3\62\5\62\u0649\n\62\3\62\3\62\3\62\3\62")
buf.write("\3\62\7\62\u0650\n\62\f\62\16\62\u0653\13\62\3\62\3\62")
buf.write("\3\62\5\62\u0658\n\62\3\62\3\62\3\62\3\62\3\62\3\62\5")
buf.write("\62\u0660\n\62\3\62\3\62\3\62\3\62\5\62\u0666\n\62\3\62")
buf.write("\3\62\5\62\u066a\n\62\3\62\3\62\3\62\5\62\u066f\n\62\3")
buf.write("\62\3\62\3\62\5\62\u0674\n\62\3\63\3\63\3\63\3\63\5\63")
buf.write("\u067a\n\63\3\63\3\63\3\63\3\63\3\63\3\63\3\63\3\63\3")
buf.write("\63\3\63\3\63\3\63\7\63\u0688\n\63\f\63\16\63\u068b\13")
buf.write("\63\3\64\3\64\3\64\3\64\3\64\3\64\3\64\3\64\3\64\3\64")
buf.write("\3\64\3\64\3\64\3\64\3\64\3\64\3\64\3\64\3\64\3\64\3\64")
buf.write("\3\64\3\64\3\64\3\64\6\64\u06a6\n\64\r\64\16\64\u06a7")
buf.write("\3\64\3\64\3\64\3\64\3\64\3\64\3\64\7\64\u06b1\n\64\f")
buf.write("\64\16\64\u06b4\13\64\3\64\3\64\3\64\3\64\3\64\5\64\u06bb")
buf.write("\n\64\3\64\3\64\3\64\5\64\u06c0\n\64\3\64\3\64\3\64\5")
buf.write("\64\u06c5\n\64\3\64\3\64\3\64\3\64\3\64\3\64\3\64\3\64")
buf.write("\3\64\7\64\u06d0\n\64\f\64\16\64\u06d3\13\64\3\64\3\64")
buf.write("\3\64\5\64\u06d8\n\64\3\64\3\64\3\64\3\64\3\64\5\64\u06df")
buf.write("\n\64\3\64\3\64\3\64\5\64\u06e4\n\64\3\64\5\64\u06e7\n")
buf.write("\64\3\64\5\64\u06ea\n\64\3\64\3\64\3\64\5\64\u06ef\n\64")
buf.write("\3\64\3\64\3\64\7\64\u06f4\n\64\f\64\16\64\u06f7\13\64")
buf.write("\5\64\u06f9\n\64\3\64\3\64\3\64\3\64\3\64\7\64\u0700\n")
buf.write("\64\f\64\16\64\u0703\13\64\5\64\u0705\n\64\3\64\3\64\5")
buf.write("\64\u0709\n\64\3\64\5\64\u070c\n\64\3\64\5\64\u070f\n")
buf.write("\64\3\64\3\64\3\64\3\64\3\64\3\64\3\64\3\64\3\64\3\64")
buf.write("\3\64\7\64\u071c\n\64\f\64\16\64\u071f\13\64\5\64\u0721")
buf.write("\n\64\3\64\3\64\3\64\3\64\3\64\3\64\3\64\3\64\3\64\3\64")
buf.write("\3\64\3\64\3\64\3\64\3\64\6\64\u0732\n\64\r\64\16\64\u0733")
buf.write("\3\64\3\64\5\64\u0738\n\64\3\64\3\64\3\64\3\64\6\64\u073e")
buf.write("\n\64\r\64\16\64\u073f\3\64\3\64\5\64\u0744\n\64\3\64")
buf.write("\3\64\3\64\3\64\3\64\3\64\3\64\3\64\3\64\3\64\3\64\3\64")
buf.write("\3\64\3\64\3\64\3\64\3\64\3\64\3\64\3\64\3\64\7\64\u075b")
buf.write("\n\64\f\64\16\64\u075e\13\64\5\64\u0760\n\64\3\64\3\64")
buf.write("\3\64\3\64\3\64\3\64\3\64\5\64\u0769\n\64\3\64\3\64\3")
buf.write("\64\3\64\5\64\u076f\n\64\3\64\3\64\3\64\3\64\5\64\u0775")
buf.write("\n\64\3\64\3\64\3\64\3\64\5\64\u077b\n\64\3\64\3\64\3")
buf.write("\64\3\64\3\64\3\64\3\64\3\64\3\64\3\64\3\64\5\64\u0788")
buf.write("\n\64\3\64\3\64\3\64\3\64\3\64\3\64\3\64\5\64\u0791\n")
buf.write("\64\3\64\3\64\3\64\3\64\3\64\3\64\3\64\3\64\3\64\3\64")
buf.write("\3\64\3\64\3\64\3\64\3\64\3\64\3\64\3\64\7\64\u07a5\n")
buf.write("\64\f\64\16\64\u07a8\13\64\5\64\u07aa\n\64\3\64\5\64\u07ad")
buf.write("\n\64\3\64\3\64\3\64\3\64\3\64\3\64\3\64\3\64\7\64\u07b7")
buf.write("\n\64\f\64\16\64\u07ba\13\64\3\65\3\65\3\66\3\66\3\66")
buf.write("\3\66\5\66\u07c2\n\66\3\67\3\67\3\67\3\67\5\67\u07c8\n")
buf.write("\67\5\67\u07ca\n\67\38\38\38\38\38\38\58\u07d2\n8\39\3")
buf.write("9\3:\3:\3;\3;\3<\3<\5<\u07dc\n<\3<\3<\3<\3<\5<\u07e2\n")
buf.write("<\3=\3=\3>\3>\3?\3?\3?\3?\3?\3?\7?\u07ee\n?\f?\16?\u07f1")
buf.write("\13?\3?\3?\3?\3?\3?\3?\5?\u07f9\n?\3?\3?\3?\3?\3?\5?\u0800")
buf.write("\n?\3?\3?\3?\5?\u0805\n?\3?\3?\3?\3?\3?\5?\u080c\n?\3")
buf.write("?\3?\3?\3?\3?\3?\3?\3?\5?\u0816\n?\3?\3?\3?\5?\u081b\n")
buf.write("?\3?\3?\3?\3?\3?\5?\u0822\n?\3?\3?\3?\3?\3?\3?\3?\3?\3")
buf.write("?\3?\3?\3?\3?\3?\3?\3?\3?\3?\3?\3?\3?\3?\7?\u083a\n?\f")
buf.write("?\16?\u083d\13?\3?\3?\5?\u0841\n?\5?\u0843\n?\3?\3?\3")
buf.write("?\3?\3?\5?\u084a\n?\7?\u084c\n?\f?\16?\u084f\13?\3@\3")
buf.write("@\3@\3@\5@\u0855\n@\3A\3A\5A\u0859\nA\3B\3B\3B\3B\3B\3")
buf.write("C\3C\3C\3C\3C\3C\3D\3D\3D\3D\5D\u086a\nD\3D\3D\3D\3D\3")
buf.write("D\3D\3D\3D\3D\3D\3D\7D\u0877\nD\fD\16D\u087a\13D\3D\3")
buf.write("D\3D\3D\5D\u0880\nD\3D\3D\3D\3D\3D\3D\3D\5D\u0889\nD\3")
buf.write("D\3D\3D\3D\3D\3D\7D\u0891\nD\fD\16D\u0894\13D\3D\3D\5")
buf.write("D\u0898\nD\3D\3D\3D\3D\3D\7D\u089f\nD\fD\16D\u08a2\13")
buf.write("D\3D\3D\5D\u08a6\nD\3E\3E\3E\3E\3E\3E\5E\u08ae\nE\3F\3")
buf.write("F\3F\3F\7F\u08b4\nF\fF\16F\u08b7\13F\5F\u08b9\nF\3F\3")
buf.write("F\3F\3F\5F\u08bf\nF\3F\5F\u08c2\nF\3F\3F\3F\3F\3F\5F\u08c9")
buf.write("\nF\3F\3F\3F\3F\7F\u08cf\nF\fF\16F\u08d2\13F\5F\u08d4")
buf.write("\nF\3F\3F\3F\3F\7F\u08da\nF\fF\16F\u08dd\13F\5F\u08df")
buf.write("\nF\3G\3G\3G\3G\3G\3G\3G\3G\3G\3G\3G\3G\3G\3G\3G\3G\3")
buf.write("G\3G\3G\3G\3G\3G\3G\3G\5G\u08f9\nG\3H\3H\3H\3H\3H\3H\3")
buf.write("H\3H\3H\5H\u0904\nH\3I\3I\3I\5I\u0909\nI\3I\3I\3I\3I\3")
buf.write("I\7I\u0910\nI\fI\16I\u0913\13I\3J\3J\3J\3J\3J\3J\3J\3")
buf.write("J\7J\u091d\nJ\fJ\16J\u0920\13J\3J\3J\3J\3J\3J\3J\3J\3")
buf.write("J\3J\3J\3J\3J\5J\u092e\nJ\3K\3K\5K\u0932\nK\3K\3K\5K\u0936")
buf.write("\nK\3K\3K\5K\u093a\nK\3K\3K\3K\3K\5K\u0940\nK\3K\3K\5")
buf.write("K\u0944\nK\3K\3K\5K\u0948\nK\3K\3K\5K\u094c\nK\5K\u094e")
buf.write("\nK\3L\3L\3L\3L\3M\3M\3M\3M\5M\u0958\nM\3N\3N\3N\3N\3")
buf.write("N\5N\u095f\nN\3O\3O\3O\3O\3O\3O\3O\5O\u0968\nO\3P\3P\3")
buf.write("P\3P\3P\5P\u096f\nP\3Q\3Q\3Q\3Q\3Q\5Q\u0976\nQ\3R\3R\3")
buf.write("R\7R\u097b\nR\fR\16R\u097e\13R\3S\3S\3T\3T\3T\7T\u0985")
buf.write("\nT\fT\16T\u0988\13T\3U\3U\3U\3U\3U\3U\3V\3V\3W\3W\3W")
buf.write("\5W\u0995\nW\3X\3X\3X\3X\3X\5X\u099c\nX\3Y\3Y\3Y\7Y\u09a1")
buf.write("\nY\fY\16Y\u09a4\13Y\3Z\3Z\3Z\3Z\3Z\5Z\u09ab\nZ\3[\5[")
buf.write("\u09ae\n[\3[\3[\5[\u09b2\n[\3[\3[\5[\u09b6\n[\3[\5[\u09b9")
buf.write("\n[\3\\\3\\\3\\\2\t$<`df|\u0090]\2\4\6\b\n\f\16\20\22")
buf.write("\24\26\30\32\34\36 \"$&(*,.\60\62\64\668:<>@BDFHJLNPR")
buf.write("TVXZ\\^`bdfhjlnprtvxz|~\u0080\u0082\u0084\u0086\u0088")
buf.write("\u008a\u008c\u008e\u0090\u0092\u0094\u0096\u0098\u009a")
buf.write("\u009c\u009e\u00a0\u00a2\u00a4\u00a6\u00a8\u00aa\u00ac")
buf.write("\u00ae\u00b0\u00b2\u00b4\u00b6\2\37\4\2$$\u00bf\u00bf")
buf.write("\4\2??ss\4\2\u00c9\u00c9\u00da\u00da\4\2\\\\jj\4\2OOk")
buf.write("k\3\2\u00c6\u00c7\4\2XX\u008f\u008f\4\2\u010d\u010d\u0111")
buf.write("\u0111\4\2NN\u00eb\u00eb\4\2\35\35BB\4\2XXyy\4\2\26\26")
buf.write("EE\4\2 \u00d9\u00d9\4\2ll\u00cd\u00cd\3\2\u0107\u0108")
buf.write("\3\2\u0109\u010b\4\2WW\u00c8\u00c8\3\2\u0101\u0106\5\2")
buf.write("\26\26\32\32\u00d4\u00d4\4\2TT\u00e4\u00e4\7\2==gg\u008c")
buf.write("\u008d\u00cb\u00cb\u00ff\u00ff\3\2\u0090\u0093\4\2YY\u00b1")
buf.write("\u00b1\5\2bbxx\u00dd\u00dd\6\2FFtt\u0083\u0083\u00f2\u00f2")
buf.write("\4\2\u00a0\u00a0\u00fe\u00fe\7\2//@@oo\u00ce\u00ce\u00ee")
buf.write("\u00ee\4\2\u00e1\u00e1\u00f5\u00f5\66\2\23\26\30\30\32")
buf.write("\33\35 #$\',..\62\62;=??ABDDFGJJLLOORRUY[[^beegiklnnq")
buf.write("qstvvxz||~~\u0080\u0080\u0083\u008d\u008f\u0095\u0099")
buf.write("\u009d\u009f\u00a1\u00a4\u00a4\u00a6\u00b2\u00b4\u00b7")
buf.write("\u00b9\u00c0\u00c2\u00c4\u00c6\u00cd\u00cf\u00d9\u00db")
buf.write("\u00dd\u00df\u00e3\u00e5\u00e7\u00e9\u00ea\u00ec\u00ec")
buf.write("\u00ee\u00f0\u00f2\u00f2\u00f4\u00f6\u00f9\u00f9\u00fb")
buf.write("\u0100\2\u0b4c\2\u00b8\3\2\2\2\4\u00bb\3\2\2\2\6\u00be")
buf.write("\3\2\2\2\b\u00c1\3\2\2\2\n\u00c4\3\2\2\2\f\u03bb\3\2\2")
buf.write("\2\16\u03be\3\2\2\2\20\u03c2\3\2\2\2\22\u03d0\3\2\2\2")
buf.write("\24\u03d2\3\2\2\2\26\u03e0\3\2\2\2\30\u03e6\3\2\2\2\32")
buf.write("\u03ea\3\2\2\2\34\u03f2\3\2\2\2\36\u03f6\3\2\2\2 \u041b")
buf.write("\3\2\2\2\"\u041d\3\2\2\2$\u041f\3\2\2\2&\u0443\3\2\2\2")
buf.write("(\u0445\3\2\2\2*\u044d\3\2\2\2,\u047d\3\2\2\2.\u04af\3")
buf.write("\2\2\2\60\u04be\3\2\2\2\62\u04c0\3\2\2\2\64\u04c7\3\2")
buf.write("\2\2\66\u04e4\3\2\2\28\u04ed\3\2\2\2:\u04fe\3\2\2\2<\u0500")
buf.write("\3\2\2\2>\u0527\3\2\2\2@\u0537\3\2\2\2B\u0539\3\2\2\2")
buf.write("D\u0542\3\2\2\2F\u054a\3\2\2\2H\u0550\3\2\2\2J\u0552\3")
buf.write("\2\2\2L\u05a7\3\2\2\2N\u05b6\3\2\2\2P\u05c1\3\2\2\2R\u05d6")
buf.write("\3\2\2\2T\u05d8\3\2\2\2V\u05e5\3\2\2\2X\u05e9\3\2\2\2")
buf.write("Z\u05f3\3\2\2\2\\\u061e\3\2\2\2^\u0620\3\2\2\2`\u0629")
buf.write("\3\2\2\2b\u0673\3\2\2\2d\u0679\3\2\2\2f\u07ac\3\2\2\2")
buf.write("h\u07bb\3\2\2\2j\u07c1\3\2\2\2l\u07c9\3\2\2\2n\u07d1\3")
buf.write("\2\2\2p\u07d3\3\2\2\2r\u07d5\3\2\2\2t\u07d7\3\2\2\2v\u07d9")
buf.write("\3\2\2\2x\u07e3\3\2\2\2z\u07e5\3\2\2\2|\u0842\3\2\2\2")
buf.write("~\u0854\3\2\2\2\u0080\u0858\3\2\2\2\u0082\u085a\3\2\2")
buf.write("\2\u0084\u085f\3\2\2\2\u0086\u08a5\3\2\2\2\u0088\u08a7")
buf.write("\3\2\2\2\u008a\u08b8\3\2\2\2\u008c\u08f8\3\2\2\2\u008e")
buf.write("\u0903\3\2\2\2\u0090\u0905\3\2\2\2\u0092\u092d\3\2\2\2")
buf.write("\u0094\u094d\3\2\2\2\u0096\u094f\3\2\2\2\u0098\u0957\3")
buf.write("\2\2\2\u009a\u095e\3\2\2\2\u009c\u0967\3\2\2\2\u009e\u096e")
buf.write("\3\2\2\2\u00a0\u0975\3\2\2\2\u00a2\u0977\3\2\2\2\u00a4")
buf.write("\u097f\3\2\2\2\u00a6\u0981\3\2\2\2\u00a8\u0989\3\2\2\2")
buf.write("\u00aa\u098f\3\2\2\2\u00ac\u0994\3\2\2\2\u00ae\u099b\3")
buf.write("\2\2\2\u00b0\u099d\3\2\2\2\u00b2\u09aa\3\2\2\2\u00b4\u09b8")
buf.write("\3\2\2\2\u00b6\u09ba\3\2\2\2\u00b8\u00b9\5\f\7\2\u00b9")
buf.write("\u00ba\7\2\2\3\u00ba\3\3\2\2\2\u00bb\u00bc\5^\60\2\u00bc")
buf.write("\u00bd\7\2\2\3\u00bd\5\3\2\2\2\u00be\u00bf\5\u00a2R\2")
buf.write("\u00bf\u00c0\7\2\2\3\u00c0\7\3\2\2\2\u00c1\u00c2\5|?\2")
buf.write("\u00c2\u00c3\7\2\2\3\u00c3\t\3\2\2\2\u00c4\u00c5\5\u0090")
buf.write("I\2\u00c5\u00c6\7\2\2\3\u00c6\13\3\2\2\2\u00c7\u03bc\5")
buf.write("\16\b\2\u00c8\u00c9\7\u00ef\2\2\u00c9\u03bc\5\u00b2Z\2")
buf.write("\u00ca\u00cb\7\u00ef\2\2\u00cb\u00cc\5\u00b2Z\2\u00cc")
buf.write("\u00cd\7\3\2\2\u00cd\u00ce\5\u00b2Z\2\u00ce\u03bc\3\2")
buf.write("\2\2\u00cf\u00d0\7/\2\2\u00d0\u00d4\7\u00c9\2\2\u00d1")
buf.write("\u00d2\7h\2\2\u00d2\u00d3\7\u0097\2\2\u00d3\u00d5\7Q\2")
buf.write("\2\u00d4\u00d1\3\2\2\2\u00d4\u00d5\3\2\2\2\u00d5\u00d6")
buf.write("\3\2\2\2\u00d6\u00d9\5\u00a6T\2\u00d7\u00d8\7\37\2\2\u00d8")
buf.write("\u00da\5\u00aeX\2\u00d9\u00d7\3\2\2\2\u00d9\u00da\3\2")
buf.write("\2\2\u00da\u00dd\3\2\2\2\u00db\u00dc\7\u00fa\2\2\u00dc")
buf.write("\u00de\5\30\r\2\u00dd\u00db\3\2\2\2\u00dd\u00de\3\2\2")
buf.write("\2\u00de\u03bc\3\2\2\2\u00df\u00e0\7H\2\2\u00e0\u00e3")
buf.write("\7\u00c9\2\2\u00e1\u00e2\7h\2\2\u00e2\u00e4\7Q\2\2\u00e3")
buf.write("\u00e1\3\2\2\2\u00e3\u00e4\3\2\2\2\u00e4\u00e5\3\2\2\2")
buf.write("\u00e5\u00e7\5\u00a6T\2\u00e6\u00e8\t\2\2\2\u00e7\u00e6")
buf.write("\3\2\2\2\u00e7\u00e8\3\2\2\2\u00e8\u03bc\3\2\2\2\u00e9")
buf.write("\u00ea\7\27\2\2\u00ea\u00eb\7\u00c9\2\2\u00eb\u00ec\5")
buf.write("\u00a6T\2\u00ec\u00ed\7\u00ba\2\2\u00ed\u00ee\7\u00e2")
buf.write("\2\2\u00ee\u00ef\5\u00b2Z\2\u00ef\u03bc\3\2\2\2\u00f0")
buf.write("\u00f1\7\27\2\2\u00f1\u00f2\7\u00c9\2\2\u00f2\u00f3\5")
buf.write("\u00a6T\2\u00f3\u00f4\7\u00d1\2\2\u00f4\u00f5\7\37\2\2")
buf.write("\u00f5\u00f6\5\u00aeX\2\u00f6\u03bc\3\2\2\2\u00f7\u00f8")
buf.write("\7/\2\2\u00f8\u00fc\7\u00da\2\2\u00f9\u00fa\7h\2\2\u00fa")
buf.write("\u00fb\7\u0097\2\2\u00fb\u00fd\7Q\2\2\u00fc\u00f9\3\2")
buf.write("\2\2\u00fc\u00fd\3\2\2\2\u00fd\u00fe\3\2\2\2\u00fe\u0100")
buf.write("\5\u00a6T\2\u00ff\u0101\5Z.\2\u0100\u00ff\3\2\2\2\u0100")
buf.write("\u0101\3\2\2\2\u0101\u0104\3\2\2\2\u0102\u0103\7*\2\2")
buf.write("\u0103\u0105\5l\67\2\u0104\u0102\3\2\2\2\u0104\u0105\3")
buf.write("\2\2\2\u0105\u0108\3\2\2\2\u0106\u0107\7\u00fa\2\2\u0107")
buf.write("\u0109\5\30\r\2\u0108\u0106\3\2\2\2\u0108\u0109\3\2\2")
buf.write("\2\u0109\u010a\3\2\2\2\u010a\u0110\7\34\2\2\u010b\u0111")
buf.write("\5\16\b\2\u010c\u010d\7\4\2\2\u010d\u010e\5\16\b\2\u010e")
buf.write("\u010f\7\5\2\2\u010f\u0111\3\2\2\2\u0110\u010b\3\2\2\2")
buf.write("\u0110\u010c\3\2\2\2\u0111\u0117\3\2\2\2\u0112\u0114\7")
buf.write("\u00fa\2\2\u0113\u0115\7\u0094\2\2\u0114\u0113\3\2\2\2")
buf.write("\u0114\u0115\3\2\2\2\u0115\u0116\3\2\2\2\u0116\u0118\7")
buf.write(";\2\2\u0117\u0112\3\2\2\2\u0117\u0118\3\2\2\2\u0118\u03bc")
buf.write("\3\2\2\2\u0119\u011a\7/\2\2\u011a\u011e\7\u00da\2\2\u011b")
buf.write("\u011c\7h\2\2\u011c\u011d\7\u0097\2\2\u011d\u011f\7Q\2")
buf.write("\2\u011e\u011b\3\2\2\2\u011e\u011f\3\2\2\2\u011f\u0120")
buf.write("\3\2\2\2\u0120\u0121\5\u00a6T\2\u0121\u0122\7\4\2\2\u0122")
buf.write("\u0127\5\22\n\2\u0123\u0124\7\6\2\2\u0124\u0126\5\22\n")
buf.write("\2\u0125\u0123\3\2\2\2\u0126\u0129\3\2\2\2\u0127\u0125")
buf.write("\3\2\2\2\u0127\u0128\3\2\2\2\u0128\u012a\3\2\2\2\u0129")
buf.write("\u0127\3\2\2\2\u012a\u012d\7\5\2\2\u012b\u012c\7*\2\2")
buf.write("\u012c\u012e\5l\67\2\u012d\u012b\3\2\2\2\u012d\u012e\3")
buf.write("\2\2\2\u012e\u0131\3\2\2\2\u012f\u0130\7\u00fa\2\2\u0130")
buf.write("\u0132\5\30\r\2\u0131\u012f\3\2\2\2\u0131\u0132\3\2\2")
buf.write("\2\u0132\u03bc\3\2\2\2\u0133\u0134\7H\2\2\u0134\u0137")
buf.write("\7\u00da\2\2\u0135\u0136\7h\2\2\u0136\u0138\7Q\2\2\u0137")
buf.write("\u0135\3\2\2\2\u0137\u0138\3\2\2\2\u0138\u0139\3\2\2\2")
buf.write("\u0139\u03bc\5\u00a6T\2\u013a\u013b\7o\2\2\u013b\u013c")
buf.write("\7r\2\2\u013c\u013e\5\u00a6T\2\u013d\u013f\5Z.\2\u013e")
buf.write("\u013d\3\2\2\2\u013e\u013f\3\2\2\2\u013f\u0140\3\2\2\2")
buf.write("\u0140\u0141\5\16\b\2\u0141\u03bc\3\2\2\2\u0142\u0143")
buf.write("\7@\2\2\u0143\u0144\7\\\2\2\u0144\u0147\5\u00a6T\2\u0145")
buf.write("\u0146\7\u00f8\2\2\u0146\u0148\5`\61\2\u0147\u0145\3\2")
buf.write("\2\2\u0147\u0148\3\2\2\2\u0148\u03bc\3\2\2\2\u0149\u014a")
buf.write("\7\u00e5\2\2\u014a\u014b\7\u00da\2\2\u014b\u03bc\5\u00a6")
buf.write("T\2\u014c\u014d\7*\2\2\u014d\u014e\7\u009e\2\2\u014e\u014f")
buf.write("\7\u00da\2\2\u014f\u0150\5\u00a6T\2\u0150\u0153\7u\2\2")
buf.write("\u0151\u0154\5l\67\2\u0152\u0154\7\u0098\2\2\u0153\u0151")
buf.write("\3\2\2\2\u0153\u0152\3\2\2\2\u0154\u03bc\3\2\2\2\u0155")
buf.write("\u0156\7*\2\2\u0156\u0157\7\u009e\2\2\u0157\u0158\7(\2")
buf.write("\2\u0158\u0159\5\u00a6T\2\u0159\u015c\7u\2\2\u015a\u015d")
buf.write("\5l\67\2\u015b\u015d\7\u0098\2\2\u015c\u015a\3\2\2\2\u015c")
buf.write("\u015b\3\2\2\2\u015d\u03bc\3\2\2\2\u015e\u015f\7\27\2")
buf.write("\2\u015f\u0162\7\u00da\2\2\u0160\u0161\7h\2\2\u0161\u0163")
buf.write("\7Q\2\2\u0162\u0160\3\2\2\2\u0162\u0163\3\2\2\2\u0163")
buf.write("\u0164\3\2\2\2\u0164\u0165\5\u00a6T\2\u0165\u0166\7\u00ba")
buf.write("\2\2\u0166\u0167\7\u00e2\2\2\u0167\u0168\5\u00a6T\2\u0168")
buf.write("\u03bc\3\2\2\2\u0169\u016a\7\27\2\2\u016a\u016d\7\u00da")
buf.write("\2\2\u016b\u016c\7h\2\2\u016c\u016e\7Q\2\2\u016d\u016b")
buf.write("\3\2\2\2\u016d\u016e\3\2\2\2\u016e\u016f\3\2\2\2\u016f")
buf.write("\u0170\5\u00a6T\2\u0170\u0171\7\23\2\2\u0171\u0175\7(")
buf.write("\2\2\u0172\u0173\7h\2\2\u0173\u0174\7\u0097\2\2\u0174")
buf.write("\u0176\7Q\2\2\u0175\u0172\3\2\2\2\u0175\u0176\3\2\2\2")
buf.write("\u0176\u0177\3\2\2\2\u0177\u0178\5\24\13\2\u0178\u03bc")
buf.write("\3\2\2\2\u0179\u017a\7\27\2\2\u017a\u017d\7\u00da\2\2")
buf.write("\u017b\u017c\7h\2\2\u017c\u017e\7Q\2\2\u017d\u017b\3\2")
buf.write("\2\2\u017d\u017e\3\2\2\2\u017e\u017f\3\2\2\2\u017f\u0180")
buf.write("\5\u00a6T\2\u0180\u0181\7\u00ba\2\2\u0181\u0184\7(\2\2")
buf.write("\u0182\u0183\7h\2\2\u0183\u0185\7Q\2\2\u0184\u0182\3\2")
buf.write("\2\2\u0184\u0185\3\2\2\2\u0185\u0186\3\2\2\2\u0186\u0187")
buf.write("\5\u00b2Z\2\u0187\u0188\7\u00e2\2\2\u0188\u0189\5\u00b2")
buf.write("Z\2\u0189\u03bc\3\2\2\2\u018a\u018b\7\27\2\2\u018b\u018e")
buf.write("\7\u00da\2\2\u018c\u018d\7h\2\2\u018d\u018f\7Q\2\2\u018e")
buf.write("\u018c\3\2\2\2\u018e\u018f\3\2\2\2\u018f\u0190\3\2\2\2")
buf.write("\u0190\u0191\5\u00a6T\2\u0191\u0192\7H\2\2\u0192\u0195")
buf.write("\7(\2\2\u0193\u0194\7h\2\2\u0194\u0196\7Q\2\2\u0195\u0193")
buf.write("\3\2\2\2\u0195\u0196\3\2\2\2\u0196\u0197\3\2\2\2\u0197")
buf.write("\u0198\5\u00a6T\2\u0198\u03bc\3\2\2\2\u0199\u019a\7\27")
buf.write("\2\2\u019a\u019b\7\u00da\2\2\u019b\u019c\5\u00a6T\2\u019c")
buf.write("\u019d\7\u00d1\2\2\u019d\u019e\7\37\2\2\u019e\u019f\5")
buf.write("\u00aeX\2\u019f\u03bc\3\2\2\2\u01a0\u01a1\7\27\2\2\u01a1")
buf.write("\u01a2\7\u00da\2\2\u01a2\u01a3\5\u00a6T\2\u01a3\u01a4")
buf.write("\7\u00d1\2\2\u01a4\u01a5\7\u00b5\2\2\u01a5\u01a6\5\32")
buf.write("\16\2\u01a6\u03bc\3\2\2\2\u01a7\u01a8\7\27\2\2\u01a8\u01a9")
buf.write("\7\u00da\2\2\u01a9\u01aa\5\u00a6T\2\u01aa\u01ab\7P\2\2")
buf.write("\u01ab\u01b8\5\u00b2Z\2\u01ac\u01b5\7\4\2\2\u01ad\u01b2")
buf.write("\5\u009eP\2\u01ae\u01af\7\6\2\2\u01af\u01b1\5\u009eP\2")
buf.write("\u01b0\u01ae\3\2\2\2\u01b1\u01b4\3\2\2\2\u01b2\u01b0\3")
buf.write("\2\2\2\u01b2\u01b3\3\2\2\2\u01b3\u01b6\3\2\2\2\u01b4\u01b2")
buf.write("\3\2\2\2\u01b5\u01ad\3\2\2\2\u01b5\u01b6\3\2\2\2\u01b6")
buf.write("\u01b7\3\2\2\2\u01b7\u01b9\7\5\2\2\u01b8\u01ac\3\2\2\2")
buf.write("\u01b8\u01b9\3\2\2\2\u01b9\u01bc\3\2\2\2\u01ba\u01bb\7")
buf.write("\u00f8\2\2\u01bb\u01bd\5`\61\2\u01bc\u01ba\3\2\2\2\u01bc")
buf.write("\u01bd\3\2\2\2\u01bd\u03bc\3\2\2\2\u01be\u01bf\7\30\2")
buf.write("\2\u01bf\u01c2\5\u00a6T\2\u01c0\u01c1\7\u00fa\2\2\u01c1")
buf.write("\u01c3\5\30\r\2\u01c2\u01c0\3\2\2\2\u01c2\u01c3\3\2\2")
buf.write("\2\u01c3\u03bc\3\2\2\2\u01c4\u01c7\7/\2\2\u01c5\u01c6")
buf.write("\7\u00a2\2\2\u01c6\u01c8\7\u00bc\2\2\u01c7\u01c5\3\2\2")
buf.write("\2\u01c7\u01c8\3\2\2\2\u01c8\u01c9\3\2\2\2\u01c9\u01ca")
buf.write("\7\u0089\2\2\u01ca\u01ce\7\u00f6\2\2\u01cb\u01cc\7h\2")
buf.write("\2\u01cc\u01cd\7\u0097\2\2\u01cd\u01cf\7Q\2\2\u01ce\u01cb")
buf.write("\3\2\2\2\u01ce\u01cf\3\2\2\2\u01cf\u01d0\3\2\2\2\u01d0")
buf.write("\u01d3\5\u00a6T\2\u01d1\u01d2\7*\2\2\u01d2\u01d4\5l\67")
buf.write("\2\u01d3\u01d1\3\2\2\2\u01d3\u01d4\3\2\2\2\u01d4\u01d7")
buf.write("\3\2\2\2\u01d5\u01d6\7\u00fa\2\2\u01d6\u01d8\5\30\r\2")
buf.write("\u01d7\u01d5\3\2\2\2\u01d7\u01d8\3\2\2\2\u01d8\u01d9\3")
buf.write("\2\2\2\u01d9\u01da\7\34\2\2\u01da\u01db\5\16\b\2\u01db")
buf.write("\u03bc\3\2\2\2\u01dc\u01df\7/\2\2\u01dd\u01de\7\u00a2")
buf.write("\2\2\u01de\u01e0\7\u00bc\2\2\u01df\u01dd\3\2\2\2\u01df")
buf.write("\u01e0\3\2\2\2\u01e0\u01e1\3\2\2\2\u01e1\u01e2\7\u00f6")
buf.write("\2\2\u01e2\u01e5\5\u00a6T\2\u01e3\u01e4\7*\2\2\u01e4\u01e6")
buf.write("\5l\67\2\u01e5\u01e3\3\2\2\2\u01e5\u01e6\3\2\2\2\u01e6")
buf.write("\u01e9\3\2\2\2\u01e7\u01e8\7\u00cc\2\2\u01e8\u01ea\t\3")
buf.write("\2\2\u01e9\u01e7\3\2\2\2\u01e9\u01ea\3\2\2\2\u01ea\u01eb")
buf.write("\3\2\2\2\u01eb\u01ec\7\34\2\2\u01ec\u01ed\5\16\b\2\u01ed")
buf.write("\u03bc\3\2\2\2\u01ee\u01ef\7\u00b9\2\2\u01ef\u01f0\7\u0089")
buf.write("\2\2\u01f0\u01f1\7\u00f6\2\2\u01f1\u03bc\5\u00a6T\2\u01f2")
buf.write("\u01f3\7H\2\2\u01f3\u01f4\7\u0089\2\2\u01f4\u01f7\7\u00f6")
buf.write("\2\2\u01f5\u01f6\7h\2\2\u01f6\u01f8\7Q\2\2\u01f7\u01f5")
buf.write("\3\2\2\2\u01f7\u01f8\3\2\2\2\u01f8\u01f9\3\2\2\2\u01f9")
buf.write("\u03bc\5\u00a6T\2\u01fa\u01fb\7\27\2\2\u01fb\u01fc\7\u0089")
buf.write("\2\2\u01fc\u01ff\7\u00f6\2\2\u01fd\u01fe\7h\2\2\u01fe")
buf.write("\u0200\7Q\2\2\u01ff\u01fd\3\2\2\2\u01ff\u0200\3\2\2\2")
buf.write("\u0200\u0201\3\2\2\2\u0201\u0202\5\u00a6T\2\u0202\u0203")
buf.write("\7\u00ba\2\2\u0203\u0204\7\u00e2\2\2\u0204\u0205\5\u00a6")
buf.write("T\2\u0205\u03bc\3\2\2\2\u0206\u0207\7H\2\2\u0207\u020a")
buf.write("\7\u00f6\2\2\u0208\u0209\7h\2\2\u0209\u020b\7Q\2\2\u020a")
buf.write("\u0208\3\2\2\2\u020a\u020b\3\2\2\2\u020b\u020c\3\2\2\2")
buf.write("\u020c\u03bc\5\u00a6T\2\u020d\u020e\7\27\2\2\u020e\u020f")
buf.write("\7\u00f6\2\2\u020f\u0210\5\u00a6T\2\u0210\u0211\7\u00ba")
buf.write("\2\2\u0211\u0212\7\u00e2\2\2\u0212\u0213\5\u00a6T\2\u0213")
buf.write("\u03bc\3\2\2\2\u0214\u0215\7\27\2\2\u0215\u0216\7\u00f6")
buf.write("\2\2\u0216\u0217\5\u00a6T\2\u0217\u0218\7\u00d1\2\2\u0218")
buf.write("\u0219\7\37\2\2\u0219\u021a\5\u00aeX\2\u021a\u03bc\3\2")
buf.write("\2\2\u021b\u021c\7#\2\2\u021c\u021d\5\u00a6T\2\u021d\u0226")
buf.write("\7\4\2\2\u021e\u0223\5\u009eP\2\u021f\u0220\7\6\2\2\u0220")
buf.write("\u0222\5\u009eP\2\u0221\u021f\3\2\2\2\u0222\u0225\3\2")
buf.write("\2\2\u0223\u0221\3\2\2\2\u0223\u0224\3\2\2\2\u0224\u0227")
buf.write("\3\2\2\2\u0225\u0223\3\2\2\2\u0226\u021e\3\2\2\2\u0226")
buf.write("\u0227\3\2\2\2\u0227\u0228\3\2\2\2\u0228\u0229\7\5\2\2")
buf.write("\u0229\u03bc\3\2\2\2\u022a\u022b\7/\2\2\u022b\u022c\7")
buf.write("\u00c2\2\2\u022c\u0230\5\u00b2Z\2\u022d\u022e\7\u00fa")
buf.write("\2\2\u022e\u022f\7\24\2\2\u022f\u0231\5\u00acW\2\u0230")
buf.write("\u022d\3\2\2\2\u0230\u0231\3\2\2\2\u0231\u0234\3\2\2\2")
buf.write("\u0232\u0233\7j\2\2\u0233\u0235\5\u00b2Z\2\u0234\u0232")
buf.write("\3\2\2\2\u0234\u0235\3\2\2\2\u0235\u03bc\3\2\2\2\u0236")
buf.write("\u0237\7H\2\2\u0237\u0238\7\u00c2\2\2\u0238\u023b\5\u00b2")
buf.write("Z\2\u0239\u023a\7j\2\2\u023a\u023c\5\u00b2Z\2\u023b\u0239")
buf.write("\3\2\2\2\u023b\u023c\3\2\2\2\u023c\u03bc\3\2\2\2\u023d")
buf.write("\u023e\7_\2\2\u023e\u023f\5\u00b0Y\2\u023f\u0240\7\u00e2")
buf.write("\2\2\u0240\u0245\5\u00aeX\2\u0241\u0242\7\6\2\2\u0242")
buf.write("\u0244\5\u00aeX\2\u0243\u0241\3\2\2\2\u0244\u0247\3\2")
buf.write("\2\2\u0245\u0243\3\2\2\2\u0245\u0246\3\2\2\2\u0246\u024b")
buf.write("\3\2\2\2\u0247\u0245\3\2\2\2\u0248\u0249\7\u00fa\2\2\u0249")
buf.write("\u024a\7\24\2\2\u024a\u024c\7\u00a1\2\2\u024b\u0248\3")
buf.write("\2\2\2\u024b\u024c\3\2\2\2\u024c\u0250\3\2\2\2\u024d\u024e")
buf.write("\7`\2\2\u024e\u024f\7\"\2\2\u024f\u0251\5\u00acW\2\u0250")
buf.write("\u024d\3\2\2\2\u0250\u0251\3\2\2\2\u0251\u0254\3\2\2\2")
buf.write("\u0252\u0253\7j\2\2\u0253\u0255\5\u00b2Z\2\u0254\u0252")
buf.write("\3\2\2\2\u0254\u0255\3\2\2\2\u0255\u03bc\3\2\2\2\u0256")
buf.write("\u025a\7\u00c0\2\2\u0257\u0258\7\24\2\2\u0258\u0259\7")
buf.write("\u00a1\2\2\u0259\u025b\7Z\2\2\u025a\u0257\3\2\2\2\u025a")
buf.write("\u025b\3\2\2\2\u025b\u025c\3\2\2\2\u025c\u025d\5\u00b0")
buf.write("Y\2\u025d\u025e\7\\\2\2\u025e\u0263\5\u00aeX\2\u025f\u0260")
buf.write("\7\6\2\2\u0260\u0262\5\u00aeX\2\u0261\u025f\3\2\2\2\u0262")
buf.write("\u0265\3\2\2\2\u0263\u0261\3\2\2\2\u0263\u0264\3\2\2\2")
buf.write("\u0264\u0269\3\2\2\2\u0265\u0263\3\2\2\2\u0266\u0267\7")
buf.write("`\2\2\u0267\u0268\7\"\2\2\u0268\u026a\5\u00acW\2\u0269")
buf.write("\u0266\3\2\2\2\u0269\u026a\3\2\2\2\u026a\u026d\3\2\2\2")
buf.write("\u026b\u026c\7j\2\2\u026c\u026e\5\u00b2Z\2\u026d\u026b")
buf.write("\3\2\2\2\u026d\u026e\3\2\2\2\u026e\u03bc\3\2\2\2\u026f")
buf.write("\u0270\7\u00d1\2\2\u0270\u0274\7\u00c2\2\2\u0271\u0275")
buf.write("\7\26\2\2\u0272\u0275\7\u0095\2\2\u0273\u0275\5\u00b2")
buf.write("Z\2\u0274\u0271\3\2\2\2\u0274\u0272\3\2\2\2\u0274\u0273")
buf.write("\3\2\2\2\u0275\u0278\3\2\2\2\u0276\u0277\7j\2\2\u0277")
buf.write("\u0279\5\u00b2Z\2\u0278\u0276\3\2\2\2\u0278\u0279\3\2")
buf.write("\2\2\u0279\u03bc\3\2\2\2\u027a\u0285\7_\2\2\u027b\u0280")
buf.write("\5\u00a4S\2\u027c\u027d\7\6\2\2\u027d\u027f\5\u00a4S\2")
buf.write("\u027e\u027c\3\2\2\2\u027f\u0282\3\2\2\2\u0280\u027e\3")
buf.write("\2\2\2\u0280\u0281\3\2\2\2\u0281\u0286\3\2\2\2\u0282\u0280")
buf.write("\3\2\2\2\u0283\u0284\7\26\2\2\u0284\u0286\7\u00b4\2\2")
buf.write("\u0285\u027b\3\2\2\2\u0285\u0283\3\2\2\2\u0286\u0287\3")
buf.write("\2\2\2\u0287\u0289\7\u009e\2\2\u0288\u028a\t\4\2\2\u0289")
buf.write("\u0288\3\2\2\2\u0289\u028a\3\2\2\2\u028a\u028b\3\2\2\2")
buf.write("\u028b\u028c\5\u00a6T\2\u028c\u028d\7\u00e2\2\2\u028d")
buf.write("\u0291\5\u00aeX\2\u028e\u028f\7\u00fa\2\2\u028f\u0290")
buf.write("\7_\2\2\u0290\u0292\7\u00a1\2\2\u0291\u028e\3\2\2\2\u0291")
buf.write("\u0292\3\2\2\2\u0292\u03bc\3\2\2\2\u0293\u029e\7A\2\2")
buf.write("\u0294\u0299\5\u00a4S\2\u0295\u0296\7\6\2\2\u0296\u0298")
buf.write("\5\u00a4S\2\u0297\u0295\3\2\2\2\u0298\u029b\3\2\2\2\u0299")
buf.write("\u0297\3\2\2\2\u0299\u029a\3\2\2\2\u029a\u029f\3\2\2\2")
buf.write("\u029b\u0299\3\2\2\2\u029c\u029d\7\26\2\2\u029d\u029f")
buf.write("\7\u00b4\2\2\u029e\u0294\3\2\2\2\u029e\u029c\3\2\2\2\u029f")
buf.write("\u02a0\3\2\2\2\u02a0\u02a2\7\u009e\2\2\u02a1\u02a3\t\4")
buf.write("\2\2\u02a2\u02a1\3\2\2\2\u02a2\u02a3\3\2\2\2\u02a3\u02a4")
buf.write("\3\2\2\2\u02a4\u02a5\5\u00a6T\2\u02a5\u02a6\7\u00e2\2")
buf.write("\2\u02a6\u02a7\5\u00aeX\2\u02a7\u03bc\3\2\2\2\u02a8\u02ac")
buf.write("\7\u00c0\2\2\u02a9\u02aa\7_\2\2\u02aa\u02ab\7\u00a1\2")
buf.write("\2\u02ab\u02ad\7Z\2\2\u02ac\u02a9\3\2\2\2\u02ac\u02ad")
buf.write("\3\2\2\2\u02ad\u02b8\3\2\2\2\u02ae\u02b3\5\u00a4S\2\u02af")
buf.write("\u02b0\7\6\2\2\u02b0\u02b2\5\u00a4S\2\u02b1\u02af\3\2")
buf.write("\2\2\u02b2\u02b5\3\2\2\2\u02b3\u02b1\3\2\2\2\u02b3\u02b4")
buf.write("\3\2\2\2\u02b4\u02b9\3\2\2\2\u02b5\u02b3\3\2\2\2\u02b6")
buf.write("\u02b7\7\26\2\2\u02b7\u02b9\7\u00b4\2\2\u02b8\u02ae\3")
buf.write("\2\2\2\u02b8\u02b6\3\2\2\2\u02b9\u02ba\3\2\2\2\u02ba\u02bc")
buf.write("\7\u009e\2\2\u02bb\u02bd\t\4\2\2\u02bc\u02bb\3\2\2\2\u02bc")
buf.write("\u02bd\3\2\2\2\u02bd\u02be\3\2\2\2\u02be\u02bf\5\u00a6")
buf.write("T\2\u02bf\u02c0\7\\\2\2\u02c0\u02c1\5\u00aeX\2\u02c1\u03bc")
buf.write("\3\2\2\2\u02c2\u02c3\7\u00d3\2\2\u02c3\u02c9\7a\2\2\u02c4")
buf.write("\u02c6\7\u009e\2\2\u02c5\u02c7\7\u00da\2\2\u02c6\u02c5")
buf.write("\3\2\2\2\u02c6\u02c7\3\2\2\2\u02c7\u02c8\3\2\2\2\u02c8")
buf.write("\u02ca\5\u00a6T\2\u02c9\u02c4\3\2\2\2\u02c9\u02ca\3\2")
buf.write("\2\2\u02ca\u03bc\3\2\2\2\u02cb\u02d7\7R\2\2\u02cc\u02cd")
buf.write("\7\4\2\2\u02cd\u02d2\5\u0098M\2\u02ce\u02cf\7\6\2\2\u02cf")
buf.write("\u02d1\5\u0098M\2\u02d0\u02ce\3\2\2\2\u02d1\u02d4\3\2")
buf.write("\2\2\u02d2\u02d0\3\2\2\2\u02d2\u02d3\3\2\2\2\u02d3\u02d5")
buf.write("\3\2\2\2\u02d4\u02d2\3\2\2\2\u02d5\u02d6\7\5\2\2\u02d6")
buf.write("\u02d8\3\2\2\2\u02d7\u02cc\3\2\2\2\u02d7\u02d8\3\2\2\2")
buf.write("\u02d8\u02d9\3\2\2\2\u02d9\u03bc\5\f\7\2\u02da\u02db\7")
buf.write("R\2\2\u02db\u02dd\7\30\2\2\u02dc\u02de\7\u00f4\2\2\u02dd")
buf.write("\u02dc\3\2\2\2\u02dd\u02de\3\2\2\2\u02de\u02df\3\2\2\2")
buf.write("\u02df\u03bc\5\f\7\2\u02e0\u02e1\7\u00d3\2\2\u02e1\u02e2")
buf.write("\7/\2\2\u02e2\u02e3\7\u00da\2\2\u02e3\u03bc\5\u00a6T\2")
buf.write("\u02e4\u02e5\7\u00d3\2\2\u02e5\u02e6\7/\2\2\u02e6\u02e7")
buf.write("\7\u00c9\2\2\u02e7\u03bc\5\u00a6T\2\u02e8\u02e9\7\u00d3")
buf.write("\2\2\u02e9\u02ea\7/\2\2\u02ea\u02eb\7\u00f6\2\2\u02eb")
buf.write("\u03bc\5\u00a6T\2\u02ec\u02ed\7\u00d3\2\2\u02ed\u02ee")
buf.write("\7/\2\2\u02ee\u02ef\7\u0089\2\2\u02ef\u02f0\7\u00f6\2")
buf.write("\2\u02f0\u03bc\5\u00a6T\2\u02f1\u02f2\7\u00d3\2\2\u02f2")
buf.write("\u02f5\7\u00db\2\2\u02f3\u02f4\t\5\2\2\u02f4\u02f6\5\u00a6")
buf.write("T\2\u02f5\u02f3\3\2\2\2\u02f5\u02f6\3\2\2\2\u02f6\u02fd")
buf.write("\3\2\2\2\u02f7\u02f8\7}\2\2\u02f8\u02fb\5l\67\2\u02f9")
buf.write("\u02fa\7M\2\2\u02fa\u02fc\5l\67\2\u02fb\u02f9\3\2\2\2")
buf.write("\u02fb\u02fc\3\2\2\2\u02fc\u02fe\3\2\2\2\u02fd\u02f7\3")
buf.write("\2\2\2\u02fd\u02fe\3\2\2\2\u02fe\u03bc\3\2\2\2\u02ff\u0300")
buf.write("\7\u00d3\2\2\u0300\u0303\7\u00ca\2\2\u0301\u0302\t\5\2")
buf.write("\2\u0302\u0304\5\u00b2Z\2\u0303\u0301\3\2\2\2\u0303\u0304")
buf.write("\3\2\2\2\u0304\u030b\3\2\2\2\u0305\u0306\7}\2\2\u0306")
buf.write("\u0309\5l\67\2\u0307\u0308\7M\2\2\u0308\u030a\5l\67\2")
buf.write("\u0309\u0307\3\2\2\2\u0309\u030a\3\2\2\2\u030a\u030c\3")
buf.write("\2\2\2\u030b\u0305\3\2\2\2\u030b\u030c\3\2\2\2\u030c\u03bc")
buf.write("\3\2\2\2\u030d\u030e\7\u00d3\2\2\u030e\u0315\7\'\2\2\u030f")
buf.write("\u0310\7}\2\2\u0310\u0313\5l\67\2\u0311\u0312\7M\2\2\u0312")
buf.write("\u0314\5l\67\2\u0313\u0311\3\2\2\2\u0313\u0314\3\2\2\2")
buf.write("\u0314\u0316\3\2\2\2\u0315\u030f\3\2\2\2\u0315\u0316\3")
buf.write("\2\2\2\u0316\u03bc\3\2\2\2\u0317\u0318\7\u00d3\2\2\u0318")
buf.write("\u0319\7)\2\2\u0319\u031b\t\5\2\2\u031a\u031c\5\u00a6")
buf.write("T\2\u031b\u031a\3\2\2\2\u031b\u031c\3\2\2\2\u031c\u0323")
buf.write("\3\2\2\2\u031d\u031e\7}\2\2\u031e\u0321\5l\67\2\u031f")
buf.write("\u0320\7M\2\2\u0320\u0322\5l\67\2\u0321\u031f\3\2\2\2")
buf.write("\u0321\u0322\3\2\2\2\u0322\u0324\3\2\2\2\u0323\u031d\3")
buf.write("\2\2\2\u0323\u0324\3\2\2\2\u0324\u03bc\3\2\2\2\u0325\u0326")
buf.write("\7\u00d3\2\2\u0326\u0327\7\u00d6\2\2\u0327\u0328\7Z\2")
buf.write("\2\u0328\u03bc\5\u00a6T\2\u0329\u032a\7\u00d3\2\2\u032a")
buf.write("\u032b\7\u00d6\2\2\u032b\u032c\7Z\2\2\u032c\u032d\7\4")
buf.write("\2\2\u032d\u032e\5\16\b\2\u032e\u032f\7\5\2\2\u032f\u03bc")
buf.write("\3\2\2\2\u0330\u0332\7\u00d3\2\2\u0331\u0333\7\62\2\2")
buf.write("\u0332\u0331\3\2\2\2\u0332\u0333\3\2\2\2\u0333\u0334\3")
buf.write("\2\2\2\u0334\u0337\7\u00c3\2\2\u0335\u0336\t\5\2\2\u0336")
buf.write("\u0338\5\u00b2Z\2\u0337\u0335\3\2\2\2\u0337\u0338\3\2")
buf.write("\2\2\u0338\u03bc\3\2\2\2\u0339\u033a\7\u00d3\2\2\u033a")
buf.write("\u033b\7\u00c2\2\2\u033b\u033e\7a\2\2\u033c\u033d\t\5")
buf.write("\2\2\u033d\u033f\5\u00b2Z\2\u033e\u033c\3\2\2\2\u033e")
buf.write("\u033f\3\2\2\2\u033f\u03bc\3\2\2\2\u0340\u0341\7C\2\2")
buf.write("\u0341\u03bc\5\u00a6T\2\u0342\u0343\7B\2\2\u0343\u03bc")
buf.write("\5\u00a6T\2\u0344\u0345\7\u00d3\2\2\u0345\u034c\7^\2\2")
buf.write("\u0346\u0347\7}\2\2\u0347\u034a\5l\67\2\u0348\u0349\7")
buf.write("M\2\2\u0349\u034b\5l\67\2\u034a\u0348\3\2\2\2\u034a\u034b")
buf.write("\3\2\2\2\u034b\u034d\3\2\2\2\u034c\u0346\3\2\2\2\u034c")
buf.write("\u034d\3\2\2\2\u034d\u03bc\3\2\2\2\u034e\u034f\7\u00d3")
buf.write("\2\2\u034f\u0356\7\u00d0\2\2\u0350\u0351\7}\2\2\u0351")
buf.write("\u0354\5l\67\2\u0352\u0353\7M\2\2\u0353\u0355\5l\67\2")
buf.write("\u0354\u0352\3\2\2\2\u0354\u0355\3\2\2\2\u0355\u0357\3")
buf.write("\2\2\2\u0356\u0350\3\2\2\2\u0356\u0357\3\2\2\2\u0357\u03bc")
buf.write("\3\2\2\2\u0358\u0359\7\u00d1\2\2\u0359\u035a\7\u00d0\2")
buf.write("\2\u035a\u035b\5\u00a6T\2\u035b\u035c\7\u0101\2\2\u035c")
buf.write("\u035d\5^\60\2\u035d\u03bc\3\2\2\2\u035e\u035f\7\u00bd")
buf.write("\2\2\u035f\u0360\7\u00d0\2\2\u0360\u03bc\5\u00a6T\2\u0361")
buf.write("\u0362\7\u00d5\2\2\u0362\u036b\7\u00e3\2\2\u0363\u0368")
buf.write("\5\u009aN\2\u0364\u0365\7\6\2\2\u0365\u0367\5\u009aN\2")
buf.write("\u0366\u0364\3\2\2\2\u0367\u036a\3\2\2\2\u0368\u0366\3")
buf.write("\2\2\2\u0368\u0369\3\2\2\2\u0369\u036c\3\2\2\2\u036a\u0368")
buf.write("\3\2\2\2\u036b\u0363\3\2\2\2\u036b\u036c\3\2\2\2\u036c")
buf.write("\u03bc\3\2\2\2\u036d\u036f\7+\2\2\u036e\u0370\7\u00fd")
buf.write("\2\2\u036f\u036e\3\2\2\2\u036f\u0370\3\2\2\2\u0370\u03bc")
buf.write("\3\2\2\2\u0371\u0373\7\u00c4\2\2\u0372\u0374\7\u00fd\2")
buf.write("\2\u0373\u0372\3\2\2\2\u0373\u0374\3\2\2\2\u0374\u03bc")
buf.write("\3\2\2\2\u0375\u0376\7\u00b3\2\2\u0376\u0377\5\u00b2Z")
buf.write("\2\u0377\u0378\7\\\2\2\u0378\u0379\5\f\7\2\u0379\u03bc")
buf.write("\3\2\2\2\u037a\u037b\7>\2\2\u037b\u037c\7\u00b3\2\2\u037c")
buf.write("\u03bc\5\u00b2Z\2\u037d\u037e\7P\2\2\u037e\u0388\5\u00b2")
buf.write("Z\2\u037f\u0380\7\u00f1\2\2\u0380\u0385\5^\60\2\u0381")
buf.write("\u0382\7\6\2\2\u0382\u0384\5^\60\2\u0383\u0381\3\2\2\2")
buf.write("\u0384\u0387\3\2\2\2\u0385\u0383\3\2\2\2\u0385\u0386\3")
buf.write("\2\2\2\u0386\u0389\3\2\2\2\u0387\u0385\3\2\2\2\u0388\u037f")
buf.write("\3\2\2\2\u0388\u0389\3\2\2\2\u0389\u03bc\3\2\2\2\u038a")
buf.write("\u038b\7C\2\2\u038b\u038c\7n\2\2\u038c\u03bc\5\u00b2Z")
buf.write("\2\u038d\u038e\7C\2\2\u038e\u038f\7\u00a6\2\2\u038f\u03bc")
buf.write("\5\u00b2Z\2\u0390\u0391\7\u00d1\2\2\u0391\u0392\7\u00ac")
buf.write("\2\2\u0392\u03bc\5\u00a2R\2\u0393\u0394\7\u00d1\2\2\u0394")
buf.write("\u0395\7\u00e0\2\2\u0395\u0398\7\u0100\2\2\u0396\u0399")
buf.write("\7\u0080\2\2\u0397\u0399\5^\60\2\u0398\u0396\3\2\2\2\u0398")
buf.write("\u0397\3\2\2\2\u0399\u03bc\3\2\2\2\u039a\u039b\7\u00ee")
buf.write("\2\2\u039b\u039c\5\u00a6T\2\u039c\u039d\7\u00d1\2\2\u039d")
buf.write("\u03a2\5\u0096L\2\u039e\u039f\7\6\2\2\u039f\u03a1\5\u0096")
buf.write("L\2\u03a0\u039e\3\2\2\2\u03a1\u03a4\3\2\2\2\u03a2\u03a0")
buf.write("\3\2\2\2\u03a2\u03a3\3\2\2\2\u03a3\u03a7\3\2\2\2\u03a4")
buf.write("\u03a2\3\2\2\2\u03a5\u03a6\7\u00f8\2\2\u03a6\u03a8\5`")
buf.write("\61\2\u03a7\u03a5\3\2\2\2\u03a7\u03a8\3\2\2\2\u03a8\u03bc")
buf.write("\3\2\2\2\u03a9\u03aa\7\u008b\2\2\u03aa\u03ab\7r\2\2\u03ab")
buf.write("\u03b0\5\u00a6T\2\u03ac\u03ae\7\34\2\2\u03ad\u03ac\3\2")
buf.write("\2\2\u03ad\u03ae\3\2\2\2\u03ae\u03af\3\2\2\2\u03af\u03b1")
buf.write("\5\u00b2Z\2\u03b0\u03ad\3\2\2\2\u03b0\u03b1\3\2\2\2\u03b1")
buf.write("\u03b2\3\2\2\2\u03b2\u03b3\7\u00f1\2\2\u03b3\u03b4\5<")
buf.write("\37\2\u03b4\u03b5\7\u009e\2\2\u03b5\u03b7\5^\60\2\u03b6")
buf.write("\u03b8\5\u0086D\2\u03b7\u03b6\3\2\2\2\u03b8\u03b9\3\2")
buf.write("\2\2\u03b9\u03b7\3\2\2\2\u03b9\u03ba\3\2\2\2\u03ba\u03bc")
buf.write("\3\2\2\2\u03bb\u00c7\3\2\2\2\u03bb\u00c8\3\2\2\2\u03bb")
buf.write("\u00ca\3\2\2\2\u03bb\u00cf\3\2\2\2\u03bb\u00df\3\2\2\2")
buf.write("\u03bb\u00e9\3\2\2\2\u03bb\u00f0\3\2\2\2\u03bb\u00f7\3")
buf.write("\2\2\2\u03bb\u0119\3\2\2\2\u03bb\u0133\3\2\2\2\u03bb\u013a")
buf.write("\3\2\2\2\u03bb\u0142\3\2\2\2\u03bb\u0149\3\2\2\2\u03bb")
buf.write("\u014c\3\2\2\2\u03bb\u0155\3\2\2\2\u03bb\u015e\3\2\2\2")
buf.write("\u03bb\u0169\3\2\2\2\u03bb\u0179\3\2\2\2\u03bb\u018a\3")
buf.write("\2\2\2\u03bb\u0199\3\2\2\2\u03bb\u01a0\3\2\2\2\u03bb\u01a7")
buf.write("\3\2\2\2\u03bb\u01be\3\2\2\2\u03bb\u01c4\3\2\2\2\u03bb")
buf.write("\u01dc\3\2\2\2\u03bb\u01ee\3\2\2\2\u03bb\u01f2\3\2\2\2")
buf.write("\u03bb\u01fa\3\2\2\2\u03bb\u0206\3\2\2\2\u03bb\u020d\3")
buf.write("\2\2\2\u03bb\u0214\3\2\2\2\u03bb\u021b\3\2\2\2\u03bb\u022a")
buf.write("\3\2\2\2\u03bb\u0236\3\2\2\2\u03bb\u023d\3\2\2\2\u03bb")
buf.write("\u0256\3\2\2\2\u03bb\u026f\3\2\2\2\u03bb\u027a\3\2\2\2")
buf.write("\u03bb\u0293\3\2\2\2\u03bb\u02a8\3\2\2\2\u03bb\u02c2\3")
buf.write("\2\2\2\u03bb\u02cb\3\2\2\2\u03bb\u02da\3\2\2\2\u03bb\u02e0")
buf.write("\3\2\2\2\u03bb\u02e4\3\2\2\2\u03bb\u02e8\3\2\2\2\u03bb")
buf.write("\u02ec\3\2\2\2\u03bb\u02f1\3\2\2\2\u03bb\u02ff\3\2\2\2")
buf.write("\u03bb\u030d\3\2\2\2\u03bb\u0317\3\2\2\2\u03bb\u0325\3")
buf.write("\2\2\2\u03bb\u0329\3\2\2\2\u03bb\u0330\3\2\2\2\u03bb\u0339")
buf.write("\3\2\2\2\u03bb\u0340\3\2\2\2\u03bb\u0342\3\2\2\2\u03bb")
buf.write("\u0344\3\2\2\2\u03bb\u034e\3\2\2\2\u03bb\u0358\3\2\2\2")
buf.write("\u03bb\u035e\3\2\2\2\u03bb\u0361\3\2\2\2\u03bb\u036d\3")
buf.write("\2\2\2\u03bb\u0371\3\2\2\2\u03bb\u0375\3\2\2\2\u03bb\u037a")
buf.write("\3\2\2\2\u03bb\u037d\3\2\2\2\u03bb\u038a\3\2\2\2\u03bb")
buf.write("\u038d\3\2\2\2\u03bb\u0390\3\2\2\2\u03bb\u0393\3\2\2\2")
buf.write("\u03bb\u039a\3\2\2\2\u03bb\u03a9\3\2\2\2\u03bc\r\3\2\2")
buf.write("\2\u03bd\u03bf\5\20\t\2\u03be\u03bd\3\2\2\2\u03be\u03bf")
buf.write("\3\2\2\2\u03bf\u03c0\3\2\2\2\u03c0\u03c1\5\36\20\2\u03c1")
buf.write("\17\3\2\2\2\u03c2\u03c4\7\u00fa\2\2\u03c3\u03c5\7\u00b8")
buf.write("\2\2\u03c4\u03c3\3\2\2\2\u03c4\u03c5\3\2\2\2\u03c5\u03c6")
buf.write("\3\2\2\2\u03c6\u03cb\5\66\34\2\u03c7\u03c8\7\6\2\2\u03c8")
buf.write("\u03ca\5\66\34\2\u03c9\u03c7\3\2\2\2\u03ca\u03cd\3\2\2")
buf.write("\2\u03cb\u03c9\3\2\2\2\u03cb\u03cc\3\2\2\2\u03cc\21\3")
buf.write("\2\2\2\u03cd\u03cb\3\2\2\2\u03ce\u03d1\5\24\13\2\u03cf")
buf.write("\u03d1\5\26\f\2\u03d0\u03ce\3\2\2\2\u03d0\u03cf\3\2\2")
buf.write("\2\u03d1\23\3\2\2\2\u03d2\u03d3\5\u00b2Z\2\u03d3\u03d6")
buf.write("\5|?\2\u03d4\u03d5\7\u0097\2\2\u03d5\u03d7\7\u0098\2\2")
buf.write("\u03d6\u03d4\3\2\2\2\u03d6\u03d7\3\2\2\2\u03d7\u03da\3")
buf.write("\2\2\2\u03d8\u03d9\7*\2\2\u03d9\u03db\5l\67\2\u03da\u03d8")
buf.write("\3\2\2\2\u03da\u03db\3\2\2\2\u03db\u03de\3\2\2\2\u03dc")
buf.write("\u03dd\7\u00fa\2\2\u03dd\u03df\5\30\r\2\u03de\u03dc\3")
buf.write("\2\2\2\u03de\u03df\3\2\2\2\u03df\25\3\2\2\2\u03e0\u03e1")
buf.write("\7}\2\2\u03e1\u03e4\5\u00a6T\2\u03e2\u03e3\t\6\2\2\u03e3")
buf.write("\u03e5\7\u00b5\2\2\u03e4\u03e2\3\2\2\2\u03e4\u03e5\3\2")
buf.write("\2\2\u03e5\27\3\2\2\2\u03e6\u03e7\7\4\2\2\u03e7\u03e8")
buf.write("\5\32\16\2\u03e8\u03e9\7\5\2\2\u03e9\31\3\2\2\2\u03ea")
buf.write("\u03ef\5\34\17\2\u03eb\u03ec\7\6\2\2\u03ec\u03ee\5\34")
buf.write("\17\2\u03ed\u03eb\3\2\2\2\u03ee\u03f1\3\2\2\2\u03ef\u03ed")
buf.write("\3\2\2\2\u03ef\u03f0\3\2\2\2\u03f0\33\3\2\2\2\u03f1\u03ef")
buf.write("\3\2\2\2\u03f2\u03f3\5\u00b2Z\2\u03f3\u03f4\7\u0101\2")
buf.write("\2\u03f4\u03f5\5^\60\2\u03f5\35\3\2\2\2\u03f6\u0401\5")
buf.write("$\23\2\u03f7\u03f8\7\u00a3\2\2\u03f8\u03f9\7\"\2\2\u03f9")
buf.write("\u03fe\5(\25\2\u03fa\u03fb\7\6\2\2\u03fb\u03fd\5(\25\2")
buf.write("\u03fc\u03fa\3\2\2\2\u03fd\u0400\3\2\2\2\u03fe\u03fc\3")
buf.write("\2\2\2\u03fe\u03ff\3\2\2\2\u03ff\u0402\3\2\2\2\u0400\u03fe")
buf.write("\3\2\2\2\u0401\u03f7\3\2\2\2\u0401\u0402\3\2\2\2\u0402")
buf.write("\u0408\3\2\2\2\u0403\u0404\7\u009b\2\2\u0404\u0406\5\"")
buf.write("\22\2\u0405\u0407\t\7\2\2\u0406\u0405\3\2\2\2\u0406\u0407")
buf.write("\3\2\2\2\u0407\u0409\3\2\2\2\u0408\u0403\3\2\2\2\u0408")
buf.write("\u0409\3\2\2\2\u0409\u0417\3\2\2\2\u040a\u040b\7~\2\2")
buf.write("\u040b\u0418\5 \21\2\u040c\u040d\7U\2\2\u040d\u040f\t")
buf.write("\b\2\2\u040e\u0410\5\"\22\2\u040f\u040e\3\2\2\2\u040f")
buf.write("\u0410\3\2\2\2\u0410\u0411\3\2\2\2\u0411\u0415\t\7\2\2")
buf.write("\u0412\u0416\7\u00a0\2\2\u0413\u0414\7\u00fa\2\2\u0414")
buf.write("\u0416\7\u00df\2\2\u0415\u0412\3\2\2\2\u0415\u0413\3\2")
buf.write("\2\2\u0416\u0418\3\2\2\2\u0417\u040a\3\2\2\2\u0417\u040c")
buf.write("\3\2\2\2\u0417\u0418\3\2\2\2\u0418\37\3\2\2\2\u0419\u041c")
buf.write("\7\26\2\2\u041a\u041c\5\"\22\2\u041b\u0419\3\2\2\2\u041b")
buf.write("\u041a\3\2\2\2\u041c!\3\2\2\2\u041d\u041e\t\t\2\2\u041e")
buf.write("
buf.write("\u0430\3\2\2\2\u0422\u0423\f\4\2\2\u0423\u0425\7p\2\2")
buf.write("\u0424\u0426\58\35\2\u0425\u0424\3\2\2\2\u0425\u0426\3")
buf.write("\2\2\2\u0426\u0427\3\2\2\2\u0427\u042f\5$\23\5\u0428\u0429")
buf.write("\f\3\2\2\u0429\u042b\t\n\2\2\u042a\u042c\58\35\2\u042b")
buf.write("\u042a\3\2\2\2\u042b\u042c\3\2\2\2\u042c\u042d\3\2\2\2")
buf.write("\u042d\u042f\5$\23\4\u042e\u0422\3\2\2\2\u042e\u0428\3")
buf.write("\2\2\2\u042f\u0432\3\2\2\2\u0430\u042e\3\2\2\2\u0430\u0431")
buf.write("\3\2\2\2\u0431%\3\2\2\2\u0432\u0430\3\2\2\2\u0433\u0444")
buf.write("\5*\26\2\u0434\u0435\7\u00da\2\2\u0435\u0444\5\u00a6T")
buf.write("\2\u0436\u0437\7\u00f3\2\2\u0437\u043c\5^\60\2\u0438\u0439")
buf.write("\7\6\2\2\u0439\u043b\5^\60\2\u043a\u0438\3\2\2\2\u043b")
buf.write("\u043e\3\2\2\2\u043c\u043a\3\2\2\2\u043c\u043d\3\2\2\2")
buf.write("\u043d\u0444\3\2\2\2\u043e\u043c\3\2\2\2\u043f\u0440\7")
buf.write("\4\2\2\u0440\u0441\5\36\20\2\u0441\u0442\7\5\2\2\u0442")
buf.write("\u0444\3\2\2\2\u0443\u0433\3\2\2\2\u0443\u0434\3\2\2\2")
buf.write("\u0443\u0436\3\2\2\2\u0443\u043f\3\2\2\2\u0444\'\3\2\2")
buf.write("\2\u0445\u0447\5^\60\2\u0446\u0448\t\13\2\2\u0447\u0446")
buf.write("\3\2\2\2\u0447\u0448\3\2\2\2\u0448\u044b\3\2\2\2\u0449")
buf.write("\u044a\7\u009a\2\2\u044a\u044c\t\f\2\2\u044b\u0449\3\2")
buf.write("\2\2\u044b\u044c\3\2\2\2\u044c)\3\2\2\2\u044d\u044f\7")
buf.write("\u00ce\2\2\u044e\u0450\58\35\2\u044f\u044e\3\2\2\2\u044f")
buf.write("\u0450\3\2\2\2\u0450\u0451\3\2\2\2\u0451\u0456\5:\36\2")
buf.write("\u0452\u0453\7\6\2\2\u0453\u0455\5:\36\2\u0454\u0452\3")
buf.write("\2\2\2\u0455\u0458\3\2\2\2\u0456\u0454\3\2\2\2\u0456\u0457")
buf.write("\3\2\2\2\u0457\u0462\3\2\2\2\u0458\u0456\3\2\2\2\u0459")
buf.write("\u045a\7\\\2\2\u045a\u045f\5<\37\2\u045b\u045c\7\6\2\2")
buf.write("\u045c\u045e\5<\37\2\u045d\u045b\3\2\2\2\u045e\u0461\3")
buf.write("\2\2\2\u045f\u045d\3\2\2\2\u045f\u0460\3\2\2\2\u0460\u0463")
buf.write("\3\2\2\2\u0461\u045f\3\2\2\2\u0462\u0459\3\2\2\2\u0462")
buf.write("\u0463\3\2\2\2\u0463\u0466\3\2\2\2\u0464\u0465\7\u00f8")
buf.write("\2\2\u0465\u0467\5`\61\2\u0466\u0464\3\2\2\2\u0466\u0467")
buf.write("\3\2\2\2\u0467\u046b\3\2\2\2\u0468\u0469\7c\2\2\u0469")
buf.write("\u046a\7\"\2\2\u046a\u046c\5,\27\2\u046b\u0468\3\2\2\2")
buf.write("\u046b\u046c\3\2\2\2\u046c\u046f\3\2\2\2\u046d\u046e\7")
buf.write("f\2\2\u046e\u0470\5`\61\2\u046f\u046d\3\2\2\2\u046f\u0470")
buf.write("\3\2\2\2\u0470\u047a\3\2\2\2\u0471\u0472\7\u00f9\2\2\u0472")
buf.write("\u0477\5\62\32\2\u0473\u0474\7\6\2\2\u0474\u0476\5\62")
buf.write("\32\2\u0475\u0473\3\2\2\2\u0476\u0479\3\2\2\2\u0477\u0475")
buf.write("\3\2\2\2\u0477\u0478\3\2\2\2\u0478\u047b\3\2\2\2\u0479")
buf.write("\u0477\3\2\2\2\u047a\u0471\3\2\2\2\u047a\u047b\3\2\2\2")
buf.write("\u047b+\3\2\2\2\u047c\u047e\58\35\2\u047d\u047c\3\2\2")
buf.write("\2\u047d\u047e\3\2\2\2\u047e\u047f\3\2\2\2\u047f\u0484")
buf.write("\5.\30\2\u0480\u0481\7\6\2\2\u0481\u0483\5.\30\2\u0482")
buf.write("\u0480\3\2\2\2\u0483\u0486\3\2\2\2\u0484\u0482\3\2\2\2")
buf.write("\u0484\u0485\3\2\2\2\u0485-\3\2\2\2\u0486\u0484\3\2\2")
buf.write("\2\u0487\u04b0\5\60\31\2\u0488\u0489\7\u00c5\2\2\u0489")
buf.write("\u0492\7\4\2\2\u048a\u048f\5^\60\2\u048b\u048c\7\6\2\2")
buf.write("\u048c\u048e\5^\60\2\u048d\u048b\3\2\2\2\u048e\u0491\3")
buf.write("\2\2\2\u048f\u048d\3\2\2\2\u048f\u0490\3\2\2\2\u0490\u0493")
buf.write("\3\2\2\2\u0491\u048f\3\2\2\2\u0492\u048a\3\2\2\2\u0492")
buf.write("\u0493\3\2\2\2\u0493\u0494\3\2\2\2\u0494\u04b0\7\5\2\2")
buf.write("\u0495\u0496\7\61\2\2\u0496\u049f\7\4\2\2\u0497\u049c")
buf.write("\5^\60\2\u0498\u0499\7\6\2\2\u0499\u049b\5^\60\2\u049a")
buf.write("\u0498\3\2\2\2\u049b\u049e\3\2\2\2\u049c\u049a\3\2\2\2")
buf.write("\u049c\u049d\3\2\2\2\u049d\u04a0\3\2\2\2\u049e\u049c\3")
buf.write("\2\2\2\u049f\u0497\3\2\2\2\u049f\u04a0\3\2\2\2\u04a0\u04a1")
buf.write("\3\2\2\2\u04a1\u04b0\7\5\2\2\u04a2\u04a3\7d\2\2\u04a3")
buf.write("\u04a4\7\u00d2\2\2\u04a4\u04a5\7\4\2\2\u04a5\u04aa\5\60")
buf.write("\31\2\u04a6\u04a7\7\6\2\2\u04a7\u04a9\5\60\31\2\u04a8")
buf.write("\u04a6\3\2\2\2\u04a9\u04ac\3\2\2\2\u04aa\u04a8\3\2\2\2")
buf.write("\u04aa\u04ab\3\2\2\2\u04ab\u04ad\3\2\2\2\u04ac\u04aa\3")
buf.write("\2\2\2\u04ad\u04ae\7\5\2\2\u04ae\u04b0\3\2\2\2\u04af\u0487")
buf.write("\3\2\2\2\u04af\u0488\3\2\2\2\u04af\u0495\3\2\2\2\u04af")
buf.write("\u04a2\3\2\2\2\u04b0/\3\2\2\2\u04b1\u04ba\7\4\2\2\u04b2")
buf.write("\u04b7\5^\60\2\u04b3\u04b4\7\6\2\2\u04b4\u04b6\5^\60\2")
buf.write("\u04b5\u04b3\3\2\2\2\u04b6\u04b9\3\2\2\2\u04b7\u04b5\3")
buf.write("\2\2\2\u04b7\u04b8\3\2\2\2\u04b8\u04bb\3\2\2\2\u04b9\u04b7")
buf.write("\3\2\2\2\u04ba\u04b2\3\2\2\2\u04ba\u04bb\3\2\2\2\u04bb")
buf.write("\u04bc\3\2\2\2\u04bc\u04bf\7\5\2\2\u04bd\u04bf\5^\60\2")
buf.write("\u04be\u04b1\3\2\2\2\u04be\u04bd\3\2\2\2\u04bf\61\3\2")
buf.write("\2\2\u04c0\u04c1\5\u00b2Z\2\u04c1\u04c2\7\34\2\2\u04c2")
buf.write("\u04c3\7\4\2\2\u04c3\u04c4\5\64\33\2\u04c4\u04c5\7\5\2")
buf.write("\2\u04c5\63\3\2\2\2\u04c6\u04c8\5\u00b2Z\2\u04c7\u04c6")
buf.write("\3\2\2\2\u04c7\u04c8\3\2\2\2\u04c8\u04d3\3\2\2\2\u04c9")
buf.write("\u04ca\7\u00a9\2\2\u04ca\u04cb\7\"\2\2\u04cb\u04d0\5^")
buf.write("\60\2\u04cc\u04cd\7\6\2\2\u04cd\u04cf\5^\60\2\u04ce\u04cc")
buf.write("\3\2\2\2\u04cf\u04d2\3\2\2\2\u04d0\u04ce\3\2\2\2\u04d0")
buf.write("\u04d1\3\2\2\2\u04d1\u04d4\3\2\2\2\u04d2\u04d0\3\2\2\2")
buf.write("\u04d3\u04c9\3\2\2\2\u04d3\u04d4\3\2\2\2\u04d4\u04df\3")
buf.write("\2\2\2\u04d5\u04d6\7\u00a3\2\2\u04d6\u04d7\7\"\2\2\u04d7")
buf.write("\u04dc\5(\25\2\u04d8\u04d9\7\6\2\2\u04d9\u04db\5(\25\2")
buf.write("\u04da\u04d8\3\2\2\2\u04db\u04de\3\2\2\2\u04dc\u04da\3")
buf.write("\2\2\2\u04dc\u04dd\3\2\2\2\u04dd\u04e0\3\2\2\2\u04de\u04dc")
buf.write("\3\2\2\2\u04df\u04d5\3\2\2\2\u04df\u04e0\3\2\2\2\u04e0")
buf.write("\u04e2\3\2\2\2\u04e1\u04e3\5\u008aF\2\u04e2\u04e1\3\2")
buf.write("\2\2\u04e2\u04e3\3\2\2\2\u04e3\65\3\2\2\2\u04e4\u04e6")
buf.write("\5\u00b2Z\2\u04e5\u04e7\5Z.\2\u04e6\u04e5\3\2\2\2\u04e6")
buf.write("\u04e7\3\2\2\2\u04e7\u04e8\3\2\2\2\u04e8\u04e9\7\34\2")
buf.write("\2\u04e9\u04ea\7\4\2\2\u04ea\u04eb\5\16\b\2\u04eb\u04ec")
buf.write("\7\5\2\2\u04ec\67\3\2\2\2\u04ed\u04ee\t\r\2\2\u04ee9\3")
buf.write("\2\2\2\u04ef\u04f4\5^\60\2\u04f0\u04f2\7\34\2\2\u04f1")
buf.write("\u04f0\3\2\2\2\u04f1\u04f2\3\2\2\2\u04f2\u04f3\3\2\2\2")
buf.write("\u04f3\u04f5\5\u00b2Z\2\u04f4\u04f1\3\2\2\2\u04f4\u04f5")
buf.write("\3\2\2\2\u04f5\u04ff\3\2\2\2\u04f6\u04f7\5f\64\2\u04f7")
buf.write("\u04f8\7\3\2\2\u04f8\u04fb\7\u0109\2\2\u04f9\u04fa\7\34")
buf.write("\2\2\u04fa\u04fc\5Z.\2\u04fb\u04f9\3\2\2\2\u04fb\u04fc")
buf.write("\3\2\2\2\u04fc\u04ff\3\2\2\2\u04fd\u04ff\7\u0109\2\2\u04fe")
buf.write("\u04ef\3\2\2\2\u04fe\u04f6\3\2\2\2\u04fe\u04fd\3\2\2\2")
buf.write("\u04ff;\3\2\2\2\u0500\u0501\b\37\1\2\u0501\u0502\5B\"")
buf.write("\2\u0502\u0515\3\2\2\2\u0503\u0511\f\4\2\2\u0504\u0505")
buf.write("\7\60\2\2\u0505\u0506\7w\2\2\u0506\u0512\5B\"\2\u0507")
buf.write("\u0508\5> \2\u0508\u0509\7w\2\2\u0509\u050a\5<\37\2\u050a")
buf.write("\u050b\5@!\2\u050b\u0512\3\2\2\2\u050c\u050d\7\u008e\2")
buf.write("\2\u050d\u050e\5> \2\u050e\u050f\7w\2\2\u050f\u0510\5")
buf.write("B\"\2\u0510\u0512\3\2\2\2\u0511\u0504\3\2\2\2\u0511\u0507")
buf.write("\3\2\2\2\u0511\u050c\3\2\2\2\u0512\u0514\3\2\2\2\u0513")
buf.write("\u0503\3\2\2\2\u0514\u0517\3\2\2\2\u0515\u0513\3\2\2\2")
buf.write("\u0515\u0516\3\2\2\2\u0516=\3\2\2\2\u0517\u0515\3\2\2")
buf.write("\2\u0518\u051a\7m\2\2\u0519\u0518\3\2\2\2\u0519\u051a")
buf.write("\3\2\2\2\u051a\u0528\3\2\2\2\u051b\u051d\7{\2\2\u051c")
buf.write("\u051e\7\u00a5\2\2\u051d\u051c\3\2\2\2\u051d\u051e\3\2")
buf.write("\2\2\u051e\u0528\3\2\2\2\u051f\u0521\7\u00c1\2\2\u0520")
buf.write("\u0522\7\u00a5\2\2\u0521\u0520\3\2\2\2\u0521\u0522\3\2")
buf.write("\2\2\u0522\u0528\3\2\2\2\u0523\u0525\7]\2\2\u0524\u0526")
buf.write("\7\u00a5\2\2\u0525\u0524\3\2\2\2\u0525\u0526\3\2\2\2\u0526")
buf.write("\u0528\3\2\2\2\u0527\u0519\3\2\2\2\u0527\u051b\3\2\2\2")
buf.write("\u0527\u051f\3\2\2\2\u0527\u0523\3\2\2\2\u0528?\3\2\2")
buf.write("\2\u0529\u052a\7\u009e\2\2\u052a\u0538\5`\61\2\u052b\u052c")
buf.write("\7\u00f1\2\2\u052c\u052d\7\4\2\2\u052d\u0532\5\u00b2Z")
buf.write("\2\u052e\u052f\7\6\2\2\u052f\u0531\5\u00b2Z\2\u0530\u052e")
buf.write("\3\2\2\2\u0531\u0534\3\2\2\2\u0532\u0530\3\2\2\2\u0532")
buf.write("\u0533\3\2\2\2\u0533\u0535\3\2\2\2\u0534\u0532\3\2\2\2")
buf.write("\u0535\u0536\7\5\2\2\u0536\u0538\3\2\2\2\u0537\u0529\3")
buf.write("\2\2\2\u0537\u052b\3\2\2\2\u0538A\3\2\2\2\u0539\u0540")
buf.write("\5J&\2\u053a\u053b\7\u00dc\2\2\u053b\u053c\5D#\2\u053c")
buf.write("\u053d\7\4\2\2\u053d\u053e\5^\60\2\u053e\u053f\7\5\2\2")
buf.write("\u053f\u0541\3\2\2\2\u0540\u053a\3\2\2\2\u0540\u0541\3")
buf.write("\2\2\2\u0541C\3\2\2\2\u0542\u0543\t\16\2\2\u0543E\3\2")
buf.write("\2\2\u0544\u054b\7L\2\2\u0545\u0547\7\u00e5\2\2\u0546")
buf.write("\u0548\5l\67\2\u0547\u0546\3\2\2\2\u0547\u0548\3\2\2\2")
buf.write("\u0548\u0549\3\2\2\2\u0549\u054b\5H%\2\u054a\u0544\3\2")
buf.write("\2\2\u054a\u0545\3\2\2\2\u054bG\3\2\2\2\u054c\u054d\7")
buf.write("\u00fa\2\2\u054d\u0551\7.\2\2\u054e\u054f\7\u00fc\2\2")
buf.write("\u054f\u0551\7.\2\2\u0550\u054c\3\2\2\2\u0550\u054e\3")
buf.write("\2\2\2\u0551I\3\2\2\2\u0552\u05a5\5X-\2\u0553\u0554\7")
buf.write("\u0088\2\2\u0554\u055f\7\4\2\2\u0555\u0556\7\u00a9\2\2")
buf.write("\u0556\u0557\7\"\2\2\u0557\u055c\5^\60\2\u0558\u0559\7")
buf.write("\6\2\2\u0559\u055b\5^\60\2\u055a\u0558\3\2\2\2\u055b\u055e")
buf.write("\3\2\2\2\u055c\u055a\3\2\2\2\u055c\u055d\3\2\2\2\u055d")
buf.write("\u0560\3\2\2\2\u055e\u055c\3\2\2\2\u055f\u0555\3\2\2\2")
buf.write("\u055f\u0560\3\2\2\2\u0560\u056b\3\2\2\2\u0561\u0562\7")
buf.write("\u00a3\2\2\u0562\u0563\7\"\2\2\u0563\u0568\5(\25\2\u0564")
buf.write("\u0565\7\6\2\2\u0565\u0567\5(\25\2\u0566\u0564\3\2\2\2")
buf.write("\u0567\u056a\3\2\2\2\u0568\u0566\3\2\2\2\u0568\u0569\3")
buf.write("\2\2\2\u0569\u056c\3\2\2\2\u056a\u0568\3\2\2\2\u056b\u0561")
buf.write("\3\2\2\2\u056b\u056c\3\2\2\2\u056c\u0576\3\2\2\2\u056d")
buf.write("\u056e\7\u008a\2\2\u056e\u0573\5L\'\2\u056f\u0570\7\6")
buf.write("\2\2\u0570\u0572\5L\'\2\u0571\u056f\3\2\2\2\u0572\u0575")
buf.write("\3\2\2\2\u0573\u0571\3\2\2\2\u0573\u0574\3\2\2\2\u0574")
buf.write("\u0577\3\2\2\2\u0575\u0573\3\2\2\2\u0576\u056d\3\2\2\2")
buf.write("\u0576\u0577\3\2\2\2\u0577\u0579\3\2\2\2\u0578\u057a\5")
buf.write("N(\2\u0579\u0578\3\2\2\2\u0579\u057a\3\2\2\2\u057a\u057e")
buf.write("\3\2\2\2\u057b\u057c\7\25\2\2\u057c\u057d\7\u0085\2\2")
buf.write("\u057d\u057f\5R*\2\u057e\u057b\3\2\2\2\u057e\u057f\3\2")
buf.write("\2\2\u057f\u0581\3\2\2\2\u0580\u0582\t\17\2\2\u0581\u0580")
buf.write("\3\2\2\2\u0581\u0582\3\2\2\2\u0582\u0583\3\2\2\2\u0583")
buf.write("\u0584\7\u00ad\2\2\u0584\u0585\7\4\2\2\u0585\u0586\5\u0090")
buf.write("I\2\u0586\u0590\7\5\2\2\u0587\u0588\7\u00d7\2\2\u0588")
buf.write("\u058d\5T+\2\u0589\u058a\7\6\2\2\u058a\u058c\5T+\2\u058b")
buf.write("\u0589\3\2\2\2\u058c\u058f\3\2\2\2\u058d\u058b\3\2\2\2")
buf.write("\u058d\u058e\3\2\2\2\u058e\u0591\3\2\2\2\u058f\u058d\3")
buf.write("\2\2\2\u0590\u0587\3\2\2\2\u0590\u0591\3\2\2\2\u0591\u0592")
buf.write("\3\2\2\2\u0592\u0593\7D\2\2\u0593\u0598\5V,\2\u0594\u0595")
buf.write("\7\6\2\2\u0595\u0597\5V,\2\u0596\u0594\3\2\2\2\u0597\u059a")
buf.write("\3\2\2\2\u0598\u0596\3\2\2\2\u0598\u0599\3\2\2\2\u0599")
buf.write("\u059b\3\2\2\2\u059a\u0598\3\2\2\2\u059b\u05a3\7\5\2\2")
buf.write("\u059c\u059e\7\34\2\2\u059d\u059c\3\2\2\2\u059d\u059e")
buf.write("\3\2\2\2\u059e\u059f\3\2\2\2\u059f\u05a1\5\u00b2Z\2\u05a0")
buf.write("\u05a2\5Z.\2\u05a1\u05a0\3\2\2\2\u05a1\u05a2\3\2\2\2\u05a2")
buf.write("\u05a4\3\2\2\2\u05a3\u059d\3\2\2\2\u05a3\u05a4\3\2\2\2")
buf.write("\u05a4\u05a6\3\2\2\2\u05a5\u0553\3\2\2\2\u05a5\u05a6\3")
buf.write("\2\2\2\u05a6K\3\2\2\2\u05a7\u05a8\5^\60\2\u05a8\u05a9")
buf.write("\7\34\2\2\u05a9\u05aa\5\u00b2Z\2\u05aaM\3\2\2\2\u05ab")
buf.write("\u05ac\7\u009f\2\2\u05ac\u05ad\7\u00c6\2\2\u05ad\u05ae")
buf.write("\7\u00ae\2\2\u05ae\u05b7\7\u0085\2\2\u05af\u05b0\7\26")
buf.write("\2\2\u05b0\u05b1\7\u00c7\2\2\u05b1\u05b2\7\u00ae\2\2\u05b2")
buf.write("\u05b4\7\u0085\2\2\u05b3\u05b5\5P)\2\u05b4\u05b3\3\2\2")
buf.write("\2\u05b4\u05b5\3\2\2\2\u05b5\u05b7\3\2\2\2\u05b6\u05ab")
buf.write("\3\2\2\2\u05b6\u05af\3\2\2\2\u05b7O\3\2\2\2\u05b8\u05b9")
buf.write("\7\u00d3\2\2\u05b9\u05ba\7J\2\2\u05ba\u05c2\7\u0087\2")
buf.write("\2\u05bb\u05bc\7\u009c\2\2\u05bc\u05bd\7J\2\2\u05bd\u05c2")
buf.write("\7\u0087\2\2\u05be\u05bf\7\u00fa\2\2\u05bf\u05c0\7\u00ec")
buf.write("\2\2\u05c0\u05c2\7\u00c7\2\2\u05c1\u05b8\3\2\2\2\u05c1")
buf.write("\u05bb\3\2\2\2\u05c1\u05be\3\2\2\2\u05c2Q\3\2\2\2\u05c3")
buf.write("\u05c4\7\7\2\2\u05c4\u05c5\7\u00e2\2\2\u05c5\u05c6\7\u008f")
buf.write("\2\2\u05c6\u05d7\7\u00c6\2\2\u05c7\u05c8\7\7\2\2\u05c8")
buf.write("\u05c9\7\u00ab\2\2\u05c9\u05ca\7y\2\2\u05ca\u05d7\7\u00c6")
buf.write("\2\2\u05cb\u05cc\7\7\2\2\u05cc\u05cd\7\u00e2\2\2\u05cd")
buf.write("\u05ce\7X\2\2\u05ce\u05d7\5\u00b2Z\2\u05cf\u05d0\7\7\2")
buf.write("\2\u05d0\u05d1\7\u00e2\2\2\u05d1\u05d2\7y\2\2\u05d2\u05d7")
buf.write("\5\u00b2Z\2\u05d3\u05d4\7\7\2\2\u05d4\u05d5\7\u00e2\2")
buf.write("\2\u05d5\u05d7\5\u00b2Z\2\u05d6\u05c3\3\2\2\2\u05d6\u05c7")
buf.write("\3\2\2\2\u05d6\u05cb\3\2\2\2\u05d6\u05cf\3\2\2\2\u05d6")
buf.write("\u05d3\3\2\2\2\u05d7S\3\2\2\2\u05d8\u05d9\5\u00b2Z\2\u05d9")
buf.write("\u05da\7\u0101\2\2\u05da\u05db\7\4\2\2\u05db\u05e0\5\u00b2")
buf.write("Z\2\u05dc\u05dd\7\6\2\2\u05dd\u05df\5\u00b2Z\2\u05de\u05dc")
buf.write("\3\2\2\2\u05df\u05e2\3\2\2\2\u05e0\u05de\3\2\2\2\u05e0")
buf.write("\u05e1\3\2\2\2\u05e1\u05e3\3\2\2\2\u05e2\u05e0\3\2\2\2")
buf.write("\u05e3\u05e4\7\5\2\2\u05e4U\3\2\2\2\u05e5\u05e6\5\u00b2")
buf.write("Z\2\u05e6\u05e7\7\34\2\2\u05e7\u05e8\5^\60\2\u05e8W\3")
buf.write("\2\2\2\u05e9\u05f1\5\\/\2\u05ea\u05ec\7\34\2\2\u05eb\u05ea")
buf.write("\3\2\2\2\u05eb\u05ec\3\2\2\2\u05ec\u05ed\3\2\2\2\u05ed")
buf.write("\u05ef\5\u00b2Z\2\u05ee\u05f0\5Z.\2\u05ef\u05ee\3\2\2")
buf.write("\2\u05ef\u05f0\3\2\2\2\u05f0\u05f2\3\2\2\2\u05f1\u05eb")
buf.write("\3\2\2\2\u05f1\u05f2\3\2\2\2\u05f2Y\3\2\2\2\u05f3\u05f4")
buf.write("\7\4\2\2\u05f4\u05f9\5\u00b2Z\2\u05f5\u05f6\7\6\2\2\u05f6")
buf.write("\u05f8\5\u00b2Z\2\u05f7\u05f5\3\2\2\2\u05f8\u05fb\3\2")
buf.write("\2\2\u05f9\u05f7\3\2\2\2\u05f9\u05fa\3\2\2\2\u05fa\u05fc")
buf.write("\3\2\2\2\u05fb\u05f9\3\2\2\2\u05fc\u05fd\7\5\2\2\u05fd")
buf.write("[\3\2\2\2\u05fe\u0600\5\u00a6T\2\u05ff\u0601\5\u00a8U")
buf.write("\2\u0600\u05ff\3\2\2\2\u0600\u0601\3\2\2\2\u0601\u061f")
buf.write("\3\2\2\2\u0602\u0603\7\4\2\2\u0603\u0604\5\16\b\2\u0604")
buf.write("\u0605\7\5\2\2\u0605\u061f\3\2\2\2\u0606\u0607\7\u00ed")
buf.write("\2\2\u0607\u0608\7\4\2\2\u0608\u060d\5^\60\2\u0609\u060a")
buf.write("\7\6\2\2\u060a\u060c\5^\60\2\u060b\u0609\3\2\2\2\u060c")
buf.write("\u060f\3\2\2\2\u060d\u060b\3\2\2\2\u060d\u060e\3\2\2\2")
buf.write("\u060e\u0610\3\2\2\2\u060f\u060d\3\2\2\2\u0610\u0613\7")
buf.write("\5\2\2\u0611\u0612\7\u00fa\2\2\u0612\u0614\7\u00a4\2\2")
buf.write("\u0613\u0611\3\2\2\2\u0613\u0614\3\2\2\2\u0614\u061f\3")
buf.write("\2\2\2\u0615\u0616\7z\2\2\u0616\u0617\7\4\2\2\u0617\u0618")
buf.write("\5\16\b\2\u0618\u0619\7\5\2\2\u0619\u061f\3\2\2\2\u061a")
buf.write("\u061b\7\4\2\2\u061b\u061c\5<\37\2\u061c\u061d\7\5\2\2")
buf.write("\u061d\u061f\3\2\2\2\u061e\u05fe\3\2\2\2\u061e\u0602\3")
buf.write("\2\2\2\u061e\u0606\3\2\2\2\u061e\u0615\3\2\2\2\u061e\u061a")
buf.write("\3\2\2\2\u061f]\3\2\2\2\u0620\u0621\5`\61\2\u0621_\3\2")
buf.write("\2\2\u0622\u0623\b\61\1\2\u0623\u0625\5d\63\2\u0624\u0626")
buf.write("\5b\62\2\u0625\u0624\3\2\2\2\u0625\u0626\3\2\2\2\u0626")
buf.write("\u062a\3\2\2\2\u0627\u0628\7\u0097\2\2\u0628\u062a\5`")
buf.write("\61\5\u0629\u0622\3\2\2\2\u0629\u0627\3\2\2\2\u062a\u0633")
buf.write("\3\2\2\2\u062b\u062c\f\4\2\2\u062c\u062d\7\31\2\2\u062d")
buf.write("\u0632\5`\61\5\u062e\u062f\f\3\2\2\u062f\u0630\7\u00a2")
buf.write("\2\2\u0630\u0632\5`\61\4\u0631\u062b\3\2\2\2\u0631\u062e")
buf.write("\3\2\2\2\u0632\u0635\3\2\2\2\u0633\u0631\3\2\2\2\u0633")
buf.write("\u0634\3\2\2\2\u0634a\3\2\2\2\u0635\u0633\3\2\2\2\u0636")
buf.write("\u0637\5p9\2\u0637\u0638\5d\63\2\u0638\u0674\3\2\2\2\u0639")
buf.write("\u063a\5p9\2\u063a\u063b\5r:\2\u063b\u063c\7\4\2\2\u063c")
buf.write("\u063d\5\16\b\2\u063d\u063e\7\5\2\2\u063e\u0674\3\2\2")
buf.write("\2\u063f\u0641\7\u0097\2\2\u0640\u063f\3\2\2\2\u0640\u0641")
buf.write("\3\2\2\2\u0641\u0642\3\2\2\2\u0642\u0643\7!\2\2\u0643")
buf.write("\u0644\5d\63\2\u0644\u0645\7\31\2\2\u0645\u0646\5d\63")
buf.write("\2\u0646\u0674\3\2\2\2\u0647\u0649\7\u0097\2\2\u0648\u0647")
buf.write("\3\2\2\2\u0648\u0649\3\2\2\2\u0649\u064a\3\2\2\2\u064a")
buf.write("\u064b\7j\2\2\u064b\u064c\7\4\2\2\u064c\u0651\5^\60\2")
buf.write("\u064d\u064e\7\6\2\2\u064e\u0650\5^\60\2\u064f\u064d\3")
buf.write("\2\2\2\u0650\u0653\3\2\2\2\u0651\u064f\3\2\2\2\u0651\u0652")
buf.write("\3\2\2\2\u0652\u0654\3\2\2\2\u0653\u0651\3\2\2\2\u0654")
buf.write("\u0655\7\5\2\2\u0655\u0674\3\2\2\2\u0656\u0658\7\u0097")
buf.write("\2\2\u0657\u0656\3\2\2\2\u0657\u0658\3\2\2\2\u0658\u0659")
buf.write("\3\2\2\2\u0659\u065a\7j\2\2\u065a\u065b\7\4\2\2\u065b")
buf.write("\u065c\5\16\b\2\u065c\u065d\7\5\2\2\u065d\u0674\3\2\2")
buf.write("\2\u065e\u0660\7\u0097\2\2\u065f\u065e\3\2\2\2\u065f\u0660")
buf.write("\3\2\2\2\u0660\u0661\3\2\2\2\u0661\u0662\7}\2\2\u0662")
buf.write("\u0665\5d\63\2\u0663\u0664\7M\2\2\u0664\u0666\5d\63\2")
buf.write("\u0665\u0663\3\2\2\2\u0665\u0666\3\2\2\2\u0666\u0674\3")
buf.write("\2\2\2\u0667\u0669\7u\2\2\u0668\u066a\7\u0097\2\2\u0669")
buf.write("\u0668\3\2\2\2\u0669\u066a\3\2\2\2\u066a\u066b\3\2\2\2")
buf.write("\u066b\u0674\7\u0098\2\2\u066c\u066e\7u\2\2\u066d\u066f")
buf.write("\7\u0097\2\2\u066e\u066d\3\2\2\2\u066e\u066f\3\2\2\2\u066f")
buf.write("\u0670\3\2\2\2\u0670\u0671\7E\2\2\u0671\u0672\7\\\2\2")
buf.write("\u0672\u0674\5d\63\2\u0673\u0636\3\2\2\2\u0673\u0639\3")
buf.write("\2\2\2\u0673\u0640\3\2\2\2\u0673\u0648\3\2\2\2\u0673\u0657")
buf.write("\3\2\2\2\u0673\u065f\3\2\2\2\u0673\u0667\3\2\2\2\u0673")
buf.write("\u066c\3\2\2\2\u0674c\3\2\2\2\u0675\u0676\b\63\1\2\u0676")
buf.write("\u067a\5f\64\2\u0677\u0678\t\20\2\2\u0678\u067a\5d\63")
buf.write("\6\u0679\u0675\3\2\2\2\u0679\u0677\3\2\2\2\u067a\u0689")
buf.write("\3\2\2\2\u067b\u067c\f\5\2\2\u067c\u067d\t\21\2\2\u067d")
buf.write("\u0688\5d\63\6\u067e\u067f\f\4\2\2\u067f\u0680\t\20\2")
buf.write("\2\u0680\u0688\5d\63\5\u0681\u0682\f\3\2\2\u0682\u0683")
buf.write("\7\u010c\2\2\u0683\u0688\5d\63\4\u0684\u0685\f\7\2\2\u0685")
buf.write("\u0686\7\36\2\2\u0686\u0688\5n8\2\u0687\u067b\3\2\2\2")
buf.write("\u0687\u067e\3\2\2\2\u0687\u0681\3\2\2\2\u0687\u0684\3")
buf.write("\2\2\2\u0688\u068b\3\2\2\2\u0689\u0687\3\2\2\2\u0689\u068a")
buf.write("\3\2\2\2\u068ae\3\2\2\2\u068b\u0689\3\2\2\2\u068c\u068d")
buf.write("\b\64\1\2\u068d\u07ad\7\u0098\2\2\u068e\u07ad\5v<\2\u068f")
buf.write("\u0690\5\u00b2Z\2\u0690\u0691\5l\67\2\u0691\u07ad\3\2")
buf.write("\2\2\u0692\u0693\7G\2\2\u0693\u0694\7\u00b2\2\2\u0694")
buf.write("\u07ad\5l\67\2\u0695\u07ad\5\u00b4[\2\u0696\u07ad\5t;")
buf.write("\2\u0697\u07ad\5l\67\2\u0698\u07ad\7\u0110\2\2\u0699\u07ad")
buf.write("\7\u010d\2\2\u069a\u069b\7\u00b0\2\2\u069b\u069c\7\4\2")
buf.write("\2\u069c\u069d\5d\63\2\u069d\u069e\7j\2\2\u069e\u069f")
buf.write("\5d\63\2\u069f\u06a0\7\5\2\2\u06a0\u07ad\3\2\2\2\u06a1")
buf.write("\u06a2\7\4\2\2\u06a2\u06a5\5^\60\2\u06a3\u06a4\7\6\2\2")
buf.write("\u06a4\u06a6\5^\60\2\u06a5\u06a3\3\2\2\2\u06a6\u06a7\3")
buf.write("\2\2\2\u06a7\u06a5\3\2\2\2\u06a7\u06a8\3\2\2\2\u06a8\u06a9")
buf.write("\3\2\2\2\u06a9\u06aa\7\5\2\2\u06aa\u07ad\3\2\2\2\u06ab")
buf.write("\u06ac\7\u00c6\2\2\u06ac\u06ad\7\4\2\2\u06ad\u06b2\5^")
buf.write("\60\2\u06ae\u06af\7\6\2\2\u06af\u06b1\5^\60\2\u06b0\u06ae")
buf.write("\3\2\2\2\u06b1\u06b4\3\2\2\2\u06b2\u06b0\3\2\2\2\u06b2")
buf.write("\u06b3\3\2\2\2\u06b3\u06b5\3\2\2\2\u06b4\u06b2\3\2\2\2")
buf.write("\u06b5\u06b6\7\5\2\2\u06b6\u07ad\3\2\2\2\u06b7\u06b8\7")
buf.write("\177\2\2\u06b8\u06ba\7\4\2\2\u06b9\u06bb\58\35\2\u06ba")
buf.write("\u06b9\3\2\2\2\u06ba\u06bb\3\2\2\2\u06bb\u06bc\3\2\2\2")
buf.write("\u06bc\u06bf\5^\60\2\u06bd\u06be\7\6\2\2\u06be\u06c0\5")
buf.write("l\67\2\u06bf\u06bd\3\2\2\2\u06bf\u06c0\3\2\2\2\u06c0\u06c4")
buf.write("\3\2\2\2\u06c1\u06c2\7\u009e\2\2\u06c2\u06c3\7\u00a8\2")
buf.write("\2\u06c3\u06c5\5F$\2\u06c4\u06c1\3\2\2\2\u06c4\u06c5\3")
buf.write("\2\2\2\u06c5\u06c6\3\2\2\2\u06c6\u06c7\7\5\2\2\u06c7\u06c8")
buf.write("\7\u00fb\2\2\u06c8\u06c9\7c\2\2\u06c9\u06ca\7\4\2\2\u06ca")
buf.write("\u06cb\7\u00a3\2\2\u06cb\u06cc\7\"\2\2\u06cc\u06d1\5(")
buf.write("\25\2\u06cd\u06ce\7\6\2\2\u06ce\u06d0\5(\25\2\u06cf\u06cd")
buf.write("\3\2\2\2\u06d0\u06d3\3\2\2\2\u06d1\u06cf\3\2\2\2\u06d1")
buf.write("\u06d2\3\2\2\2\u06d2\u06d4\3\2\2\2\u06d3\u06d1\3\2\2\2")
buf.write("\u06d4\u06d5\7\5\2\2\u06d5\u07ad\3\2\2\2\u06d6\u06d8\5")
buf.write("h\65\2\u06d7\u06d6\3\2\2\2\u06d7\u06d8\3\2\2\2\u06d8\u06d9")
buf.write("\3\2\2\2\u06d9\u06da\5\u00a6T\2\u06da\u06de\7\4\2\2\u06db")
buf.write("\u06dc\5\u00b2Z\2\u06dc\u06dd\7\3\2\2\u06dd\u06df\3\2")
buf.write("\2\2\u06de\u06db\3\2\2\2\u06de\u06df\3\2\2\2\u06df\u06e0")
buf.write("\3\2\2\2\u06e0\u06e1\7\u0109\2\2\u06e1\u06e3\7\5\2\2\u06e2")
buf.write("\u06e4\5\u0084C\2\u06e3\u06e2\3\2\2\2\u06e3\u06e4\3\2")
buf.write("\2\2\u06e4\u06e6\3\2\2\2\u06e5\u06e7\5\u0088E\2\u06e6")
buf.write("\u06e5\3\2\2\2\u06e6\u06e7\3\2\2\2\u06e7\u07ad\3\2\2\2")
buf.write("\u06e8\u06ea\5h\65\2\u06e9\u06e8\3\2\2\2\u06e9\u06ea\3")
buf.write("\2\2\2\u06ea\u06eb\3\2\2\2\u06eb\u06ec\5\u00a6T\2\u06ec")
buf.write("\u06f8\7\4\2\2\u06ed\u06ef\58\35\2\u06ee\u06ed\3\2\2\2")
buf.write("\u06ee\u06ef\3\2\2\2\u06ef\u06f0\3\2\2\2\u06f0\u06f5\5")
buf.write("^\60\2\u06f1\u06f2\7\6\2\2\u06f2\u06f4\5^\60\2\u06f3\u06f1")
buf.write("\3\2\2\2\u06f4\u06f7\3\2\2\2\u06f5\u06f3\3\2\2\2\u06f5")
buf.write("\u06f6\3\2\2\2\u06f6\u06f9\3\2\2\2\u06f7\u06f5\3\2\2\2")
buf.write("\u06f8\u06ee\3\2\2\2\u06f8\u06f9\3\2\2\2\u06f9\u0704\3")
buf.write("\2\2\2\u06fa\u06fb\7\u00a3\2\2\u06fb\u06fc\7\"\2\2\u06fc")
buf.write("\u0701\5(\25\2\u06fd\u06fe\7\6\2\2\u06fe\u0700\5(\25\2")
buf.write("\u06ff\u06fd\3\2\2\2\u0700\u0703\3\2\2\2\u0701\u06ff\3")
buf.write("\2\2\2\u0701\u0702\3\2\2\2\u0702\u0705\3\2\2\2\u0703\u0701")
buf.write("\3\2\2\2\u0704\u06fa\3\2\2\2\u0704\u0705\3\2\2\2\u0705")
buf.write("\u0706\3\2\2\2\u0706\u0708\7\5\2\2\u0707\u0709\5\u0084")
buf.write("C\2\u0708\u0707\3\2\2\2\u0708\u0709\3\2\2\2\u0709\u070e")
buf.write("\3\2\2\2\u070a\u070c\5j\66\2\u070b\u070a\3\2\2\2\u070b")
buf.write("\u070c\3\2\2\2\u070c\u070d\3\2\2\2\u070d\u070f\5\u0088")
buf.write("E\2\u070e\u070b\3\2\2\2\u070e\u070f\3\2\2\2\u070f\u07ad")
buf.write("\3\2\2\2\u0710\u0711\5\u00b2Z\2\u0711\u0712\5\u0088E\2")
buf.write("\u0712\u07ad\3\2\2\2\u0713\u0714\5\u00b2Z\2\u0714\u0715")
buf.write("\7\b\2\2\u0715\u0716\5^\60\2\u0716\u07ad\3\2\2\2\u0717")
buf.write("\u0720\7\4\2\2\u0718\u071d\5\u00b2Z\2\u0719\u071a\7\6")
buf.write("\2\2\u071a\u071c\5\u00b2Z\2\u071b\u0719\3\2\2\2\u071c")
buf.write("\u071f\3\2\2\2\u071d\u071b\3\2\2\2\u071d\u071e\3\2\2\2")
buf.write("\u071e\u0721\3\2\2\2\u071f\u071d\3\2\2\2\u0720\u0718\3")
buf.write("\2\2\2\u0720\u0721\3\2\2\2\u0721\u0722\3\2\2\2\u0722\u0723")
buf.write("\7\5\2\2\u0723\u0724\7\b\2\2\u0724\u07ad\5^\60\2\u0725")
buf.write("\u0726\7\4\2\2\u0726\u0727\5\16\b\2\u0727\u0728\7\5\2")
buf.write("\2\u0728\u07ad\3\2\2\2\u0729\u072a\7Q\2\2\u072a\u072b")
buf.write("\7\4\2\2\u072b\u072c\5\16\b\2\u072c\u072d\7\5\2\2\u072d")
buf.write("\u07ad\3\2\2\2\u072e\u072f\7%\2\2\u072f\u0731\5^\60\2")
buf.write("\u0730\u0732\5\u0082B\2\u0731\u0730\3\2\2\2\u0732\u0733")
buf.write("\3\2\2\2\u0733\u0731\3\2\2\2\u0733\u0734\3\2\2\2\u0734")
buf.write("\u0737\3\2\2\2\u0735\u0736\7I\2\2\u0736\u0738\5^\60\2")
buf.write("\u0737\u0735\3\2\2\2\u0737\u0738\3\2\2\2\u0738\u0739\3")
buf.write("\2\2\2\u0739\u073a\7K\2\2\u073a\u07ad\3\2\2\2\u073b\u073d")
buf.write("\7%\2\2\u073c\u073e\5\u0082B\2\u073d\u073c\3\2\2\2\u073e")
buf.write("\u073f\3\2\2\2\u073f\u073d\3\2\2\2\u073f\u0740\3\2\2\2")
buf.write("\u0740\u0743\3\2\2\2\u0741\u0742\7I\2\2\u0742\u0744\5")
buf.write("^\60\2\u0743\u0741\3\2\2\2\u0743\u0744\3\2\2\2\u0744\u0745")
buf.write("\3\2\2\2\u0745\u0746\7K\2\2\u0746\u07ad\3\2\2\2\u0747")
buf.write("\u0748\7&\2\2\u0748\u0749\7\4\2\2\u0749\u074a\5^\60\2")
buf.write("\u074a\u074b\7\34\2\2\u074b\u074c\5|?\2\u074c\u074d\7")
buf.write("\5\2\2\u074d\u07ad\3\2\2\2\u074e\u074f\7\u00e6\2\2\u074f")
buf.write("\u0750\7\4\2\2\u0750\u0751\5^\60\2\u0751\u0752\7\34\2")
buf.write("\2\u0752\u0753\5|?\2\u0753\u0754\7\5\2\2\u0754\u07ad\3")
buf.write("\2\2\2\u0755\u0756\7\33\2\2\u0756\u075f\7\t\2\2\u0757")
buf.write("\u075c\5^\60\2\u0758\u0759\7\6\2\2\u0759\u075b\5^\60\2")
buf.write("\u075a\u0758\3\2\2\2\u075b\u075e\3\2\2\2\u075c\u075a\3")
buf.write("\2\2\2\u075c\u075d\3\2\2\2\u075d\u0760\3\2\2\2\u075e\u075c")
buf.write("\3\2\2\2\u075f\u0757\3\2\2\2\u075f\u0760\3\2\2\2\u0760")
buf.write("\u0761\3\2\2\2\u0761\u07ad\7\n\2\2\u0762\u07ad\5\u00b2")
buf.write("Z\2\u0763\u07ad\7\64\2\2\u0764\u0768\78\2\2\u0765\u0766")
buf.write("\7\4\2\2\u0766\u0767\7\u0111\2\2\u0767\u0769\7\5\2\2\u0768")
buf.write("\u0765\3\2\2\2\u0768\u0769\3\2\2\2\u0769\u07ad\3\2\2\2")
buf.write("\u076a\u076e\79\2\2\u076b\u076c\7\4\2\2\u076c\u076d\7")
buf.write("\u0111\2\2\u076d\u076f\7\5\2\2\u076e\u076b\3\2\2\2\u076e")
buf.write("\u076f\3\2\2\2\u076f\u07ad\3\2\2\2\u0770\u0774\7\u0081")
buf.write("\2\2\u0771\u0772\7\4\2\2\u0772\u0773\7\u0111\2\2\u0773")
buf.write("\u0775\7\5\2\2\u0774\u0771\3\2\2\2\u0774\u0775\3\2\2\2")
buf.write("\u0775\u07ad\3\2\2\2\u0776\u077a\7\u0082\2\2\u0777\u0778")
buf.write("\7\4\2\2\u0778\u0779\7\u0111\2\2\u0779\u077b\7\5\2\2\u077a")
buf.write("\u0777\3\2\2\2\u077a\u077b\3\2\2\2\u077b\u07ad\3\2\2\2")
buf.write("\u077c\u07ad\7:\2\2\u077d\u07ad\7\63\2\2\u077e\u07ad\7")
buf.write("\67\2\2\u077f\u07ad\7\65\2\2\u0780\u0781\7\u00d8\2\2\u0781")
buf.write("\u0782\7\4\2\2\u0782\u0783\5d\63\2\u0783\u0784\7\\\2\2")
buf.write("\u0784\u0787\5d\63\2\u0785\u0786\7Z\2\2\u0786\u0788\5")
buf.write("d\63\2\u0787\u0785\3\2\2\2\u0787\u0788\3\2\2\2\u0788\u0789")
buf.write("\3\2\2\2\u0789\u078a\7\5\2\2\u078a\u07ad\3\2\2\2\u078b")
buf.write("\u078c\7\u0096\2\2\u078c\u078d\7\4\2\2\u078d\u0790\5d")
buf.write("\63\2\u078e\u078f\7\6\2\2\u078f\u0791\5z>\2\u0790\u078e")
buf.write("\3\2\2\2\u0790\u0791\3\2\2\2\u0791\u0792\3\2\2\2\u0792")
buf.write("\u0793\7\5\2\2\u0793\u07ad\3\2\2\2\u0794\u0795\7S\2\2")
buf.write("\u0795\u0796\7\4\2\2\u0796\u0797\5\u00b2Z\2\u0797\u0798")
buf.write("\7\\\2\2\u0798\u0799\5d\63\2\u0799\u079a\7\5\2\2\u079a")
buf.write("\u07ad\3\2\2\2\u079b\u079c\7\4\2\2\u079c\u079d\5^\60\2")
buf.write("\u079d\u079e\7\5\2\2\u079e\u07ad\3\2\2\2\u079f\u07a0\7")
buf.write("d\2\2\u07a0\u07a9\7\4\2\2\u07a1\u07a6\5\u00a6T\2\u07a2")
buf.write("\u07a3\7\6\2\2\u07a3\u07a5\5\u00a6T\2\u07a4\u07a2\3\2")
buf.write("\2\2\u07a5\u07a8\3\2\2\2\u07a6\u07a4\3\2\2\2\u07a6\u07a7")
buf.write("\3\2\2\2\u07a7\u07aa\3\2\2\2\u07a8\u07a6\3\2\2\2\u07a9")
buf.write("\u07a1\3\2\2\2\u07a9\u07aa\3\2\2\2\u07aa\u07ab\3\2\2\2")
buf.write("\u07ab\u07ad\7\5\2\2\u07ac\u068c\3\2\2\2\u07ac\u068e\3")
buf.write("\2\2\2\u07ac\u068f\3\2\2\2\u07ac\u0692\3\2\2\2\u07ac\u0695")
buf.write("\3\2\2\2\u07ac\u0696\3\2\2\2\u07ac\u0697\3\2\2\2\u07ac")
buf.write("\u0698\3\2\2\2\u07ac\u0699\3\2\2\2\u07ac\u069a\3\2\2\2")
buf.write("\u07ac\u06a1\3\2\2\2\u07ac\u06ab\3\2\2\2\u07ac\u06b7\3")
buf.write("\2\2\2\u07ac\u06d7\3\2\2\2\u07ac\u06e9\3\2\2\2\u07ac\u0710")
buf.write("\3\2\2\2\u07ac\u0713\3\2\2\2\u07ac\u0717\3\2\2\2\u07ac")
buf.write("\u0725\3\2\2\2\u07ac\u0729\3\2\2\2\u07ac\u072e\3\2\2\2")
buf.write("\u07ac\u073b\3\2\2\2\u07ac\u0747\3\2\2\2\u07ac\u074e\3")
buf.write("\2\2\2\u07ac\u0755\3\2\2\2\u07ac\u0762\3\2\2\2\u07ac\u0763")
buf.write("\3\2\2\2\u07ac\u0764\3\2\2\2\u07ac\u076a\3\2\2\2\u07ac")
buf.write("\u0770\3\2\2\2\u07ac\u0776\3\2\2\2\u07ac\u077c\3\2\2\2")
buf.write("\u07ac\u077d\3\2\2\2\u07ac\u077e\3\2\2\2\u07ac\u077f\3")
buf.write("\2\2\2\u07ac\u0780\3\2\2\2\u07ac\u078b\3\2\2\2\u07ac\u0794")
buf.write("\3\2\2\2\u07ac\u079b\3\2\2\2\u07ac\u079f\3\2\2\2\u07ad")
buf.write("\u07b8\3\2\2\2\u07ae\u07af\f\23\2\2\u07af\u07b0\7\t\2")
buf.write("\2\u07b0\u07b1\5d\63\2\u07b1\u07b2\7\n\2\2\u07b2\u07b7")
buf.write("\3\2\2\2\u07b3\u07b4\f\21\2\2\u07b4\u07b5\7\3\2\2\u07b5")
buf.write("\u07b7\5\u00b2Z\2\u07b6\u07ae\3\2\2\2\u07b6\u07b3\3\2")
buf.write("\2\2\u07b7\u07ba\3\2\2\2\u07b8\u07b6\3\2\2\2\u07b8\u07b9")
buf.write("\3\2\2\2\u07b9g\3\2\2\2\u07ba\u07b8\3\2\2\2\u07bb\u07bc")
buf.write("\t\22\2\2\u07bci\3\2\2\2\u07bd\u07be\7i\2\2\u07be\u07c2")
buf.write("\7\u009a\2\2\u07bf\u07c0\7\u00be\2\2\u07c0\u07c2\7\u009a")
buf.write("\2\2\u07c1\u07bd\3\2\2\2\u07c1\u07bf\3\2\2\2\u07c2k\3")
buf.write("\2\2\2\u07c3\u07ca\7\u010e\2\2\u07c4\u07c7\7\u010f\2\2")
buf.write("\u07c5\u07c6\7\u00e8\2\2\u07c6\u07c8\7\u010e\2\2\u07c7")
buf.write("\u07c5\3\2\2\2\u07c7\u07c8\3\2\2\2\u07c8\u07ca\3\2\2\2")
buf.write("\u07c9\u07c3\3\2\2\2\u07c9\u07c4\3\2\2\2\u07cam\3\2\2")
buf.write("\2\u07cb\u07cc\7\u00e0\2\2\u07cc\u07cd\7\u0100\2\2\u07cd")
buf.write("\u07d2\5v<\2\u07ce\u07cf\7\u00e0\2\2\u07cf\u07d0\7\u0100")
buf.write("\2\2\u07d0\u07d2\5l\67\2\u07d1\u07cb\3\2\2\2\u07d1\u07ce")
buf.write("\3\2\2\2\u07d2o\3\2\2\2\u07d3\u07d4\t\23\2\2\u07d4q\3")
buf.write("\2\2\2\u07d5\u07d6\t\24\2\2\u07d6s\3\2\2\2\u07d7\u07d8")
buf.write("\t\25\2\2\u07d8u\3\2\2\2\u07d9\u07db\7q\2\2\u07da\u07dc")
buf.write("\t\20\2\2\u07db\u07da\3\2\2\2\u07db\u07dc\3\2\2\2\u07dc")
buf.write("\u07dd\3\2\2\2\u07dd\u07de\5l\67\2\u07de\u07e1\5x=\2\u07df")
buf.write("\u07e0\7\u00e2\2\2\u07e0\u07e2\5x=\2\u07e1\u07df\3\2\2")
buf.write("\2\u07e1\u07e2\3\2\2\2\u07e2w\3\2\2\2\u07e3\u07e4\t\26")
buf.write("\2\2\u07e4y\3\2\2\2\u07e5\u07e6\t\27\2\2\u07e6{\3\2\2")
buf.write("\2\u07e7\u07e8\b?\1\2\u07e8\u07e9\7\u00c6\2\2\u07e9\u07ea")
buf.write("\7\4\2\2\u07ea\u07ef\5~@\2\u07eb\u07ec\7\6\2\2\u07ec\u07ee")
buf.write("\5~@\2\u07ed\u07eb\3\2\2\2\u07ee\u07f1\3\2\2\2\u07ef\u07ed")
buf.write("\3\2\2\2\u07ef\u07f0\3\2\2\2\u07f0\u07f2\3\2\2\2\u07f1")
buf.write("\u07ef\3\2\2\2\u07f2\u07f3\7\5\2\2\u07f3\u0843\3\2\2\2")
buf.write("\u07f4\u07f5\7q\2\2\u07f5\u07f8\5x=\2\u07f6\u07f7\7\u00e2")
buf.write("\2\2\u07f7\u07f9\5x=\2\u07f8\u07f6\3\2\2\2\u07f8\u07f9")
buf.write("\3\2\2\2\u07f9\u0843\3\2\2\2\u07fa\u07ff\7\u00e1\2\2\u07fb")
buf.write("\u07fc\7\4\2\2\u07fc\u07fd\5\u0080A\2\u07fd\u07fe\7\5")
buf.write("\2\2\u07fe\u0800\3\2\2\2\u07ff\u07fb\3\2\2\2\u07ff\u0800")
buf.write("\3\2\2\2\u0800\u0804\3\2\2\2\u0801\u0802\7\u00fc\2\2\u0802")
buf.write("\u0803\7\u00e0\2\2\u0803\u0805\7\u0100\2\2\u0804\u0801")
buf.write("\3\2\2\2\u0804\u0805\3\2\2\2\u0805\u0843\3\2\2\2\u0806")
buf.write("\u080b\7\u00e1\2\2\u0807\u0808\7\4\2\2\u0808\u0809\5\u0080")
buf.write("A\2\u0809\u080a\7\5\2\2\u080a\u080c\3\2\2\2\u080b\u0807")
buf.write("\3\2\2\2\u080b\u080c\3\2\2\2\u080c\u080d\3\2\2\2\u080d")
buf.write("\u080e\7\u00fa\2\2\u080e\u080f\7\u00e0\2\2\u080f\u0843")
buf.write("\7\u0100\2\2\u0810\u0815\7\u00e0\2\2\u0811\u0812\7\4\2")
buf.write("\2\u0812\u0813\5\u0080A\2\u0813\u0814\7\5\2\2\u0814\u0816")
buf.write("\3\2\2\2\u0815\u0811\3\2\2\2\u0815\u0816\3\2\2\2\u0816")
buf.write("\u081a\3\2\2\2\u0817\u0818\7\u00fc\2\2\u0818\u0819\7\u00e0")
buf.write("\2\2\u0819\u081b\7\u0100\2\2\u081a\u0817\3\2\2\2\u081a")
buf.write("\u081b\3\2\2\2\u081b\u0843\3\2\2\2\u081c\u0821\7\u00e0")
buf.write("\2\2\u081d\u081e\7\4\2\2\u081e\u081f\5\u0080A\2\u081f")
buf.write("\u0820\7\5\2\2\u0820\u0822\3\2\2\2\u0821\u081d\3\2\2\2")
buf.write("\u0821\u0822\3\2\2\2\u0822\u0823\3\2\2\2\u0823\u0824\7")
buf.write("\u00fa\2\2\u0824\u0825\7\u00e0\2\2\u0825\u0843\7\u0100")
buf.write("\2\2\u0826\u0827\7G\2\2\u0827\u0843\7\u00b2\2\2\u0828")
buf.write("\u0829\7\33\2\2\u0829\u082a\7\u0103\2\2\u082a\u082b\5")
buf.write("|?\2\u082b\u082c\7\u0105\2\2\u082c\u0843\3\2\2\2\u082d")
buf.write("\u082e\7\u0084\2\2\u082e\u082f\7\u0103\2\2\u082f\u0830")
buf.write("\5|?\2\u0830\u0831\7\6\2\2\u0831\u0832\5|?\2\u0832\u0833")
buf.write("\7\u0105\2\2\u0833\u0843\3\2\2\2\u0834\u0840\5\u00b2Z")
buf.write("\2\u0835\u0836\7\4\2\2\u0836\u083b\5\u0080A\2\u0837\u0838")
buf.write("\7\6\2\2\u0838\u083a\5\u0080A\2\u0839\u0837\3\2\2\2\u083a")
buf.write("\u083d\3\2\2\2\u083b\u0839\3\2\2\2\u083b\u083c\3\2\2\2")
buf.write("\u083c\u083e\3\2\2\2\u083d\u083b\3\2\2\2\u083e\u083f\7")
buf.write("\5\2\2\u083f\u0841\3\2\2\2\u0840\u0835\3\2\2\2\u0840\u0841")
buf.write("\3\2\2\2\u0841\u0843\3\2\2\2\u0842\u07e7\3\2\2\2\u0842")
buf.write("\u07f4\3\2\2\2\u0842\u07fa\3\2\2\2\u0842\u0806\3\2\2\2")
buf.write("\u0842\u0810\3\2\2\2\u0842\u081c\3\2\2\2\u0842\u0826\3")
buf.write("\2\2\2\u0842\u0828\3\2\2\2\u0842\u082d\3\2\2\2\u0842\u0834")
buf.write("\3\2\2\2\u0843\u084d\3\2\2\2\u0844\u0845\f\4\2\2\u0845")
buf.write("\u0849\7\33\2\2\u0846\u0847\7\t\2\2\u0847\u0848\7\u0111")
buf.write("\2\2\u0848\u084a\7\n\2\2\u0849\u0846\3\2\2\2\u0849\u084a")
buf.write("\3\2\2\2\u084a\u084c\3\2\2\2\u084b\u0844\3\2\2\2\u084c")
buf.write("\u084f\3\2\2\2\u084d\u084b\3\2\2\2\u084d\u084e\3\2\2\2")
buf.write("\u084e}\3\2\2\2\u084f\u084d\3\2\2\2\u0850\u0855\5|?\2")
buf.write("\u0851\u0852\5\u00b2Z\2\u0852\u0853\5|?\2\u0853\u0855")
buf.write("\3\2\2\2\u0854\u0850\3\2\2\2\u0854\u0851\3\2\2\2\u0855")
buf.write("\177\3\2\2\2\u0856\u0859\7\u0111\2\2\u0857\u0859\5|?\2")
buf.write("\u0858\u0856\3\2\2\2\u0858\u0857\3\2\2\2\u0859\u0081\3")
buf.write("\2\2\2\u085a\u085b\7\u00f7\2\2\u085b\u085c\5^\60\2\u085c")
buf.write("\u085d\7\u00de\2\2\u085d\u085e\5^\60\2\u085e\u0083\3\2")
buf.write("\2\2\u085f\u0860\7V\2\2\u0860\u0861\7\4\2\2\u0861\u0862")
buf.write("\7\u00f8\2\2\u0862\u0863\5`\61\2\u0863\u0864\7\5\2\2\u0864")
buf.write("\u0085\3\2\2\2\u0865\u0866\7\u00f7\2\2\u0866\u0869\7\u0086")
buf.write("\2\2\u0867\u0868\7\31\2\2\u0868\u086a\5^\60\2\u0869\u0867")
buf.write("\3\2\2\2\u0869\u086a\3\2\2\2\u086a\u086b\3\2\2\2\u086b")
buf.write("\u086c\7\u00de\2\2\u086c\u086d\7\u00ee\2\2\u086d\u086e")
buf.write("\7\u00d1\2\2\u086e\u086f\5\u00b2Z\2\u086f\u0870\7\u0101")
buf.write("\2\2\u0870\u0878\5^\60\2\u0871\u0872\7\6\2\2\u0872\u0873")
buf.write("\5\u00b2Z\2\u0873\u0874\7\u0101\2\2\u0874\u0875\5^\60")
buf.write("\2\u0875\u0877\3\2\2\2\u0876\u0871\3\2\2\2\u0877\u087a")
buf.write("\3\2\2\2\u0878\u0876\3\2\2\2\u0878\u0879\3\2\2\2\u0879")
buf.write("\u08a6\3\2\2\2\u087a\u0878\3\2\2\2\u087b\u087c\7\u00f7")
buf.write("\2\2\u087c\u087f\7\u0086\2\2\u087d\u087e\7\31\2\2\u087e")
buf.write("\u0880\5^\60\2\u087f\u087d\3\2\2\2\u087f\u0880\3\2\2\2")
buf.write("\u0880\u0881\3\2\2\2\u0881\u0882\7\u00de\2\2\u0882\u08a6")
buf.write("\7@\2\2\u0883\u0884\7\u00f7\2\2\u0884\u0885\7\u0097\2")
buf.write("\2\u0885\u0888\7\u0086\2\2\u0886\u0887\7\31\2\2\u0887")
buf.write("\u0889\5^\60\2\u0888\u0886\3\2\2\2\u0888\u0889\3\2\2\2")
buf.write("\u0889\u088a\3\2\2\2\u088a\u088b\7\u00de\2\2\u088b\u0897")
buf.write("\7o\2\2\u088c\u088d\7\4\2\2\u088d\u0892\5\u00b2Z\2\u088e")
buf.write("\u088f\7\6\2\2\u088f\u0891\5\u00b2Z\2\u0890\u088e\3\2")
buf.write("\2\2\u0891\u0894\3\2\2\2\u0892\u0890\3\2\2\2\u0892\u0893")
buf.write("\3\2\2\2\u0893\u0895\3\2\2\2\u0894\u0892\3\2\2\2\u0895")
buf.write("\u0896\7\5\2\2\u0896\u0898\3\2\2\2\u0897\u088c\3\2\2\2")
buf.write("\u0897\u0898\3\2\2\2\u0898\u0899\3\2\2\2\u0899\u089a\7")
buf.write("\u00f3\2\2\u089a\u089b\7\4\2\2\u089b\u08a0\5^\60\2\u089c")
buf.write("\u089d\7\6\2\2\u089d\u089f\5^\60\2\u089e\u089c\3\2\2\2")
buf.write("\u089f\u08a2\3\2\2\2\u08a0\u089e\3\2\2\2\u08a0\u08a1\3")
buf.write("\2\2\2\u08a1\u08a3\3\2\2\2\u08a2\u08a0\3\2\2\2\u08a3\u08a4")
buf.write("\7\5\2\2\u08a4\u08a6\3\2\2\2\u08a5\u0865\3\2\2\2\u08a5")
buf.write("\u087b\3\2\2\2\u08a5\u0883\3\2\2\2\u08a6\u0087\3\2\2\2")
buf.write("\u08a7\u08ad\7\u00a7\2\2\u08a8\u08ae\5\u00b2Z\2\u08a9")
buf.write("\u08aa\7\4\2\2\u08aa\u08ab\5\64\33\2\u08ab\u08ac\7\5\2")
buf.write("\2\u08ac\u08ae\3\2\2\2\u08ad\u08a8\3\2\2\2\u08ad\u08a9")
buf.write("\3\2\2\2\u08ae\u0089\3\2\2\2\u08af\u08b0\7\u008a\2\2\u08b0")
buf.write("\u08b5\5L\'\2\u08b1\u08b2\7\6\2\2\u08b2\u08b4\5L\'\2\u08b3")
buf.write("\u08b1\3\2\2\2\u08b4\u08b7\3\2\2\2\u08b5\u08b3\3\2\2\2")
buf.write("\u08b5\u08b6\3\2\2\2\u08b6\u08b9\3\2\2\2\u08b7\u08b5\3")
buf.write("\2\2\2\u08b8\u08af\3\2\2\2\u08b8\u08b9\3\2\2\2\u08b9\u08ba")
buf.write("\3\2\2\2\u08ba\u08be\5\u008cG\2\u08bb\u08bc\7\25\2\2\u08bc")
buf.write("\u08bd\7\u0085\2\2\u08bd\u08bf\5R*\2\u08be\u08bb\3\2\2")
buf.write("\2\u08be\u08bf\3\2\2\2\u08bf\u08c1\3\2\2\2\u08c0\u08c2")
buf.write("\t\17\2\2\u08c1\u08c0\3\2\2\2\u08c1\u08c2\3\2\2\2\u08c2")
buf.write("\u08c8\3\2\2\2\u08c3\u08c4\7\u00ad\2\2\u08c4\u08c5\7\4")
buf.write("\2\2\u08c5\u08c6\5\u0090I\2\u08c6\u08c7\7\5\2\2\u08c7")
buf.write("\u08c9\3\2\2\2\u08c8\u08c3\3\2\2\2\u08c8\u08c9\3\2\2\2")
buf.write("\u08c9\u08d3\3\2\2\2\u08ca\u08cb\7\u00d7\2\2\u08cb\u08d0")
buf.write("\5T+\2\u08cc\u08cd\7\6\2\2\u08cd\u08cf\5T+\2\u08ce\u08cc")
buf.write("\3\2\2\2\u08cf\u08d2\3\2\2\2\u08d0\u08ce\3\2\2\2\u08d0")
buf.write("\u08d1\3\2\2\2\u08d1\u08d4\3\2\2\2\u08d2\u08d0\3\2\2\2")
buf.write("\u08d3\u08ca\3\2\2\2\u08d3\u08d4\3\2\2\2\u08d4\u08de\3")
buf.write("\2\2\2\u08d5\u08d6\7D\2\2\u08d6\u08db\5V,\2\u08d7\u08d8")
buf.write("\7\6\2\2\u08d8\u08da\5V,\2\u08d9\u08d7\3\2\2\2\u08da\u08dd")
buf.write("\3\2\2\2\u08db\u08d9\3\2\2\2\u08db\u08dc\3\2\2\2\u08dc")
buf.write("\u08df\3\2\2\2\u08dd\u08db\3\2\2\2\u08de\u08d5\3\2\2\2")
buf.write("\u08de\u08df\3\2\2\2\u08df\u008b\3\2\2\2\u08e0\u08e1\7")
buf.write("\u00b6\2\2\u08e1\u08f9\5\u008eH\2\u08e2\u08e3\7\u00c7")
buf.write("\2\2\u08e3\u08f9\5\u008eH\2\u08e4\u08e5\7e\2\2\u08e5\u08f9")
buf.write("\5\u008eH\2\u08e6\u08e7\7\u00b6\2\2\u08e7\u08e8\7!\2\2")
buf.write("\u08e8\u08e9\5\u008eH\2\u08e9\u08ea\7\31\2\2\u08ea\u08eb")
buf.write("\5\u008eH\2\u08eb\u08f9\3\2\2\2\u08ec\u08ed\7\u00c7\2")
buf.write("\2\u08ed\u08ee\7!\2\2\u08ee\u08ef\5\u008eH\2\u08ef\u08f0")
buf.write("\7\31\2\2\u08f0\u08f1\5\u008eH\2\u08f1\u08f9\3\2\2\2\u08f2")
buf.write("\u08f3\7e\2\2\u08f3\u08f4\7!\2\2\u08f4\u08f5\5\u008eH")
buf.write("\2\u08f5\u08f6\7\31\2\2\u08f6\u08f7\5\u008eH\2\u08f7\u08f9")
buf.write("\3\2\2\2\u08f8\u08e0\3\2\2\2\u08f8\u08e2\3\2\2\2\u08f8")
buf.write("\u08e4\3\2\2\2\u08f8\u08e6\3\2\2\2\u08f8\u08ec\3\2\2\2")
buf.write("\u08f8\u08f2\3\2\2\2\u08f9\u008d\3\2\2\2\u08fa\u08fb\7")
buf.write("\u00e9\2\2\u08fb\u0904\7\u00b1\2\2\u08fc\u08fd\7\u00e9")
buf.write("\2\2\u08fd\u0904\7Y\2\2\u08fe\u08ff\7\62\2\2\u08ff\u0904")
buf.write("\7\u00c6\2\2\u0900\u0901\5^\60\2\u0901\u0902\t\30\2\2")
buf.write("\u0902\u0904\3\2\2\2\u0903\u08fa\3\2\2\2\u0903\u08fc\3")
buf.write("\2\2\2\u0903\u08fe\3\2\2\2\u0903\u0900\3\2\2\2\u0904\u008f")
buf.write("\3\2\2\2\u0905\u0906\bI\1\2\u0906\u0908\5\u0092J\2\u0907")
buf.write("\u0909\5\u0094K\2\u0908\u0907\3\2\2\2\u0908\u0909\3\2")
buf.write("\2\2\u0909\u0911\3\2\2\2\u090a\u090b\f\4\2\2\u090b\u0910")
buf.write("\5\u0090I\5\u090c\u090d\f\3\2\2\u090d\u090e\7\13\2\2\u090e")
buf.write("\u0910\5\u0090I\4\u090f\u090a\3\2\2\2\u090f\u090c\3\2")
buf.write("\2\2\u0910\u0913\3\2\2\2\u0911\u090f\3\2\2\2\u0911\u0912")
buf.write("\3\2\2\2\u0912\u0091\3\2\2\2\u0913\u0911\3\2\2\2\u0914")
buf.write("\u092e\5\u00b2Z\2\u0915\u0916\7\4\2\2\u0916\u092e\7\5")
buf.write("\2\2\u0917\u0918\7\u00af\2\2\u0918\u0919\7\4\2\2\u0919")
buf.write("\u091e\5\u0090I\2\u091a\u091b\7\6\2\2\u091b\u091d\5\u0090")
buf.write("I\2\u091c\u091a\3\2\2\2\u091d\u0920\3\2\2\2\u091e\u091c")
buf.write("\3\2\2\2\u091e\u091f\3\2\2\2\u091f\u0921\3\2\2\2\u0920")
buf.write("\u091e\3\2\2\2\u0921\u0922\7\5\2\2\u0922\u092e\3\2\2\2")
buf.write("\u0923\u0924\7\4\2\2\u0924\u0925\5\u0090I\2\u0925\u0926")
buf.write("\7\5\2\2\u0926\u092e\3\2\2\2\u0927\u092e\7\f\2\2\u0928")
buf.write("\u092e\7\r\2\2\u0929\u092a\7\16\2\2\u092a\u092b\5\u0090")
buf.write("I\2\u092b\u092c\7\17\2\2\u092c\u092e\3\2\2\2\u092d\u0914")
buf.write("\3\2\2\2\u092d\u0915\3\2\2\2\u092d\u0917\3\2\2\2\u092d")
buf.write("\u0923\3\2\2\2\u092d\u0927\3\2\2\2\u092d\u0928\3\2\2\2")
buf.write("\u092d\u0929\3\2\2\2\u092e\u0093\3\2\2\2\u092f\u0931\7")
buf.write("\u0109\2\2\u0930\u0932\7\u010d\2\2\u0931\u0930\3\2\2\2")
buf.write("\u0931\u0932\3\2\2\2\u0932\u094e\3\2\2\2\u0933\u0935\7")
buf.write("\u0107\2\2\u0934\u0936\7\u010d\2\2\u0935\u0934\3\2\2\2")
buf.write("\u0935\u0936\3\2\2\2\u0936\u094e\3\2\2\2\u0937\u0939\7")
buf.write("\u010d\2\2\u0938\u093a\7\u010d\2\2\u0939\u0938\3\2\2\2")
buf.write("\u0939\u093a\3\2\2\2\u093a\u094e\3\2\2\2\u093b\u093c\7")
buf.write("\20\2\2\u093c\u093d\7\u0111\2\2\u093d\u093f\7\21\2\2\u093e")
buf.write("\u0940\7\u010d\2\2\u093f\u093e\3\2\2\2\u093f\u0940\3\2")
buf.write("\2\2\u0940\u094e\3\2\2\2\u0941\u0943\7\20\2\2\u0942\u0944")
buf.write("\7\u0111\2\2\u0943\u0942\3\2\2\2\u0943\u0944\3\2\2\2\u0944")
buf.write("\u0945\3\2\2\2\u0945\u0947\7\6\2\2\u0946\u0948\7\u0111")
buf.write("\2\2\u0947\u0946\3\2\2\2\u0947\u0948\3\2\2\2\u0948\u0949")
buf.write("\3\2\2\2\u0949\u094b\7\21\2\2\u094a\u094c\7\u010d\2\2")
buf.write("\u094b\u094a\3\2\2\2\u094b\u094c\3\2\2\2\u094c\u094e\3")
buf.write("\2\2\2\u094d\u092f\3\2\2\2\u094d\u0933\3\2\2\2\u094d\u0937")
buf.write("\3\2\2\2\u094d\u093b\3\2\2\2\u094d\u0941\3\2\2\2\u094e")
buf.write("\u0095\3\2\2\2\u094f\u0950\5\u00b2Z\2\u0950\u0951\7\u0101")
buf.write("\2\2\u0951\u0952\5^\60\2\u0952\u0097\3\2\2\2\u0953\u0954")
buf.write("\7[\2\2\u0954\u0958\t\31\2\2\u0955\u0956\7\u00e7\2\2\u0956")
buf.write("\u0958\t\32\2\2\u0957\u0953\3\2\2\2\u0957\u0955\3\2\2")
buf.write("\2\u0958\u0099\3\2\2\2\u0959\u095a\7v\2\2\u095a\u095b")
buf.write("\7|\2\2\u095b\u095f\5\u009cO\2\u095c\u095d\7\u00b7\2\2")
buf.write("\u095d\u095f\t\33\2\2\u095e\u0959\3\2\2\2\u095e\u095c")
buf.write("\3\2\2\2\u095f\u009b\3\2\2\2\u0960\u0961\7\u00b7\2\2\u0961")
buf.write("\u0968\7\u00ea\2\2\u0962\u0963\7\u00b7\2\2\u0963\u0968")
buf.write("\7,\2\2\u0964\u0965\7\u00bb\2\2\u0965\u0968\7\u00b7\2")
buf.write("\2\u0966\u0968\7\u00cf\2\2\u0967\u0960\3\2\2\2\u0967\u0962")
buf.write("\3\2\2\2\u0967\u0964\3\2\2\2\u0967\u0966\3\2\2\2\u0968")
buf.write("\u009d\3\2\2\2\u0969\u096f\5^\60\2\u096a\u096b\5\u00b2")
buf.write("Z\2\u096b\u096c\7\22\2\2\u096c\u096d\5^\60\2\u096d\u096f")
buf.write("\3\2\2\2\u096e\u0969\3\2\2\2\u096e\u096a\3\2\2\2\u096f")
buf.write("\u009f\3\2\2\2\u0970\u0971\5\u00b2Z\2\u0971\u0972\7\3")
buf.write("\2\2\u0972\u0973\5\u00b2Z\2\u0973\u0976\3\2\2\2\u0974")
buf.write("\u0976\5\u00b2Z\2\u0975\u0970\3\2\2\2\u0975\u0974\3\2")
buf.write("\2\2\u0976\u00a1\3\2\2\2\u0977\u097c\5\u00a0Q\2\u0978")
buf.write("\u0979\7\6\2\2\u0979\u097b\5\u00a0Q\2\u097a\u0978\3\2")
buf.write("\2\2\u097b\u097e\3\2\2\2\u097c\u097a\3\2\2\2\u097c\u097d")
buf.write("\3\2\2\2\u097d\u00a3\3\2\2\2\u097e\u097c\3\2\2\2\u097f")
buf.write("\u0980\t\34\2\2\u0980\u00a5\3\2\2\2\u0981\u0986\5\u00b2")
buf.write("Z\2\u0982\u0983\7\3\2\2\u0983\u0985\5\u00b2Z\2\u0984\u0982")
buf.write("\3\2\2\2\u0985\u0988\3\2\2\2\u0986\u0984\3\2\2\2\u0986")
buf.write("\u0987\3\2\2\2\u0987\u00a7\3\2\2\2\u0988\u0986\3\2\2\2")
buf.write("\u0989\u098a\7Z\2\2\u098a\u098b\5\u00aaV\2\u098b\u098c")
buf.write("\7\34\2\2\u098c\u098d\7\u009d\2\2\u098d\u098e\5d\63\2")
buf.write("\u098e\u00a9\3\2\2\2\u098f\u0990\t\35\2\2\u0990\u00ab")
buf.write("\3\2\2\2\u0991\u0995\5\u00aeX\2\u0992\u0995\7:\2\2\u0993")
buf.write("\u0995\7\66\2\2\u0994\u0991\3\2\2\2\u0994\u0992\3\2\2")
buf.write("\2\u0994\u0993\3\2\2\2\u0995\u00ad\3\2\2\2\u0996\u099c")
buf.write("\5\u00b2Z\2\u0997\u0998\7\u00f0\2\2\u0998\u099c\5\u00b2")
buf.write("Z\2\u0999\u099a\7\u00c2\2\2\u099a\u099c\5\u00b2Z\2\u099b")
buf.write("\u0996\3\2\2\2\u099b\u0997\3\2\2\2\u099b\u0999\3\2\2\2")
buf.write("\u099c\u00af\3\2\2\2\u099d\u09a2\5\u00b2Z\2\u099e\u099f")
buf.write("\7\6\2\2\u099f\u09a1\5\u00b2Z\2\u09a0\u099e\3\2\2\2\u09a1")
buf.write("\u09a4\3\2\2\2\u09a2\u09a0\3\2\2\2\u09a2\u09a3\3\2\2\2")
buf.write("\u09a3\u00b1\3\2\2\2\u09a4\u09a2\3\2\2\2\u09a5\u09ab\7")
buf.write("\u0114\2\2\u09a6\u09ab\7\u0116\2\2\u09a7\u09ab\5\u00b6")
buf.write("\\\2\u09a8\u09ab\7\u0117\2\2\u09a9\u09ab\7\u0115\2\2\u09aa")
buf.write("\u09a5\3\2\2\2\u09aa\u09a6\3\2\2\2\u09aa\u09a7\3\2\2\2")
buf.write("\u09aa\u09a8\3\2\2\2\u09aa\u09a9\3\2\2\2\u09ab\u00b3\3")
buf.write("\2\2\2\u09ac\u09ae\7\u0108\2\2\u09ad\u09ac\3\2\2\2\u09ad")
buf.write("\u09ae\3\2\2\2\u09ae\u09af\3\2\2\2\u09af\u09b9\7\u0112")
buf.write("\2\2\u09b0\u09b2\7\u0108\2\2\u09b1\u09b0\3\2\2\2\u09b1")
buf.write("\u09b2\3\2\2\2\u09b2\u09b3\3\2\2\2\u09b3\u09b9\7\u0113")
buf.write("\2\2\u09b4\u09b6\7\u0108\2\2\u09b5\u09b4\3\2\2\2\u09b5")
buf.write("\u09b6\3\2\2\2\u09b6\u09b7\3\2\2\2\u09b7\u09b9\7\u0111")
buf.write("\2\2\u09b8\u09ad\3\2\2\2\u09b8\u09b1\3\2\2\2\u09b8\u09b5")
buf.write("\3\2\2\2\u09b9\u00b5\3\2\2\2\u09ba\u09bb\t\36\2\2\u09bb")
buf.write("\u00b7\3\2\2\2\u0148\u00d4\u00d9\u00dd\u00e3\u00e7\u00fc")
buf.write("\u0100\u0104\u0108\u0110\u0114\u0117\u011e\u0127\u012d")
buf.write("\u0131\u0137\u013e\u0147\u0153\u015c\u0162\u016d\u0175")
buf.write("\u017d\u0184\u018e\u0195\u01b2\u01b5\u01b8\u01bc\u01c2")
buf.write("\u01c7\u01ce\u01d3\u01d7\u01df\u01e5\u01e9\u01f7\u01ff")
buf.write("\u020a\u0223\u0226\u0230\u0234\u023b\u0245\u024b\u0250")
buf.write("\u0254\u025a\u0263\u0269\u026d\u0274\u0278\u0280\u0285")
buf.write("\u0289\u0291\u0299\u029e\u02a2\u02ac\u02b3\u02b8\u02bc")
buf.write("\u02c6\u02c9\u02d2\u02d7\u02dd\u02f5\u02fb\u02fd\u0303")
buf.write("\u0309\u030b\u0313\u0315\u031b\u0321\u0323\u0332\u0337")
buf.write("\u033e\u034a\u034c\u0354\u0356\u0368\u036b\u036f\u0373")
buf.write("\u0385\u0388\u0398\u03a2\u03a7\u03ad\u03b0\u03b9\u03bb")
buf.write("\u03be\u03c4\u03cb\u03d0\u03d6\u03da\u03de\u03e4\u03ef")
buf.write("\u03fe\u0401\u0406\u0408\u040f\u0415\u0417\u041b\u0425")
buf.write("\u042b\u042e\u0430\u043c\u0443\u0447\u044b\u044f\u0456")
buf.write("\u045f\u0462\u0466\u046b\u046f\u0477\u047a\u047d\u0484")
buf.write("\u048f\u0492\u049c\u049f\u04aa\u04af\u04b7\u04ba\u04be")
buf.write("\u04c7\u04d0\u04d3\u04dc\u04df\u04e2\u04e6\u04f1\u04f4")
buf.write("\u04fb\u04fe\u0511\u0515\u0519\u051d\u0521\u0525\u0527")
buf.write("\u0532\u0537\u0540\u0547\u054a\u0550\u055c\u055f\u0568")
buf.write("\u056b\u0573\u0576\u0579\u057e\u0581\u058d\u0590\u0598")
buf.write("\u059d\u05a1\u05a3\u05a5\u05b4\u05b6\u05c1\u05d6\u05e0")
buf.write("\u05eb\u05ef\u05f1\u05f9\u0600\u060d\u0613\u061e\u0625")
buf.write("\u0629\u0631\u0633\u0640\u0648\u0651\u0657\u065f\u0665")
buf.write("\u0669\u066e\u0673\u0679\u0687\u0689\u06a7\u06b2\u06ba")
buf.write("\u06bf\u06c4\u06d1\u06d7\u06de\u06e3\u06e6\u06e9\u06ee")
buf.write("\u06f5\u06f8\u0701\u0704\u0708\u070b\u070e\u071d\u0720")
buf.write("\u0733\u0737\u073f\u0743\u075c\u075f\u0768\u076e\u0774")
buf.write("\u077a\u0787\u0790\u07a6\u07a9\u07ac\u07b6\u07b8\u07c1")
buf.write("\u07c7\u07c9\u07d1\u07db\u07e1\u07ef\u07f8\u07ff\u0804")
buf.write("\u080b\u0815\u081a\u0821\u083b\u0840\u0842\u0849\u084d")
buf.write("\u0854\u0858\u0869\u0878\u087f\u0888\u0892\u0897\u08a0")
buf.write("\u08a5\u08ad\u08b5\u08b8\u08be\u08c1\u08c8\u08d0\u08d3")
buf.write("\u08db\u08de\u08f8\u0903\u0908\u090f\u0911\u091e\u092d")
buf.write("\u0931\u0935\u0939\u093f\u0943\u0947\u094b\u094d\u0957")
buf.write("\u095e\u0967\u096e\u0975\u097c\u0986\u0994\u099b\u09a2")
buf.write("\u09aa\u09ad\u09b1\u09b5\u09b8")
return buf.getvalue()
class SqlBaseParser ( Parser ):
grammarFileName = "SqlBase.g4"
atn = ATNDeserializer().deserialize(serializedATN())
decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ]
sharedContextCache = PredictionContextCache()
literalNames = [ "<INVALID>", "'.'", "'('", "')'", "','", "'SKIP'",
"'->'", "'['", "']'", "'|'", "'^'", "'$'", "'{-'",
"'-}'", "'{'", "'}'", "'=>'", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "'NFC'", "'NFD'",
"'NFKC'", "'NFKD'", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "'='", "<INVALID>", "'<'",
"'<='", "'>'", "'>='", "'+'", "'-'", "'*'", "'/'",
"'%'", "'||'", "'?'" ]
symbolicNames = [ "<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "ADD", "ADMIN", "AFTER", "ALL", "ALTER",
"ANALYZE", "AND", "ANY", "ARRAY", "AS", "ASC", "AT",
"AUTHORIZATION", "BERNOULLI", "BETWEEN", "BY", "CALL",
"CASCADE", "CASE", "CAST", "CATALOGS", "COLUMN", "COLUMNS",
"COMMENT", "COMMIT", "COMMITTED", "CONSTRAINT", "COUNT",
"CREATE", "CROSS", "CUBE", "CURRENT", "CURRENT_CATALOG",
"CURRENT_DATE", "CURRENT_PATH", "CURRENT_ROLE", "CURRENT_SCHEMA",
"CURRENT_TIME", "CURRENT_TIMESTAMP", "CURRENT_USER",
"DATA", "DATE", "DAY", "DEALLOCATE", "DEFINER", "DELETE",
"DENY", "DESC", "DESCRIBE", "DEFINE", "DISTINCT",
"DISTRIBUTED", "DOUBLE", "DROP", "ELSE", "EMPTY",
"END", "ERROR", "ESCAPE", "EXCEPT", "EXCLUDING", "EXECUTE",
"EXISTS", "EXPLAIN", "EXTRACT", "FALSE", "FETCH",
"FILTER", "FINAL", "FIRST", "FOLLOWING", "FOR", "FORMAT",
"FROM", "FULL", "FUNCTIONS", "GRANT", "GRANTED", "GRANTS",
"GRAPHVIZ", "GROUP", "GROUPING", "GROUPS", "HAVING",
"HOUR", "IF", "IGNORE", "IN", "INCLUDING", "INITIAL",
"INNER", "INPUT", "INSERT", "INTERSECT", "INTERVAL",
"INTO", "INVOKER", "IO", "IS", "ISOLATION", "JOIN",
"JSON", "LAST", "LATERAL", "LEFT", "LEVEL", "LIKE",
"LIMIT", "LISTAGG", "LOCAL", "LOCALTIME", "LOCALTIMESTAMP",
"LOGICAL", "MAP", "MATCH", "MATCHED", "MATCHES", "MATCH_RECOGNIZE",
"MATERIALIZED", "MEASURES", "MERGE", "MINUTE", "MONTH",
"NATURAL", "NEXT", "NFC", "NFD", "NFKC", "NFKD", "NO",
"NONE", "NORMALIZE", "NOT", "NULL", "NULLIF", "NULLS",
"OFFSET", "OMIT", "OF", "ON", "ONE", "ONLY", "OPTION",
"OR", "ORDER", "ORDINALITY", "OUTER", "OUTPUT", "OVER",
"OVERFLOW", "PARTITION", "PARTITIONS", "PAST", "PATH",
"PATTERN", "PER", "PERMUTE", "POSITION", "PRECEDING",
"PRECISION", "PREPARE", "PRIVILEGES", "PROPERTIES",
"RANGE", "READ", "RECURSIVE", "REFRESH", "RENAME",
"REPEATABLE", "REPLACE", "RESET", "RESPECT", "RESTRICT",
"REVOKE", "RIGHT", "ROLE", "ROLES", "ROLLBACK", "ROLLUP",
"ROW", "ROWS", "RUNNING", "SCHEMA", "SCHEMAS", "SECOND",
"SECURITY", "SEEK", "SELECT", "SERIALIZABLE", "SESSION",
"SET", "SETS", "SHOW", "SOME", "START", "STATS", "SUBSET",
"SUBSTRING", "SYSTEM", "TABLE", "TABLES", "TABLESAMPLE",
"TEXT", "THEN", "TIES", "TIME", "TIMESTAMP", "TO",
"TRANSACTION", "TRUE", "TRUNCATE", "TRY_CAST", "TYPE",
"UESCAPE", "UNBOUNDED", "UNCOMMITTED", "UNION", "UNMATCHED",
"UNNEST", "UPDATE", "USE", "USER", "USING", "VALIDATE",
"VALUES", "VERBOSE", "VERSION", "VIEW", "WHEN", "WHERE",
"WINDOW", "WITH", "WITHIN", "WITHOUT", "WORK", "WRITE",
"YEAR", "ZONE", "EQ", "NEQ", "LT", "LTE", "GT", "GTE",
"PLUS", "MINUS", "ASTERISK", "SLASH", "PERCENT", "CONCAT",
"QUESTION_MARK", "STRING", "UNICODE_STRING", "BINARY_LITERAL",
"INTEGER_VALUE", "DECIMAL_VALUE", "DOUBLE_VALUE",
"IDENTIFIER", "DIGIT_IDENTIFIER", "QUOTED_IDENTIFIER",
"BACKQUOTED_IDENTIFIER", "SIMPLE_COMMENT", "BRACKETED_COMMENT",
"WS", "UNRECOGNIZED", "DELIMITER" ]
RULE_singleStatement = 0
RULE_standaloneExpression = 1
RULE_standalonePathSpecification = 2
RULE_standaloneType = 3
RULE_standaloneRowPattern = 4
RULE_statement = 5
RULE_query = 6
RULE_with_ = 7
RULE_tableElement = 8
RULE_columnDefinition = 9
RULE_likeClause = 10
RULE_properties = 11
RULE_propertyAssignments = 12
RULE_property_ = 13
RULE_queryNoWith = 14
RULE_limitRowCount = 15
RULE_rowCount = 16
RULE_queryTerm = 17
RULE_queryPrimary = 18
RULE_sortItem = 19
RULE_querySpecification = 20
RULE_groupBy = 21
RULE_groupingElement = 22
RULE_groupingSet = 23
RULE_windowDefinition = 24
RULE_windowSpecification = 25
RULE_namedQuery = 26
RULE_setQuantifier = 27
RULE_selectItem = 28
RULE_relation = 29
RULE_joinType = 30
RULE_joinCriteria = 31
RULE_sampledRelation = 32
RULE_sampleType = 33
RULE_listAggOverflowBehavior = 34
RULE_listaggCountIndication = 35
RULE_patternRecognition = 36
RULE_measureDefinition = 37
RULE_rowsPerMatch = 38
RULE_emptyMatchHandling = 39
RULE_skipTo = 40
RULE_subsetDefinition = 41
RULE_variableDefinition = 42
RULE_aliasedRelation = 43
RULE_columnAliases = 44
RULE_relationPrimary = 45
RULE_expression = 46
RULE_booleanExpression = 47
RULE_predicate = 48
RULE_valueExpression = 49
RULE_primaryExpression = 50
RULE_processingMode = 51
RULE_nullTreatment = 52
RULE_string = 53
RULE_timeZoneSpecifier = 54
RULE_comparisonOperator = 55
RULE_comparisonQuantifier = 56
RULE_booleanValue = 57
RULE_interval = 58
RULE_intervalField = 59
RULE_normalForm = 60
RULE_type_ = 61
RULE_rowField = 62
RULE_typeParameter = 63
RULE_whenClause = 64
RULE_filter_ = 65
RULE_mergeCase = 66
RULE_over = 67
RULE_windowFrame = 68
RULE_frameExtent = 69
RULE_frameBound = 70
RULE_rowPattern = 71
RULE_patternPrimary = 72
RULE_patternQuantifier = 73
RULE_updateAssignment = 74
RULE_explainOption = 75
RULE_transactionMode = 76
RULE_levelOfIsolation = 77
RULE_callArgument = 78
RULE_pathElement = 79
RULE_pathSpecification = 80
RULE_privilege = 81
RULE_qualifiedName = 82
RULE_queryPeriod = 83
RULE_rangeType = 84
RULE_grantor = 85
RULE_principal = 86
RULE_roles = 87
RULE_identifier = 88
RULE_number = 89
RULE_nonReserved = 90
ruleNames = [ "singleStatement", "standaloneExpression", "standalonePathSpecification",
"standaloneType", "standaloneRowPattern", "statement",
"query", "with_", "tableElement", "columnDefinition",
"likeClause", "properties", "propertyAssignments", "property_",
"queryNoWith", "limitRowCount", "rowCount", "queryTerm",
"queryPrimary", "sortItem", "querySpecification", "groupBy",
"groupingElement", "groupingSet", "windowDefinition",
"windowSpecification", "namedQuery", "setQuantifier",
"selectItem", "relation", "joinType", "joinCriteria",
"sampledRelation", "sampleType", "listAggOverflowBehavior",
"listaggCountIndication", "patternRecognition", "measureDefinition",
"rowsPerMatch", "emptyMatchHandling", "skipTo", "subsetDefinition",
"variableDefinition", "aliasedRelation", "columnAliases",
"relationPrimary", "expression", "booleanExpression",
"predicate", "valueExpression", "primaryExpression",
"processingMode", "nullTreatment", "string", "timeZoneSpecifier",
"comparisonOperator", "comparisonQuantifier", "booleanValue",
"interval", "intervalField", "normalForm", "type_", "rowField",
"typeParameter", "whenClause", "filter_", "mergeCase",
"over", "windowFrame", "frameExtent", "frameBound", "rowPattern",
"patternPrimary", "patternQuantifier", "updateAssignment",
"explainOption", "transactionMode", "levelOfIsolation",
"callArgument", "pathElement", "pathSpecification", "privilege",
"qualifiedName", "queryPeriod", "rangeType", "grantor",
"principal", "roles", "identifier", "number", "nonReserved" ]
EOF = Token.EOF
T__0=1
T__1=2
T__2=3
T__3=4
T__4=5
T__5=6
T__6=7
T__7=8
T__8=9
T__9=10
T__10=11
T__11=12
T__12=13
T__13=14
T__14=15
T__15=16
ADD=17
ADMIN=18
AFTER=19
ALL=20
ALTER=21
ANALYZE=22
AND=23
ANY=24
ARRAY=25
AS=26
ASC=27
AT=28
AUTHORIZATION=29
BERNOULLI=30
BETWEEN=31
BY=32
CALL=33
CASCADE=34
CASE=35
CAST=36
CATALOGS=37
COLUMN=38
COLUMNS=39
COMMENT=40
COMMIT=41
COMMITTED=42
CONSTRAINT=43
COUNT=44
CREATE=45
CROSS=46
CUBE=47
CURRENT=48
CURRENT_CATALOG=49
CURRENT_DATE=50
CURRENT_PATH=51
CURRENT_ROLE=52
CURRENT_SCHEMA=53
CURRENT_TIME=54
CURRENT_TIMESTAMP=55
CURRENT_USER=56
DATA=57
DATE=58
DAY=59
DEALLOCATE=60
DEFINER=61
DELETE=62
DENY=63
DESC=64
DESCRIBE=65
DEFINE=66
DISTINCT=67
DISTRIBUTED=68
DOUBLE=69
DROP=70
ELSE=71
EMPTY=72
END=73
ERROR=74
ESCAPE=75
EXCEPT=76
EXCLUDING=77
EXECUTE=78
EXISTS=79
EXPLAIN=80
EXTRACT=81
FALSE=82
FETCH=83
FILTER=84
FINAL=85
FIRST=86
FOLLOWING=87
FOR=88
FORMAT=89
FROM=90
FULL=91
FUNCTIONS=92
GRANT=93
GRANTED=94
GRANTS=95
GRAPHVIZ=96
GROUP=97
GROUPING=98
GROUPS=99
HAVING=100
HOUR=101
IF=102
IGNORE=103
IN=104
INCLUDING=105
INITIAL=106
INNER=107
INPUT=108
INSERT=109
INTERSECT=110
INTERVAL=111
INTO=112
INVOKER=113
IO=114
IS=115
ISOLATION=116
JOIN=117
JSON=118
LAST=119
LATERAL=120
LEFT=121
LEVEL=122
LIKE=123
LIMIT=124
LISTAGG=125
LOCAL=126
LOCALTIME=127
LOCALTIMESTAMP=128
LOGICAL=129
MAP=130
MATCH=131
MATCHED=132
MATCHES=133
MATCH_RECOGNIZE=134
MATERIALIZED=135
MEASURES=136
MERGE=137
MINUTE=138
MONTH=139
NATURAL=140
NEXT=141
NFC=142
NFD=143
NFKC=144
NFKD=145
NO=146
NONE=147
NORMALIZE=148
NOT=149
NULL=150
NULLIF=151
NULLS=152
OFFSET=153
OMIT=154
OF=155
ON=156
ONE=157
ONLY=158
OPTION=159
OR=160
ORDER=161
ORDINALITY=162
OUTER=163
OUTPUT=164
OVER=165
OVERFLOW=166
PARTITION=167
PARTITIONS=168
PAST=169
PATH=170
PATTERN=171
PER=172
PERMUTE=173
POSITION=174
PRECEDING=175
PRECISION=176
PREPARE=177
PRIVILEGES=178
PROPERTIES=179
RANGE=180
READ=181
RECURSIVE=182
REFRESH=183
RENAME=184
REPEATABLE=185
REPLACE=186
RESET=187
RESPECT=188
RESTRICT=189
REVOKE=190
RIGHT=191
ROLE=192
ROLES=193
ROLLBACK=194
ROLLUP=195
ROW=196
ROWS=197
RUNNING=198
SCHEMA=199
SCHEMAS=200
SECOND=201
SECURITY=202
SEEK=203
SELECT=204
SERIALIZABLE=205
SESSION=206
SET=207
SETS=208
SHOW=209
SOME=210
START=211
STATS=212
SUBSET=213
SUBSTRING=214
SYSTEM=215
TABLE=216
TABLES=217
TABLESAMPLE=218
TEXT=219
THEN=220
TIES=221
TIME=222
TIMESTAMP=223
TO=224
TRANSACTION=225
TRUE=226
TRUNCATE=227
TRY_CAST=228
TYPE=229
UESCAPE=230
UNBOUNDED=231
UNCOMMITTED=232
UNION=233
UNMATCHED=234
UNNEST=235
UPDATE=236
USE=237
USER=238
USING=239
VALIDATE=240
VALUES=241
VERBOSE=242
VERSION=243
VIEW=244
WHEN=245
WHERE=246
WINDOW=247
WITH=248
WITHIN=249
WITHOUT=250
WORK=251
WRITE=252
YEAR=253
ZONE=254
EQ=255
NEQ=256
LT=257
LTE=258
GT=259
GTE=260
PLUS=261
MINUS=262
ASTERISK=263
SLASH=264
PERCENT=265
CONCAT=266
QUESTION_MARK=267
STRING=268
UNICODE_STRING=269
BINARY_LITERAL=270
INTEGER_VALUE=271
DECIMAL_VALUE=272
DOUBLE_VALUE=273
IDENTIFIER=274
DIGIT_IDENTIFIER=275
QUOTED_IDENTIFIER=276
BACKQUOTED_IDENTIFIER=277
SIMPLE_COMMENT=278
BRACKETED_COMMENT=279
WS=280
UNRECOGNIZED=281
DELIMITER=282
    def __init__(self, input:TokenStream, output:TextIO = sys.stdout):
        """Create the parser over *input*, wiring the shared ATN into a simulator.

        The ATN, DFA cache and prediction-context cache are class-level and
        shared by all parser instances; only the simulator is per-instance.
        """
        super().__init__(input, output)
        # Generated against ANTLR runtime 4.9.2; aborts on incompatible runtimes.
        self.checkVersion("4.9.2")
        self._interp = ParserATNSimulator(self, self.atn, self.decisionsToDFA, self.sharedContextCache)
        self._predicates = None
    class SingleStatementContext(ParserRuleContext):
        """Parse-tree node for the ``singleStatement`` rule: ``statement EOF``."""
        __slots__ = 'parser'

        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser

        def statement(self):
            # The single child `statement` sub-rule context.
            return self.getTypedRuleContext(SqlBaseParser.StatementContext,0)

        def EOF(self):
            return self.getToken(SqlBaseParser.EOF, 0)

        def getRuleIndex(self):
            return SqlBaseParser.RULE_singleStatement

        def enterRule(self, listener:ParseTreeListener):
            # hasattr guard lets partial listeners omit this callback.
            if hasattr( listener, "enterSingleStatement" ):
                listener.enterSingleStatement(self)

        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitSingleStatement" ):
                listener.exitSingleStatement(self)

        def accept(self, visitor:ParseTreeVisitor):
            # Dispatch to a rule-specific visit method when present, else generic.
            if hasattr( visitor, "visitSingleStatement" ):
                return visitor.visitSingleStatement(self)
            else:
                return visitor.visitChildren(self)
    def singleStatement(self):
        """Parse rule 0, ``singleStatement``: ``statement EOF``.

        Returns the populated :class:`SingleStatementContext`; on a syntax
        error the exception is recorded on the context and recovery is
        delegated to the error handler.
        """
        localctx = SqlBaseParser.SingleStatementContext(self, self._ctx, self.state)
        self.enterRule(localctx, 0, self.RULE_singleStatement)
        try:
            self.enterOuterAlt(localctx, 1)
            # The `self.state = N` values are ATN state numbers fixed by the
            # serialized ATN above; do not edit by hand.
            self.state = 182
            self.statement()
            self.state = 183
            self.match(SqlBaseParser.EOF)
        except RecognitionException as re:
            # Standard ANTLR recovery: record, report, resynchronize.
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class StandaloneExpressionContext(ParserRuleContext):
        """Parse-tree node for the ``standaloneExpression`` rule: ``expression EOF``."""
        __slots__ = 'parser'

        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser

        def expression(self):
            # The single child `expression` sub-rule context.
            return self.getTypedRuleContext(SqlBaseParser.ExpressionContext,0)

        def EOF(self):
            return self.getToken(SqlBaseParser.EOF, 0)

        def getRuleIndex(self):
            return SqlBaseParser.RULE_standaloneExpression

        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterStandaloneExpression" ):
                listener.enterStandaloneExpression(self)

        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitStandaloneExpression" ):
                listener.exitStandaloneExpression(self)

        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitStandaloneExpression" ):
                return visitor.visitStandaloneExpression(self)
            else:
                return visitor.visitChildren(self)
    def standaloneExpression(self):
        """Parse rule 1, ``standaloneExpression``: ``expression EOF``.

        Entry point for parsing a lone SQL expression (the trailing EOF
        rejects trailing garbage).
        """
        localctx = SqlBaseParser.StandaloneExpressionContext(self, self._ctx, self.state)
        self.enterRule(localctx, 2, self.RULE_standaloneExpression)
        try:
            self.enterOuterAlt(localctx, 1)
            # ATN state numbers fixed by the serialized ATN; do not edit.
            self.state = 185
            self.expression()
            self.state = 186
            self.match(SqlBaseParser.EOF)
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class StandalonePathSpecificationContext(ParserRuleContext):
        """Parse-tree node for ``standalonePathSpecification``: ``pathSpecification EOF``."""
        __slots__ = 'parser'

        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser

        def pathSpecification(self):
            return self.getTypedRuleContext(SqlBaseParser.PathSpecificationContext,0)

        def EOF(self):
            return self.getToken(SqlBaseParser.EOF, 0)

        def getRuleIndex(self):
            return SqlBaseParser.RULE_standalonePathSpecification

        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterStandalonePathSpecification" ):
                listener.enterStandalonePathSpecification(self)

        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitStandalonePathSpecification" ):
                listener.exitStandalonePathSpecification(self)

        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitStandalonePathSpecification" ):
                return visitor.visitStandalonePathSpecification(self)
            else:
                return visitor.visitChildren(self)
    def standalonePathSpecification(self):
        """Parse rule 2, ``standalonePathSpecification``: ``pathSpecification EOF``."""
        localctx = SqlBaseParser.StandalonePathSpecificationContext(self, self._ctx, self.state)
        self.enterRule(localctx, 4, self.RULE_standalonePathSpecification)
        try:
            self.enterOuterAlt(localctx, 1)
            # ATN state numbers fixed by the serialized ATN; do not edit.
            self.state = 188
            self.pathSpecification()
            self.state = 189
            self.match(SqlBaseParser.EOF)
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class StandaloneTypeContext(ParserRuleContext):
        """Parse-tree node for the ``standaloneType`` rule: ``type_ EOF``."""
        __slots__ = 'parser'

        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser

        def type_(self):
            # Trailing underscore avoids clashing with the builtin `type`.
            return self.getTypedRuleContext(SqlBaseParser.Type_Context,0)

        def EOF(self):
            return self.getToken(SqlBaseParser.EOF, 0)

        def getRuleIndex(self):
            return SqlBaseParser.RULE_standaloneType

        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterStandaloneType" ):
                listener.enterStandaloneType(self)

        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitStandaloneType" ):
                listener.exitStandaloneType(self)

        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitStandaloneType" ):
                return visitor.visitStandaloneType(self)
            else:
                return visitor.visitChildren(self)
    def standaloneType(self):
        """Parse rule 3, ``standaloneType``: ``type_ EOF``.

        ``type_`` is left-recursive, hence the explicit precedence
        argument ``0`` in the call below.
        """
        localctx = SqlBaseParser.StandaloneTypeContext(self, self._ctx, self.state)
        self.enterRule(localctx, 6, self.RULE_standaloneType)
        try:
            self.enterOuterAlt(localctx, 1)
            # ATN state numbers fixed by the serialized ATN; do not edit.
            self.state = 191
            self.type_(0)
            self.state = 192
            self.match(SqlBaseParser.EOF)
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class StandaloneRowPatternContext(ParserRuleContext):
        """Parse-tree node for the ``standaloneRowPattern`` rule: ``rowPattern EOF``."""
        __slots__ = 'parser'

        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser

        def rowPattern(self):
            return self.getTypedRuleContext(SqlBaseParser.RowPatternContext,0)

        def EOF(self):
            return self.getToken(SqlBaseParser.EOF, 0)

        def getRuleIndex(self):
            return SqlBaseParser.RULE_standaloneRowPattern

        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterStandaloneRowPattern" ):
                listener.enterStandaloneRowPattern(self)

        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitStandaloneRowPattern" ):
                listener.exitStandaloneRowPattern(self)

        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitStandaloneRowPattern" ):
                return visitor.visitStandaloneRowPattern(self)
            else:
                return visitor.visitChildren(self)
    def standaloneRowPattern(self):
        """Parse rule 4, ``standaloneRowPattern``: ``rowPattern EOF``.

        ``rowPattern`` is left-recursive, hence the explicit precedence
        argument ``0`` in the call below.
        """
        localctx = SqlBaseParser.StandaloneRowPatternContext(self, self._ctx, self.state)
        self.enterRule(localctx, 8, self.RULE_standaloneRowPattern)
        try:
            self.enterOuterAlt(localctx, 1)
            # ATN state numbers fixed by the serialized ATN; do not edit.
            self.state = 194
            self.rowPattern(0)
            self.state = 195
            self.match(SqlBaseParser.EOF)
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class StatementContext(ParserRuleContext):
        """Base context for the ``statement`` rule.

        Each labeled alternative of the grammar rule (Explain, Prepare,
        Use, ...) subclasses this and rebuilds itself via ``copyFrom``.
        """
        __slots__ = 'parser'

        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser

        def getRuleIndex(self):
            return SqlBaseParser.RULE_statement

        def copyFrom(self, ctx:ParserRuleContext):
            super().copyFrom(ctx)
    class ExplainContext(StatementContext):
        """``statement`` alternative for ``EXPLAIN [( option, ... )] statement``."""
        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.StatementContext
            super().__init__(parser)
            self.copyFrom(ctx)

        def EXPLAIN(self):
            return self.getToken(SqlBaseParser.EXPLAIN, 0)

        def statement(self):
            # The statement being explained.
            return self.getTypedRuleContext(SqlBaseParser.StatementContext,0)

        def explainOption(self, i:int=None):
            # With i=None returns all option contexts; otherwise the i-th one.
            if i is None:
                return self.getTypedRuleContexts(SqlBaseParser.ExplainOptionContext)
            else:
                return self.getTypedRuleContext(SqlBaseParser.ExplainOptionContext,i)

        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterExplain" ):
                listener.enterExplain(self)

        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitExplain" ):
                listener.exitExplain(self)

        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitExplain" ):
                return visitor.visitExplain(self)
            else:
                return visitor.visitChildren(self)
    class PrepareContext(StatementContext):
        """``statement`` alternative for ``PREPARE identifier FROM statement``."""
        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.StatementContext
            super().__init__(parser)
            self.copyFrom(ctx)

        def PREPARE(self):
            return self.getToken(SqlBaseParser.PREPARE, 0)

        def identifier(self):
            # Name under which the statement is prepared.
            return self.getTypedRuleContext(SqlBaseParser.IdentifierContext,0)

        def FROM(self):
            return self.getToken(SqlBaseParser.FROM, 0)

        def statement(self):
            return self.getTypedRuleContext(SqlBaseParser.StatementContext,0)

        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterPrepare" ):
                listener.enterPrepare(self)

        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitPrepare" ):
                listener.exitPrepare(self)

        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitPrepare" ):
                return visitor.visitPrepare(self)
            else:
                return visitor.visitChildren(self)
    class DropMaterializedViewContext(StatementContext):
        """``statement`` alternative for ``DROP MATERIALIZED VIEW [IF EXISTS] qualifiedName``."""
        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.StatementContext
            super().__init__(parser)
            self.copyFrom(ctx)

        def DROP(self):
            return self.getToken(SqlBaseParser.DROP, 0)

        def MATERIALIZED(self):
            return self.getToken(SqlBaseParser.MATERIALIZED, 0)

        def VIEW(self):
            return self.getToken(SqlBaseParser.VIEW, 0)

        def qualifiedName(self):
            # The (possibly catalog/schema-qualified) view name.
            return self.getTypedRuleContext(SqlBaseParser.QualifiedNameContext,0)

        def IF(self):
            # IF/EXISTS return None when the optional clause is absent.
            return self.getToken(SqlBaseParser.IF, 0)

        def EXISTS(self):
            return self.getToken(SqlBaseParser.EXISTS, 0)

        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterDropMaterializedView" ):
                listener.enterDropMaterializedView(self)

        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitDropMaterializedView" ):
                listener.exitDropMaterializedView(self)

        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitDropMaterializedView" ):
                return visitor.visitDropMaterializedView(self)
            else:
                return visitor.visitChildren(self)
    class UseContext(StatementContext):
        """Parse-tree context for `USE <schema>` / `USE <catalog>.<schema>` (ANTLR-generated).

        The labeled alternatives fill `self.schema` and optionally `self.catalog`.
        """
        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.StatementContext
            super().__init__(parser)
            self.schema = None # IdentifierContext
            self.catalog = None # IdentifierContext
            self.copyFrom(ctx)
        def USE(self):
            return self.getToken(SqlBaseParser.USE, 0)
        def identifier(self, i:int=None):
            if i is None:
                return self.getTypedRuleContexts(SqlBaseParser.IdentifierContext)
            else:
                return self.getTypedRuleContext(SqlBaseParser.IdentifierContext,i)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterUse" ):
                listener.enterUse(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitUse" ):
                listener.exitUse(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitUse" ):
                return visitor.visitUse(self)
            else:
                return visitor.visitChildren(self)
    class DeallocateContext(StatementContext):
        """Parse-tree context for `DEALLOCATE PREPARE <identifier>` (ANTLR-generated)."""
        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.StatementContext
            super().__init__(parser)
            self.copyFrom(ctx)
        def DEALLOCATE(self):
            return self.getToken(SqlBaseParser.DEALLOCATE, 0)
        def PREPARE(self):
            return self.getToken(SqlBaseParser.PREPARE, 0)
        def identifier(self):
            return self.getTypedRuleContext(SqlBaseParser.IdentifierContext,0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterDeallocate" ):
                listener.enterDeallocate(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitDeallocate" ):
                listener.exitDeallocate(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitDeallocate" ):
                return visitor.visitDeallocate(self)
            else:
                return visitor.visitChildren(self)
    class RenameTableContext(StatementContext):
        """Parse-tree context for `ALTER TABLE [IF EXISTS] <from_> RENAME TO <to>` (ANTLR-generated).

        `self.from_` and `self.to` hold the source and target qualified names.
        """
        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.StatementContext
            super().__init__(parser)
            self.from_ = None # QualifiedNameContext
            self.to = None # QualifiedNameContext
            self.copyFrom(ctx)
        def ALTER(self):
            return self.getToken(SqlBaseParser.ALTER, 0)
        def TABLE(self):
            return self.getToken(SqlBaseParser.TABLE, 0)
        def RENAME(self):
            return self.getToken(SqlBaseParser.RENAME, 0)
        def TO(self):
            return self.getToken(SqlBaseParser.TO, 0)
        def qualifiedName(self, i:int=None):
            if i is None:
                return self.getTypedRuleContexts(SqlBaseParser.QualifiedNameContext)
            else:
                return self.getTypedRuleContext(SqlBaseParser.QualifiedNameContext,i)
        def IF(self):
            return self.getToken(SqlBaseParser.IF, 0)
        def EXISTS(self):
            return self.getToken(SqlBaseParser.EXISTS, 0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterRenameTable" ):
                listener.enterRenameTable(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitRenameTable" ):
                listener.exitRenameTable(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitRenameTable" ):
                return visitor.visitRenameTable(self)
            else:
                return visitor.visitChildren(self)
    class CommitContext(StatementContext):
        """Parse-tree context for `COMMIT [WORK]` (ANTLR-generated)."""
        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.StatementContext
            super().__init__(parser)
            self.copyFrom(ctx)
        def COMMIT(self):
            return self.getToken(SqlBaseParser.COMMIT, 0)
        def WORK(self):
            return self.getToken(SqlBaseParser.WORK, 0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterCommit" ):
                listener.enterCommit(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitCommit" ):
                listener.exitCommit(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitCommit" ):
                return visitor.visitCommit(self)
            else:
                return visitor.visitChildren(self)
    class CreateRoleContext(StatementContext):
        """Parse-tree context for `CREATE ROLE <name> [WITH ADMIN <grantor>] [IN <catalog>]` (ANTLR-generated)."""
        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.StatementContext
            super().__init__(parser)
            self.name = None # IdentifierContext
            self.catalog = None # IdentifierContext
            self.copyFrom(ctx)
        def CREATE(self):
            return self.getToken(SqlBaseParser.CREATE, 0)
        def ROLE(self):
            return self.getToken(SqlBaseParser.ROLE, 0)
        def identifier(self, i:int=None):
            if i is None:
                return self.getTypedRuleContexts(SqlBaseParser.IdentifierContext)
            else:
                return self.getTypedRuleContext(SqlBaseParser.IdentifierContext,i)
        def WITH(self):
            return self.getToken(SqlBaseParser.WITH, 0)
        def ADMIN(self):
            return self.getToken(SqlBaseParser.ADMIN, 0)
        def grantor(self):
            return self.getTypedRuleContext(SqlBaseParser.GrantorContext,0)
        def IN(self):
            return self.getToken(SqlBaseParser.IN, 0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterCreateRole" ):
                listener.enterCreateRole(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitCreateRole" ):
                listener.exitCreateRole(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitCreateRole" ):
                return visitor.visitCreateRole(self)
            else:
                return visitor.visitChildren(self)
    class DropColumnContext(StatementContext):
        """Parse-tree context for `ALTER TABLE [IF EXISTS] <tableName> DROP COLUMN [IF EXISTS] <column>` (ANTLR-generated).

        `IF`/`EXISTS` take an index because the keywords may appear twice
        (once for the table, once for the column).
        """
        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.StatementContext
            super().__init__(parser)
            self.tableName = None # QualifiedNameContext
            self.column = None # QualifiedNameContext
            self.copyFrom(ctx)
        def ALTER(self):
            return self.getToken(SqlBaseParser.ALTER, 0)
        def TABLE(self):
            return self.getToken(SqlBaseParser.TABLE, 0)
        def DROP(self):
            return self.getToken(SqlBaseParser.DROP, 0)
        def COLUMN(self):
            return self.getToken(SqlBaseParser.COLUMN, 0)
        def qualifiedName(self, i:int=None):
            if i is None:
                return self.getTypedRuleContexts(SqlBaseParser.QualifiedNameContext)
            else:
                return self.getTypedRuleContext(SqlBaseParser.QualifiedNameContext,i)
        def IF(self, i:int=None):
            if i is None:
                return self.getTokens(SqlBaseParser.IF)
            else:
                return self.getToken(SqlBaseParser.IF, i)
        def EXISTS(self, i:int=None):
            if i is None:
                return self.getTokens(SqlBaseParser.EXISTS)
            else:
                return self.getToken(SqlBaseParser.EXISTS, i)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterDropColumn" ):
                listener.enterDropColumn(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitDropColumn" ):
                listener.exitDropColumn(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitDropColumn" ):
                return visitor.visitDropColumn(self)
            else:
                return visitor.visitChildren(self)
    class DropViewContext(StatementContext):
        """Parse-tree context for `DROP VIEW [IF EXISTS] <qualifiedName>` (ANTLR-generated)."""
        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.StatementContext
            super().__init__(parser)
            self.copyFrom(ctx)
        def DROP(self):
            return self.getToken(SqlBaseParser.DROP, 0)
        def VIEW(self):
            return self.getToken(SqlBaseParser.VIEW, 0)
        def qualifiedName(self):
            return self.getTypedRuleContext(SqlBaseParser.QualifiedNameContext,0)
        def IF(self):
            return self.getToken(SqlBaseParser.IF, 0)
        def EXISTS(self):
            return self.getToken(SqlBaseParser.EXISTS, 0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterDropView" ):
                listener.enterDropView(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitDropView" ):
                listener.exitDropView(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitDropView" ):
                return visitor.visitDropView(self)
            else:
                return visitor.visitChildren(self)
    class ShowTablesContext(StatementContext):
        """Parse-tree context for `SHOW TABLES [FROM|IN <qualifiedName>] [LIKE <pattern> [ESCAPE <escape>]]` (ANTLR-generated)."""
        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.StatementContext
            super().__init__(parser)
            self.pattern = None # StringContext
            self.escape = None # StringContext
            self.copyFrom(ctx)
        def SHOW(self):
            return self.getToken(SqlBaseParser.SHOW, 0)
        def TABLES(self):
            return self.getToken(SqlBaseParser.TABLES, 0)
        def qualifiedName(self):
            return self.getTypedRuleContext(SqlBaseParser.QualifiedNameContext,0)
        def LIKE(self):
            return self.getToken(SqlBaseParser.LIKE, 0)
        def FROM(self):
            return self.getToken(SqlBaseParser.FROM, 0)
        def IN(self):
            return self.getToken(SqlBaseParser.IN, 0)
        def string(self, i:int=None):
            if i is None:
                return self.getTypedRuleContexts(SqlBaseParser.StringContext)
            else:
                return self.getTypedRuleContext(SqlBaseParser.StringContext,i)
        def ESCAPE(self):
            return self.getToken(SqlBaseParser.ESCAPE, 0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterShowTables" ):
                listener.enterShowTables(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitShowTables" ):
                listener.exitShowTables(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitShowTables" ):
                return visitor.visitShowTables(self)
            else:
                return visitor.visitChildren(self)
    class SetViewAuthorizationContext(StatementContext):
        """Parse-tree context for `ALTER VIEW <from_> SET AUTHORIZATION <principal>` (ANTLR-generated)."""
        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.StatementContext
            super().__init__(parser)
            self.from_ = None # QualifiedNameContext
            self.copyFrom(ctx)
        def ALTER(self):
            return self.getToken(SqlBaseParser.ALTER, 0)
        def VIEW(self):
            return self.getToken(SqlBaseParser.VIEW, 0)
        def SET(self):
            return self.getToken(SqlBaseParser.SET, 0)
        def AUTHORIZATION(self):
            return self.getToken(SqlBaseParser.AUTHORIZATION, 0)
        def principal(self):
            return self.getTypedRuleContext(SqlBaseParser.PrincipalContext,0)
        def qualifiedName(self):
            return self.getTypedRuleContext(SqlBaseParser.QualifiedNameContext,0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterSetViewAuthorization" ):
                listener.enterSetViewAuthorization(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitSetViewAuthorization" ):
                listener.exitSetViewAuthorization(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitSetViewAuthorization" ):
                return visitor.visitSetViewAuthorization(self)
            else:
                return visitor.visitChildren(self)
    class ShowCatalogsContext(StatementContext):
        """Parse-tree context for `SHOW CATALOGS [LIKE <pattern> [ESCAPE <escape>]]` (ANTLR-generated)."""
        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.StatementContext
            super().__init__(parser)
            self.pattern = None # StringContext
            self.escape = None # StringContext
            self.copyFrom(ctx)
        def SHOW(self):
            return self.getToken(SqlBaseParser.SHOW, 0)
        def CATALOGS(self):
            return self.getToken(SqlBaseParser.CATALOGS, 0)
        def LIKE(self):
            return self.getToken(SqlBaseParser.LIKE, 0)
        def string(self, i:int=None):
            if i is None:
                return self.getTypedRuleContexts(SqlBaseParser.StringContext)
            else:
                return self.getTypedRuleContext(SqlBaseParser.StringContext,i)
        def ESCAPE(self):
            return self.getToken(SqlBaseParser.ESCAPE, 0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterShowCatalogs" ):
                listener.enterShowCatalogs(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitShowCatalogs" ):
                listener.exitShowCatalogs(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitShowCatalogs" ):
                return visitor.visitShowCatalogs(self)
            else:
                return visitor.visitChildren(self)
    class ShowRolesContext(StatementContext):
        """Parse-tree context for `SHOW [CURRENT] ROLES [FROM|IN <identifier>]` (ANTLR-generated)."""
        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.StatementContext
            super().__init__(parser)
            self.copyFrom(ctx)
        def SHOW(self):
            return self.getToken(SqlBaseParser.SHOW, 0)
        def ROLES(self):
            return self.getToken(SqlBaseParser.ROLES, 0)
        def CURRENT(self):
            return self.getToken(SqlBaseParser.CURRENT, 0)
        def identifier(self):
            return self.getTypedRuleContext(SqlBaseParser.IdentifierContext,0)
        def FROM(self):
            return self.getToken(SqlBaseParser.FROM, 0)
        def IN(self):
            return self.getToken(SqlBaseParser.IN, 0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterShowRoles" ):
                listener.enterShowRoles(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitShowRoles" ):
                listener.exitShowRoles(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitShowRoles" ):
                return visitor.visitShowRoles(self)
            else:
                return visitor.visitChildren(self)
    class MergeContext(StatementContext):
        """Parse-tree context for `MERGE INTO <qualifiedName> [[AS] <identifier>] USING <relation> ON <expression> <mergeCase>+` (ANTLR-generated)."""
        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.StatementContext
            super().__init__(parser)
            self.copyFrom(ctx)
        def MERGE(self):
            return self.getToken(SqlBaseParser.MERGE, 0)
        def INTO(self):
            return self.getToken(SqlBaseParser.INTO, 0)
        def qualifiedName(self):
            return self.getTypedRuleContext(SqlBaseParser.QualifiedNameContext,0)
        def USING(self):
            return self.getToken(SqlBaseParser.USING, 0)
        def relation(self):
            return self.getTypedRuleContext(SqlBaseParser.RelationContext,0)
        def ON(self):
            return self.getToken(SqlBaseParser.ON, 0)
        def expression(self):
            return self.getTypedRuleContext(SqlBaseParser.ExpressionContext,0)
        def identifier(self):
            return self.getTypedRuleContext(SqlBaseParser.IdentifierContext,0)
        def mergeCase(self, i:int=None):
            if i is None:
                return self.getTypedRuleContexts(SqlBaseParser.MergeCaseContext)
            else:
                return self.getTypedRuleContext(SqlBaseParser.MergeCaseContext,i)
        def AS(self):
            return self.getToken(SqlBaseParser.AS, 0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterMerge" ):
                listener.enterMerge(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitMerge" ):
                listener.exitMerge(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitMerge" ):
                return visitor.visitMerge(self)
            else:
                return visitor.visitChildren(self)
    class RenameColumnContext(StatementContext):
        """Parse-tree context for `ALTER TABLE [IF EXISTS] <tableName> RENAME COLUMN [IF EXISTS] <from_> TO <to>` (ANTLR-generated).

        `IF`/`EXISTS` take an index because the keywords may appear twice
        (once for the table, once for the column).
        """
        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.StatementContext
            super().__init__(parser)
            self.tableName = None # QualifiedNameContext
            self.from_ = None # IdentifierContext
            self.to = None # IdentifierContext
            self.copyFrom(ctx)
        def ALTER(self):
            return self.getToken(SqlBaseParser.ALTER, 0)
        def TABLE(self):
            return self.getToken(SqlBaseParser.TABLE, 0)
        def RENAME(self):
            return self.getToken(SqlBaseParser.RENAME, 0)
        def COLUMN(self):
            return self.getToken(SqlBaseParser.COLUMN, 0)
        def TO(self):
            return self.getToken(SqlBaseParser.TO, 0)
        def qualifiedName(self):
            return self.getTypedRuleContext(SqlBaseParser.QualifiedNameContext,0)
        def identifier(self, i:int=None):
            if i is None:
                return self.getTypedRuleContexts(SqlBaseParser.IdentifierContext)
            else:
                return self.getTypedRuleContext(SqlBaseParser.IdentifierContext,i)
        def IF(self, i:int=None):
            if i is None:
                return self.getTokens(SqlBaseParser.IF)
            else:
                return self.getToken(SqlBaseParser.IF, i)
        def EXISTS(self, i:int=None):
            if i is None:
                return self.getTokens(SqlBaseParser.EXISTS)
            else:
                return self.getToken(SqlBaseParser.EXISTS, i)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterRenameColumn" ):
                listener.enterRenameColumn(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitRenameColumn" ):
                listener.exitRenameColumn(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitRenameColumn" ):
                return visitor.visitRenameColumn(self)
            else:
                return visitor.visitChildren(self)
    class CommentColumnContext(StatementContext):
        """Parse-tree context for `COMMENT ON COLUMN <qualifiedName> IS (<string> | NULL)` (ANTLR-generated)."""
        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.StatementContext
            super().__init__(parser)
            self.copyFrom(ctx)
        def COMMENT(self):
            return self.getToken(SqlBaseParser.COMMENT, 0)
        def ON(self):
            return self.getToken(SqlBaseParser.ON, 0)
        def COLUMN(self):
            return self.getToken(SqlBaseParser.COLUMN, 0)
        def qualifiedName(self):
            return self.getTypedRuleContext(SqlBaseParser.QualifiedNameContext,0)
        def IS(self):
            return self.getToken(SqlBaseParser.IS, 0)
        def string(self):
            return self.getTypedRuleContext(SqlBaseParser.StringContext,0)
        def NULL(self):
            return self.getToken(SqlBaseParser.NULL, 0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterCommentColumn" ):
                listener.enterCommentColumn(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitCommentColumn" ):
                listener.exitCommentColumn(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitCommentColumn" ):
                return visitor.visitCommentColumn(self)
            else:
                return visitor.visitChildren(self)
    class RevokeRolesContext(StatementContext):
        """Parse-tree context for `REVOKE [ADMIN OPTION FOR] <roles> FROM <principal>,... [GRANTED BY <grantor>] [IN <catalog>]` (ANTLR-generated)."""
        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.StatementContext
            super().__init__(parser)
            self.catalog = None # IdentifierContext
            self.copyFrom(ctx)
        def REVOKE(self):
            return self.getToken(SqlBaseParser.REVOKE, 0)
        def roles(self):
            return self.getTypedRuleContext(SqlBaseParser.RolesContext,0)
        def FROM(self):
            return self.getToken(SqlBaseParser.FROM, 0)
        def principal(self, i:int=None):
            if i is None:
                return self.getTypedRuleContexts(SqlBaseParser.PrincipalContext)
            else:
                return self.getTypedRuleContext(SqlBaseParser.PrincipalContext,i)
        def ADMIN(self):
            return self.getToken(SqlBaseParser.ADMIN, 0)
        def OPTION(self):
            return self.getToken(SqlBaseParser.OPTION, 0)
        def FOR(self):
            return self.getToken(SqlBaseParser.FOR, 0)
        def GRANTED(self):
            return self.getToken(SqlBaseParser.GRANTED, 0)
        def BY(self):
            return self.getToken(SqlBaseParser.BY, 0)
        def grantor(self):
            return self.getTypedRuleContext(SqlBaseParser.GrantorContext,0)
        def IN(self):
            return self.getToken(SqlBaseParser.IN, 0)
        def identifier(self):
            return self.getTypedRuleContext(SqlBaseParser.IdentifierContext,0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterRevokeRoles" ):
                listener.enterRevokeRoles(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitRevokeRoles" ):
                listener.exitRevokeRoles(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitRevokeRoles" ):
                return visitor.visitRevokeRoles(self)
            else:
                return visitor.visitChildren(self)
    class ShowCreateTableContext(StatementContext):
        """Parse-tree context for `SHOW CREATE TABLE <qualifiedName>` (ANTLR-generated)."""
        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.StatementContext
            super().__init__(parser)
            self.copyFrom(ctx)
        def SHOW(self):
            return self.getToken(SqlBaseParser.SHOW, 0)
        def CREATE(self):
            return self.getToken(SqlBaseParser.CREATE, 0)
        def TABLE(self):
            return self.getToken(SqlBaseParser.TABLE, 0)
        def qualifiedName(self):
            return self.getTypedRuleContext(SqlBaseParser.QualifiedNameContext,0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterShowCreateTable" ):
                listener.enterShowCreateTable(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitShowCreateTable" ):
                listener.exitShowCreateTable(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitShowCreateTable" ):
                return visitor.visitShowCreateTable(self)
            else:
                return visitor.visitChildren(self)
    class ShowColumnsContext(StatementContext):
        """Parse-tree context for `SHOW COLUMNS FROM|IN <qualifiedName> [LIKE <pattern> [ESCAPE <escape>]]` and the `DESCRIBE`/`DESC` shorthands (ANTLR-generated)."""
        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.StatementContext
            super().__init__(parser)
            self.pattern = None # StringContext
            self.escape = None # StringContext
            self.copyFrom(ctx)
        def SHOW(self):
            return self.getToken(SqlBaseParser.SHOW, 0)
        def COLUMNS(self):
            return self.getToken(SqlBaseParser.COLUMNS, 0)
        def FROM(self):
            return self.getToken(SqlBaseParser.FROM, 0)
        def IN(self):
            return self.getToken(SqlBaseParser.IN, 0)
        def qualifiedName(self):
            return self.getTypedRuleContext(SqlBaseParser.QualifiedNameContext,0)
        def LIKE(self):
            return self.getToken(SqlBaseParser.LIKE, 0)
        def string(self, i:int=None):
            if i is None:
                return self.getTypedRuleContexts(SqlBaseParser.StringContext)
            else:
                return self.getTypedRuleContext(SqlBaseParser.StringContext,i)
        def ESCAPE(self):
            return self.getToken(SqlBaseParser.ESCAPE, 0)
        def DESCRIBE(self):
            return self.getToken(SqlBaseParser.DESCRIBE, 0)
        def DESC(self):
            return self.getToken(SqlBaseParser.DESC, 0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterShowColumns" ):
                listener.enterShowColumns(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitShowColumns" ):
                listener.exitShowColumns(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitShowColumns" ):
                return visitor.visitShowColumns(self)
            else:
                return visitor.visitChildren(self)
    class ShowRoleGrantsContext(StatementContext):
        """Parse-tree context for `SHOW ROLE GRANTS [FROM|IN <identifier>]` (ANTLR-generated)."""
        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.StatementContext
            super().__init__(parser)
            self.copyFrom(ctx)
        def SHOW(self):
            return self.getToken(SqlBaseParser.SHOW, 0)
        def ROLE(self):
            return self.getToken(SqlBaseParser.ROLE, 0)
        def GRANTS(self):
            return self.getToken(SqlBaseParser.GRANTS, 0)
        def identifier(self):
            return self.getTypedRuleContext(SqlBaseParser.IdentifierContext,0)
        def FROM(self):
            return self.getToken(SqlBaseParser.FROM, 0)
        def IN(self):
            return self.getToken(SqlBaseParser.IN, 0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterShowRoleGrants" ):
                listener.enterShowRoleGrants(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitShowRoleGrants" ):
                listener.exitShowRoleGrants(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitShowRoleGrants" ):
                return visitor.visitShowRoleGrants(self)
            else:
                return visitor.visitChildren(self)
    class AddColumnContext(StatementContext):
        """Parse-tree context for `ALTER TABLE [IF EXISTS] <tableName> ADD COLUMN [IF NOT EXISTS] <columnDefinition>` (ANTLR-generated).

        `IF`/`EXISTS` take an index because the keywords may appear twice
        (once for the table, once for the column).
        """
        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.StatementContext
            super().__init__(parser)
            self.tableName = None # QualifiedNameContext
            self.column = None # ColumnDefinitionContext
            self.copyFrom(ctx)
        def ALTER(self):
            return self.getToken(SqlBaseParser.ALTER, 0)
        def TABLE(self):
            return self.getToken(SqlBaseParser.TABLE, 0)
        def ADD(self):
            return self.getToken(SqlBaseParser.ADD, 0)
        def COLUMN(self):
            return self.getToken(SqlBaseParser.COLUMN, 0)
        def qualifiedName(self):
            return self.getTypedRuleContext(SqlBaseParser.QualifiedNameContext,0)
        def columnDefinition(self):
            return self.getTypedRuleContext(SqlBaseParser.ColumnDefinitionContext,0)
        def IF(self, i:int=None):
            if i is None:
                return self.getTokens(SqlBaseParser.IF)
            else:
                return self.getToken(SqlBaseParser.IF, i)
        def EXISTS(self, i:int=None):
            if i is None:
                return self.getTokens(SqlBaseParser.EXISTS)
            else:
                return self.getToken(SqlBaseParser.EXISTS, i)
        def NOT(self):
            return self.getToken(SqlBaseParser.NOT, 0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterAddColumn" ):
                listener.enterAddColumn(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitAddColumn" ):
                listener.exitAddColumn(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitAddColumn" ):
                return visitor.visitAddColumn(self)
            else:
                return visitor.visitChildren(self)
    class DenyContext(StatementContext):
        """Parse-tree context for `DENY (<privilege>,... | ALL PRIVILEGES) ON [SCHEMA|TABLE] <qualifiedName> TO <grantee>` (ANTLR-generated)."""
        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.StatementContext
            super().__init__(parser)
            self.grantee = None # PrincipalContext
            self.copyFrom(ctx)
        def DENY(self):
            return self.getToken(SqlBaseParser.DENY, 0)
        def ON(self):
            return self.getToken(SqlBaseParser.ON, 0)
        def qualifiedName(self):
            return self.getTypedRuleContext(SqlBaseParser.QualifiedNameContext,0)
        def TO(self):
            return self.getToken(SqlBaseParser.TO, 0)
        def principal(self):
            return self.getTypedRuleContext(SqlBaseParser.PrincipalContext,0)
        def privilege(self, i:int=None):
            if i is None:
                return self.getTypedRuleContexts(SqlBaseParser.PrivilegeContext)
            else:
                return self.getTypedRuleContext(SqlBaseParser.PrivilegeContext,i)
        def ALL(self):
            return self.getToken(SqlBaseParser.ALL, 0)
        def PRIVILEGES(self):
            return self.getToken(SqlBaseParser.PRIVILEGES, 0)
        def SCHEMA(self):
            return self.getToken(SqlBaseParser.SCHEMA, 0)
        def TABLE(self):
            return self.getToken(SqlBaseParser.TABLE, 0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterDeny" ):
                listener.enterDeny(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitDeny" ):
                listener.exitDeny(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitDeny" ):
                return visitor.visitDeny(self)
            else:
                return visitor.visitChildren(self)
    class ResetSessionContext(StatementContext):
        """Parse-tree context for `RESET SESSION <qualifiedName>` (ANTLR-generated)."""
        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.StatementContext
            super().__init__(parser)
            self.copyFrom(ctx)
        def RESET(self):
            return self.getToken(SqlBaseParser.RESET, 0)
        def SESSION(self):
            return self.getToken(SqlBaseParser.SESSION, 0)
        def qualifiedName(self):
            return self.getTypedRuleContext(SqlBaseParser.QualifiedNameContext,0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterResetSession" ):
                listener.enterResetSession(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitResetSession" ):
                listener.exitResetSession(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitResetSession" ):
                return visitor.visitResetSession(self)
            else:
                return visitor.visitChildren(self)
    class InsertIntoContext(StatementContext):
        """Parse-tree context for `INSERT INTO <qualifiedName> [<columnAliases>] <query>` (ANTLR-generated)."""
        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.StatementContext
            super().__init__(parser)
            self.copyFrom(ctx)
        def INSERT(self):
            return self.getToken(SqlBaseParser.INSERT, 0)
        def INTO(self):
            return self.getToken(SqlBaseParser.INTO, 0)
        def qualifiedName(self):
            return self.getTypedRuleContext(SqlBaseParser.QualifiedNameContext,0)
        def query(self):
            return self.getTypedRuleContext(SqlBaseParser.QueryContext,0)
        def columnAliases(self):
            return self.getTypedRuleContext(SqlBaseParser.ColumnAliasesContext,0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterInsertInto" ):
                listener.enterInsertInto(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitInsertInto" ):
                listener.exitInsertInto(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitInsertInto" ):
                return visitor.visitInsertInto(self)
            else:
                return visitor.visitChildren(self)
    class ShowSessionContext(StatementContext):
        """Parse-tree context for `SHOW SESSION [LIKE <pattern> [ESCAPE <escape>]]` (ANTLR-generated)."""
        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.StatementContext
            super().__init__(parser)
            self.pattern = None # StringContext
            self.escape = None # StringContext
            self.copyFrom(ctx)
        def SHOW(self):
            return self.getToken(SqlBaseParser.SHOW, 0)
        def SESSION(self):
            return self.getToken(SqlBaseParser.SESSION, 0)
        def LIKE(self):
            return self.getToken(SqlBaseParser.LIKE, 0)
        def string(self, i:int=None):
            if i is None:
                return self.getTypedRuleContexts(SqlBaseParser.StringContext)
            else:
                return self.getTypedRuleContext(SqlBaseParser.StringContext,i)
        def ESCAPE(self):
            return self.getToken(SqlBaseParser.ESCAPE, 0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterShowSession" ):
                listener.enterShowSession(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitShowSession" ):
                listener.exitShowSession(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitShowSession" ):
                return visitor.visitShowSession(self)
            else:
                return visitor.visitChildren(self)
    class CreateSchemaContext(StatementContext):
        """Parse-tree context for `CREATE SCHEMA [IF NOT EXISTS] <qualifiedName> [AUTHORIZATION <principal>] [WITH <properties>]` (ANTLR-generated)."""
        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.StatementContext
            super().__init__(parser)
            self.copyFrom(ctx)
        def CREATE(self):
            return self.getToken(SqlBaseParser.CREATE, 0)
        def SCHEMA(self):
            return self.getToken(SqlBaseParser.SCHEMA, 0)
        def qualifiedName(self):
            return self.getTypedRuleContext(SqlBaseParser.QualifiedNameContext,0)
        def IF(self):
            return self.getToken(SqlBaseParser.IF, 0)
        def NOT(self):
            return self.getToken(SqlBaseParser.NOT, 0)
        def EXISTS(self):
            return self.getToken(SqlBaseParser.EXISTS, 0)
        def AUTHORIZATION(self):
            return self.getToken(SqlBaseParser.AUTHORIZATION, 0)
        def principal(self):
            return self.getTypedRuleContext(SqlBaseParser.PrincipalContext,0)
        def WITH(self):
            return self.getToken(SqlBaseParser.WITH, 0)
        def properties(self):
            return self.getTypedRuleContext(SqlBaseParser.PropertiesContext,0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterCreateSchema" ):
                listener.enterCreateSchema(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitCreateSchema" ):
                listener.exitCreateSchema(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitCreateSchema" ):
                return visitor.visitCreateSchema(self)
            else:
                return visitor.visitChildren(self)
    class ExplainAnalyzeContext(StatementContext):
        """Parse-tree context for `EXPLAIN ANALYZE [VERBOSE] <statement>` (ANTLR-generated)."""
        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.StatementContext
            super().__init__(parser)
            self.copyFrom(ctx)
        def EXPLAIN(self):
            return self.getToken(SqlBaseParser.EXPLAIN, 0)
        def ANALYZE(self):
            return self.getToken(SqlBaseParser.ANALYZE, 0)
        def statement(self):
            return self.getTypedRuleContext(SqlBaseParser.StatementContext,0)
        def VERBOSE(self):
            return self.getToken(SqlBaseParser.VERBOSE, 0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterExplainAnalyze" ):
                listener.enterExplainAnalyze(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitExplainAnalyze" ):
                listener.exitExplainAnalyze(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitExplainAnalyze" ):
                return visitor.visitExplainAnalyze(self)
            else:
                return visitor.visitChildren(self)
    class ExecuteContext(StatementContext):
        """Parse-tree context for `EXECUTE <identifier> [USING <expression>,...]` (ANTLR-generated)."""
        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.StatementContext
            super().__init__(parser)
            self.copyFrom(ctx)
        def EXECUTE(self):
            return self.getToken(SqlBaseParser.EXECUTE, 0)
        def identifier(self):
            return self.getTypedRuleContext(SqlBaseParser.IdentifierContext,0)
        def USING(self):
            return self.getToken(SqlBaseParser.USING, 0)
        def expression(self, i:int=None):
            if i is None:
                return self.getTypedRuleContexts(SqlBaseParser.ExpressionContext)
            else:
                return self.getTypedRuleContext(SqlBaseParser.ExpressionContext,i)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterExecute" ):
                listener.enterExecute(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitExecute" ):
                listener.exitExecute(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitExecute" ):
                return visitor.visitExecute(self)
            else:
                return visitor.visitChildren(self)
    class RenameSchemaContext(StatementContext):
        """Parse-tree context for `ALTER SCHEMA <qualifiedName> RENAME TO <identifier>` (ANTLR-generated)."""
        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.StatementContext
            super().__init__(parser)
            self.copyFrom(ctx)
        def ALTER(self):
            return self.getToken(SqlBaseParser.ALTER, 0)
        def SCHEMA(self):
            return self.getToken(SqlBaseParser.SCHEMA, 0)
        def qualifiedName(self):
            return self.getTypedRuleContext(SqlBaseParser.QualifiedNameContext,0)
        def RENAME(self):
            return self.getToken(SqlBaseParser.RENAME, 0)
        def TO(self):
            return self.getToken(SqlBaseParser.TO, 0)
        def identifier(self):
            return self.getTypedRuleContext(SqlBaseParser.IdentifierContext,0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterRenameSchema" ):
                listener.enterRenameSchema(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitRenameSchema" ):
                listener.exitRenameSchema(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitRenameSchema" ):
                return visitor.visitRenameSchema(self)
            else:
                return visitor.visitChildren(self)
    class DropRoleContext(StatementContext):
        """Parse-tree context for `DROP ROLE <name> [IN <catalog>]` (ANTLR-generated)."""
        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.StatementContext
            super().__init__(parser)
            self.name = None # IdentifierContext
            self.catalog = None # IdentifierContext
            self.copyFrom(ctx)
        def DROP(self):
            return self.getToken(SqlBaseParser.DROP, 0)
        def ROLE(self):
            return self.getToken(SqlBaseParser.ROLE, 0)
        def identifier(self, i:int=None):
            if i is None:
                return self.getTypedRuleContexts(SqlBaseParser.IdentifierContext)
            else:
                return self.getTypedRuleContext(SqlBaseParser.IdentifierContext,i)
        def IN(self):
            return self.getToken(SqlBaseParser.IN, 0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterDropRole" ):
                listener.enterDropRole(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitDropRole" ):
                listener.exitDropRole(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitDropRole" ):
                return visitor.visitDropRole(self)
            else:
                return visitor.visitChildren(self)
    class AnalyzeContext(StatementContext):
        """Generated parse-tree context for ``ANALYZE qualifiedName (WITH properties)?``."""
        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.StatementContext
            super().__init__(parser)
            self.copyFrom(ctx)
        def ANALYZE(self):
            return self.getToken(SqlBaseParser.ANALYZE, 0)
        def qualifiedName(self):
            return self.getTypedRuleContext(SqlBaseParser.QualifiedNameContext,0)
        def WITH(self):
            return self.getToken(SqlBaseParser.WITH, 0)
        def properties(self):
            return self.getTypedRuleContext(SqlBaseParser.PropertiesContext,0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterAnalyze" ):
                listener.enterAnalyze(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitAnalyze" ):
                listener.exitAnalyze(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitAnalyze" ):
                return visitor.visitAnalyze(self)
            else:
                return visitor.visitChildren(self)
    class SetRoleContext(StatementContext):
        """Generated parse-tree context for ``SET ROLE (ALL | NONE | role) (IN catalog)?``; labeled children ``role`` and ``catalog`` are filled by the parser."""
        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.StatementContext
            super().__init__(parser)
            self.role = None # IdentifierContext
            self.catalog = None # IdentifierContext
            self.copyFrom(ctx)
        def SET(self):
            return self.getToken(SqlBaseParser.SET, 0)
        def ROLE(self):
            return self.getToken(SqlBaseParser.ROLE, 0)
        def ALL(self):
            return self.getToken(SqlBaseParser.ALL, 0)
        def NONE(self):
            return self.getToken(SqlBaseParser.NONE, 0)
        def identifier(self, i:int=None):
            if i is None:
                return self.getTypedRuleContexts(SqlBaseParser.IdentifierContext)
            else:
                return self.getTypedRuleContext(SqlBaseParser.IdentifierContext,i)
        def IN(self):
            return self.getToken(SqlBaseParser.IN, 0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterSetRole" ):
                listener.enterSetRole(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitSetRole" ):
                listener.exitSetRole(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitSetRole" ):
                return visitor.visitSetRole(self)
            else:
                return visitor.visitChildren(self)
    class ShowGrantsContext(StatementContext):
        """Generated parse-tree context for ``SHOW GRANTS (ON TABLE? qualifiedName)?``."""
        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.StatementContext
            super().__init__(parser)
            self.copyFrom(ctx)
        def SHOW(self):
            return self.getToken(SqlBaseParser.SHOW, 0)
        def GRANTS(self):
            return self.getToken(SqlBaseParser.GRANTS, 0)
        def ON(self):
            return self.getToken(SqlBaseParser.ON, 0)
        def qualifiedName(self):
            return self.getTypedRuleContext(SqlBaseParser.QualifiedNameContext,0)
        def TABLE(self):
            return self.getToken(SqlBaseParser.TABLE, 0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterShowGrants" ):
                listener.enterShowGrants(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitShowGrants" ):
                listener.exitShowGrants(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitShowGrants" ):
                return visitor.visitShowGrants(self)
            else:
                return visitor.visitChildren(self)
    class DropSchemaContext(StatementContext):
        """Generated parse-tree context for ``DROP SCHEMA (IF EXISTS)? qualifiedName (CASCADE | RESTRICT)?``."""
        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.StatementContext
            super().__init__(parser)
            self.copyFrom(ctx)
        def DROP(self):
            return self.getToken(SqlBaseParser.DROP, 0)
        def SCHEMA(self):
            return self.getToken(SqlBaseParser.SCHEMA, 0)
        def qualifiedName(self):
            return self.getTypedRuleContext(SqlBaseParser.QualifiedNameContext,0)
        def IF(self):
            return self.getToken(SqlBaseParser.IF, 0)
        def EXISTS(self):
            return self.getToken(SqlBaseParser.EXISTS, 0)
        def CASCADE(self):
            return self.getToken(SqlBaseParser.CASCADE, 0)
        def RESTRICT(self):
            return self.getToken(SqlBaseParser.RESTRICT, 0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterDropSchema" ):
                listener.enterDropSchema(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitDropSchema" ):
                listener.exitDropSchema(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitDropSchema" ):
                return visitor.visitDropSchema(self)
            else:
                return visitor.visitChildren(self)
    class SetTableAuthorizationContext(StatementContext):
        """Generated parse-tree context for ``ALTER TABLE tableName SET AUTHORIZATION principal``; labeled child ``tableName`` is filled by the parser."""
        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.StatementContext
            super().__init__(parser)
            self.tableName = None # QualifiedNameContext
            self.copyFrom(ctx)
        def ALTER(self):
            return self.getToken(SqlBaseParser.ALTER, 0)
        def TABLE(self):
            return self.getToken(SqlBaseParser.TABLE, 0)
        def SET(self):
            return self.getToken(SqlBaseParser.SET, 0)
        def AUTHORIZATION(self):
            return self.getToken(SqlBaseParser.AUTHORIZATION, 0)
        def principal(self):
            return self.getTypedRuleContext(SqlBaseParser.PrincipalContext,0)
        def qualifiedName(self):
            return self.getTypedRuleContext(SqlBaseParser.QualifiedNameContext,0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterSetTableAuthorization" ):
                listener.enterSetTableAuthorization(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitSetTableAuthorization" ):
                listener.exitSetTableAuthorization(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitSetTableAuthorization" ):
                return visitor.visitSetTableAuthorization(self)
            else:
                return visitor.visitChildren(self)
    class ShowCreateViewContext(StatementContext):
        """Generated parse-tree context for ``SHOW CREATE VIEW qualifiedName``."""
        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.StatementContext
            super().__init__(parser)
            self.copyFrom(ctx)
        def SHOW(self):
            return self.getToken(SqlBaseParser.SHOW, 0)
        def CREATE(self):
            return self.getToken(SqlBaseParser.CREATE, 0)
        def VIEW(self):
            return self.getToken(SqlBaseParser.VIEW, 0)
        def qualifiedName(self):
            return self.getTypedRuleContext(SqlBaseParser.QualifiedNameContext,0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterShowCreateView" ):
                listener.enterShowCreateView(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitShowCreateView" ):
                listener.exitShowCreateView(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitShowCreateView" ):
                return visitor.visitShowCreateView(self)
            else:
                return visitor.visitChildren(self)
    class CreateTableContext(StatementContext):
        """Generated parse-tree context for ``CREATE TABLE (IF NOT EXISTS)? qualifiedName (tableElement, ...) (COMMENT string)? (WITH properties)?``."""
        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.StatementContext
            super().__init__(parser)
            self.copyFrom(ctx)
        def CREATE(self):
            return self.getToken(SqlBaseParser.CREATE, 0)
        def TABLE(self):
            return self.getToken(SqlBaseParser.TABLE, 0)
        def qualifiedName(self):
            return self.getTypedRuleContext(SqlBaseParser.QualifiedNameContext,0)
        def tableElement(self, i:int=None):
            if i is None:
                return self.getTypedRuleContexts(SqlBaseParser.TableElementContext)
            else:
                return self.getTypedRuleContext(SqlBaseParser.TableElementContext,i)
        def IF(self):
            return self.getToken(SqlBaseParser.IF, 0)
        def NOT(self):
            return self.getToken(SqlBaseParser.NOT, 0)
        def EXISTS(self):
            return self.getToken(SqlBaseParser.EXISTS, 0)
        def COMMENT(self):
            return self.getToken(SqlBaseParser.COMMENT, 0)
        def string(self):
            return self.getTypedRuleContext(SqlBaseParser.StringContext,0)
        def WITH(self):
            return self.getToken(SqlBaseParser.WITH, 0)
        def properties(self):
            return self.getTypedRuleContext(SqlBaseParser.PropertiesContext,0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterCreateTable" ):
                listener.enterCreateTable(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitCreateTable" ):
                listener.exitCreateTable(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitCreateTable" ):
                return visitor.visitCreateTable(self)
            else:
                return visitor.visitChildren(self)
    class StartTransactionContext(StatementContext):
        """Generated parse-tree context for ``START TRANSACTION (transactionMode, ...)?``."""
        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.StatementContext
            super().__init__(parser)
            self.copyFrom(ctx)
        def START(self):
            return self.getToken(SqlBaseParser.START, 0)
        def TRANSACTION(self):
            return self.getToken(SqlBaseParser.TRANSACTION, 0)
        def transactionMode(self, i:int=None):
            if i is None:
                return self.getTypedRuleContexts(SqlBaseParser.TransactionModeContext)
            else:
                return self.getTypedRuleContext(SqlBaseParser.TransactionModeContext,i)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterStartTransaction" ):
                listener.enterStartTransaction(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitStartTransaction" ):
                listener.exitStartTransaction(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitStartTransaction" ):
                return visitor.visitStartTransaction(self)
            else:
                return visitor.visitChildren(self)
    class CreateTableAsSelectContext(StatementContext):
        """Generated parse-tree context for ``CREATE TABLE ... AS query (WITH (NO)? DATA)?`` (CTAS); WITH is indexed because it can occur twice."""
        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.StatementContext
            super().__init__(parser)
            self.copyFrom(ctx)
        def CREATE(self):
            return self.getToken(SqlBaseParser.CREATE, 0)
        def TABLE(self):
            return self.getToken(SqlBaseParser.TABLE, 0)
        def qualifiedName(self):
            return self.getTypedRuleContext(SqlBaseParser.QualifiedNameContext,0)
        def AS(self):
            return self.getToken(SqlBaseParser.AS, 0)
        def query(self):
            return self.getTypedRuleContext(SqlBaseParser.QueryContext,0)
        def IF(self):
            return self.getToken(SqlBaseParser.IF, 0)
        def NOT(self):
            return self.getToken(SqlBaseParser.NOT, 0)
        def EXISTS(self):
            return self.getToken(SqlBaseParser.EXISTS, 0)
        def columnAliases(self):
            return self.getTypedRuleContext(SqlBaseParser.ColumnAliasesContext,0)
        def COMMENT(self):
            return self.getToken(SqlBaseParser.COMMENT, 0)
        def string(self):
            return self.getTypedRuleContext(SqlBaseParser.StringContext,0)
        def WITH(self, i:int=None):
            if i is None:
                return self.getTokens(SqlBaseParser.WITH)
            else:
                return self.getToken(SqlBaseParser.WITH, i)
        def properties(self):
            return self.getTypedRuleContext(SqlBaseParser.PropertiesContext,0)
        def DATA(self):
            return self.getToken(SqlBaseParser.DATA, 0)
        def NO(self):
            return self.getToken(SqlBaseParser.NO, 0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterCreateTableAsSelect" ):
                listener.enterCreateTableAsSelect(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitCreateTableAsSelect" ):
                listener.exitCreateTableAsSelect(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitCreateTableAsSelect" ):
                return visitor.visitCreateTableAsSelect(self)
            else:
                return visitor.visitChildren(self)
    class ShowStatsContext(StatementContext):
        """Generated parse-tree context for ``SHOW STATS FOR qualifiedName``."""
        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.StatementContext
            super().__init__(parser)
            self.copyFrom(ctx)
        def SHOW(self):
            return self.getToken(SqlBaseParser.SHOW, 0)
        def STATS(self):
            return self.getToken(SqlBaseParser.STATS, 0)
        def FOR(self):
            return self.getToken(SqlBaseParser.FOR, 0)
        def qualifiedName(self):
            return self.getTypedRuleContext(SqlBaseParser.QualifiedNameContext,0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterShowStats" ):
                listener.enterShowStats(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitShowStats" ):
                listener.exitShowStats(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitShowStats" ):
                return visitor.visitShowStats(self)
            else:
                return visitor.visitChildren(self)
    class ShowCreateSchemaContext(StatementContext):
        """Generated parse-tree context for ``SHOW CREATE SCHEMA qualifiedName``."""
        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.StatementContext
            super().__init__(parser)
            self.copyFrom(ctx)
        def SHOW(self):
            return self.getToken(SqlBaseParser.SHOW, 0)
        def CREATE(self):
            return self.getToken(SqlBaseParser.CREATE, 0)
        def SCHEMA(self):
            return self.getToken(SqlBaseParser.SCHEMA, 0)
        def qualifiedName(self):
            return self.getTypedRuleContext(SqlBaseParser.QualifiedNameContext,0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterShowCreateSchema" ):
                listener.enterShowCreateSchema(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitShowCreateSchema" ):
                listener.exitShowCreateSchema(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitShowCreateSchema" ):
                return visitor.visitShowCreateSchema(self)
            else:
                return visitor.visitChildren(self)
    class RevokeContext(StatementContext):
        """Generated parse-tree context for ``REVOKE (GRANT OPTION FOR)? (privilege, ... | ALL PRIVILEGES) ON (SCHEMA | TABLE)? qualifiedName FROM grantee``; labeled child ``grantee`` is filled by the parser."""
        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.StatementContext
            super().__init__(parser)
            self.grantee = None # PrincipalContext
            self.copyFrom(ctx)
        def REVOKE(self):
            return self.getToken(SqlBaseParser.REVOKE, 0)
        def ON(self):
            return self.getToken(SqlBaseParser.ON, 0)
        def qualifiedName(self):
            return self.getTypedRuleContext(SqlBaseParser.QualifiedNameContext,0)
        def FROM(self):
            return self.getToken(SqlBaseParser.FROM, 0)
        def principal(self):
            return self.getTypedRuleContext(SqlBaseParser.PrincipalContext,0)
        def privilege(self, i:int=None):
            if i is None:
                return self.getTypedRuleContexts(SqlBaseParser.PrivilegeContext)
            else:
                return self.getTypedRuleContext(SqlBaseParser.PrivilegeContext,i)
        def ALL(self):
            return self.getToken(SqlBaseParser.ALL, 0)
        def PRIVILEGES(self):
            return self.getToken(SqlBaseParser.PRIVILEGES, 0)
        def GRANT(self):
            return self.getToken(SqlBaseParser.GRANT, 0)
        def OPTION(self):
            return self.getToken(SqlBaseParser.OPTION, 0)
        def FOR(self):
            return self.getToken(SqlBaseParser.FOR, 0)
        def SCHEMA(self):
            return self.getToken(SqlBaseParser.SCHEMA, 0)
        def TABLE(self):
            return self.getToken(SqlBaseParser.TABLE, 0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterRevoke" ):
                listener.enterRevoke(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitRevoke" ):
                listener.exitRevoke(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitRevoke" ):
                return visitor.visitRevoke(self)
            else:
                return visitor.visitChildren(self)
    class UpdateContext(StatementContext):
        """Generated parse-tree context for ``UPDATE qualifiedName SET updateAssignment, ... (WHERE where)?``; labeled child ``where`` is filled by the parser."""
        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.StatementContext
            super().__init__(parser)
            self.where = None # BooleanExpressionContext
            self.copyFrom(ctx)
        def UPDATE(self):
            return self.getToken(SqlBaseParser.UPDATE, 0)
        def qualifiedName(self):
            return self.getTypedRuleContext(SqlBaseParser.QualifiedNameContext,0)
        def SET(self):
            return self.getToken(SqlBaseParser.SET, 0)
        def updateAssignment(self, i:int=None):
            if i is None:
                return self.getTypedRuleContexts(SqlBaseParser.UpdateAssignmentContext)
            else:
                return self.getTypedRuleContext(SqlBaseParser.UpdateAssignmentContext,i)
        def WHERE(self):
            return self.getToken(SqlBaseParser.WHERE, 0)
        def booleanExpression(self):
            return self.getTypedRuleContext(SqlBaseParser.BooleanExpressionContext,0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterUpdate" ):
                listener.enterUpdate(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitUpdate" ):
                listener.exitUpdate(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitUpdate" ):
                return visitor.visitUpdate(self)
            else:
                return visitor.visitChildren(self)
    class TableExecuteContext(StatementContext):
        """Generated parse-tree context for ``ALTER TABLE tableName EXECUTE procedureName (callArgument, ...)? (WHERE where)?``; labeled children are filled by the parser."""
        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.StatementContext
            super().__init__(parser)
            self.tableName = None # QualifiedNameContext
            self.procedureName = None # IdentifierContext
            self.where = None # BooleanExpressionContext
            self.copyFrom(ctx)
        def ALTER(self):
            return self.getToken(SqlBaseParser.ALTER, 0)
        def TABLE(self):
            return self.getToken(SqlBaseParser.TABLE, 0)
        def EXECUTE(self):
            return self.getToken(SqlBaseParser.EXECUTE, 0)
        def qualifiedName(self):
            return self.getTypedRuleContext(SqlBaseParser.QualifiedNameContext,0)
        def identifier(self):
            return self.getTypedRuleContext(SqlBaseParser.IdentifierContext,0)
        def WHERE(self):
            return self.getToken(SqlBaseParser.WHERE, 0)
        def booleanExpression(self):
            return self.getTypedRuleContext(SqlBaseParser.BooleanExpressionContext,0)
        def callArgument(self, i:int=None):
            if i is None:
                return self.getTypedRuleContexts(SqlBaseParser.CallArgumentContext)
            else:
                return self.getTypedRuleContext(SqlBaseParser.CallArgumentContext,i)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterTableExecute" ):
                listener.enterTableExecute(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitTableExecute" ):
                listener.exitTableExecute(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitTableExecute" ):
                return visitor.visitTableExecute(self)
            else:
                return visitor.visitChildren(self)
    class DeleteContext(StatementContext):
        """Generated parse-tree context for ``DELETE FROM qualifiedName (WHERE booleanExpression)?``."""
        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.StatementContext
            super().__init__(parser)
            self.copyFrom(ctx)
        def DELETE(self):
            return self.getToken(SqlBaseParser.DELETE, 0)
        def FROM(self):
            return self.getToken(SqlBaseParser.FROM, 0)
        def qualifiedName(self):
            return self.getTypedRuleContext(SqlBaseParser.QualifiedNameContext,0)
        def WHERE(self):
            return self.getToken(SqlBaseParser.WHERE, 0)
        def booleanExpression(self):
            return self.getTypedRuleContext(SqlBaseParser.BooleanExpressionContext,0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterDelete" ):
                listener.enterDelete(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitDelete" ):
                listener.exitDelete(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitDelete" ):
                return visitor.visitDelete(self)
            else:
                return visitor.visitChildren(self)
    class DescribeInputContext(StatementContext):
        """Generated parse-tree context for ``DESCRIBE INPUT identifier``."""
        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.StatementContext
            super().__init__(parser)
            self.copyFrom(ctx)
        def DESCRIBE(self):
            return self.getToken(SqlBaseParser.DESCRIBE, 0)
        def INPUT(self):
            return self.getToken(SqlBaseParser.INPUT, 0)
        def identifier(self):
            return self.getTypedRuleContext(SqlBaseParser.IdentifierContext,0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterDescribeInput" ):
                listener.enterDescribeInput(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitDescribeInput" ):
                listener.exitDescribeInput(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitDescribeInput" ):
                return visitor.visitDescribeInput(self)
            else:
                return visitor.visitChildren(self)
    class ShowStatsForQueryContext(StatementContext):
        """Generated parse-tree context for ``SHOW STATS FOR (query)``."""
        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.StatementContext
            super().__init__(parser)
            self.copyFrom(ctx)
        def SHOW(self):
            return self.getToken(SqlBaseParser.SHOW, 0)
        def STATS(self):
            return self.getToken(SqlBaseParser.STATS, 0)
        def FOR(self):
            return self.getToken(SqlBaseParser.FOR, 0)
        def query(self):
            return self.getTypedRuleContext(SqlBaseParser.QueryContext,0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterShowStatsForQuery" ):
                listener.enterShowStatsForQuery(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitShowStatsForQuery" ):
                listener.exitShowStatsForQuery(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitShowStatsForQuery" ):
                return visitor.visitShowStatsForQuery(self)
            else:
                return visitor.visitChildren(self)
    class StatementDefaultContext(StatementContext):
        """Generated parse-tree context for the default ``statement`` alternative: a bare ``query``."""
        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.StatementContext
            super().__init__(parser)
            self.copyFrom(ctx)
        def query(self):
            return self.getTypedRuleContext(SqlBaseParser.QueryContext,0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterStatementDefault" ):
                listener.enterStatementDefault(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitStatementDefault" ):
                listener.exitStatementDefault(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitStatementDefault" ):
                return visitor.visitStatementDefault(self)
            else:
                return visitor.visitChildren(self)
    class SetTimeZoneContext(StatementContext):
        """Generated parse-tree context for ``SET TIME ZONE (LOCAL | expression)``."""
        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.StatementContext
            super().__init__(parser)
            self.copyFrom(ctx)
        def SET(self):
            return self.getToken(SqlBaseParser.SET, 0)
        def TIME(self):
            return self.getToken(SqlBaseParser.TIME, 0)
        def ZONE(self):
            return self.getToken(SqlBaseParser.ZONE, 0)
        def LOCAL(self):
            return self.getToken(SqlBaseParser.LOCAL, 0)
        def expression(self):
            return self.getTypedRuleContext(SqlBaseParser.ExpressionContext,0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterSetTimeZone" ):
                listener.enterSetTimeZone(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitSetTimeZone" ):
                listener.exitSetTimeZone(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitSetTimeZone" ):
                return visitor.visitSetTimeZone(self)
            else:
                return visitor.visitChildren(self)
    class TruncateTableContext(StatementContext):
        """Generated parse-tree context for ``TRUNCATE TABLE qualifiedName``."""
        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.StatementContext
            super().__init__(parser)
            self.copyFrom(ctx)
        def TRUNCATE(self):
            return self.getToken(SqlBaseParser.TRUNCATE, 0)
        def TABLE(self):
            return self.getToken(SqlBaseParser.TABLE, 0)
        def qualifiedName(self):
            return self.getTypedRuleContext(SqlBaseParser.QualifiedNameContext,0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterTruncateTable" ):
                listener.enterTruncateTable(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitTruncateTable" ):
                listener.exitTruncateTable(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitTruncateTable" ):
                return visitor.visitTruncateTable(self)
            else:
                return visitor.visitChildren(self)
    class CreateMaterializedViewContext(StatementContext):
        """Generated parse-tree context for ``CREATE (OR REPLACE)? MATERIALIZED VIEW (IF NOT EXISTS)? qualifiedName (COMMENT string)? (WITH properties)? AS query``."""
        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.StatementContext
            super().__init__(parser)
            self.copyFrom(ctx)
        def CREATE(self):
            return self.getToken(SqlBaseParser.CREATE, 0)
        def MATERIALIZED(self):
            return self.getToken(SqlBaseParser.MATERIALIZED, 0)
        def VIEW(self):
            return self.getToken(SqlBaseParser.VIEW, 0)
        def qualifiedName(self):
            return self.getTypedRuleContext(SqlBaseParser.QualifiedNameContext,0)
        def AS(self):
            return self.getToken(SqlBaseParser.AS, 0)
        def query(self):
            return self.getTypedRuleContext(SqlBaseParser.QueryContext,0)
        def OR(self):
            return self.getToken(SqlBaseParser.OR, 0)
        def REPLACE(self):
            return self.getToken(SqlBaseParser.REPLACE, 0)
        def IF(self):
            return self.getToken(SqlBaseParser.IF, 0)
        def NOT(self):
            return self.getToken(SqlBaseParser.NOT, 0)
        def EXISTS(self):
            return self.getToken(SqlBaseParser.EXISTS, 0)
        def COMMENT(self):
            return self.getToken(SqlBaseParser.COMMENT, 0)
        def string(self):
            return self.getTypedRuleContext(SqlBaseParser.StringContext,0)
        def WITH(self):
            return self.getToken(SqlBaseParser.WITH, 0)
        def properties(self):
            return self.getTypedRuleContext(SqlBaseParser.PropertiesContext,0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterCreateMaterializedView" ):
                listener.enterCreateMaterializedView(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitCreateMaterializedView" ):
                listener.exitCreateMaterializedView(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitCreateMaterializedView" ):
                return visitor.visitCreateMaterializedView(self)
            else:
                return visitor.visitChildren(self)
    class SetSessionContext(StatementContext):
        """Generated parse-tree context for ``SET SESSION qualifiedName EQ expression``."""
        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.StatementContext
            super().__init__(parser)
            self.copyFrom(ctx)
        def SET(self):
            return self.getToken(SqlBaseParser.SET, 0)
        def SESSION(self):
            return self.getToken(SqlBaseParser.SESSION, 0)
        def qualifiedName(self):
            return self.getTypedRuleContext(SqlBaseParser.QualifiedNameContext,0)
        def EQ(self):
            return self.getToken(SqlBaseParser.EQ, 0)
        def expression(self):
            return self.getTypedRuleContext(SqlBaseParser.ExpressionContext,0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterSetSession" ):
                listener.enterSetSession(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitSetSession" ):
                listener.exitSetSession(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitSetSession" ):
                return visitor.visitSetSession(self)
            else:
                return visitor.visitChildren(self)
    class CreateViewContext(StatementContext):
        """Generated parse-tree context for ``CREATE (OR REPLACE)? VIEW qualifiedName (COMMENT string)? (SECURITY (DEFINER | INVOKER))? AS query``."""
        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.StatementContext
            super().__init__(parser)
            self.copyFrom(ctx)
        def CREATE(self):
            return self.getToken(SqlBaseParser.CREATE, 0)
        def VIEW(self):
            return self.getToken(SqlBaseParser.VIEW, 0)
        def qualifiedName(self):
            return self.getTypedRuleContext(SqlBaseParser.QualifiedNameContext,0)
        def AS(self):
            return self.getToken(SqlBaseParser.AS, 0)
        def query(self):
            return self.getTypedRuleContext(SqlBaseParser.QueryContext,0)
        def OR(self):
            return self.getToken(SqlBaseParser.OR, 0)
        def REPLACE(self):
            return self.getToken(SqlBaseParser.REPLACE, 0)
        def COMMENT(self):
            return self.getToken(SqlBaseParser.COMMENT, 0)
        def string(self):
            return self.getTypedRuleContext(SqlBaseParser.StringContext,0)
        def SECURITY(self):
            return self.getToken(SqlBaseParser.SECURITY, 0)
        def DEFINER(self):
            return self.getToken(SqlBaseParser.DEFINER, 0)
        def INVOKER(self):
            return self.getToken(SqlBaseParser.INVOKER, 0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterCreateView" ):
                listener.enterCreateView(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitCreateView" ):
                listener.exitCreateView(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitCreateView" ):
                return visitor.visitCreateView(self)
            else:
                return visitor.visitChildren(self)
    class RenameMaterializedViewContext(StatementContext):
        """Generated parse-tree context for ``ALTER MATERIALIZED VIEW (IF EXISTS)? from RENAME TO to``; labeled children ``from_`` and ``to`` are filled by the parser."""
        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.StatementContext
            super().__init__(parser)
            self.from_ = None # QualifiedNameContext
            self.to = None # QualifiedNameContext
            self.copyFrom(ctx)
        def ALTER(self):
            return self.getToken(SqlBaseParser.ALTER, 0)
        def MATERIALIZED(self):
            return self.getToken(SqlBaseParser.MATERIALIZED, 0)
        def VIEW(self):
            return self.getToken(SqlBaseParser.VIEW, 0)
        def RENAME(self):
            return self.getToken(SqlBaseParser.RENAME, 0)
        def TO(self):
            return self.getToken(SqlBaseParser.TO, 0)
        def qualifiedName(self, i:int=None):
            if i is None:
                return self.getTypedRuleContexts(SqlBaseParser.QualifiedNameContext)
            else:
                return self.getTypedRuleContext(SqlBaseParser.QualifiedNameContext,i)
        def IF(self):
            return self.getToken(SqlBaseParser.IF, 0)
        def EXISTS(self):
            return self.getToken(SqlBaseParser.EXISTS, 0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterRenameMaterializedView" ):
                listener.enterRenameMaterializedView(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitRenameMaterializedView" ):
                listener.exitRenameMaterializedView(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitRenameMaterializedView" ):
                return visitor.visitRenameMaterializedView(self)
            else:
                return visitor.visitChildren(self)
    class ShowSchemasContext(StatementContext):
        """Generated parse-tree context for ``SHOW SCHEMAS ((FROM | IN) identifier)? (LIKE pattern (ESCAPE escape)?)?``; labeled children ``pattern`` and ``escape`` are filled by the parser."""
        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.StatementContext
            super().__init__(parser)
            self.pattern = None # StringContext
            self.escape = None # StringContext
            self.copyFrom(ctx)
        def SHOW(self):
            return self.getToken(SqlBaseParser.SHOW, 0)
        def SCHEMAS(self):
            return self.getToken(SqlBaseParser.SCHEMAS, 0)
        def identifier(self):
            return self.getTypedRuleContext(SqlBaseParser.IdentifierContext,0)
        def LIKE(self):
            return self.getToken(SqlBaseParser.LIKE, 0)
        def FROM(self):
            return self.getToken(SqlBaseParser.FROM, 0)
        def IN(self):
            return self.getToken(SqlBaseParser.IN, 0)
        def string(self, i:int=None):
            if i is None:
                return self.getTypedRuleContexts(SqlBaseParser.StringContext)
            else:
                return self.getTypedRuleContext(SqlBaseParser.StringContext,i)
        def ESCAPE(self):
            return self.getToken(SqlBaseParser.ESCAPE, 0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterShowSchemas" ):
                listener.enterShowSchemas(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitShowSchemas" ):
                listener.exitShowSchemas(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitShowSchemas" ):
                return visitor.visitShowSchemas(self)
            else:
                return visitor.visitChildren(self)
    class DropTableContext(StatementContext):
        """Generated parse-tree context for ``DROP TABLE (IF EXISTS)? qualifiedName``."""
        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.StatementContext
            super().__init__(parser)
            self.copyFrom(ctx)
        def DROP(self):
            return self.getToken(SqlBaseParser.DROP, 0)
        def TABLE(self):
            return self.getToken(SqlBaseParser.TABLE, 0)
        def qualifiedName(self):
            return self.getTypedRuleContext(SqlBaseParser.QualifiedNameContext,0)
        def IF(self):
            return self.getToken(SqlBaseParser.IF, 0)
        def EXISTS(self):
            return self.getToken(SqlBaseParser.EXISTS, 0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterDropTable" ):
                listener.enterDropTable(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitDropTable" ):
                listener.exitDropTable(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitDropTable" ):
                return visitor.visitDropTable(self)
            else:
                return visitor.visitChildren(self)
    class SetSchemaAuthorizationContext(StatementContext):
        """Generated parse-tree context for ``ALTER SCHEMA qualifiedName SET AUTHORIZATION principal``."""
        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.StatementContext
            super().__init__(parser)
            self.copyFrom(ctx)
        def ALTER(self):
            return self.getToken(SqlBaseParser.ALTER, 0)
        def SCHEMA(self):
            return self.getToken(SqlBaseParser.SCHEMA, 0)
        def qualifiedName(self):
            return self.getTypedRuleContext(SqlBaseParser.QualifiedNameContext,0)
        def SET(self):
            return self.getToken(SqlBaseParser.SET, 0)
        def AUTHORIZATION(self):
            return self.getToken(SqlBaseParser.AUTHORIZATION, 0)
        def principal(self):
            return self.getTypedRuleContext(SqlBaseParser.PrincipalContext,0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterSetSchemaAuthorization" ):
                listener.enterSetSchemaAuthorization(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitSetSchemaAuthorization" ):
                listener.exitSetSchemaAuthorization(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitSetSchemaAuthorization" ):
                return visitor.visitSetSchemaAuthorization(self)
            else:
                return visitor.visitChildren(self)
    class RollbackContext(StatementContext):
        """Generated parse-tree context for ``ROLLBACK WORK?``."""
        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.StatementContext
            super().__init__(parser)
            self.copyFrom(ctx)
        def ROLLBACK(self):
            return self.getToken(SqlBaseParser.ROLLBACK, 0)
        def WORK(self):
            return self.getToken(SqlBaseParser.WORK, 0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterRollback" ):
                listener.enterRollback(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitRollback" ):
                listener.exitRollback(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitRollback" ):
                return visitor.visitRollback(self)
            else:
                return visitor.visitChildren(self)
    class CommentTableContext(StatementContext):
        """Generated parse-tree context for ``COMMENT ON TABLE qualifiedName IS (string | NULL)``."""
        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.StatementContext
            super().__init__(parser)
            self.copyFrom(ctx)
        def COMMENT(self):
            return self.getToken(SqlBaseParser.COMMENT, 0)
        def ON(self):
            return self.getToken(SqlBaseParser.ON, 0)
        def TABLE(self):
            return self.getToken(SqlBaseParser.TABLE, 0)
        def qualifiedName(self):
            return self.getTypedRuleContext(SqlBaseParser.QualifiedNameContext,0)
        def IS(self):
            return self.getToken(SqlBaseParser.IS, 0)
        def string(self):
            return self.getTypedRuleContext(SqlBaseParser.StringContext,0)
        def NULL(self):
            return self.getToken(SqlBaseParser.NULL, 0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterCommentTable" ):
                listener.enterCommentTable(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitCommentTable" ):
                listener.exitCommentTable(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitCommentTable" ):
                return visitor.visitCommentTable(self)
            else:
                return visitor.visitChildren(self)
class RenameViewContext(StatementContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.StatementContext
super().__init__(parser)
self.from_ = None # QualifiedNameContext
self.to = None # QualifiedNameContext
self.copyFrom(ctx)
def ALTER(self):
return self.getToken(SqlBaseParser.ALTER, 0)
def VIEW(self):
return self.getToken(SqlBaseParser.VIEW, 0)
def RENAME(self):
return self.getToken(SqlBaseParser.RENAME, 0)
def TO(self):
return self.getToken(SqlBaseParser.TO, 0)
def qualifiedName(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(SqlBaseParser.QualifiedNameContext)
else:
return self.getTypedRuleContext(SqlBaseParser.QualifiedNameContext,i)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterRenameView" ):
listener.enterRenameView(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitRenameView" ):
listener.exitRenameView(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitRenameView" ):
return visitor.visitRenameView(self)
else:
return visitor.visitChildren(self)
class SetPathContext(StatementContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.StatementContext
super().__init__(parser)
self.copyFrom(ctx)
def SET(self):
return self.getToken(SqlBaseParser.SET, 0)
def PATH(self):
return self.getToken(SqlBaseParser.PATH, 0)
def pathSpecification(self):
return self.getTypedRuleContext(SqlBaseParser.PathSpecificationContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterSetPath" ):
listener.enterSetPath(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitSetPath" ):
listener.exitSetPath(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitSetPath" ):
return visitor.visitSetPath(self)
else:
return visitor.visitChildren(self)
class GrantRolesContext(StatementContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.StatementContext
super().__init__(parser)
self.catalog = None # IdentifierContext
self.copyFrom(ctx)
def GRANT(self):
return self.getToken(SqlBaseParser.GRANT, 0)
def roles(self):
return self.getTypedRuleContext(SqlBaseParser.RolesContext,0)
def TO(self):
return self.getToken(SqlBaseParser.TO, 0)
def principal(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(SqlBaseParser.PrincipalContext)
else:
return self.getTypedRuleContext(SqlBaseParser.PrincipalContext,i)
def WITH(self):
return self.getToken(SqlBaseParser.WITH, 0)
def ADMIN(self):
return self.getToken(SqlBaseParser.ADMIN, 0)
def OPTION(self):
return self.getToken(SqlBaseParser.OPTION, 0)
def GRANTED(self):
return self.getToken(SqlBaseParser.GRANTED, 0)
def BY(self):
return self.getToken(SqlBaseParser.BY, 0)
def grantor(self):
return self.getTypedRuleContext(SqlBaseParser.GrantorContext,0)
def IN(self):
return self.getToken(SqlBaseParser.IN, 0)
def identifier(self):
return self.getTypedRuleContext(SqlBaseParser.IdentifierContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterGrantRoles" ):
listener.enterGrantRoles(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitGrantRoles" ):
listener.exitGrantRoles(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitGrantRoles" ):
return visitor.visitGrantRoles(self)
else:
return visitor.visitChildren(self)
class CallContext(StatementContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.StatementContext
super().__init__(parser)
self.copyFrom(ctx)
def CALL(self):
return self.getToken(SqlBaseParser.CALL, 0)
def qualifiedName(self):
return self.getTypedRuleContext(SqlBaseParser.QualifiedNameContext,0)
def callArgument(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(SqlBaseParser.CallArgumentContext)
else:
return self.getTypedRuleContext(SqlBaseParser.CallArgumentContext,i)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterCall" ):
listener.enterCall(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitCall" ):
listener.exitCall(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitCall" ):
return visitor.visitCall(self)
else:
return visitor.visitChildren(self)
class RefreshMaterializedViewContext(StatementContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.StatementContext
super().__init__(parser)
self.copyFrom(ctx)
def REFRESH(self):
return self.getToken(SqlBaseParser.REFRESH, 0)
def MATERIALIZED(self):
return self.getToken(SqlBaseParser.MATERIALIZED, 0)
def VIEW(self):
return self.getToken(SqlBaseParser.VIEW, 0)
def qualifiedName(self):
return self.getTypedRuleContext(SqlBaseParser.QualifiedNameContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterRefreshMaterializedView" ):
listener.enterRefreshMaterializedView(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitRefreshMaterializedView" ):
listener.exitRefreshMaterializedView(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitRefreshMaterializedView" ):
return visitor.visitRefreshMaterializedView(self)
else:
return visitor.visitChildren(self)
class ShowCreateMaterializedViewContext(StatementContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.StatementContext
super().__init__(parser)
self.copyFrom(ctx)
def SHOW(self):
return self.getToken(SqlBaseParser.SHOW, 0)
def CREATE(self):
return self.getToken(SqlBaseParser.CREATE, 0)
def MATERIALIZED(self):
return self.getToken(SqlBaseParser.MATERIALIZED, 0)
def VIEW(self):
return self.getToken(SqlBaseParser.VIEW, 0)
def qualifiedName(self):
return self.getTypedRuleContext(SqlBaseParser.QualifiedNameContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterShowCreateMaterializedView" ):
listener.enterShowCreateMaterializedView(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitShowCreateMaterializedView" ):
listener.exitShowCreateMaterializedView(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitShowCreateMaterializedView" ):
return visitor.visitShowCreateMaterializedView(self)
else:
return visitor.visitChildren(self)
class ShowFunctionsContext(StatementContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.StatementContext
super().__init__(parser)
self.pattern = None # StringContext
self.escape = None # StringContext
self.copyFrom(ctx)
def SHOW(self):
return self.getToken(SqlBaseParser.SHOW, 0)
def FUNCTIONS(self):
return self.getToken(SqlBaseParser.FUNCTIONS, 0)
def LIKE(self):
return self.getToken(SqlBaseParser.LIKE, 0)
def string(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(SqlBaseParser.StringContext)
else:
return self.getTypedRuleContext(SqlBaseParser.StringContext,i)
def ESCAPE(self):
return self.getToken(SqlBaseParser.ESCAPE, 0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterShowFunctions" ):
listener.enterShowFunctions(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitShowFunctions" ):
listener.exitShowFunctions(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitShowFunctions" ):
return visitor.visitShowFunctions(self)
else:
return visitor.visitChildren(self)
class DescribeOutputContext(StatementContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.StatementContext
super().__init__(parser)
self.copyFrom(ctx)
def DESCRIBE(self):
return self.getToken(SqlBaseParser.DESCRIBE, 0)
def OUTPUT(self):
return self.getToken(SqlBaseParser.OUTPUT, 0)
def identifier(self):
return self.getTypedRuleContext(SqlBaseParser.IdentifierContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterDescribeOutput" ):
listener.enterDescribeOutput(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitDescribeOutput" ):
listener.exitDescribeOutput(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitDescribeOutput" ):
return visitor.visitDescribeOutput(self)
else:
return visitor.visitChildren(self)
class GrantContext(StatementContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.StatementContext
super().__init__(parser)
self.grantee = None # PrincipalContext
self.copyFrom(ctx)
def GRANT(self, i:int=None):
if i is None:
return self.getTokens(SqlBaseParser.GRANT)
else:
return self.getToken(SqlBaseParser.GRANT, i)
def ON(self):
return self.getToken(SqlBaseParser.ON, 0)
def qualifiedName(self):
return self.getTypedRuleContext(SqlBaseParser.QualifiedNameContext,0)
def TO(self):
return self.getToken(SqlBaseParser.TO, 0)
def principal(self):
return self.getTypedRuleContext(SqlBaseParser.PrincipalContext,0)
def privilege(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(SqlBaseParser.PrivilegeContext)
else:
return self.getTypedRuleContext(SqlBaseParser.PrivilegeContext,i)
def ALL(self):
return self.getToken(SqlBaseParser.ALL, 0)
def PRIVILEGES(self):
return self.getToken(SqlBaseParser.PRIVILEGES, 0)
def WITH(self):
return self.getToken(SqlBaseParser.WITH, 0)
def OPTION(self):
return self.getToken(SqlBaseParser.OPTION, 0)
def SCHEMA(self):
return self.getToken(SqlBaseParser.SCHEMA, 0)
def TABLE(self):
return self.getToken(SqlBaseParser.TABLE, 0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterGrant" ):
listener.enterGrant(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitGrant" ):
listener.exitGrant(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitGrant" ):
return visitor.visitGrant(self)
else:
return visitor.visitChildren(self)
class SetTablePropertiesContext(StatementContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.StatementContext
super().__init__(parser)
self.tableName = None # QualifiedNameContext
self.copyFrom(ctx)
def ALTER(self):
return self.getToken(SqlBaseParser.ALTER, 0)
def TABLE(self):
return self.getToken(SqlBaseParser.TABLE, 0)
def SET(self):
return self.getToken(SqlBaseParser.SET, 0)
def PROPERTIES(self):
return self.getToken(SqlBaseParser.PROPERTIES, 0)
def propertyAssignments(self):
return self.getTypedRuleContext(SqlBaseParser.PropertyAssignmentsContext,0)
def qualifiedName(self):
return self.getTypedRuleContext(SqlBaseParser.QualifiedNameContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterSetTableProperties" ):
listener.enterSetTableProperties(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitSetTableProperties" ):
listener.exitSetTableProperties(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitSetTableProperties" ):
return visitor.visitSetTableProperties(self)
else:
return visitor.visitChildren(self)
def statement(self):
localctx = SqlBaseParser.StatementContext(self, self._ctx, self.state)
self.enterRule(localctx, 10, self.RULE_statement)
self._la = 0 # Token type
try:
self.state = 953
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,104,self._ctx)
if la_ == 1:
localctx = SqlBaseParser.StatementDefaultContext(self, localctx)
self.enterOuterAlt(localctx, 1)
self.state = 197
self.query()
elif la_ == 2:
localctx = SqlBaseParser.UseContext(self, localctx)
self.enterOuterAlt(localctx, 2)
self.state = 198
self.match(SqlBaseParser.USE)
self.state = 199
localctx.schema = self.identifier()
elif la_ == 3:
localctx = SqlBaseParser.UseContext(self, localctx)
self.enterOuterAlt(localctx, 3)
self.state = 200
self.match(SqlBaseParser.USE)
self.state = 201
localctx.catalog = self.identifier()
self.state = 202
self.match(SqlBaseParser.T__0)
self.state = 203
localctx.schema = self.identifier()
elif la_ == 4:
localctx = SqlBaseParser.CreateSchemaContext(self, localctx)
self.enterOuterAlt(localctx, 4)
self.state = 205
self.match(SqlBaseParser.CREATE)
self.state = 206
self.match(SqlBaseParser.SCHEMA)
self.state = 210
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,0,self._ctx)
if la_ == 1:
self.state = 207
self.match(SqlBaseParser.IF)
self.state = 208
self.match(SqlBaseParser.NOT)
self.state = 209
self.match(SqlBaseParser.EXISTS)
self.state = 212
self.qualifiedName()
self.state = 215
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==SqlBaseParser.AUTHORIZATION:
self.state = 213
self.match(SqlBaseParser.AUTHORIZATION)
self.state = 214
self.principal()
self.state = 219
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==SqlBaseParser.WITH:
self.state = 217
self.match(SqlBaseParser.WITH)
self.state = 218
self.properties()
elif la_ == 5:
localctx = SqlBaseParser.DropSchemaContext(self, localctx)
self.enterOuterAlt(localctx, 5)
self.state = 221
self.match(SqlBaseParser.DROP)
self.state = 222
self.match(SqlBaseParser.SCHEMA)
self.state = 225
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,3,self._ctx)
if la_ == 1:
self.state = 223
self.match(SqlBaseParser.IF)
self.state = 224
self.match(SqlBaseParser.EXISTS)
self.state = 227
self.qualifiedName()
self.state = 229
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==SqlBaseParser.CASCADE or _la==SqlBaseParser.RESTRICT:
self.state = 228
_la = self._input.LA(1)
if not(_la==SqlBaseParser.CASCADE or _la==SqlBaseParser.RESTRICT):
self._errHandler.recoverInline(self)
else:
self._errHandler.reportMatch(self)
self.consume()
elif la_ == 6:
localctx = SqlBaseParser.RenameSchemaContext(self, localctx)
self.enterOuterAlt(localctx, 6)
self.state = 231
self.match(SqlBaseParser.ALTER)
self.state = 232
self.match(SqlBaseParser.SCHEMA)
self.state = 233
self.qualifiedName()
self.state = 234
self.match(SqlBaseParser.RENAME)
self.state = 235
self.match(SqlBaseParser.TO)
self.state = 236
self.identifier()
elif la_ == 7:
localctx = SqlBaseParser.SetSchemaAuthorizationContext(self, localctx)
self.enterOuterAlt(localctx, 7)
self.state = 238
self.match(SqlBaseParser.ALTER)
self.state = 239
self.match(SqlBaseParser.SCHEMA)
self.state = 240
self.qualifiedName()
self.state = 241
self.match(SqlBaseParser.SET)
self.state = 242
self.match(SqlBaseParser.AUTHORIZATION)
self.state = 243
self.principal()
elif la_ == 8:
localctx = SqlBaseParser.CreateTableAsSelectContext(self, localctx)
self.enterOuterAlt(localctx, 8)
self.state = 245
self.match(SqlBaseParser.CREATE)
self.state = 246
self.match(SqlBaseParser.TABLE)
self.state = 250
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,5,self._ctx)
if la_ == 1:
self.state = 247
self.match(SqlBaseParser.IF)
self.state = 248
self.match(SqlBaseParser.NOT)
self.state = 249
self.match(SqlBaseParser.EXISTS)
self.state = 252
self.qualifiedName()
self.state = 254
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==SqlBaseParser.T__1:
self.state = 253
self.columnAliases()
self.state = 258
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==SqlBaseParser.COMMENT:
self.state = 256
self.match(SqlBaseParser.COMMENT)
self.state = 257
self.string()
self.state = 262
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==SqlBaseParser.WITH:
self.state = 260
self.match(SqlBaseParser.WITH)
self.state = 261
self.properties()
self.state = 264
self.match(SqlBaseParser.AS)
self.state = 270
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,9,self._ctx)
if la_ == 1:
self.state = 265
self.query()
elif la_ == 2:
self.state = 266
self.match(SqlBaseParser.T__1)
self.state = 267
self.query()
self.state = 268
self.match(SqlBaseParser.T__2)
self.state = 277
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==SqlBaseParser.WITH:
self.state = 272
self.match(SqlBaseParser.WITH)
self.state = 274
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==SqlBaseParser.NO:
self.state = 273
self.match(SqlBaseParser.NO)
self.state = 276
self.match(SqlBaseParser.DATA)
elif la_ == 9:
localctx = SqlBaseParser.CreateTableContext(self, localctx)
self.enterOuterAlt(localctx, 9)
self.state = 279
self.match(SqlBaseParser.CREATE)
self.state = 280
self.match(SqlBaseParser.TABLE)
self.state = 284
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,12,self._ctx)
if la_ == 1:
self.state = 281
self.match(SqlBaseParser.IF)
self.state = 282
self.match(SqlBaseParser.NOT)
self.state = 283
self.match(SqlBaseParser.EXISTS)
self.state = 286
self.qualifiedName()
self.state = 287
self.match(SqlBaseParser.T__1)
self.state = 288
self.tableElement()
self.state = 293
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la==SqlBaseParser.T__3:
self.state = 289
self.match(SqlBaseParser.T__3)
self.state = 290
self.tableElement()
self.state = 295
self._errHandler.sync(self)
_la = self._input.LA(1)
self.state = 296
self.match(SqlBaseParser.T__2)
self.state = 299
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==SqlBaseParser.COMMENT:
self.state = 297
self.match(SqlBaseParser.COMMENT)
self.state = 298
self.string()
self.state = 303
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==SqlBaseParser.WITH:
self.state = 301
self.match(SqlBaseParser.WITH)
self.state = 302
self.properties()
elif la_ == 10:
localctx = SqlBaseParser.DropTableContext(self, localctx)
self.enterOuterAlt(localctx, 10)
self.state = 305
self.match(SqlBaseParser.DROP)
self.state = 306
self.match(SqlBaseParser.TABLE)
self.state = 309
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,16,self._ctx)
if la_ == 1:
self.state = 307
self.match(SqlBaseParser.IF)
self.state = 308
self.match(SqlBaseParser.EXISTS)
self.state = 311
self.qualifiedName()
elif la_ == 11:
localctx = SqlBaseParser.InsertIntoContext(self, localctx)
self.enterOuterAlt(localctx, 11)
self.state = 312
self.match(SqlBaseParser.INSERT)
self.state = 313
self.match(SqlBaseParser.INTO)
self.state = 314
self.qualifiedName()
self.state = 316
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,17,self._ctx)
if la_ == 1:
self.state = 315
self.columnAliases()
self.state = 318
self.query()
elif la_ == 12:
localctx = SqlBaseParser.DeleteContext(self, localctx)
self.enterOuterAlt(localctx, 12)
self.state = 320
self.match(SqlBaseParser.DELETE)
self.state = 321
self.match(SqlBaseParser.FROM)
self.state = 322
self.qualifiedName()
self.state = 325
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==SqlBaseParser.WHERE:
self.state = 323
self.match(SqlBaseParser.WHERE)
self.state = 324
self.booleanExpression(0)
elif la_ == 13:
localctx = SqlBaseParser.TruncateTableContext(self, localctx)
self.enterOuterAlt(localctx, 13)
self.state = 327
self.match(SqlBaseParser.TRUNCATE)
self.state = 328
self.match(SqlBaseParser.TABLE)
self.state = 329
self.qualifiedName()
elif la_ == 14:
localctx = SqlBaseParser.CommentTableContext(self, localctx)
self.enterOuterAlt(localctx, 14)
self.state = 330
self.match(SqlBaseParser.COMMENT)
self.state = 331
self.match(SqlBaseParser.ON)
self.state = 332
self.match(SqlBaseParser.TABLE)
self.state = 333
self.qualifiedName()
self.state = 334
self.match(SqlBaseParser.IS)
self.state = 337
self._errHandler.sync(self)
token = self._input.LA(1)
if token in [SqlBaseParser.STRING, SqlBaseParser.UNICODE_STRING]:
self.state = 335
self.string()
elif token in [SqlBaseParser.NULL]:
self.state = 336
self.match(SqlBaseParser.NULL)
else:
raise NoViableAltException(self)
elif la_ == 15:
localctx = SqlBaseParser.CommentColumnContext(self, localctx)
self.enterOuterAlt(localctx, 15)
self.state = 339
self.match(SqlBaseParser.COMMENT)
self.state = 340
self.match(SqlBaseParser.ON)
self.state = 341
self.match(SqlBaseParser.COLUMN)
self.state = 342
self.qualifiedName()
self.state = 343
self.match(SqlBaseParser.IS)
self.state = 346
self._errHandler.sync(self)
token = self._input.LA(1)
if token in [SqlBaseParser.STRING, SqlBaseParser.UNICODE_STRING]:
self.state = 344
self.string()
elif token in [SqlBaseParser.NULL]:
self.state = 345
self.match(SqlBaseParser.NULL)
else:
raise NoViableAltException(self)
elif la_ == 16:
localctx = SqlBaseParser.RenameTableContext(self, localctx)
self.enterOuterAlt(localctx, 16)
self.state = 348
self.match(SqlBaseParser.ALTER)
self.state = 349
self.match(SqlBaseParser.TABLE)
self.state = 352
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,21,self._ctx)
if la_ == 1:
self.state = 350
self.match(SqlBaseParser.IF)
self.state = 351
self.match(SqlBaseParser.EXISTS)
self.state = 354
localctx.from_ = self.qualifiedName()
self.state = 355
self.match(SqlBaseParser.RENAME)
self.state = 356
self.match(SqlBaseParser.TO)
self.state = 357
localctx.to = self.qualifiedName()
elif la_ == 17:
localctx = SqlBaseParser.AddColumnContext(self, localctx)
self.enterOuterAlt(localctx, 17)
self.state = 359
self.match(SqlBaseParser.ALTER)
self.state = 360
self.match(SqlBaseParser.TABLE)
self.state = 363
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,22,self._ctx)
if la_ == 1:
self.state = 361
self.match(SqlBaseParser.IF)
self.state = 362
self.match(SqlBaseParser.EXISTS)
self.state = 365
localctx.tableName = self.qualifiedName()
self.state = 366
self.match(SqlBaseParser.ADD)
self.state = 367
self.match(SqlBaseParser.COLUMN)
self.state = 371
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,23,self._ctx)
if la_ == 1:
self.state = 368
self.match(SqlBaseParser.IF)
self.state = 369
self.match(SqlBaseParser.NOT)
self.state = 370
self.match(SqlBaseParser.EXISTS)
self.state = 373
localctx.column = self.columnDefinition()
elif la_ == 18:
localctx = SqlBaseParser.RenameColumnContext(self, localctx)
self.enterOuterAlt(localctx, 18)
self.state = 375
self.match(SqlBaseParser.ALTER)
self.state = 376
self.match(SqlBaseParser.TABLE)
self.state = 379
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,24,self._ctx)
if la_ == 1:
self.state = 377
self.match(SqlBaseParser.IF)
self.state = 378
self.match(SqlBaseParser.EXISTS)
self.state = 381
localctx.tableName = self.qualifiedName()
self.state = 382
self.match(SqlBaseParser.RENAME)
self.state = 383
self.match(SqlBaseParser.COLUMN)
self.state = 386
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,25,self._ctx)
if la_ == 1:
self.state = 384
self.match(SqlBaseParser.IF)
self.state = 385
self.match(SqlBaseParser.EXISTS)
self.state = 388
localctx.from_ = self.identifier()
self.state = 389
self.match(SqlBaseParser.TO)
self.state = 390
localctx.to = self.identifier()
elif la_ == 19:
localctx = SqlBaseParser.DropColumnContext(self, localctx)
self.enterOuterAlt(localctx, 19)
self.state = 392
self.match(SqlBaseParser.ALTER)
self.state = 393
self.match(SqlBaseParser.TABLE)
self.state = 396
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,26,self._ctx)
if la_ == 1:
self.state = 394
self.match(SqlBaseParser.IF)
self.state = 395
self.match(SqlBaseParser.EXISTS)
self.state = 398
localctx.tableName = self.qualifiedName()
self.state = 399
self.match(SqlBaseParser.DROP)
self.state = 400
self.match(SqlBaseParser.COLUMN)
self.state = 403
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,27,self._ctx)
if la_ == 1:
self.state = 401
self.match(SqlBaseParser.IF)
self.state = 402
self.match(SqlBaseParser.EXISTS)
self.state = 405
localctx.column = self.qualifiedName()
elif la_ == 20:
localctx = SqlBaseParser.SetTableAuthorizationContext(self, localctx)
self.enterOuterAlt(localctx, 20)
self.state = 407
self.match(SqlBaseParser.ALTER)
self.state = 408
self.match(SqlBaseParser.TABLE)
self.state = 409
localctx.tableName = self.qualifiedName()
self.state = 410
self.match(SqlBaseParser.SET)
self.state = 411
self.match(SqlBaseParser.AUTHORIZATION)
self.state = 412
self.principal()
elif la_ == 21:
localctx = SqlBaseParser.SetTablePropertiesContext(self, localctx)
self.enterOuterAlt(localctx, 21)
self.state = 414
self.match(SqlBaseParser.ALTER)
self.state = 415
self.match(SqlBaseParser.TABLE)
self.state = 416
localctx.tableName = self.qualifiedName()
self.state = 417
self.match(SqlBaseParser.SET)
self.state = 418
self.match(SqlBaseParser.PROPERTIES)
self.state = 419
self.propertyAssignments()
elif la_ == 22:
localctx = SqlBaseParser.TableExecuteContext(self, localctx)
self.enterOuterAlt(localctx, 22)
self.state = 421
self.match(SqlBaseParser.ALTER)
self.state = 422
self.match(SqlBaseParser.TABLE)
self.state = 423
localctx.tableName = self.qualifiedName()
self.state = 424
self.match(SqlBaseParser.EXECUTE)
self.state = 425
localctx.procedureName = self.identifier()
self.state = 438
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==SqlBaseParser.T__1:
self.state = 426
self.match(SqlBaseParser.T__1)
self.state = 435
self._errHandler.sync(self)
_la = self._input.LA(1)
if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << SqlBaseParser.T__1) | (1 << SqlBaseParser.ADD) | (1 << SqlBaseParser.ADMIN) | (1 << SqlBaseParser.AFTER) | (1 << SqlBaseParser.ALL) | (1 << SqlBaseParser.ANALYZE) | (1 << SqlBaseParser.ANY) | (1 << SqlBaseParser.ARRAY) | (1 << SqlBaseParser.ASC) | (1 << SqlBaseParser.AT) | (1 << SqlBaseParser.AUTHORIZATION) | (1 << SqlBaseParser.BERNOULLI) | (1 << SqlBaseParser.CALL) | (1 << SqlBaseParser.CASCADE) | (1 << SqlBaseParser.CASE) | (1 << SqlBaseParser.CAST) | (1 << SqlBaseParser.CATALOGS) | (1 << SqlBaseParser.COLUMN) | (1 << SqlBaseParser.COLUMNS) | (1 << SqlBaseParser.COMMENT) | (1 << SqlBaseParser.COMMIT) | (1 << SqlBaseParser.COMMITTED) | (1 << SqlBaseParser.COUNT) | (1 << SqlBaseParser.CURRENT) | (1 << SqlBaseParser.CURRENT_CATALOG) | (1 << SqlBaseParser.CURRENT_DATE) | (1 << SqlBaseParser.CURRENT_PATH) | (1 << SqlBaseParser.CURRENT_SCHEMA) | (1 << SqlBaseParser.CURRENT_TIME) | (1 << SqlBaseParser.CURRENT_TIMESTAMP) | (1 << SqlBaseParser.CURRENT_USER) | (1 << SqlBaseParser.DATA) | (1 << SqlBaseParser.DATE) | (1 << SqlBaseParser.DAY) | (1 << SqlBaseParser.DEFINER) | (1 << SqlBaseParser.DENY))) != 0) or ((((_la - 64)) & ~0x3f) == 0 and ((1 << (_la - 64)) & ((1 << (SqlBaseParser.DESC - 64)) | (1 << (SqlBaseParser.DEFINE - 64)) | (1 << (SqlBaseParser.DISTRIBUTED - 64)) | (1 << (SqlBaseParser.DOUBLE - 64)) | (1 << (SqlBaseParser.EMPTY - 64)) | (1 << (SqlBaseParser.ERROR - 64)) | (1 << (SqlBaseParser.EXCLUDING - 64)) | (1 << (SqlBaseParser.EXISTS - 64)) | (1 << (SqlBaseParser.EXPLAIN - 64)) | (1 << (SqlBaseParser.EXTRACT - 64)) | (1 << (SqlBaseParser.FALSE - 64)) | (1 << (SqlBaseParser.FETCH - 64)) | (1 << (SqlBaseParser.FILTER - 64)) | (1 << (SqlBaseParser.FINAL - 64)) | (1 << (SqlBaseParser.FIRST - 64)) | (1 << (SqlBaseParser.FOLLOWING - 64)) | (1 << (SqlBaseParser.FORMAT - 64)) | (1 << (SqlBaseParser.FUNCTIONS - 64)) | (1 << (SqlBaseParser.GRANT - 64)) | (1 << (SqlBaseParser.GRANTED - 64)) | (1 << (SqlBaseParser.GRANTS 
- 64)) | (1 << (SqlBaseParser.GRAPHVIZ - 64)) | (1 << (SqlBaseParser.GROUPING - 64)) | (1 << (SqlBaseParser.GROUPS - 64)) | (1 << (SqlBaseParser.HOUR - 64)) | (1 << (SqlBaseParser.IF - 64)) | (1 << (SqlBaseParser.IGNORE - 64)) | (1 << (SqlBaseParser.INCLUDING - 64)) | (1 << (SqlBaseParser.INITIAL - 64)) | (1 << (SqlBaseParser.INPUT - 64)) | (1 << (SqlBaseParser.INTERVAL - 64)) | (1 << (SqlBaseParser.INVOKER - 64)) | (1 << (SqlBaseParser.IO - 64)) | (1 << (SqlBaseParser.ISOLATION - 64)) | (1 << (SqlBaseParser.JSON - 64)) | (1 << (SqlBaseParser.LAST - 64)) | (1 << (SqlBaseParser.LATERAL - 64)) | (1 << (SqlBaseParser.LEVEL - 64)) | (1 << (SqlBaseParser.LIMIT - 64)) | (1 << (SqlBaseParser.LISTAGG - 64)) | (1 << (SqlBaseParser.LOCAL - 64)) | (1 << (SqlBaseParser.LOCALTIME - 64)))) != 0) or ((((_la - 128)) & ~0x3f) == 0 and ((1 << (_la - 128)) & ((1 << (SqlBaseParser.LOCALTIMESTAMP - 128)) | (1 << (SqlBaseParser.LOGICAL - 128)) | (1 << (SqlBaseParser.MAP - 128)) | (1 << (SqlBaseParser.MATCH - 128)) | (1 << (SqlBaseParser.MATCHED - 128)) | (1 << (SqlBaseParser.MATCHES - 128)) | (1 << (SqlBaseParser.MATCH_RECOGNIZE - 128)) | (1 << (SqlBaseParser.MATERIALIZED - 128)) | (1 << (SqlBaseParser.MEASURES - 128)) | (1 << (SqlBaseParser.MERGE - 128)) | (1 << (SqlBaseParser.MINUTE - 128)) | (1 << (SqlBaseParser.MONTH - 128)) | (1 << (SqlBaseParser.NEXT - 128)) | (1 << (SqlBaseParser.NFC - 128)) | (1 << (SqlBaseParser.NFD - 128)) | (1 << (SqlBaseParser.NFKC - 128)) | (1 << (SqlBaseParser.NFKD - 128)) | (1 << (SqlBaseParser.NO - 128)) | (1 << (SqlBaseParser.NONE - 128)) | (1 << (SqlBaseParser.NORMALIZE - 128)) | (1 << (SqlBaseParser.NOT - 128)) | (1 << (SqlBaseParser.NULL - 128)) | (1 << (SqlBaseParser.NULLIF - 128)) | (1 << (SqlBaseParser.NULLS - 128)) | (1 << (SqlBaseParser.OFFSET - 128)) | (1 << (SqlBaseParser.OMIT - 128)) | (1 << (SqlBaseParser.OF - 128)) | (1 << (SqlBaseParser.ONE - 128)) | (1 << (SqlBaseParser.ONLY - 128)) | (1 << (SqlBaseParser.OPTION - 128)) | (1 << 
(SqlBaseParser.ORDINALITY - 128)) | (1 << (SqlBaseParser.OUTPUT - 128)) | (1 << (SqlBaseParser.OVER - 128)) | (1 << (SqlBaseParser.OVERFLOW - 128)) | (1 << (SqlBaseParser.PARTITION - 128)) | (1 << (SqlBaseParser.PARTITIONS - 128)) | (1 << (SqlBaseParser.PAST - 128)) | (1 << (SqlBaseParser.PATH - 128)) | (1 << (SqlBaseParser.PATTERN - 128)) | (1 << (SqlBaseParser.PER - 128)) | (1 << (SqlBaseParser.PERMUTE - 128)) | (1 << (SqlBaseParser.POSITION - 128)) | (1 << (SqlBaseParser.PRECEDING - 128)) | (1 << (SqlBaseParser.PRECISION - 128)) | (1 << (SqlBaseParser.PRIVILEGES - 128)) | (1 << (SqlBaseParser.PROPERTIES - 128)) | (1 << (SqlBaseParser.RANGE - 128)) | (1 << (SqlBaseParser.READ - 128)) | (1 << (SqlBaseParser.REFRESH - 128)) | (1 << (SqlBaseParser.RENAME - 128)) | (1 << (SqlBaseParser.REPEATABLE - 128)) | (1 << (SqlBaseParser.REPLACE - 128)) | (1 << (SqlBaseParser.RESET - 128)) | (1 << (SqlBaseParser.RESPECT - 128)) | (1 << (SqlBaseParser.RESTRICT - 128)) | (1 << (SqlBaseParser.REVOKE - 128)))) != 0) or ((((_la - 192)) & ~0x3f) == 0 and ((1 << (_la - 192)) & ((1 << (SqlBaseParser.ROLE - 192)) | (1 << (SqlBaseParser.ROLES - 192)) | (1 << (SqlBaseParser.ROLLBACK - 192)) | (1 << (SqlBaseParser.ROW - 192)) | (1 << (SqlBaseParser.ROWS - 192)) | (1 << (SqlBaseParser.RUNNING - 192)) | (1 << (SqlBaseParser.SCHEMA - 192)) | (1 << (SqlBaseParser.SCHEMAS - 192)) | (1 << (SqlBaseParser.SECOND - 192)) | (1 << (SqlBaseParser.SECURITY - 192)) | (1 << (SqlBaseParser.SEEK - 192)) | (1 << (SqlBaseParser.SERIALIZABLE - 192)) | (1 << (SqlBaseParser.SESSION - 192)) | (1 << (SqlBaseParser.SET - 192)) | (1 << (SqlBaseParser.SETS - 192)) | (1 << (SqlBaseParser.SHOW - 192)) | (1 << (SqlBaseParser.SOME - 192)) | (1 << (SqlBaseParser.START - 192)) | (1 << (SqlBaseParser.STATS - 192)) | (1 << (SqlBaseParser.SUBSET - 192)) | (1 << (SqlBaseParser.SUBSTRING - 192)) | (1 << (SqlBaseParser.SYSTEM - 192)) | (1 << (SqlBaseParser.TABLES - 192)) | (1 << (SqlBaseParser.TABLESAMPLE - 192)) | (1 << 
(SqlBaseParser.TEXT - 192)) | (1 << (SqlBaseParser.TIES - 192)) | (1 << (SqlBaseParser.TIME - 192)) | (1 << (SqlBaseParser.TIMESTAMP - 192)) | (1 << (SqlBaseParser.TO - 192)) | (1 << (SqlBaseParser.TRANSACTION - 192)) | (1 << (SqlBaseParser.TRUE - 192)) | (1 << (SqlBaseParser.TRUNCATE - 192)) | (1 << (SqlBaseParser.TRY_CAST - 192)) | (1 << (SqlBaseParser.TYPE - 192)) | (1 << (SqlBaseParser.UNBOUNDED - 192)) | (1 << (SqlBaseParser.UNCOMMITTED - 192)) | (1 << (SqlBaseParser.UNMATCHED - 192)) | (1 << (SqlBaseParser.UPDATE - 192)) | (1 << (SqlBaseParser.USE - 192)) | (1 << (SqlBaseParser.USER - 192)) | (1 << (SqlBaseParser.VALIDATE - 192)) | (1 << (SqlBaseParser.VERBOSE - 192)) | (1 << (SqlBaseParser.VERSION - 192)) | (1 << (SqlBaseParser.VIEW - 192)) | (1 << (SqlBaseParser.WINDOW - 192)) | (1 << (SqlBaseParser.WITHIN - 192)) | (1 << (SqlBaseParser.WITHOUT - 192)) | (1 << (SqlBaseParser.WORK - 192)) | (1 << (SqlBaseParser.WRITE - 192)) | (1 << (SqlBaseParser.YEAR - 192)) | (1 << (SqlBaseParser.ZONE - 192)))) != 0) or ((((_la - 261)) & ~0x3f) == 0 and ((1 << (_la - 261)) & ((1 << (SqlBaseParser.PLUS - 261)) | (1 << (SqlBaseParser.MINUS - 261)) | (1 << (SqlBaseParser.QUESTION_MARK - 261)) | (1 << (SqlBaseParser.STRING - 261)) | (1 << (SqlBaseParser.UNICODE_STRING - 261)) | (1 << (SqlBaseParser.BINARY_LITERAL - 261)) | (1 << (SqlBaseParser.INTEGER_VALUE - 261)) | (1 << (SqlBaseParser.DECIMAL_VALUE - 261)) | (1 << (SqlBaseParser.DOUBLE_VALUE - 261)) | (1 << (SqlBaseParser.IDENTIFIER - 261)) | (1 << (SqlBaseParser.DIGIT_IDENTIFIER - 261)) | (1 << (SqlBaseParser.QUOTED_IDENTIFIER - 261)) | (1 << (SqlBaseParser.BACKQUOTED_IDENTIFIER - 261)))) != 0):
self.state = 427
self.callArgument()
self.state = 432
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la==SqlBaseParser.T__3:
self.state = 428
self.match(SqlBaseParser.T__3)
self.state = 429
self.callArgument()
self.state = 434
self._errHandler.sync(self)
_la = self._input.LA(1)
self.state = 437
self.match(SqlBaseParser.T__2)
self.state = 442
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==SqlBaseParser.WHERE:
self.state = 440
self.match(SqlBaseParser.WHERE)
self.state = 441
localctx.where = self.booleanExpression(0)
elif la_ == 23:
localctx = SqlBaseParser.AnalyzeContext(self, localctx)
self.enterOuterAlt(localctx, 23)
self.state = 444
self.match(SqlBaseParser.ANALYZE)
self.state = 445
self.qualifiedName()
self.state = 448
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==SqlBaseParser.WITH:
self.state = 446
self.match(SqlBaseParser.WITH)
self.state = 447
self.properties()
elif la_ == 24:
localctx = SqlBaseParser.CreateMaterializedViewContext(self, localctx)
self.enterOuterAlt(localctx, 24)
self.state = 450
self.match(SqlBaseParser.CREATE)
self.state = 453
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==SqlBaseParser.OR:
self.state = 451
self.match(SqlBaseParser.OR)
self.state = 452
self.match(SqlBaseParser.REPLACE)
self.state = 455
self.match(SqlBaseParser.MATERIALIZED)
self.state = 456
self.match(SqlBaseParser.VIEW)
self.state = 460
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,34,self._ctx)
if la_ == 1:
self.state = 457
self.match(SqlBaseParser.IF)
self.state = 458
self.match(SqlBaseParser.NOT)
self.state = 459
self.match(SqlBaseParser.EXISTS)
self.state = 462
self.qualifiedName()
self.state = 465
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==SqlBaseParser.COMMENT:
self.state = 463
self.match(SqlBaseParser.COMMENT)
self.state = 464
self.string()
self.state = 469
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==SqlBaseParser.WITH:
self.state = 467
self.match(SqlBaseParser.WITH)
self.state = 468
self.properties()
self.state = 471
self.match(SqlBaseParser.AS)
self.state = 472
self.query()
elif la_ == 25:
localctx = SqlBaseParser.CreateViewContext(self, localctx)
self.enterOuterAlt(localctx, 25)
self.state = 474
self.match(SqlBaseParser.CREATE)
self.state = 477
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==SqlBaseParser.OR:
self.state = 475
self.match(SqlBaseParser.OR)
self.state = 476
self.match(SqlBaseParser.REPLACE)
self.state = 479
self.match(SqlBaseParser.VIEW)
self.state = 480
self.qualifiedName()
self.state = 483
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==SqlBaseParser.COMMENT:
self.state = 481
self.match(SqlBaseParser.COMMENT)
self.state = 482
self.string()
self.state = 487
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==SqlBaseParser.SECURITY:
self.state = 485
self.match(SqlBaseParser.SECURITY)
self.state = 486
_la = self._input.LA(1)
if not(_la==SqlBaseParser.DEFINER or _la==SqlBaseParser.INVOKER):
self._errHandler.recoverInline(self)
else:
self._errHandler.reportMatch(self)
self.consume()
self.state = 489
self.match(SqlBaseParser.AS)
self.state = 490
self.query()
elif la_ == 26:
localctx = SqlBaseParser.RefreshMaterializedViewContext(self, localctx)
self.enterOuterAlt(localctx, 26)
self.state = 492
self.match(SqlBaseParser.REFRESH)
self.state = 493
self.match(SqlBaseParser.MATERIALIZED)
self.state = 494
self.match(SqlBaseParser.VIEW)
self.state = 495
self.qualifiedName()
elif la_ == 27:
localctx = SqlBaseParser.DropMaterializedViewContext(self, localctx)
self.enterOuterAlt(localctx, 27)
self.state = 496
self.match(SqlBaseParser.DROP)
self.state = 497
self.match(SqlBaseParser.MATERIALIZED)
self.state = 498
self.match(SqlBaseParser.VIEW)
self.state = 501
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,40,self._ctx)
if la_ == 1:
self.state = 499
self.match(SqlBaseParser.IF)
self.state = 500
self.match(SqlBaseParser.EXISTS)
self.state = 503
self.qualifiedName()
elif la_ == 28:
localctx = SqlBaseParser.RenameMaterializedViewContext(self, localctx)
self.enterOuterAlt(localctx, 28)
self.state = 504
self.match(SqlBaseParser.ALTER)
self.state = 505
self.match(SqlBaseParser.MATERIALIZED)
self.state = 506
self.match(SqlBaseParser.VIEW)
self.state = 509
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,41,self._ctx)
if la_ == 1:
self.state = 507
self.match(SqlBaseParser.IF)
self.state = 508
self.match(SqlBaseParser.EXISTS)
self.state = 511
localctx.from_ = self.qualifiedName()
self.state = 512
self.match(SqlBaseParser.RENAME)
self.state = 513
self.match(SqlBaseParser.TO)
self.state = 514
localctx.to = self.qualifiedName()
elif la_ == 29:
localctx = SqlBaseParser.DropViewContext(self, localctx)
self.enterOuterAlt(localctx, 29)
self.state = 516
self.match(SqlBaseParser.DROP)
self.state = 517
self.match(SqlBaseParser.VIEW)
self.state = 520
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,42,self._ctx)
if la_ == 1:
self.state = 518
self.match(SqlBaseParser.IF)
self.state = 519
self.match(SqlBaseParser.EXISTS)
self.state = 522
self.qualifiedName()
elif la_ == 30:
localctx = SqlBaseParser.RenameViewContext(self, localctx)
self.enterOuterAlt(localctx, 30)
self.state = 523
self.match(SqlBaseParser.ALTER)
self.state = 524
self.match(SqlBaseParser.VIEW)
self.state = 525
localctx.from_ = self.qualifiedName()
self.state = 526
self.match(SqlBaseParser.RENAME)
self.state = 527
self.match(SqlBaseParser.TO)
self.state = 528
localctx.to = self.qualifiedName()
elif la_ == 31:
localctx = SqlBaseParser.SetViewAuthorizationContext(self, localctx)
self.enterOuterAlt(localctx, 31)
self.state = 530
self.match(SqlBaseParser.ALTER)
self.state = 531
self.match(SqlBaseParser.VIEW)
self.state = 532
localctx.from_ = self.qualifiedName()
self.state = 533
self.match(SqlBaseParser.SET)
self.state = 534
self.match(SqlBaseParser.AUTHORIZATION)
self.state = 535
self.principal()
elif la_ == 32:
localctx = SqlBaseParser.CallContext(self, localctx)
self.enterOuterAlt(localctx, 32)
self.state = 537
self.match(SqlBaseParser.CALL)
self.state = 538
self.qualifiedName()
self.state = 539
self.match(SqlBaseParser.T__1)
self.state = 548
self._errHandler.sync(self)
_la = self._input.LA(1)
if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << SqlBaseParser.T__1) | (1 << SqlBaseParser.ADD) | (1 << SqlBaseParser.ADMIN) | (1 << SqlBaseParser.AFTER) | (1 << SqlBaseParser.ALL) | (1 << SqlBaseParser.ANALYZE) | (1 << SqlBaseParser.ANY) | (1 << SqlBaseParser.ARRAY) | (1 << SqlBaseParser.ASC) | (1 << SqlBaseParser.AT) | (1 << SqlBaseParser.AUTHORIZATION) | (1 << SqlBaseParser.BERNOULLI) | (1 << SqlBaseParser.CALL) | (1 << SqlBaseParser.CASCADE) | (1 << SqlBaseParser.CASE) | (1 << SqlBaseParser.CAST) | (1 << SqlBaseParser.CATALOGS) | (1 << SqlBaseParser.COLUMN) | (1 << SqlBaseParser.COLUMNS) | (1 << SqlBaseParser.COMMENT) | (1 << SqlBaseParser.COMMIT) | (1 << SqlBaseParser.COMMITTED) | (1 << SqlBaseParser.COUNT) | (1 << SqlBaseParser.CURRENT) | (1 << SqlBaseParser.CURRENT_CATALOG) | (1 << SqlBaseParser.CURRENT_DATE) | (1 << SqlBaseParser.CURRENT_PATH) | (1 << SqlBaseParser.CURRENT_SCHEMA) | (1 << SqlBaseParser.CURRENT_TIME) | (1 << SqlBaseParser.CURRENT_TIMESTAMP) | (1 << SqlBaseParser.CURRENT_USER) | (1 << SqlBaseParser.DATA) | (1 << SqlBaseParser.DATE) | (1 << SqlBaseParser.DAY) | (1 << SqlBaseParser.DEFINER) | (1 << SqlBaseParser.DENY))) != 0) or ((((_la - 64)) & ~0x3f) == 0 and ((1 << (_la - 64)) & ((1 << (SqlBaseParser.DESC - 64)) | (1 << (SqlBaseParser.DEFINE - 64)) | (1 << (SqlBaseParser.DISTRIBUTED - 64)) | (1 << (SqlBaseParser.DOUBLE - 64)) | (1 << (SqlBaseParser.EMPTY - 64)) | (1 << (SqlBaseParser.ERROR - 64)) | (1 << (SqlBaseParser.EXCLUDING - 64)) | (1 << (SqlBaseParser.EXISTS - 64)) | (1 << (SqlBaseParser.EXPLAIN - 64)) | (1 << (SqlBaseParser.EXTRACT - 64)) | (1 << (SqlBaseParser.FALSE - 64)) | (1 << (SqlBaseParser.FETCH - 64)) | (1 << (SqlBaseParser.FILTER - 64)) | (1 << (SqlBaseParser.FINAL - 64)) | (1 << (SqlBaseParser.FIRST - 64)) | (1 << (SqlBaseParser.FOLLOWING - 64)) | (1 << (SqlBaseParser.FORMAT - 64)) | (1 << (SqlBaseParser.FUNCTIONS - 64)) | (1 << (SqlBaseParser.GRANT - 64)) | (1 << (SqlBaseParser.GRANTED - 64)) | (1 << (SqlBaseParser.GRANTS 
- 64)) | (1 << (SqlBaseParser.GRAPHVIZ - 64)) | (1 << (SqlBaseParser.GROUPING - 64)) | (1 << (SqlBaseParser.GROUPS - 64)) | (1 << (SqlBaseParser.HOUR - 64)) | (1 << (SqlBaseParser.IF - 64)) | (1 << (SqlBaseParser.IGNORE - 64)) | (1 << (SqlBaseParser.INCLUDING - 64)) | (1 << (SqlBaseParser.INITIAL - 64)) | (1 << (SqlBaseParser.INPUT - 64)) | (1 << (SqlBaseParser.INTERVAL - 64)) | (1 << (SqlBaseParser.INVOKER - 64)) | (1 << (SqlBaseParser.IO - 64)) | (1 << (SqlBaseParser.ISOLATION - 64)) | (1 << (SqlBaseParser.JSON - 64)) | (1 << (SqlBaseParser.LAST - 64)) | (1 << (SqlBaseParser.LATERAL - 64)) | (1 << (SqlBaseParser.LEVEL - 64)) | (1 << (SqlBaseParser.LIMIT - 64)) | (1 << (SqlBaseParser.LISTAGG - 64)) | (1 << (SqlBaseParser.LOCAL - 64)) | (1 << (SqlBaseParser.LOCALTIME - 64)))) != 0) or ((((_la - 128)) & ~0x3f) == 0 and ((1 << (_la - 128)) & ((1 << (SqlBaseParser.LOCALTIMESTAMP - 128)) | (1 << (SqlBaseParser.LOGICAL - 128)) | (1 << (SqlBaseParser.MAP - 128)) | (1 << (SqlBaseParser.MATCH - 128)) | (1 << (SqlBaseParser.MATCHED - 128)) | (1 << (SqlBaseParser.MATCHES - 128)) | (1 << (SqlBaseParser.MATCH_RECOGNIZE - 128)) | (1 << (SqlBaseParser.MATERIALIZED - 128)) | (1 << (SqlBaseParser.MEASURES - 128)) | (1 << (SqlBaseParser.MERGE - 128)) | (1 << (SqlBaseParser.MINUTE - 128)) | (1 << (SqlBaseParser.MONTH - 128)) | (1 << (SqlBaseParser.NEXT - 128)) | (1 << (SqlBaseParser.NFC - 128)) | (1 << (SqlBaseParser.NFD - 128)) | (1 << (SqlBaseParser.NFKC - 128)) | (1 << (SqlBaseParser.NFKD - 128)) | (1 << (SqlBaseParser.NO - 128)) | (1 << (SqlBaseParser.NONE - 128)) | (1 << (SqlBaseParser.NORMALIZE - 128)) | (1 << (SqlBaseParser.NOT - 128)) | (1 << (SqlBaseParser.NULL - 128)) | (1 << (SqlBaseParser.NULLIF - 128)) | (1 << (SqlBaseParser.NULLS - 128)) | (1 << (SqlBaseParser.OFFSET - 128)) | (1 << (SqlBaseParser.OMIT - 128)) | (1 << (SqlBaseParser.OF - 128)) | (1 << (SqlBaseParser.ONE - 128)) | (1 << (SqlBaseParser.ONLY - 128)) | (1 << (SqlBaseParser.OPTION - 128)) | (1 << 
(SqlBaseParser.ORDINALITY - 128)) | (1 << (SqlBaseParser.OUTPUT - 128)) | (1 << (SqlBaseParser.OVER - 128)) | (1 << (SqlBaseParser.OVERFLOW - 128)) | (1 << (SqlBaseParser.PARTITION - 128)) | (1 << (SqlBaseParser.PARTITIONS - 128)) | (1 << (SqlBaseParser.PAST - 128)) | (1 << (SqlBaseParser.PATH - 128)) | (1 << (SqlBaseParser.PATTERN - 128)) | (1 << (SqlBaseParser.PER - 128)) | (1 << (SqlBaseParser.PERMUTE - 128)) | (1 << (SqlBaseParser.POSITION - 128)) | (1 << (SqlBaseParser.PRECEDING - 128)) | (1 << (SqlBaseParser.PRECISION - 128)) | (1 << (SqlBaseParser.PRIVILEGES - 128)) | (1 << (SqlBaseParser.PROPERTIES - 128)) | (1 << (SqlBaseParser.RANGE - 128)) | (1 << (SqlBaseParser.READ - 128)) | (1 << (SqlBaseParser.REFRESH - 128)) | (1 << (SqlBaseParser.RENAME - 128)) | (1 << (SqlBaseParser.REPEATABLE - 128)) | (1 << (SqlBaseParser.REPLACE - 128)) | (1 << (SqlBaseParser.RESET - 128)) | (1 << (SqlBaseParser.RESPECT - 128)) | (1 << (SqlBaseParser.RESTRICT - 128)) | (1 << (SqlBaseParser.REVOKE - 128)))) != 0) or ((((_la - 192)) & ~0x3f) == 0 and ((1 << (_la - 192)) & ((1 << (SqlBaseParser.ROLE - 192)) | (1 << (SqlBaseParser.ROLES - 192)) | (1 << (SqlBaseParser.ROLLBACK - 192)) | (1 << (SqlBaseParser.ROW - 192)) | (1 << (SqlBaseParser.ROWS - 192)) | (1 << (SqlBaseParser.RUNNING - 192)) | (1 << (SqlBaseParser.SCHEMA - 192)) | (1 << (SqlBaseParser.SCHEMAS - 192)) | (1 << (SqlBaseParser.SECOND - 192)) | (1 << (SqlBaseParser.SECURITY - 192)) | (1 << (SqlBaseParser.SEEK - 192)) | (1 << (SqlBaseParser.SERIALIZABLE - 192)) | (1 << (SqlBaseParser.SESSION - 192)) | (1 << (SqlBaseParser.SET - 192)) | (1 << (SqlBaseParser.SETS - 192)) | (1 << (SqlBaseParser.SHOW - 192)) | (1 << (SqlBaseParser.SOME - 192)) | (1 << (SqlBaseParser.START - 192)) | (1 << (SqlBaseParser.STATS - 192)) | (1 << (SqlBaseParser.SUBSET - 192)) | (1 << (SqlBaseParser.SUBSTRING - 192)) | (1 << (SqlBaseParser.SYSTEM - 192)) | (1 << (SqlBaseParser.TABLES - 192)) | (1 << (SqlBaseParser.TABLESAMPLE - 192)) | (1 << 
(SqlBaseParser.TEXT - 192)) | (1 << (SqlBaseParser.TIES - 192)) | (1 << (SqlBaseParser.TIME - 192)) | (1 << (SqlBaseParser.TIMESTAMP - 192)) | (1 << (SqlBaseParser.TO - 192)) | (1 << (SqlBaseParser.TRANSACTION - 192)) | (1 << (SqlBaseParser.TRUE - 192)) | (1 << (SqlBaseParser.TRUNCATE - 192)) | (1 << (SqlBaseParser.TRY_CAST - 192)) | (1 << (SqlBaseParser.TYPE - 192)) | (1 << (SqlBaseParser.UNBOUNDED - 192)) | (1 << (SqlBaseParser.UNCOMMITTED - 192)) | (1 << (SqlBaseParser.UNMATCHED - 192)) | (1 << (SqlBaseParser.UPDATE - 192)) | (1 << (SqlBaseParser.USE - 192)) | (1 << (SqlBaseParser.USER - 192)) | (1 << (SqlBaseParser.VALIDATE - 192)) | (1 << (SqlBaseParser.VERBOSE - 192)) | (1 << (SqlBaseParser.VERSION - 192)) | (1 << (SqlBaseParser.VIEW - 192)) | (1 << (SqlBaseParser.WINDOW - 192)) | (1 << (SqlBaseParser.WITHIN - 192)) | (1 << (SqlBaseParser.WITHOUT - 192)) | (1 << (SqlBaseParser.WORK - 192)) | (1 << (SqlBaseParser.WRITE - 192)) | (1 << (SqlBaseParser.YEAR - 192)) | (1 << (SqlBaseParser.ZONE - 192)))) != 0) or ((((_la - 261)) & ~0x3f) == 0 and ((1 << (_la - 261)) & ((1 << (SqlBaseParser.PLUS - 261)) | (1 << (SqlBaseParser.MINUS - 261)) | (1 << (SqlBaseParser.QUESTION_MARK - 261)) | (1 << (SqlBaseParser.STRING - 261)) | (1 << (SqlBaseParser.UNICODE_STRING - 261)) | (1 << (SqlBaseParser.BINARY_LITERAL - 261)) | (1 << (SqlBaseParser.INTEGER_VALUE - 261)) | (1 << (SqlBaseParser.DECIMAL_VALUE - 261)) | (1 << (SqlBaseParser.DOUBLE_VALUE - 261)) | (1 << (SqlBaseParser.IDENTIFIER - 261)) | (1 << (SqlBaseParser.DIGIT_IDENTIFIER - 261)) | (1 << (SqlBaseParser.QUOTED_IDENTIFIER - 261)) | (1 << (SqlBaseParser.BACKQUOTED_IDENTIFIER - 261)))) != 0):
self.state = 540
self.callArgument()
self.state = 545
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la==SqlBaseParser.T__3:
self.state = 541
self.match(SqlBaseParser.T__3)
self.state = 542
self.callArgument()
self.state = 547
self._errHandler.sync(self)
_la = self._input.LA(1)
self.state = 550
self.match(SqlBaseParser.T__2)
elif la_ == 33:
localctx = SqlBaseParser.CreateRoleContext(self, localctx)
self.enterOuterAlt(localctx, 33)
self.state = 552
self.match(SqlBaseParser.CREATE)
self.state = 553
self.match(SqlBaseParser.ROLE)
self.state = 554
localctx.name = self.identifier()
self.state = 558
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==SqlBaseParser.WITH:
self.state = 555
self.match(SqlBaseParser.WITH)
self.state = 556
self.match(SqlBaseParser.ADMIN)
self.state = 557
self.grantor()
self.state = 562
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==SqlBaseParser.IN:
self.state = 560
self.match(SqlBaseParser.IN)
self.state = 561
localctx.catalog = self.identifier()
elif la_ == 34:
localctx = SqlBaseParser.DropRoleContext(self, localctx)
self.enterOuterAlt(localctx, 34)
self.state = 564
self.match(SqlBaseParser.DROP)
self.state = 565
self.match(SqlBaseParser.ROLE)
self.state = 566
localctx.name = self.identifier()
self.state = 569
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==SqlBaseParser.IN:
self.state = 567
self.match(SqlBaseParser.IN)
self.state = 568
localctx.catalog = self.identifier()
elif la_ == 35:
localctx = SqlBaseParser.GrantRolesContext(self, localctx)
self.enterOuterAlt(localctx, 35)
self.state = 571
self.match(SqlBaseParser.GRANT)
self.state = 572
self.roles()
self.state = 573
self.match(SqlBaseParser.TO)
self.state = 574
self.principal()
self.state = 579
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la==SqlBaseParser.T__3:
self.state = 575
self.match(SqlBaseParser.T__3)
self.state = 576
self.principal()
self.state = 581
self._errHandler.sync(self)
_la = self._input.LA(1)
self.state = 585
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==SqlBaseParser.WITH:
self.state = 582
self.match(SqlBaseParser.WITH)
self.state = 583
self.match(SqlBaseParser.ADMIN)
self.state = 584
self.match(SqlBaseParser.OPTION)
self.state = 590
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==SqlBaseParser.GRANTED:
self.state = 587
self.match(SqlBaseParser.GRANTED)
self.state = 588
self.match(SqlBaseParser.BY)
self.state = 589
self.grantor()
self.state = 594
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==SqlBaseParser.IN:
self.state = 592
self.match(SqlBaseParser.IN)
self.state = 593
localctx.catalog = self.identifier()
elif la_ == 36:
localctx = SqlBaseParser.RevokeRolesContext(self, localctx)
self.enterOuterAlt(localctx, 36)
self.state = 596
self.match(SqlBaseParser.REVOKE)
self.state = 600
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,52,self._ctx)
if la_ == 1:
self.state = 597
self.match(SqlBaseParser.ADMIN)
self.state = 598
self.match(SqlBaseParser.OPTION)
self.state = 599
self.match(SqlBaseParser.FOR)
self.state = 602
self.roles()
self.state = 603
self.match(SqlBaseParser.FROM)
self.state = 604
self.principal()
self.state = 609
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la==SqlBaseParser.T__3:
self.state = 605
self.match(SqlBaseParser.T__3)
self.state = 606
self.principal()
self.state = 611
self._errHandler.sync(self)
_la = self._input.LA(1)
self.state = 615
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==SqlBaseParser.GRANTED:
self.state = 612
self.match(SqlBaseParser.GRANTED)
self.state = 613
self.match(SqlBaseParser.BY)
self.state = 614
self.grantor()
self.state = 619
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==SqlBaseParser.IN:
self.state = 617
self.match(SqlBaseParser.IN)
self.state = 618
localctx.catalog = self.identifier()
elif la_ == 37:
localctx = SqlBaseParser.SetRoleContext(self, localctx)
self.enterOuterAlt(localctx, 37)
self.state = 621
self.match(SqlBaseParser.SET)
self.state = 622
self.match(SqlBaseParser.ROLE)
self.state = 626
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,56,self._ctx)
if la_ == 1:
self.state = 623
self.match(SqlBaseParser.ALL)
elif la_ == 2:
self.state = 624
self.match(SqlBaseParser.NONE)
elif la_ == 3:
self.state = 625
localctx.role = self.identifier()
self.state = 630
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==SqlBaseParser.IN:
self.state = 628
self.match(SqlBaseParser.IN)
self.state = 629
localctx.catalog = self.identifier()
elif la_ == 38:
localctx = SqlBaseParser.GrantContext(self, localctx)
self.enterOuterAlt(localctx, 38)
self.state = 632
self.match(SqlBaseParser.GRANT)
self.state = 643
self._errHandler.sync(self)
token = self._input.LA(1)
if token in [SqlBaseParser.CREATE, SqlBaseParser.DELETE, SqlBaseParser.INSERT, SqlBaseParser.SELECT, SqlBaseParser.UPDATE]:
self.state = 633
self.privilege()
self.state = 638
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la==SqlBaseParser.T__3:
self.state = 634
self.match(SqlBaseParser.T__3)
self.state = 635
self.privilege()
self.state = 640
self._errHandler.sync(self)
_la = self._input.LA(1)
elif token in [SqlBaseParser.ALL]:
self.state = 641
self.match(SqlBaseParser.ALL)
self.state = 642
self.match(SqlBaseParser.PRIVILEGES)
else:
raise NoViableAltException(self)
self.state = 645
self.match(SqlBaseParser.ON)
self.state = 647
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,60,self._ctx)
if la_ == 1:
self.state = 646
_la = self._input.LA(1)
if not(_la==SqlBaseParser.SCHEMA or _la==SqlBaseParser.TABLE):
self._errHandler.recoverInline(self)
else:
self._errHandler.reportMatch(self)
self.consume()
self.state = 649
self.qualifiedName()
self.state = 650
self.match(SqlBaseParser.TO)
self.state = 651
localctx.grantee = self.principal()
self.state = 655
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==SqlBaseParser.WITH:
self.state = 652
self.match(SqlBaseParser.WITH)
self.state = 653
self.match(SqlBaseParser.GRANT)
self.state = 654
self.match(SqlBaseParser.OPTION)
elif la_ == 39:
localctx = SqlBaseParser.DenyContext(self, localctx)
self.enterOuterAlt(localctx, 39)
self.state = 657
self.match(SqlBaseParser.DENY)
self.state = 668
self._errHandler.sync(self)
token = self._input.LA(1)
if token in [SqlBaseParser.CREATE, SqlBaseParser.DELETE, SqlBaseParser.INSERT, SqlBaseParser.SELECT, SqlBaseParser.UPDATE]:
self.state = 658
self.privilege()
self.state = 663
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la==SqlBaseParser.T__3:
self.state = 659
self.match(SqlBaseParser.T__3)
self.state = 660
self.privilege()
self.state = 665
self._errHandler.sync(self)
_la = self._input.LA(1)
elif token in [SqlBaseParser.ALL]:
self.state = 666
self.match(SqlBaseParser.ALL)
self.state = 667
self.match(SqlBaseParser.PRIVILEGES)
else:
raise NoViableAltException(self)
self.state = 670
self.match(SqlBaseParser.ON)
self.state = 672
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,64,self._ctx)
if la_ == 1:
self.state = 671
_la = self._input.LA(1)
if not(_la==SqlBaseParser.SCHEMA or _la==SqlBaseParser.TABLE):
self._errHandler.recoverInline(self)
else:
self._errHandler.reportMatch(self)
self.consume()
self.state = 674
self.qualifiedName()
self.state = 675
self.match(SqlBaseParser.TO)
self.state = 676
localctx.grantee = self.principal()
elif la_ == 40:
localctx = SqlBaseParser.RevokeContext(self, localctx)
self.enterOuterAlt(localctx, 40)
self.state = 678
self.match(SqlBaseParser.REVOKE)
self.state = 682
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==SqlBaseParser.GRANT:
self.state = 679
self.match(SqlBaseParser.GRANT)
self.state = 680
self.match(SqlBaseParser.OPTION)
self.state = 681
self.match(SqlBaseParser.FOR)
self.state = 694
self._errHandler.sync(self)
token = self._input.LA(1)
if token in [SqlBaseParser.CREATE, SqlBaseParser.DELETE, SqlBaseParser.INSERT, SqlBaseParser.SELECT, SqlBaseParser.UPDATE]:
self.state = 684
self.privilege()
self.state = 689
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la==SqlBaseParser.T__3:
self.state = 685
self.match(SqlBaseParser.T__3)
self.state = 686
self.privilege()
self.state = 691
self._errHandler.sync(self)
_la = self._input.LA(1)
elif token in [SqlBaseParser.ALL]:
self.state = 692
self.match(SqlBaseParser.ALL)
self.state = 693
self.match(SqlBaseParser.PRIVILEGES)
else:
raise NoViableAltException(self)
self.state = 696
self.match(SqlBaseParser.ON)
self.state = 698
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,68,self._ctx)
if la_ == 1:
self.state = 697
_la = self._input.LA(1)
if not(_la==SqlBaseParser.SCHEMA or _la==SqlBaseParser.TABLE):
self._errHandler.recoverInline(self)
else:
self._errHandler.reportMatch(self)
self.consume()
self.state = 700
self.qualifiedName()
self.state = 701
self.match(SqlBaseParser.FROM)
self.state = 702
localctx.grantee = self.principal()
elif la_ == 41:
localctx = SqlBaseParser.ShowGrantsContext(self, localctx)
self.enterOuterAlt(localctx, 41)
self.state = 704
self.match(SqlBaseParser.SHOW)
self.state = 705
self.match(SqlBaseParser.GRANTS)
self.state = 711
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==SqlBaseParser.ON:
self.state = 706
self.match(SqlBaseParser.ON)
self.state = 708
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==SqlBaseParser.TABLE:
self.state = 707
self.match(SqlBaseParser.TABLE)
self.state = 710
self.qualifiedName()
elif la_ == 42:
localctx = SqlBaseParser.ExplainContext(self, localctx)
self.enterOuterAlt(localctx, 42)
self.state = 713
self.match(SqlBaseParser.EXPLAIN)
self.state = 725
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,72,self._ctx)
if la_ == 1:
self.state = 714
self.match(SqlBaseParser.T__1)
self.state = 715
self.explainOption()
self.state = 720
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la==SqlBaseParser.T__3:
self.state = 716
self.match(SqlBaseParser.T__3)
self.state = 717
self.explainOption()
self.state = 722
self._errHandler.sync(self)
_la = self._input.LA(1)
self.state = 723
self.match(SqlBaseParser.T__2)
self.state = 727
self.statement()
elif la_ == 43:
localctx = SqlBaseParser.ExplainAnalyzeContext(self, localctx)
self.enterOuterAlt(localctx, 43)
self.state = 728
self.match(SqlBaseParser.EXPLAIN)
self.state = 729
self.match(SqlBaseParser.ANALYZE)
self.state = 731
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==SqlBaseParser.VERBOSE:
self.state = 730
self.match(SqlBaseParser.VERBOSE)
self.state = 733
self.statement()
elif la_ == 44:
localctx = SqlBaseParser.ShowCreateTableContext(self, localctx)
self.enterOuterAlt(localctx, 44)
self.state = 734
self.match(SqlBaseParser.SHOW)
self.state = 735
self.match(SqlBaseParser.CREATE)
self.state = 736
self.match(SqlBaseParser.TABLE)
self.state = 737
self.qualifiedName()
elif la_ == 45:
localctx = SqlBaseParser.ShowCreateSchemaContext(self, localctx)
self.enterOuterAlt(localctx, 45)
self.state = 738
self.match(SqlBaseParser.SHOW)
self.state = 739
self.match(SqlBaseParser.CREATE)
self.state = 740
self.match(SqlBaseParser.SCHEMA)
self.state = 741
self.qualifiedName()
elif la_ == 46:
localctx = SqlBaseParser.ShowCreateViewContext(self, localctx)
self.enterOuterAlt(localctx, 46)
self.state = 742
self.match(SqlBaseParser.SHOW)
self.state = 743
self.match(SqlBaseParser.CREATE)
self.state = 744
self.match(SqlBaseParser.VIEW)
self.state = 745
self.qualifiedName()
elif la_ == 47:
localctx = SqlBaseParser.ShowCreateMaterializedViewContext(self, localctx)
self.enterOuterAlt(localctx, 47)
self.state = 746
self.match(SqlBaseParser.SHOW)
self.state = 747
self.match(SqlBaseParser.CREATE)
self.state = 748
self.match(SqlBaseParser.MATERIALIZED)
self.state = 749
self.match(SqlBaseParser.VIEW)
self.state = 750
self.qualifiedName()
elif la_ == 48:
localctx = SqlBaseParser.ShowTablesContext(self, localctx)
self.enterOuterAlt(localctx, 48)
self.state = 751
self.match(SqlBaseParser.SHOW)
self.state = 752
self.match(SqlBaseParser.TABLES)
self.state = 755
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==SqlBaseParser.FROM or _la==SqlBaseParser.IN:
self.state = 753
_la = self._input.LA(1)
if not(_la==SqlBaseParser.FROM or _la==SqlBaseParser.IN):
self._errHandler.recoverInline(self)
else:
self._errHandler.reportMatch(self)
self.consume()
self.state = 754
self.qualifiedName()
self.state = 763
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==SqlBaseParser.LIKE:
self.state = 757
self.match(SqlBaseParser.LIKE)
self.state = 758
localctx.pattern = self.string()
self.state = 761
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==SqlBaseParser.ESCAPE:
self.state = 759
self.match(SqlBaseParser.ESCAPE)
self.state = 760
localctx.escape = self.string()
elif la_ == 49:
localctx = SqlBaseParser.ShowSchemasContext(self, localctx)
self.enterOuterAlt(localctx, 49)
self.state = 765
self.match(SqlBaseParser.SHOW)
self.state = 766
self.match(SqlBaseParser.SCHEMAS)
self.state = 769
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==SqlBaseParser.FROM or _la==SqlBaseParser.IN:
self.state = 767
_la = self._input.LA(1)
if not(_la==SqlBaseParser.FROM or _la==SqlBaseParser.IN):
self._errHandler.recoverInline(self)
else:
self._errHandler.reportMatch(self)
self.consume()
self.state = 768
self.identifier()
self.state = 777
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==SqlBaseParser.LIKE:
self.state = 771
self.match(SqlBaseParser.LIKE)
self.state = 772
localctx.pattern = self.string()
self.state = 775
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==SqlBaseParser.ESCAPE:
self.state = 773
self.match(SqlBaseParser.ESCAPE)
self.state = 774
localctx.escape = self.string()
elif la_ == 50:
localctx = SqlBaseParser.ShowCatalogsContext(self, localctx)
self.enterOuterAlt(localctx, 50)
self.state = 779
self.match(SqlBaseParser.SHOW)
self.state = 780
self.match(SqlBaseParser.CATALOGS)
self.state = 787
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==SqlBaseParser.LIKE:
self.state = 781
self.match(SqlBaseParser.LIKE)
self.state = 782
localctx.pattern = self.string()
self.state = 785
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==SqlBaseParser.ESCAPE:
self.state = 783
self.match(SqlBaseParser.ESCAPE)
self.state = 784
localctx.escape = self.string()
elif la_ == 51:
localctx = SqlBaseParser.ShowColumnsContext(self, localctx)
self.enterOuterAlt(localctx, 51)
self.state = 789
self.match(SqlBaseParser.SHOW)
self.state = 790
self.match(SqlBaseParser.COLUMNS)
self.state = 791
_la = self._input.LA(1)
if not(_la==SqlBaseParser.FROM or _la==SqlBaseParser.IN):
self._errHandler.recoverInline(self)
else:
self._errHandler.reportMatch(self)
self.consume()
self.state = 793
self._errHandler.sync(self)
_la = self._input.LA(1)
if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << SqlBaseParser.ADD) | (1 << SqlBaseParser.ADMIN) | (1 << SqlBaseParser.AFTER) | (1 << SqlBaseParser.ALL) | (1 << SqlBaseParser.ANALYZE) | (1 << SqlBaseParser.ANY) | (1 << SqlBaseParser.ARRAY) | (1 << SqlBaseParser.ASC) | (1 << SqlBaseParser.AT) | (1 << SqlBaseParser.AUTHORIZATION) | (1 << SqlBaseParser.BERNOULLI) | (1 << SqlBaseParser.CALL) | (1 << SqlBaseParser.CASCADE) | (1 << SqlBaseParser.CATALOGS) | (1 << SqlBaseParser.COLUMN) | (1 << SqlBaseParser.COLUMNS) | (1 << SqlBaseParser.COMMENT) | (1 << SqlBaseParser.COMMIT) | (1 << SqlBaseParser.COMMITTED) | (1 << SqlBaseParser.COUNT) | (1 << SqlBaseParser.CURRENT) | (1 << SqlBaseParser.DATA) | (1 << SqlBaseParser.DATE) | (1 << SqlBaseParser.DAY) | (1 << SqlBaseParser.DEFINER) | (1 << SqlBaseParser.DENY))) != 0) or ((((_la - 64)) & ~0x3f) == 0 and ((1 << (_la - 64)) & ((1 << (SqlBaseParser.DESC - 64)) | (1 << (SqlBaseParser.DEFINE - 64)) | (1 << (SqlBaseParser.DISTRIBUTED - 64)) | (1 << (SqlBaseParser.DOUBLE - 64)) | (1 << (SqlBaseParser.EMPTY - 64)) | (1 << (SqlBaseParser.ERROR - 64)) | (1 << (SqlBaseParser.EXCLUDING - 64)) | (1 << (SqlBaseParser.EXPLAIN - 64)) | (1 << (SqlBaseParser.FETCH - 64)) | (1 << (SqlBaseParser.FILTER - 64)) | (1 << (SqlBaseParser.FINAL - 64)) | (1 << (SqlBaseParser.FIRST - 64)) | (1 << (SqlBaseParser.FOLLOWING - 64)) | (1 << (SqlBaseParser.FORMAT - 64)) | (1 << (SqlBaseParser.FUNCTIONS - 64)) | (1 << (SqlBaseParser.GRANT - 64)) | (1 << (SqlBaseParser.GRANTED - 64)) | (1 << (SqlBaseParser.GRANTS - 64)) | (1 << (SqlBaseParser.GRAPHVIZ - 64)) | (1 << (SqlBaseParser.GROUPS - 64)) | (1 << (SqlBaseParser.HOUR - 64)) | (1 << (SqlBaseParser.IF - 64)) | (1 << (SqlBaseParser.IGNORE - 64)) | (1 << (SqlBaseParser.INCLUDING - 64)) | (1 << (SqlBaseParser.INITIAL - 64)) | (1 << (SqlBaseParser.INPUT - 64)) | (1 << (SqlBaseParser.INTERVAL - 64)) | (1 << (SqlBaseParser.INVOKER - 64)) | (1 << (SqlBaseParser.IO - 64)) | (1 << (SqlBaseParser.ISOLATION - 64)) | (1 
<< (SqlBaseParser.JSON - 64)) | (1 << (SqlBaseParser.LAST - 64)) | (1 << (SqlBaseParser.LATERAL - 64)) | (1 << (SqlBaseParser.LEVEL - 64)) | (1 << (SqlBaseParser.LIMIT - 64)) | (1 << (SqlBaseParser.LOCAL - 64)))) != 0) or ((((_la - 129)) & ~0x3f) == 0 and ((1 << (_la - 129)) & ((1 << (SqlBaseParser.LOGICAL - 129)) | (1 << (SqlBaseParser.MAP - 129)) | (1 << (SqlBaseParser.MATCH - 129)) | (1 << (SqlBaseParser.MATCHED - 129)) | (1 << (SqlBaseParser.MATCHES - 129)) | (1 << (SqlBaseParser.MATCH_RECOGNIZE - 129)) | (1 << (SqlBaseParser.MATERIALIZED - 129)) | (1 << (SqlBaseParser.MEASURES - 129)) | (1 << (SqlBaseParser.MERGE - 129)) | (1 << (SqlBaseParser.MINUTE - 129)) | (1 << (SqlBaseParser.MONTH - 129)) | (1 << (SqlBaseParser.NEXT - 129)) | (1 << (SqlBaseParser.NFC - 129)) | (1 << (SqlBaseParser.NFD - 129)) | (1 << (SqlBaseParser.NFKC - 129)) | (1 << (SqlBaseParser.NFKD - 129)) | (1 << (SqlBaseParser.NO - 129)) | (1 << (SqlBaseParser.NONE - 129)) | (1 << (SqlBaseParser.NULLIF - 129)) | (1 << (SqlBaseParser.NULLS - 129)) | (1 << (SqlBaseParser.OFFSET - 129)) | (1 << (SqlBaseParser.OMIT - 129)) | (1 << (SqlBaseParser.OF - 129)) | (1 << (SqlBaseParser.ONE - 129)) | (1 << (SqlBaseParser.ONLY - 129)) | (1 << (SqlBaseParser.OPTION - 129)) | (1 << (SqlBaseParser.ORDINALITY - 129)) | (1 << (SqlBaseParser.OUTPUT - 129)) | (1 << (SqlBaseParser.OVER - 129)) | (1 << (SqlBaseParser.OVERFLOW - 129)) | (1 << (SqlBaseParser.PARTITION - 129)) | (1 << (SqlBaseParser.PARTITIONS - 129)) | (1 << (SqlBaseParser.PAST - 129)) | (1 << (SqlBaseParser.PATH - 129)) | (1 << (SqlBaseParser.PATTERN - 129)) | (1 << (SqlBaseParser.PER - 129)) | (1 << (SqlBaseParser.PERMUTE - 129)) | (1 << (SqlBaseParser.POSITION - 129)) | (1 << (SqlBaseParser.PRECEDING - 129)) | (1 << (SqlBaseParser.PRECISION - 129)) | (1 << (SqlBaseParser.PRIVILEGES - 129)) | (1 << (SqlBaseParser.PROPERTIES - 129)) | (1 << (SqlBaseParser.RANGE - 129)) | (1 << (SqlBaseParser.READ - 129)) | (1 << (SqlBaseParser.REFRESH - 129)) | (1 << 
(SqlBaseParser.RENAME - 129)) | (1 << (SqlBaseParser.REPEATABLE - 129)) | (1 << (SqlBaseParser.REPLACE - 129)) | (1 << (SqlBaseParser.RESET - 129)) | (1 << (SqlBaseParser.RESPECT - 129)) | (1 << (SqlBaseParser.RESTRICT - 129)) | (1 << (SqlBaseParser.REVOKE - 129)) | (1 << (SqlBaseParser.ROLE - 129)))) != 0) or ((((_la - 193)) & ~0x3f) == 0 and ((1 << (_la - 193)) & ((1 << (SqlBaseParser.ROLES - 193)) | (1 << (SqlBaseParser.ROLLBACK - 193)) | (1 << (SqlBaseParser.ROW - 193)) | (1 << (SqlBaseParser.ROWS - 193)) | (1 << (SqlBaseParser.RUNNING - 193)) | (1 << (SqlBaseParser.SCHEMA - 193)) | (1 << (SqlBaseParser.SCHEMAS - 193)) | (1 << (SqlBaseParser.SECOND - 193)) | (1 << (SqlBaseParser.SECURITY - 193)) | (1 << (SqlBaseParser.SEEK - 193)) | (1 << (SqlBaseParser.SERIALIZABLE - 193)) | (1 << (SqlBaseParser.SESSION - 193)) | (1 << (SqlBaseParser.SET - 193)) | (1 << (SqlBaseParser.SETS - 193)) | (1 << (SqlBaseParser.SHOW - 193)) | (1 << (SqlBaseParser.SOME - 193)) | (1 << (SqlBaseParser.START - 193)) | (1 << (SqlBaseParser.STATS - 193)) | (1 << (SqlBaseParser.SUBSET - 193)) | (1 << (SqlBaseParser.SUBSTRING - 193)) | (1 << (SqlBaseParser.SYSTEM - 193)) | (1 << (SqlBaseParser.TABLES - 193)) | (1 << (SqlBaseParser.TABLESAMPLE - 193)) | (1 << (SqlBaseParser.TEXT - 193)) | (1 << (SqlBaseParser.TIES - 193)) | (1 << (SqlBaseParser.TIME - 193)) | (1 << (SqlBaseParser.TIMESTAMP - 193)) | (1 << (SqlBaseParser.TO - 193)) | (1 << (SqlBaseParser.TRANSACTION - 193)) | (1 << (SqlBaseParser.TRUNCATE - 193)) | (1 << (SqlBaseParser.TRY_CAST - 193)) | (1 << (SqlBaseParser.TYPE - 193)) | (1 << (SqlBaseParser.UNBOUNDED - 193)) | (1 << (SqlBaseParser.UNCOMMITTED - 193)) | (1 << (SqlBaseParser.UNMATCHED - 193)) | (1 << (SqlBaseParser.UPDATE - 193)) | (1 << (SqlBaseParser.USE - 193)) | (1 << (SqlBaseParser.USER - 193)) | (1 << (SqlBaseParser.VALIDATE - 193)) | (1 << (SqlBaseParser.VERBOSE - 193)) | (1 << (SqlBaseParser.VERSION - 193)) | (1 << (SqlBaseParser.VIEW - 193)) | (1 << 
(SqlBaseParser.WINDOW - 193)) | (1 << (SqlBaseParser.WITHIN - 193)) | (1 << (SqlBaseParser.WITHOUT - 193)) | (1 << (SqlBaseParser.WORK - 193)) | (1 << (SqlBaseParser.WRITE - 193)) | (1 << (SqlBaseParser.YEAR - 193)) | (1 << (SqlBaseParser.ZONE - 193)))) != 0) or ((((_la - 274)) & ~0x3f) == 0 and ((1 << (_la - 274)) & ((1 << (SqlBaseParser.IDENTIFIER - 274)) | (1 << (SqlBaseParser.DIGIT_IDENTIFIER - 274)) | (1 << (SqlBaseParser.QUOTED_IDENTIFIER - 274)) | (1 << (SqlBaseParser.BACKQUOTED_IDENTIFIER - 274)))) != 0):
self.state = 792
self.qualifiedName()
self.state = 801
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==SqlBaseParser.LIKE:
self.state = 795
self.match(SqlBaseParser.LIKE)
self.state = 796
localctx.pattern = self.string()
self.state = 799
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==SqlBaseParser.ESCAPE:
self.state = 797
self.match(SqlBaseParser.ESCAPE)
self.state = 798
localctx.escape = self.string()
elif la_ == 52:
localctx = SqlBaseParser.ShowStatsContext(self, localctx)
self.enterOuterAlt(localctx, 52)
self.state = 803
self.match(SqlBaseParser.SHOW)
self.state = 804
self.match(SqlBaseParser.STATS)
self.state = 805
self.match(SqlBaseParser.FOR)
self.state = 806
self.qualifiedName()
elif la_ == 53:
localctx = SqlBaseParser.ShowStatsForQueryContext(self, localctx)
self.enterOuterAlt(localctx, 53)
self.state = 807
self.match(SqlBaseParser.SHOW)
self.state = 808
self.match(SqlBaseParser.STATS)
self.state = 809
self.match(SqlBaseParser.FOR)
self.state = 810
self.match(SqlBaseParser.T__1)
self.state = 811
self.query()
self.state = 812
self.match(SqlBaseParser.T__2)
elif la_ == 54:
localctx = SqlBaseParser.ShowRolesContext(self, localctx)
self.enterOuterAlt(localctx, 54)
self.state = 814
self.match(SqlBaseParser.SHOW)
self.state = 816
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==SqlBaseParser.CURRENT:
self.state = 815
self.match(SqlBaseParser.CURRENT)
self.state = 818
self.match(SqlBaseParser.ROLES)
self.state = 821
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==SqlBaseParser.FROM or _la==SqlBaseParser.IN:
self.state = 819
_la = self._input.LA(1)
if not(_la==SqlBaseParser.FROM or _la==SqlBaseParser.IN):
self._errHandler.recoverInline(self)
else:
self._errHandler.reportMatch(self)
self.consume()
self.state = 820
self.identifier()
elif la_ == 55:
localctx = SqlBaseParser.ShowRoleGrantsContext(self, localctx)
self.enterOuterAlt(localctx, 55)
self.state = 823
self.match(SqlBaseParser.SHOW)
self.state = 824
self.match(SqlBaseParser.ROLE)
self.state = 825
self.match(SqlBaseParser.GRANTS)
self.state = 828
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==SqlBaseParser.FROM or _la==SqlBaseParser.IN:
self.state = 826
_la = self._input.LA(1)
if not(_la==SqlBaseParser.FROM or _la==SqlBaseParser.IN):
self._errHandler.recoverInline(self)
else:
self._errHandler.reportMatch(self)
self.consume()
self.state = 827
self.identifier()
elif la_ == 56:
localctx = SqlBaseParser.ShowColumnsContext(self, localctx)
self.enterOuterAlt(localctx, 56)
self.state = 830
self.match(SqlBaseParser.DESCRIBE)
self.state = 831
self.qualifiedName()
elif la_ == 57:
localctx = SqlBaseParser.ShowColumnsContext(self, localctx)
self.enterOuterAlt(localctx, 57)
self.state = 832
self.match(SqlBaseParser.DESC)
self.state = 833
self.qualifiedName()
elif la_ == 58:
localctx = SqlBaseParser.ShowFunctionsContext(self, localctx)
self.enterOuterAlt(localctx, 58)
self.state = 834
self.match(SqlBaseParser.SHOW)
self.state = 835
self.match(SqlBaseParser.FUNCTIONS)
self.state = 842
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==SqlBaseParser.LIKE:
self.state = 836
self.match(SqlBaseParser.LIKE)
self.state = 837
localctx.pattern = self.string()
self.state = 840
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==SqlBaseParser.ESCAPE:
self.state = 838
self.match(SqlBaseParser.ESCAPE)
self.state = 839
localctx.escape = self.string()
elif la_ == 59:
localctx = SqlBaseParser.ShowSessionContext(self, localctx)
self.enterOuterAlt(localctx, 59)
self.state = 844
self.match(SqlBaseParser.SHOW)
self.state = 845
self.match(SqlBaseParser.SESSION)
self.state = 852
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==SqlBaseParser.LIKE:
self.state = 846
self.match(SqlBaseParser.LIKE)
self.state = 847
localctx.pattern = self.string()
self.state = 850
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==SqlBaseParser.ESCAPE:
self.state = 848
self.match(SqlBaseParser.ESCAPE)
self.state = 849
localctx.escape = self.string()
elif la_ == 60:
localctx = SqlBaseParser.SetSessionContext(self, localctx)
self.enterOuterAlt(localctx, 60)
self.state = 854
self.match(SqlBaseParser.SET)
self.state = 855
self.match(SqlBaseParser.SESSION)
self.state = 856
self.qualifiedName()
self.state = 857
self.match(SqlBaseParser.EQ)
self.state = 858
self.expression()
elif la_ == 61:
localctx = SqlBaseParser.ResetSessionContext(self, localctx)
self.enterOuterAlt(localctx, 61)
self.state = 860
self.match(SqlBaseParser.RESET)
self.state = 861
self.match(SqlBaseParser.SESSION)
self.state = 862
self.qualifiedName()
elif la_ == 62:
localctx = SqlBaseParser.StartTransactionContext(self, localctx)
self.enterOuterAlt(localctx, 62)
self.state = 863
self.match(SqlBaseParser.START)
self.state = 864
self.match(SqlBaseParser.TRANSACTION)
self.state = 873
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==SqlBaseParser.ISOLATION or _la==SqlBaseParser.READ:
self.state = 865
self.transactionMode()
self.state = 870
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la==SqlBaseParser.T__3:
self.state = 866
self.match(SqlBaseParser.T__3)
self.state = 867
self.transactionMode()
self.state = 872
self._errHandler.sync(self)
_la = self._input.LA(1)
elif la_ == 63:
localctx = SqlBaseParser.CommitContext(self, localctx)
self.enterOuterAlt(localctx, 63)
self.state = 875
self.match(SqlBaseParser.COMMIT)
self.state = 877
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==SqlBaseParser.WORK:
self.state = 876
self.match(SqlBaseParser.WORK)
elif la_ == 64:
localctx = SqlBaseParser.RollbackContext(self, localctx)
self.enterOuterAlt(localctx, 64)
self.state = 879
self.match(SqlBaseParser.ROLLBACK)
self.state = 881
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==SqlBaseParser.WORK:
self.state = 880
self.match(SqlBaseParser.WORK)
elif la_ == 65:
localctx = SqlBaseParser.PrepareContext(self, localctx)
self.enterOuterAlt(localctx, 65)
self.state = 883
self.match(SqlBaseParser.PREPARE)
self.state = 884
self.identifier()
self.state = 885
self.match(SqlBaseParser.FROM)
self.state = 886
self.statement()
elif la_ == 66:
localctx = SqlBaseParser.DeallocateContext(self, localctx)
self.enterOuterAlt(localctx, 66)
self.state = 888
self.match(SqlBaseParser.DEALLOCATE)
self.state = 889
self.match(SqlBaseParser.PREPARE)
self.state = 890
self.identifier()
elif la_ == 67:
localctx = SqlBaseParser.ExecuteContext(self, localctx)
self.enterOuterAlt(localctx, 67)
self.state = 891
self.match(SqlBaseParser.EXECUTE)
self.state = 892
self.identifier()
self.state = 902
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==SqlBaseParser.USING:
self.state = 893
self.match(SqlBaseParser.USING)
self.state = 894
self.expression()
self.state = 899
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la==SqlBaseParser.T__3:
self.state = 895
self.match(SqlBaseParser.T__3)
self.state = 896
self.expression()
self.state = 901
self._errHandler.sync(self)
_la = self._input.LA(1)
elif la_ == 68:
localctx = SqlBaseParser.DescribeInputContext(self, localctx)
self.enterOuterAlt(localctx, 68)
self.state = 904
self.match(SqlBaseParser.DESCRIBE)
self.state = 905
self.match(SqlBaseParser.INPUT)
self.state = 906
self.identifier()
elif la_ == 69:
localctx = SqlBaseParser.DescribeOutputContext(self, localctx)
self.enterOuterAlt(localctx, 69)
self.state = 907
self.match(SqlBaseParser.DESCRIBE)
self.state = 908
self.match(SqlBaseParser.OUTPUT)
self.state = 909
self.identifier()
elif la_ == 70:
localctx = SqlBaseParser.SetPathContext(self, localctx)
self.enterOuterAlt(localctx, 70)
self.state = 910
self.match(SqlBaseParser.SET)
self.state = 911
self.match(SqlBaseParser.PATH)
self.state = 912
self.pathSpecification()
elif la_ == 71:
localctx = SqlBaseParser.SetTimeZoneContext(self, localctx)
self.enterOuterAlt(localctx, 71)
self.state = 913
self.match(SqlBaseParser.SET)
self.state = 914
self.match(SqlBaseParser.TIME)
self.state = 915
self.match(SqlBaseParser.ZONE)
self.state = 918
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,98,self._ctx)
if la_ == 1:
self.state = 916
self.match(SqlBaseParser.LOCAL)
elif la_ == 2:
self.state = 917
self.expression()
elif la_ == 72:
localctx = SqlBaseParser.UpdateContext(self, localctx)
self.enterOuterAlt(localctx, 72)
self.state = 920
self.match(SqlBaseParser.UPDATE)
self.state = 921
self.qualifiedName()
self.state = 922
self.match(SqlBaseParser.SET)
self.state = 923
self.updateAssignment()
self.state = 928
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la==SqlBaseParser.T__3:
self.state = 924
self.match(SqlBaseParser.T__3)
self.state = 925
self.updateAssignment()
self.state = 930
self._errHandler.sync(self)
_la = self._input.LA(1)
self.state = 933
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==SqlBaseParser.WHERE:
self.state = 931
self.match(SqlBaseParser.WHERE)
self.state = 932
localctx.where = self.booleanExpression(0)
elif la_ == 73:
localctx = SqlBaseParser.MergeContext(self, localctx)
self.enterOuterAlt(localctx, 73)
self.state = 935
self.match(SqlBaseParser.MERGE)
self.state = 936
self.match(SqlBaseParser.INTO)
self.state = 937
self.qualifiedName()
self.state = 942
self._errHandler.sync(self)
_la = self._input.LA(1)
if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << SqlBaseParser.ADD) | (1 << SqlBaseParser.ADMIN) | (1 << SqlBaseParser.AFTER) | (1 << SqlBaseParser.ALL) | (1 << SqlBaseParser.ANALYZE) | (1 << SqlBaseParser.ANY) | (1 << SqlBaseParser.ARRAY) | (1 << SqlBaseParser.AS) | (1 << SqlBaseParser.ASC) | (1 << SqlBaseParser.AT) | (1 << SqlBaseParser.AUTHORIZATION) | (1 << SqlBaseParser.BERNOULLI) | (1 << SqlBaseParser.CALL) | (1 << SqlBaseParser.CASCADE) | (1 << SqlBaseParser.CATALOGS) | (1 << SqlBaseParser.COLUMN) | (1 << SqlBaseParser.COLUMNS) | (1 << SqlBaseParser.COMMENT) | (1 << SqlBaseParser.COMMIT) | (1 << SqlBaseParser.COMMITTED) | (1 << SqlBaseParser.COUNT) | (1 << SqlBaseParser.CURRENT) | (1 << SqlBaseParser.DATA) | (1 << SqlBaseParser.DATE) | (1 << SqlBaseParser.DAY) | (1 << SqlBaseParser.DEFINER) | (1 << SqlBaseParser.DENY))) != 0) or ((((_la - 64)) & ~0x3f) == 0 and ((1 << (_la - 64)) & ((1 << (SqlBaseParser.DESC - 64)) | (1 << (SqlBaseParser.DEFINE - 64)) | (1 << (SqlBaseParser.DISTRIBUTED - 64)) | (1 << (SqlBaseParser.DOUBLE - 64)) | (1 << (SqlBaseParser.EMPTY - 64)) | (1 << (SqlBaseParser.ERROR - 64)) | (1 << (SqlBaseParser.EXCLUDING - 64)) | (1 << (SqlBaseParser.EXPLAIN - 64)) | (1 << (SqlBaseParser.FETCH - 64)) | (1 << (SqlBaseParser.FILTER - 64)) | (1 << (SqlBaseParser.FINAL - 64)) | (1 << (SqlBaseParser.FIRST - 64)) | (1 << (SqlBaseParser.FOLLOWING - 64)) | (1 << (SqlBaseParser.FORMAT - 64)) | (1 << (SqlBaseParser.FUNCTIONS - 64)) | (1 << (SqlBaseParser.GRANT - 64)) | (1 << (SqlBaseParser.GRANTED - 64)) | (1 << (SqlBaseParser.GRANTS - 64)) | (1 << (SqlBaseParser.GRAPHVIZ - 64)) | (1 << (SqlBaseParser.GROUPS - 64)) | (1 << (SqlBaseParser.HOUR - 64)) | (1 << (SqlBaseParser.IF - 64)) | (1 << (SqlBaseParser.IGNORE - 64)) | (1 << (SqlBaseParser.INCLUDING - 64)) | (1 << (SqlBaseParser.INITIAL - 64)) | (1 << (SqlBaseParser.INPUT - 64)) | (1 << (SqlBaseParser.INTERVAL - 64)) | (1 << (SqlBaseParser.INVOKER - 64)) | (1 << (SqlBaseParser.IO - 64)) | (1 << 
(SqlBaseParser.ISOLATION - 64)) | (1 << (SqlBaseParser.JSON - 64)) | (1 << (SqlBaseParser.LAST - 64)) | (1 << (SqlBaseParser.LATERAL - 64)) | (1 << (SqlBaseParser.LEVEL - 64)) | (1 << (SqlBaseParser.LIMIT - 64)) | (1 << (SqlBaseParser.LOCAL - 64)))) != 0) or ((((_la - 129)) & ~0x3f) == 0 and ((1 << (_la - 129)) & ((1 << (SqlBaseParser.LOGICAL - 129)) | (1 << (SqlBaseParser.MAP - 129)) | (1 << (SqlBaseParser.MATCH - 129)) | (1 << (SqlBaseParser.MATCHED - 129)) | (1 << (SqlBaseParser.MATCHES - 129)) | (1 << (SqlBaseParser.MATCH_RECOGNIZE - 129)) | (1 << (SqlBaseParser.MATERIALIZED - 129)) | (1 << (SqlBaseParser.MEASURES - 129)) | (1 << (SqlBaseParser.MERGE - 129)) | (1 << (SqlBaseParser.MINUTE - 129)) | (1 << (SqlBaseParser.MONTH - 129)) | (1 << (SqlBaseParser.NEXT - 129)) | (1 << (SqlBaseParser.NFC - 129)) | (1 << (SqlBaseParser.NFD - 129)) | (1 << (SqlBaseParser.NFKC - 129)) | (1 << (SqlBaseParser.NFKD - 129)) | (1 << (SqlBaseParser.NO - 129)) | (1 << (SqlBaseParser.NONE - 129)) | (1 << (SqlBaseParser.NULLIF - 129)) | (1 << (SqlBaseParser.NULLS - 129)) | (1 << (SqlBaseParser.OFFSET - 129)) | (1 << (SqlBaseParser.OMIT - 129)) | (1 << (SqlBaseParser.OF - 129)) | (1 << (SqlBaseParser.ONE - 129)) | (1 << (SqlBaseParser.ONLY - 129)) | (1 << (SqlBaseParser.OPTION - 129)) | (1 << (SqlBaseParser.ORDINALITY - 129)) | (1 << (SqlBaseParser.OUTPUT - 129)) | (1 << (SqlBaseParser.OVER - 129)) | (1 << (SqlBaseParser.OVERFLOW - 129)) | (1 << (SqlBaseParser.PARTITION - 129)) | (1 << (SqlBaseParser.PARTITIONS - 129)) | (1 << (SqlBaseParser.PAST - 129)) | (1 << (SqlBaseParser.PATH - 129)) | (1 << (SqlBaseParser.PATTERN - 129)) | (1 << (SqlBaseParser.PER - 129)) | (1 << (SqlBaseParser.PERMUTE - 129)) | (1 << (SqlBaseParser.POSITION - 129)) | (1 << (SqlBaseParser.PRECEDING - 129)) | (1 << (SqlBaseParser.PRECISION - 129)) | (1 << (SqlBaseParser.PRIVILEGES - 129)) | (1 << (SqlBaseParser.PROPERTIES - 129)) | (1 << (SqlBaseParser.RANGE - 129)) | (1 << (SqlBaseParser.READ - 129)) | (1 << 
(SqlBaseParser.REFRESH - 129)) | (1 << (SqlBaseParser.RENAME - 129)) | (1 << (SqlBaseParser.REPEATABLE - 129)) | (1 << (SqlBaseParser.REPLACE - 129)) | (1 << (SqlBaseParser.RESET - 129)) | (1 << (SqlBaseParser.RESPECT - 129)) | (1 << (SqlBaseParser.RESTRICT - 129)) | (1 << (SqlBaseParser.REVOKE - 129)) | (1 << (SqlBaseParser.ROLE - 129)))) != 0) or ((((_la - 193)) & ~0x3f) == 0 and ((1 << (_la - 193)) & ((1 << (SqlBaseParser.ROLES - 193)) | (1 << (SqlBaseParser.ROLLBACK - 193)) | (1 << (SqlBaseParser.ROW - 193)) | (1 << (SqlBaseParser.ROWS - 193)) | (1 << (SqlBaseParser.RUNNING - 193)) | (1 << (SqlBaseParser.SCHEMA - 193)) | (1 << (SqlBaseParser.SCHEMAS - 193)) | (1 << (SqlBaseParser.SECOND - 193)) | (1 << (SqlBaseParser.SECURITY - 193)) | (1 << (SqlBaseParser.SEEK - 193)) | (1 << (SqlBaseParser.SERIALIZABLE - 193)) | (1 << (SqlBaseParser.SESSION - 193)) | (1 << (SqlBaseParser.SET - 193)) | (1 << (SqlBaseParser.SETS - 193)) | (1 << (SqlBaseParser.SHOW - 193)) | (1 << (SqlBaseParser.SOME - 193)) | (1 << (SqlBaseParser.START - 193)) | (1 << (SqlBaseParser.STATS - 193)) | (1 << (SqlBaseParser.SUBSET - 193)) | (1 << (SqlBaseParser.SUBSTRING - 193)) | (1 << (SqlBaseParser.SYSTEM - 193)) | (1 << (SqlBaseParser.TABLES - 193)) | (1 << (SqlBaseParser.TABLESAMPLE - 193)) | (1 << (SqlBaseParser.TEXT - 193)) | (1 << (SqlBaseParser.TIES - 193)) | (1 << (SqlBaseParser.TIME - 193)) | (1 << (SqlBaseParser.TIMESTAMP - 193)) | (1 << (SqlBaseParser.TO - 193)) | (1 << (SqlBaseParser.TRANSACTION - 193)) | (1 << (SqlBaseParser.TRUNCATE - 193)) | (1 << (SqlBaseParser.TRY_CAST - 193)) | (1 << (SqlBaseParser.TYPE - 193)) | (1 << (SqlBaseParser.UNBOUNDED - 193)) | (1 << (SqlBaseParser.UNCOMMITTED - 193)) | (1 << (SqlBaseParser.UNMATCHED - 193)) | (1 << (SqlBaseParser.UPDATE - 193)) | (1 << (SqlBaseParser.USE - 193)) | (1 << (SqlBaseParser.USER - 193)) | (1 << (SqlBaseParser.VALIDATE - 193)) | (1 << (SqlBaseParser.VERBOSE - 193)) | (1 << (SqlBaseParser.VERSION - 193)) | (1 << 
(SqlBaseParser.VIEW - 193)) | (1 << (SqlBaseParser.WINDOW - 193)) | (1 << (SqlBaseParser.WITHIN - 193)) | (1 << (SqlBaseParser.WITHOUT - 193)) | (1 << (SqlBaseParser.WORK - 193)) | (1 << (SqlBaseParser.WRITE - 193)) | (1 << (SqlBaseParser.YEAR - 193)) | (1 << (SqlBaseParser.ZONE - 193)))) != 0) or ((((_la - 274)) & ~0x3f) == 0 and ((1 << (_la - 274)) & ((1 << (SqlBaseParser.IDENTIFIER - 274)) | (1 << (SqlBaseParser.DIGIT_IDENTIFIER - 274)) | (1 << (SqlBaseParser.QUOTED_IDENTIFIER - 274)) | (1 << (SqlBaseParser.BACKQUOTED_IDENTIFIER - 274)))) != 0):
self.state = 939
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==SqlBaseParser.AS:
self.state = 938
self.match(SqlBaseParser.AS)
self.state = 941
self.identifier()
self.state = 944
self.match(SqlBaseParser.USING)
self.state = 945
self.relation(0)
self.state = 946
self.match(SqlBaseParser.ON)
self.state = 947
self.expression()
self.state = 949
self._errHandler.sync(self)
_la = self._input.LA(1)
while True:
self.state = 948
self.mergeCase()
self.state = 951
self._errHandler.sync(self)
_la = self._input.LA(1)
if not (_la==SqlBaseParser.WHEN):
break
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class QueryContext(ParserRuleContext):
__slots__ = 'parser'
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def queryNoWith(self):
return self.getTypedRuleContext(SqlBaseParser.QueryNoWithContext,0)
def with_(self):
return self.getTypedRuleContext(SqlBaseParser.With_Context,0)
def getRuleIndex(self):
return SqlBaseParser.RULE_query
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterQuery" ):
listener.enterQuery(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitQuery" ):
listener.exitQuery(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitQuery" ):
return visitor.visitQuery(self)
else:
return visitor.visitChildren(self)
    def query(self):
        """Parse the ``query`` rule: an optional WITH clause followed by
        ``queryNoWith``.

        Auto-generated ANTLR rule method; the ``self.state = N`` assignments
        track ATN states and must not be reordered.
        """

        localctx = SqlBaseParser.QueryContext(self, self._ctx, self.state)
        self.enterRule(localctx, 12, self.RULE_query)
        self._la = 0 # Token type
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 956
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            # Optional leading WITH clause.
            if _la==SqlBaseParser.WITH:
                self.state = 955
                self.with_()

            # Mandatory query body.
            self.state = 958
            self.queryNoWith()
        except RecognitionException as re:
            # Standard ANTLR recovery: record the error and resynchronize.
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
class With_Context(ParserRuleContext):
__slots__ = 'parser'
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def WITH(self):
return self.getToken(SqlBaseParser.WITH, 0)
def namedQuery(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(SqlBaseParser.NamedQueryContext)
else:
return self.getTypedRuleContext(SqlBaseParser.NamedQueryContext,i)
def RECURSIVE(self):
return self.getToken(SqlBaseParser.RECURSIVE, 0)
def getRuleIndex(self):
return SqlBaseParser.RULE_with_
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterWith_" ):
listener.enterWith_(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitWith_" ):
listener.exitWith_(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitWith_" ):
return visitor.visitWith_(self)
else:
return visitor.visitChildren(self)
    def with_(self):
        """Parse the ``with_`` rule:
        ``WITH RECURSIVE? namedQuery (T__3 namedQuery)*``
        (T__3 is an implicit literal token — presumably ',' — TODO confirm
        against the grammar's token list).

        Auto-generated ANTLR rule method; state numbers must stay as-is.
        """

        localctx = SqlBaseParser.With_Context(self, self._ctx, self.state)
        self.enterRule(localctx, 14, self.RULE_with_)
        self._la = 0 # Token type
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 960
            self.match(SqlBaseParser.WITH)
            self.state = 962
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            # Optional RECURSIVE modifier.
            if _la==SqlBaseParser.RECURSIVE:
                self.state = 961
                self.match(SqlBaseParser.RECURSIVE)

            # First (mandatory) named query.
            self.state = 964
            self.namedQuery()
            self.state = 969
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            # Zero or more additional separator-delimited named queries.
            while _la==SqlBaseParser.T__3:
                self.state = 965
                self.match(SqlBaseParser.T__3)
                self.state = 966
                self.namedQuery()
                self.state = 971
                self._errHandler.sync(self)
                _la = self._input.LA(1)

        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
class TableElementContext(ParserRuleContext):
__slots__ = 'parser'
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def columnDefinition(self):
return self.getTypedRuleContext(SqlBaseParser.ColumnDefinitionContext,0)
def likeClause(self):
return self.getTypedRuleContext(SqlBaseParser.LikeClauseContext,0)
def getRuleIndex(self):
return SqlBaseParser.RULE_tableElement
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterTableElement" ):
listener.enterTableElement(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitTableElement" ):
listener.exitTableElement(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitTableElement" ):
return visitor.visitTableElement(self)
else:
return visitor.visitChildren(self)
    def tableElement(self):
        """Parse the ``tableElement`` rule: ``columnDefinition | likeClause``.

        The alternative is chosen by one token of lookahead: LIKE selects
        ``likeClause``; any identifier or non-reserved keyword token selects
        ``columnDefinition``; anything else raises NoViableAltException.
        Auto-generated ANTLR rule method.
        """

        localctx = SqlBaseParser.TableElementContext(self, self._ctx, self.state)
        self.enterRule(localctx, 16, self.RULE_tableElement)
        try:
            self.state = 974
            self._errHandler.sync(self)
            token = self._input.LA(1)
            # Non-reserved keywords and identifier tokens start a column definition.
            if token in [SqlBaseParser.ADD, SqlBaseParser.ADMIN, SqlBaseParser.AFTER, SqlBaseParser.ALL, SqlBaseParser.ANALYZE, SqlBaseParser.ANY, SqlBaseParser.ARRAY, SqlBaseParser.ASC, SqlBaseParser.AT, SqlBaseParser.AUTHORIZATION, SqlBaseParser.BERNOULLI, SqlBaseParser.CALL, SqlBaseParser.CASCADE, SqlBaseParser.CATALOGS, SqlBaseParser.COLUMN, SqlBaseParser.COLUMNS, SqlBaseParser.COMMENT, SqlBaseParser.COMMIT, SqlBaseParser.COMMITTED, SqlBaseParser.COUNT, SqlBaseParser.CURRENT, SqlBaseParser.DATA, SqlBaseParser.DATE, SqlBaseParser.DAY, SqlBaseParser.DEFINER, SqlBaseParser.DENY, SqlBaseParser.DESC, SqlBaseParser.DEFINE, SqlBaseParser.DISTRIBUTED, SqlBaseParser.DOUBLE, SqlBaseParser.EMPTY, SqlBaseParser.ERROR, SqlBaseParser.EXCLUDING, SqlBaseParser.EXPLAIN, SqlBaseParser.FETCH, SqlBaseParser.FILTER, SqlBaseParser.FINAL, SqlBaseParser.FIRST, SqlBaseParser.FOLLOWING, SqlBaseParser.FORMAT, SqlBaseParser.FUNCTIONS, SqlBaseParser.GRANT, SqlBaseParser.GRANTED, SqlBaseParser.GRANTS, SqlBaseParser.GRAPHVIZ, SqlBaseParser.GROUPS, SqlBaseParser.HOUR, SqlBaseParser.IF, SqlBaseParser.IGNORE, SqlBaseParser.INCLUDING, SqlBaseParser.INITIAL, SqlBaseParser.INPUT, SqlBaseParser.INTERVAL, SqlBaseParser.INVOKER, SqlBaseParser.IO, SqlBaseParser.ISOLATION, SqlBaseParser.JSON, SqlBaseParser.LAST, SqlBaseParser.LATERAL, SqlBaseParser.LEVEL, SqlBaseParser.LIMIT, SqlBaseParser.LOCAL, SqlBaseParser.LOGICAL, SqlBaseParser.MAP, SqlBaseParser.MATCH, SqlBaseParser.MATCHED, SqlBaseParser.MATCHES, SqlBaseParser.MATCH_RECOGNIZE, SqlBaseParser.MATERIALIZED, SqlBaseParser.MEASURES, SqlBaseParser.MERGE, SqlBaseParser.MINUTE, SqlBaseParser.MONTH, SqlBaseParser.NEXT, SqlBaseParser.NFC, SqlBaseParser.NFD, SqlBaseParser.NFKC, SqlBaseParser.NFKD, SqlBaseParser.NO, SqlBaseParser.NONE, SqlBaseParser.NULLIF, SqlBaseParser.NULLS, SqlBaseParser.OFFSET, SqlBaseParser.OMIT, SqlBaseParser.OF, SqlBaseParser.ONE, SqlBaseParser.ONLY, SqlBaseParser.OPTION, SqlBaseParser.ORDINALITY, SqlBaseParser.OUTPUT, SqlBaseParser.OVER, SqlBaseParser.OVERFLOW, SqlBaseParser.PARTITION, SqlBaseParser.PARTITIONS, SqlBaseParser.PAST, SqlBaseParser.PATH, SqlBaseParser.PATTERN, SqlBaseParser.PER, SqlBaseParser.PERMUTE, SqlBaseParser.POSITION, SqlBaseParser.PRECEDING, SqlBaseParser.PRECISION, SqlBaseParser.PRIVILEGES, SqlBaseParser.PROPERTIES, SqlBaseParser.RANGE, SqlBaseParser.READ, SqlBaseParser.REFRESH, SqlBaseParser.RENAME, SqlBaseParser.REPEATABLE, SqlBaseParser.REPLACE, SqlBaseParser.RESET, SqlBaseParser.RESPECT, SqlBaseParser.RESTRICT, SqlBaseParser.REVOKE, SqlBaseParser.ROLE, SqlBaseParser.ROLES, SqlBaseParser.ROLLBACK, SqlBaseParser.ROW, SqlBaseParser.ROWS, SqlBaseParser.RUNNING, SqlBaseParser.SCHEMA, SqlBaseParser.SCHEMAS, SqlBaseParser.SECOND, SqlBaseParser.SECURITY, SqlBaseParser.SEEK, SqlBaseParser.SERIALIZABLE, SqlBaseParser.SESSION, SqlBaseParser.SET, SqlBaseParser.SETS, SqlBaseParser.SHOW, SqlBaseParser.SOME, SqlBaseParser.START, SqlBaseParser.STATS, SqlBaseParser.SUBSET, SqlBaseParser.SUBSTRING, SqlBaseParser.SYSTEM, SqlBaseParser.TABLES, SqlBaseParser.TABLESAMPLE, SqlBaseParser.TEXT, SqlBaseParser.TIES, SqlBaseParser.TIME, SqlBaseParser.TIMESTAMP, SqlBaseParser.TO, SqlBaseParser.TRANSACTION, SqlBaseParser.TRUNCATE, SqlBaseParser.TRY_CAST, SqlBaseParser.TYPE, SqlBaseParser.UNBOUNDED, SqlBaseParser.UNCOMMITTED, SqlBaseParser.UNMATCHED, SqlBaseParser.UPDATE, SqlBaseParser.USE, SqlBaseParser.USER, SqlBaseParser.VALIDATE, SqlBaseParser.VERBOSE, SqlBaseParser.VERSION, SqlBaseParser.VIEW, SqlBaseParser.WINDOW, SqlBaseParser.WITHIN, SqlBaseParser.WITHOUT, SqlBaseParser.WORK, SqlBaseParser.WRITE, SqlBaseParser.YEAR, SqlBaseParser.ZONE, SqlBaseParser.IDENTIFIER, SqlBaseParser.DIGIT_IDENTIFIER, SqlBaseParser.QUOTED_IDENTIFIER, SqlBaseParser.BACKQUOTED_IDENTIFIER]:
                self.enterOuterAlt(localctx, 1)
                self.state = 972
                self.columnDefinition()
            elif token in [SqlBaseParser.LIKE]:
                self.enterOuterAlt(localctx, 2)
                self.state = 973
                self.likeClause()
            else:
                raise NoViableAltException(self)

        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
class ColumnDefinitionContext(ParserRuleContext):
__slots__ = 'parser'
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def identifier(self):
return self.getTypedRuleContext(SqlBaseParser.IdentifierContext,0)
def type_(self):
return self.getTypedRuleContext(SqlBaseParser.Type_Context,0)
def NOT(self):
return self.getToken(SqlBaseParser.NOT, 0)
def NULL(self):
return self.getToken(SqlBaseParser.NULL, 0)
def COMMENT(self):
return self.getToken(SqlBaseParser.COMMENT, 0)
def string(self):
return self.getTypedRuleContext(SqlBaseParser.StringContext,0)
def WITH(self):
return self.getToken(SqlBaseParser.WITH, 0)
def properties(self):
return self.getTypedRuleContext(SqlBaseParser.PropertiesContext,0)
def getRuleIndex(self):
return SqlBaseParser.RULE_columnDefinition
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterColumnDefinition" ):
listener.enterColumnDefinition(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitColumnDefinition" ):
listener.exitColumnDefinition(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitColumnDefinition" ):
return visitor.visitColumnDefinition(self)
else:
return visitor.visitChildren(self)
    def columnDefinition(self):
        """Parse the ``columnDefinition`` rule:
        ``identifier type_ (NOT NULL)? (COMMENT string)? (WITH properties)?``

        Auto-generated ANTLR rule method; state numbers must stay as-is.
        """

        localctx = SqlBaseParser.ColumnDefinitionContext(self, self._ctx, self.state)
        self.enterRule(localctx, 18, self.RULE_columnDefinition)
        self._la = 0 # Token type
        try:
            self.enterOuterAlt(localctx, 1)
            # Column name and type.
            self.state = 976
            self.identifier()
            self.state = 977
            self.type_(0)
            self.state = 980
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            # Optional NOT NULL constraint.
            if _la==SqlBaseParser.NOT:
                self.state = 978
                self.match(SqlBaseParser.NOT)
                self.state = 979
                self.match(SqlBaseParser.NULL)

            self.state = 984
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            # Optional COMMENT clause.
            if _la==SqlBaseParser.COMMENT:
                self.state = 982
                self.match(SqlBaseParser.COMMENT)
                self.state = 983
                self.string()

            self.state = 988
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            # Optional WITH properties clause.
            if _la==SqlBaseParser.WITH:
                self.state = 986
                self.match(SqlBaseParser.WITH)
                self.state = 987
                self.properties()

        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
class LikeClauseContext(ParserRuleContext):
__slots__ = 'parser'
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
self.optionType = None # Token
def LIKE(self):
return self.getToken(SqlBaseParser.LIKE, 0)
def qualifiedName(self):
return self.getTypedRuleContext(SqlBaseParser.QualifiedNameContext,0)
def PROPERTIES(self):
return self.getToken(SqlBaseParser.PROPERTIES, 0)
def INCLUDING(self):
return self.getToken(SqlBaseParser.INCLUDING, 0)
def EXCLUDING(self):
return self.getToken(SqlBaseParser.EXCLUDING, 0)
def getRuleIndex(self):
return SqlBaseParser.RULE_likeClause
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterLikeClause" ):
listener.enterLikeClause(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitLikeClause" ):
listener.exitLikeClause(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitLikeClause" ):
return visitor.visitLikeClause(self)
else:
return visitor.visitChildren(self)
    def likeClause(self):
        """Parse the ``likeClause`` rule:
        ``LIKE qualifiedName ((INCLUDING | EXCLUDING) PROPERTIES)?``

        The chosen INCLUDING/EXCLUDING token is stored on
        ``localctx.optionType``. Auto-generated ANTLR rule method.
        """

        localctx = SqlBaseParser.LikeClauseContext(self, self._ctx, self.state)
        self.enterRule(localctx, 20, self.RULE_likeClause)
        self._la = 0 # Token type
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 990
            self.match(SqlBaseParser.LIKE)
            self.state = 991
            self.qualifiedName()
            self.state = 994
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            # Optional (INCLUDING | EXCLUDING) PROPERTIES suffix.
            if _la==SqlBaseParser.EXCLUDING or _la==SqlBaseParser.INCLUDING:
                self.state = 992
                # Capture which option was used before consuming the token.
                localctx.optionType = self._input.LT(1)
                _la = self._input.LA(1)
                if not(_la==SqlBaseParser.EXCLUDING or _la==SqlBaseParser.INCLUDING):
                    localctx.optionType = self._errHandler.recoverInline(self)
                else:
                    self._errHandler.reportMatch(self)
                    self.consume()
                self.state = 993
                self.match(SqlBaseParser.PROPERTIES)

        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class PropertiesContext(ParserRuleContext):
        """Parse-tree node for the ``properties`` rule (a parenthesized property list)."""
        __slots__ = 'parser'
        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser
        # Accessor for the propertyAssignments child.
        def propertyAssignments(self):
            return self.getTypedRuleContext(SqlBaseParser.PropertyAssignmentsContext,0)
        def getRuleIndex(self):
            return SqlBaseParser.RULE_properties
        # Listener / visitor dispatch, guarded so partial listeners/visitors work.
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterProperties" ):
                listener.enterProperties(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitProperties" ):
                listener.exitProperties(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitProperties" ):
                return visitor.visitProperties(self)
            else:
                return visitor.visitChildren(self)
    def properties(self):
        """Parse the ``properties`` rule: T__1 propertyAssignments T__2.

        T__1 / T__2 are implicit literal tokens — presumably '(' and ')' given
        the SQL grammar; confirm against the .g4 source. Generated by ANTLR.
        """
        localctx = SqlBaseParser.PropertiesContext(self, self._ctx, self.state)
        self.enterRule(localctx, 22, self.RULE_properties)
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 996
            self.match(SqlBaseParser.T__1)
            self.state = 997
            self.propertyAssignments()
            self.state = 998
            self.match(SqlBaseParser.T__2)
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class PropertyAssignmentsContext(ParserRuleContext):
        """Parse-tree node for the ``propertyAssignments`` rule (one or more properties)."""
        __slots__ = 'parser'
        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser
        # With i=None returns all property_ children; with an index returns the i-th.
        def property_(self, i:int=None):
            if i is None:
                return self.getTypedRuleContexts(SqlBaseParser.Property_Context)
            else:
                return self.getTypedRuleContext(SqlBaseParser.Property_Context,i)
        def getRuleIndex(self):
            return SqlBaseParser.RULE_propertyAssignments
        # Listener / visitor dispatch, guarded so partial listeners/visitors work.
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterPropertyAssignments" ):
                listener.enterPropertyAssignments(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitPropertyAssignments" ):
                listener.exitPropertyAssignments(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitPropertyAssignments" ):
                return visitor.visitPropertyAssignments(self)
            else:
                return visitor.visitChildren(self)
    def propertyAssignments(self):
        """Parse the ``propertyAssignments`` rule: property_ (T__3 property_)*.

        T__3 is an implicit literal token — presumably ',' given the grammar;
        confirm against the .g4 source. Generated by ANTLR.
        """
        localctx = SqlBaseParser.PropertyAssignmentsContext(self, self._ctx, self.state)
        self.enterRule(localctx, 24, self.RULE_propertyAssignments)
        self._la = 0 # Token type
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 1000
            self.property_()
            self.state = 1005
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            # Zero or more additional comma-separated properties.
            while _la==SqlBaseParser.T__3:
                self.state = 1001
                self.match(SqlBaseParser.T__3)
                self.state = 1002
                self.property_()
                self.state = 1007
                self._errHandler.sync(self)
                _la = self._input.LA(1)
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class Property_Context(ParserRuleContext):
        """Parse-tree node for the ``property_`` rule (identifier EQ expression)."""
        __slots__ = 'parser'
        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser
        def identifier(self):
            return self.getTypedRuleContext(SqlBaseParser.IdentifierContext,0)
        def EQ(self):
            return self.getToken(SqlBaseParser.EQ, 0)
        def expression(self):
            return self.getTypedRuleContext(SqlBaseParser.ExpressionContext,0)
        def getRuleIndex(self):
            return SqlBaseParser.RULE_property_
        # Listener / visitor dispatch, guarded so partial listeners/visitors work.
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterProperty_" ):
                listener.enterProperty_(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitProperty_" ):
                listener.exitProperty_(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitProperty_" ):
                return visitor.visitProperty_(self)
            else:
                return visitor.visitChildren(self)
    def property_(self):
        """Parse the ``property_`` rule: identifier EQ expression (one key=value pair)."""
        localctx = SqlBaseParser.Property_Context(self, self._ctx, self.state)
        self.enterRule(localctx, 26, self.RULE_property_)
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 1008
            self.identifier()
            self.state = 1009
            self.match(SqlBaseParser.EQ)
            self.state = 1010
            self.expression()
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class QueryNoWithContext(ParserRuleContext):
        """Parse-tree node for the ``queryNoWith`` rule.

        Captures the query body plus optional ORDER BY, OFFSET, and
        LIMIT/FETCH FIRST clauses; the labeled sub-results are stored in
        ``offset``, ``limit`` and ``fetchFirst``.
        """
        __slots__ = 'parser'
        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser
            self.offset = None # RowCountContext
            self.limit = None # LimitRowCountContext
            self.fetchFirst = None # RowCountContext
        def queryTerm(self):
            return self.getTypedRuleContext(SqlBaseParser.QueryTermContext,0)
        def ORDER(self):
            return self.getToken(SqlBaseParser.ORDER, 0)
        def BY(self):
            return self.getToken(SqlBaseParser.BY, 0)
        # With i=None returns all sortItem children; with an index returns the i-th.
        def sortItem(self, i:int=None):
            if i is None:
                return self.getTypedRuleContexts(SqlBaseParser.SortItemContext)
            else:
                return self.getTypedRuleContext(SqlBaseParser.SortItemContext,i)
        def OFFSET(self):
            return self.getToken(SqlBaseParser.OFFSET, 0)
        def rowCount(self, i:int=None):
            if i is None:
                return self.getTypedRuleContexts(SqlBaseParser.RowCountContext)
            else:
                return self.getTypedRuleContext(SqlBaseParser.RowCountContext,i)
        def LIMIT(self):
            return self.getToken(SqlBaseParser.LIMIT, 0)
        def FETCH(self):
            return self.getToken(SqlBaseParser.FETCH, 0)
        def limitRowCount(self):
            return self.getTypedRuleContext(SqlBaseParser.LimitRowCountContext,0)
        def FIRST(self):
            return self.getToken(SqlBaseParser.FIRST, 0)
        def NEXT(self):
            return self.getToken(SqlBaseParser.NEXT, 0)
        def ROW(self, i:int=None):
            if i is None:
                return self.getTokens(SqlBaseParser.ROW)
            else:
                return self.getToken(SqlBaseParser.ROW, i)
        def ROWS(self, i:int=None):
            if i is None:
                return self.getTokens(SqlBaseParser.ROWS)
            else:
                return self.getToken(SqlBaseParser.ROWS, i)
        def ONLY(self):
            return self.getToken(SqlBaseParser.ONLY, 0)
        def WITH(self):
            return self.getToken(SqlBaseParser.WITH, 0)
        def TIES(self):
            return self.getToken(SqlBaseParser.TIES, 0)
        def getRuleIndex(self):
            return SqlBaseParser.RULE_queryNoWith
        # Listener / visitor dispatch, guarded so partial listeners/visitors work.
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterQueryNoWith" ):
                listener.enterQueryNoWith(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitQueryNoWith" ):
                listener.exitQueryNoWith(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitQueryNoWith" ):
                return visitor.visitQueryNoWith(self)
            else:
                return visitor.visitChildren(self)
    def queryNoWith(self):
        """Parse the ``queryNoWith`` rule.

        Grammar (reconstructed from the generated code below):
            queryTerm
            (ORDER BY sortItem (T__3 sortItem)*)?
            (OFFSET rowCount (ROW | ROWS)?)?
            ( LIMIT limitRowCount
            | FETCH (FIRST | NEXT) rowCount? (ROW | ROWS) (ONLY | WITH TIES)
            )?

        Labeled results go to localctx.offset / .limit / .fetchFirst.
        Generated by ANTLR — state numbers are ATN ids; do not edit by hand.
        """
        localctx = SqlBaseParser.QueryNoWithContext(self, self._ctx, self.state)
        self.enterRule(localctx, 28, self.RULE_queryNoWith)
        self._la = 0 # Token type
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 1012
            self.queryTerm(0)
            self.state = 1023
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            # Optional ORDER BY clause with comma-separated sort items.
            if _la==SqlBaseParser.ORDER:
                self.state = 1013
                self.match(SqlBaseParser.ORDER)
                self.state = 1014
                self.match(SqlBaseParser.BY)
                self.state = 1015
                self.sortItem()
                self.state = 1020
                self._errHandler.sync(self)
                _la = self._input.LA(1)
                while _la==SqlBaseParser.T__3:
                    self.state = 1016
                    self.match(SqlBaseParser.T__3)
                    self.state = 1017
                    self.sortItem()
                    self.state = 1022
                    self._errHandler.sync(self)
                    _la = self._input.LA(1)
            self.state = 1030
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            # Optional OFFSET n (ROW | ROWS)? clause.
            if _la==SqlBaseParser.OFFSET:
                self.state = 1025
                self.match(SqlBaseParser.OFFSET)
                self.state = 1026
                localctx.offset = self.rowCount()
                self.state = 1028
                self._errHandler.sync(self)
                _la = self._input.LA(1)
                if _la==SqlBaseParser.ROW or _la==SqlBaseParser.ROWS:
                    self.state = 1027
                    _la = self._input.LA(1)
                    if not(_la==SqlBaseParser.ROW or _la==SqlBaseParser.ROWS):
                        self._errHandler.recoverInline(self)
                    else:
                        self._errHandler.reportMatch(self)
                        self.consume()
            self.state = 1045
            self._errHandler.sync(self)
            token = self._input.LA(1)
            # Optional LIMIT or FETCH FIRST clause, chosen by the next token.
            if token in [SqlBaseParser.LIMIT]:
                self.state = 1032
                self.match(SqlBaseParser.LIMIT)
                self.state = 1033
                localctx.limit = self.limitRowCount()
            elif token in [SqlBaseParser.FETCH]:
                self.state = 1034
                self.match(SqlBaseParser.FETCH)
                self.state = 1035
                _la = self._input.LA(1)
                if not(_la==SqlBaseParser.FIRST or _la==SqlBaseParser.NEXT):
                    self._errHandler.recoverInline(self)
                else:
                    self._errHandler.reportMatch(self)
                    self.consume()
                self.state = 1037
                self._errHandler.sync(self)
                _la = self._input.LA(1)
                # Row count is optional in FETCH FIRST ... ROW(S).
                if _la==SqlBaseParser.QUESTION_MARK or _la==SqlBaseParser.INTEGER_VALUE:
                    self.state = 1036
                    localctx.fetchFirst = self.rowCount()
                self.state = 1039
                _la = self._input.LA(1)
                if not(_la==SqlBaseParser.ROW or _la==SqlBaseParser.ROWS):
                    self._errHandler.recoverInline(self)
                else:
                    self._errHandler.reportMatch(self)
                    self.consume()
                self.state = 1043
                self._errHandler.sync(self)
                token = self._input.LA(1)
                if token in [SqlBaseParser.ONLY]:
                    self.state = 1040
                    self.match(SqlBaseParser.ONLY)
                elif token in [SqlBaseParser.WITH]:
                    self.state = 1041
                    self.match(SqlBaseParser.WITH)
                    self.state = 1042
                    self.match(SqlBaseParser.TIES)
                else:
                    raise NoViableAltException(self)
            # Follow-set tokens: no LIMIT/FETCH clause present.
            elif token in [SqlBaseParser.EOF, SqlBaseParser.T__2, SqlBaseParser.WITH]:
                pass
            else:
                pass
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class LimitRowCountContext(ParserRuleContext):
        """Parse-tree node for the ``limitRowCount`` rule (ALL or a row count)."""
        __slots__ = 'parser'
        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser
        def ALL(self):
            return self.getToken(SqlBaseParser.ALL, 0)
        def rowCount(self):
            return self.getTypedRuleContext(SqlBaseParser.RowCountContext,0)
        def getRuleIndex(self):
            return SqlBaseParser.RULE_limitRowCount
        # Listener / visitor dispatch, guarded so partial listeners/visitors work.
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterLimitRowCount" ):
                listener.enterLimitRowCount(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitLimitRowCount" ):
                listener.exitLimitRowCount(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitLimitRowCount" ):
                return visitor.visitLimitRowCount(self)
            else:
                return visitor.visitChildren(self)
    def limitRowCount(self):
        """Parse the ``limitRowCount`` rule: ALL | rowCount.

        A single-token lookahead chooses between LIMIT ALL and a numeric /
        parameter row count; any other token is a syntax error.
        """
        localctx = SqlBaseParser.LimitRowCountContext(self, self._ctx, self.state)
        self.enterRule(localctx, 30, self.RULE_limitRowCount)
        try:
            self.state = 1049
            self._errHandler.sync(self)
            token = self._input.LA(1)
            if token in [SqlBaseParser.ALL]:
                self.enterOuterAlt(localctx, 1)
                self.state = 1047
                self.match(SqlBaseParser.ALL)
            elif token in [SqlBaseParser.QUESTION_MARK, SqlBaseParser.INTEGER_VALUE]:
                self.enterOuterAlt(localctx, 2)
                self.state = 1048
                self.rowCount()
            else:
                raise NoViableAltException(self)
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class RowCountContext(ParserRuleContext):
        """Parse-tree node for the ``rowCount`` rule (integer literal or '?' parameter)."""
        __slots__ = 'parser'
        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser
        def INTEGER_VALUE(self):
            return self.getToken(SqlBaseParser.INTEGER_VALUE, 0)
        def QUESTION_MARK(self):
            return self.getToken(SqlBaseParser.QUESTION_MARK, 0)
        def getRuleIndex(self):
            return SqlBaseParser.RULE_rowCount
        # Listener / visitor dispatch, guarded so partial listeners/visitors work.
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterRowCount" ):
                listener.enterRowCount(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitRowCount" ):
                listener.exitRowCount(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitRowCount" ):
                return visitor.visitRowCount(self)
            else:
                return visitor.visitChildren(self)
    def rowCount(self):
        """Parse the ``rowCount`` rule: INTEGER_VALUE | QUESTION_MARK (a prepared-statement parameter)."""
        localctx = SqlBaseParser.RowCountContext(self, self._ctx, self.state)
        self.enterRule(localctx, 32, self.RULE_rowCount)
        self._la = 0 # Token type
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 1051
            _la = self._input.LA(1)
            if not(_la==SqlBaseParser.QUESTION_MARK or _la==SqlBaseParser.INTEGER_VALUE):
                self._errHandler.recoverInline(self)
            else:
                self._errHandler.reportMatch(self)
                self.consume()
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class QueryTermContext(ParserRuleContext):
        """Base parse-tree node for the ``queryTerm`` rule.

        The labeled alternatives (QueryTermDefaultContext, SetOperationContext)
        subclass this and take over via ``copyFrom``.
        """
        __slots__ = 'parser'
        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser
        def getRuleIndex(self):
            return SqlBaseParser.RULE_queryTerm
        def copyFrom(self, ctx:ParserRuleContext):
            super().copyFrom(ctx)
    class QueryTermDefaultContext(QueryTermContext):
        """``queryTerm`` alternative: a plain queryPrimary with no set operator."""
        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.QueryTermContext
            super().__init__(parser)
            self.copyFrom(ctx)
        def queryPrimary(self):
            return self.getTypedRuleContext(SqlBaseParser.QueryPrimaryContext,0)
        # Listener / visitor dispatch, guarded so partial listeners/visitors work.
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterQueryTermDefault" ):
                listener.enterQueryTermDefault(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitQueryTermDefault" ):
                listener.exitQueryTermDefault(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitQueryTermDefault" ):
                return visitor.visitQueryTermDefault(self)
            else:
                return visitor.visitChildren(self)
    class SetOperationContext(QueryTermContext):
        """``queryTerm`` alternative: left (INTERSECT | UNION | EXCEPT) setQuantifier? right.

        ``left``/``right`` hold the operand subtrees and ``operator`` holds the
        set-operator token.
        """
        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.QueryTermContext
            super().__init__(parser)
            self.left = None # QueryTermContext
            self.operator = None # Token
            self.right = None # QueryTermContext
            self.copyFrom(ctx)
        # With i=None returns both operand queryTerm children; with an index returns the i-th.
        def queryTerm(self, i:int=None):
            if i is None:
                return self.getTypedRuleContexts(SqlBaseParser.QueryTermContext)
            else:
                return self.getTypedRuleContext(SqlBaseParser.QueryTermContext,i)
        def INTERSECT(self):
            return self.getToken(SqlBaseParser.INTERSECT, 0)
        def setQuantifier(self):
            return self.getTypedRuleContext(SqlBaseParser.SetQuantifierContext,0)
        def UNION(self):
            return self.getToken(SqlBaseParser.UNION, 0)
        def EXCEPT(self):
            return self.getToken(SqlBaseParser.EXCEPT, 0)
        # Listener / visitor dispatch, guarded so partial listeners/visitors work.
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterSetOperation" ):
                listener.enterSetOperation(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitSetOperation" ):
                listener.exitSetOperation(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitSetOperation" ):
                return visitor.visitSetOperation(self)
            else:
                return visitor.visitChildren(self)
    def queryTerm(self, _p:int=0):
        """Parse the left-recursive ``queryTerm`` rule.

        Grammar (reconstructed from the generated code below):
            queryTerm : queryPrimary
                      | queryTerm INTERSECT setQuantifier? queryTerm        # prec 2
                      | queryTerm (UNION | EXCEPT) setQuantifier? queryTerm # prec 1
        INTERSECT binds tighter than UNION/EXCEPT via the ``precpred`` checks.
        ``_p`` is the minimum operator precedence allowed for this invocation
        (ANTLR's standard left-recursion elimination). Do not edit state ids.
        """
        _parentctx = self._ctx
        _parentState = self.state
        localctx = SqlBaseParser.QueryTermContext(self, self._ctx, _parentState)
        _prevctx = localctx
        _startState = 34
        self.enterRecursionRule(localctx, 34, self.RULE_queryTerm, _p)
        self._la = 0 # Token type
        try:
            self.enterOuterAlt(localctx, 1)
            localctx = SqlBaseParser.QueryTermDefaultContext(self, localctx)
            self._ctx = localctx
            _prevctx = localctx
            self.state = 1054
            self.queryPrimary()
            self._ctx.stop = self._input.LT(-1)
            self.state = 1070
            self._errHandler.sync(self)
            _alt = self._interp.adaptivePredict(self._input,125,self._ctx)
            # Loop over trailing set operators, rebuilding the tree left-associatively.
            while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
                if _alt==1:
                    if self._parseListeners is not None:
                        self.triggerExitRuleEvent()
                    _prevctx = localctx
                    self.state = 1068
                    self._errHandler.sync(self)
                    la_ = self._interp.adaptivePredict(self._input,124,self._ctx)
                    if la_ == 1:
                        # INTERSECT alternative (precedence 2).
                        localctx = SqlBaseParser.SetOperationContext(self, SqlBaseParser.QueryTermContext(self, _parentctx, _parentState))
                        localctx.left = _prevctx
                        self.pushNewRecursionContext(localctx, _startState, self.RULE_queryTerm)
                        self.state = 1056
                        if not self.precpred(self._ctx, 2):
                            from antlr4.error.Errors import (
                                FailedPredicateException,
                            )
                            raise FailedPredicateException(self, "self.precpred(self._ctx, 2)")
                        self.state = 1057
                        localctx.operator = self.match(SqlBaseParser.INTERSECT)
                        self.state = 1059
                        self._errHandler.sync(self)
                        _la = self._input.LA(1)
                        if _la==SqlBaseParser.ALL or _la==SqlBaseParser.DISTINCT:
                            self.state = 1058
                            self.setQuantifier()
                        self.state = 1061
                        localctx.right = self.queryTerm(3)
                    elif la_ == 2:
                        # UNION / EXCEPT alternative (precedence 1).
                        localctx = SqlBaseParser.SetOperationContext(self, SqlBaseParser.QueryTermContext(self, _parentctx, _parentState))
                        localctx.left = _prevctx
                        self.pushNewRecursionContext(localctx, _startState, self.RULE_queryTerm)
                        self.state = 1062
                        if not self.precpred(self._ctx, 1):
                            from antlr4.error.Errors import (
                                FailedPredicateException,
                            )
                            raise FailedPredicateException(self, "self.precpred(self._ctx, 1)")
                        self.state = 1063
                        localctx.operator = self._input.LT(1)
                        _la = self._input.LA(1)
                        if not(_la==SqlBaseParser.EXCEPT or _la==SqlBaseParser.UNION):
                            localctx.operator = self._errHandler.recoverInline(self)
                        else:
                            self._errHandler.reportMatch(self)
                            self.consume()
                        self.state = 1065
                        self._errHandler.sync(self)
                        _la = self._input.LA(1)
                        if _la==SqlBaseParser.ALL or _la==SqlBaseParser.DISTINCT:
                            self.state = 1064
                            self.setQuantifier()
                        self.state = 1067
                        localctx.right = self.queryTerm(2)
                self.state = 1072
                self._errHandler.sync(self)
                _alt = self._interp.adaptivePredict(self._input,125,self._ctx)
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.unrollRecursionContexts(_parentctx)
        return localctx
    class QueryPrimaryContext(ParserRuleContext):
        """Base parse-tree node for the ``queryPrimary`` rule.

        Labeled alternatives (Subquery, QueryPrimaryDefault, Table, InlineTable)
        subclass this and take over via ``copyFrom``.
        """
        __slots__ = 'parser'
        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser
        def getRuleIndex(self):
            return SqlBaseParser.RULE_queryPrimary
        def copyFrom(self, ctx:ParserRuleContext):
            super().copyFrom(ctx)
    class SubqueryContext(QueryPrimaryContext):
        """``queryPrimary`` alternative: a parenthesized queryNoWith subquery."""
        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.QueryPrimaryContext
            super().__init__(parser)
            self.copyFrom(ctx)
        def queryNoWith(self):
            return self.getTypedRuleContext(SqlBaseParser.QueryNoWithContext,0)
        # Listener / visitor dispatch, guarded so partial listeners/visitors work.
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterSubquery" ):
                listener.enterSubquery(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitSubquery" ):
                listener.exitSubquery(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitSubquery" ):
                return visitor.visitSubquery(self)
            else:
                return visitor.visitChildren(self)
    class QueryPrimaryDefaultContext(QueryPrimaryContext):
        """``queryPrimary`` alternative: a plain SELECT querySpecification."""
        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.QueryPrimaryContext
            super().__init__(parser)
            self.copyFrom(ctx)
        def querySpecification(self):
            return self.getTypedRuleContext(SqlBaseParser.QuerySpecificationContext,0)
        # Listener / visitor dispatch, guarded so partial listeners/visitors work.
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterQueryPrimaryDefault" ):
                listener.enterQueryPrimaryDefault(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitQueryPrimaryDefault" ):
                listener.exitQueryPrimaryDefault(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitQueryPrimaryDefault" ):
                return visitor.visitQueryPrimaryDefault(self)
            else:
                return visitor.visitChildren(self)
    class TableContext(QueryPrimaryContext):
        """``queryPrimary`` alternative: TABLE qualifiedName."""
        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.QueryPrimaryContext
            super().__init__(parser)
            self.copyFrom(ctx)
        def TABLE(self):
            return self.getToken(SqlBaseParser.TABLE, 0)
        def qualifiedName(self):
            return self.getTypedRuleContext(SqlBaseParser.QualifiedNameContext,0)
        # Listener / visitor dispatch, guarded so partial listeners/visitors work.
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterTable" ):
                listener.enterTable(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitTable" ):
                listener.exitTable(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitTable" ):
                return visitor.visitTable(self)
            else:
                return visitor.visitChildren(self)
    class InlineTableContext(QueryPrimaryContext):
        """``queryPrimary`` alternative: VALUES expression (, expression)* — an inline table."""
        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.QueryPrimaryContext
            super().__init__(parser)
            self.copyFrom(ctx)
        def VALUES(self):
            return self.getToken(SqlBaseParser.VALUES, 0)
        # With i=None returns all row expressions; with an index returns the i-th.
        def expression(self, i:int=None):
            if i is None:
                return self.getTypedRuleContexts(SqlBaseParser.ExpressionContext)
            else:
                return self.getTypedRuleContext(SqlBaseParser.ExpressionContext,i)
        # Listener / visitor dispatch, guarded so partial listeners/visitors work.
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterInlineTable" ):
                listener.enterInlineTable(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitInlineTable" ):
                listener.exitInlineTable(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitInlineTable" ):
                return visitor.visitInlineTable(self)
            else:
                return visitor.visitChildren(self)
    def queryPrimary(self):
        """Parse the ``queryPrimary`` rule.

        Grammar (reconstructed from the generated code below):
            querySpecification                        # queryPrimaryDefault
            | TABLE qualifiedName                     # table
            | VALUES expression (T__3 expression)*    # inlineTable
            | T__1 queryNoWith T__2                   # subquery
        T__1/T__2/T__3 are implicit literals — presumably '(' ')' ','; confirm
        against the .g4 source. Alternative is chosen on one token of lookahead.
        """
        localctx = SqlBaseParser.QueryPrimaryContext(self, self._ctx, self.state)
        self.enterRule(localctx, 36, self.RULE_queryPrimary)
        try:
            self.state = 1089
            self._errHandler.sync(self)
            token = self._input.LA(1)
            if token in [SqlBaseParser.SELECT]:
                localctx = SqlBaseParser.QueryPrimaryDefaultContext(self, localctx)
                self.enterOuterAlt(localctx, 1)
                self.state = 1073
                self.querySpecification()
            elif token in [SqlBaseParser.TABLE]:
                localctx = SqlBaseParser.TableContext(self, localctx)
                self.enterOuterAlt(localctx, 2)
                self.state = 1074
                self.match(SqlBaseParser.TABLE)
                self.state = 1075
                self.qualifiedName()
            elif token in [SqlBaseParser.VALUES]:
                localctx = SqlBaseParser.InlineTableContext(self, localctx)
                self.enterOuterAlt(localctx, 3)
                self.state = 1076
                self.match(SqlBaseParser.VALUES)
                self.state = 1077
                self.expression()
                self.state = 1082
                self._errHandler.sync(self)
                _alt = self._interp.adaptivePredict(self._input,126,self._ctx)
                # Additional comma-separated row expressions, via adaptive prediction.
                while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
                    if _alt==1:
                        self.state = 1078
                        self.match(SqlBaseParser.T__3)
                        self.state = 1079
                        self.expression()
                    self.state = 1084
                    self._errHandler.sync(self)
                    _alt = self._interp.adaptivePredict(self._input,126,self._ctx)
            elif token in [SqlBaseParser.T__1]:
                localctx = SqlBaseParser.SubqueryContext(self, localctx)
                self.enterOuterAlt(localctx, 4)
                self.state = 1085
                self.match(SqlBaseParser.T__1)
                self.state = 1086
                self.queryNoWith()
                self.state = 1087
                self.match(SqlBaseParser.T__2)
            else:
                raise NoViableAltException(self)
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class SortItemContext(ParserRuleContext):
        """Parse-tree node for the ``sortItem`` rule.

        ``ordering`` holds the optional ASC/DESC token and ``nullOrdering`` the
        optional FIRST/LAST token following NULLS.
        """
        __slots__ = 'parser'
        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser
            self.ordering = None # Token
            self.nullOrdering = None # Token
        def expression(self):
            return self.getTypedRuleContext(SqlBaseParser.ExpressionContext,0)
        def NULLS(self):
            return self.getToken(SqlBaseParser.NULLS, 0)
        def ASC(self):
            return self.getToken(SqlBaseParser.ASC, 0)
        def DESC(self):
            return self.getToken(SqlBaseParser.DESC, 0)
        def FIRST(self):
            return self.getToken(SqlBaseParser.FIRST, 0)
        def LAST(self):
            return self.getToken(SqlBaseParser.LAST, 0)
        def getRuleIndex(self):
            return SqlBaseParser.RULE_sortItem
        # Listener / visitor dispatch, guarded so partial listeners/visitors work.
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterSortItem" ):
                listener.enterSortItem(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitSortItem" ):
                listener.exitSortItem(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitSortItem" ):
                return visitor.visitSortItem(self)
            else:
                return visitor.visitChildren(self)
    def sortItem(self):
        """Parse the ``sortItem`` rule: expression (ASC | DESC)? (NULLS (FIRST | LAST))?.

        The optional direction token is stored in localctx.ordering and the
        optional null-placement token in localctx.nullOrdering.
        """
        localctx = SqlBaseParser.SortItemContext(self, self._ctx, self.state)
        self.enterRule(localctx, 38, self.RULE_sortItem)
        self._la = 0 # Token type
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 1091
            self.expression()
            self.state = 1093
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            # Optional ASC / DESC direction.
            if _la==SqlBaseParser.ASC or _la==SqlBaseParser.DESC:
                self.state = 1092
                localctx.ordering = self._input.LT(1)
                _la = self._input.LA(1)
                if not(_la==SqlBaseParser.ASC or _la==SqlBaseParser.DESC):
                    localctx.ordering = self._errHandler.recoverInline(self)
                else:
                    self._errHandler.reportMatch(self)
                    self.consume()
            self.state = 1097
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            # Optional NULLS FIRST / NULLS LAST placement.
            if _la==SqlBaseParser.NULLS:
                self.state = 1095
                self.match(SqlBaseParser.NULLS)
                self.state = 1096
                localctx.nullOrdering = self._input.LT(1)
                _la = self._input.LA(1)
                if not(_la==SqlBaseParser.FIRST or _la==SqlBaseParser.LAST):
                    localctx.nullOrdering = self._errHandler.recoverInline(self)
                else:
                    self._errHandler.reportMatch(self)
                    self.consume()
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class QuerySpecificationContext(ParserRuleContext):
        """Parse-tree node for the ``querySpecification`` rule (a SELECT statement body).

        ``where`` and ``having`` hold the labeled boolean-expression subtrees.
        """
        __slots__ = 'parser'
        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser
            self.where = None # BooleanExpressionContext
            self.having = None # BooleanExpressionContext
        def SELECT(self):
            return self.getToken(SqlBaseParser.SELECT, 0)
        # With i=None returns all select items; with an index returns the i-th.
        def selectItem(self, i:int=None):
            if i is None:
                return self.getTypedRuleContexts(SqlBaseParser.SelectItemContext)
            else:
                return self.getTypedRuleContext(SqlBaseParser.SelectItemContext,i)
        def setQuantifier(self):
            return self.getTypedRuleContext(SqlBaseParser.SetQuantifierContext,0)
        def FROM(self):
            return self.getToken(SqlBaseParser.FROM, 0)
        def relation(self, i:int=None):
            if i is None:
                return self.getTypedRuleContexts(SqlBaseParser.RelationContext)
            else:
                return self.getTypedRuleContext(SqlBaseParser.RelationContext,i)
        def WHERE(self):
            return self.getToken(SqlBaseParser.WHERE, 0)
        def GROUP(self):
            return self.getToken(SqlBaseParser.GROUP, 0)
        def BY(self):
            return self.getToken(SqlBaseParser.BY, 0)
        def groupBy(self):
            return self.getTypedRuleContext(SqlBaseParser.GroupByContext,0)
        def HAVING(self):
            return self.getToken(SqlBaseParser.HAVING, 0)
        def WINDOW(self):
            return self.getToken(SqlBaseParser.WINDOW, 0)
        def windowDefinition(self, i:int=None):
            if i is None:
                return self.getTypedRuleContexts(SqlBaseParser.WindowDefinitionContext)
            else:
                return self.getTypedRuleContext(SqlBaseParser.WindowDefinitionContext,i)
        # Covers both the WHERE and HAVING expressions (index 0 and 1 when both present).
        def booleanExpression(self, i:int=None):
            if i is None:
                return self.getTypedRuleContexts(SqlBaseParser.BooleanExpressionContext)
            else:
                return self.getTypedRuleContext(SqlBaseParser.BooleanExpressionContext,i)
        def getRuleIndex(self):
            return SqlBaseParser.RULE_querySpecification
        # Listener / visitor dispatch, guarded so partial listeners/visitors work.
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterQuerySpecification" ):
                listener.enterQuerySpecification(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitQuerySpecification" ):
                listener.exitQuerySpecification(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitQuerySpecification" ):
                return visitor.visitQuerySpecification(self)
            else:
                return visitor.visitChildren(self)
    def querySpecification(self):
        """Parse the ``querySpecification`` rule.

        Grammar (reconstructed from the generated code below):
            SELECT setQuantifier? selectItem (T__3 selectItem)*
            (FROM relation (T__3 relation)*)?
            (WHERE where=booleanExpression)?
            (GROUP BY groupBy)?
            (HAVING having=booleanExpression)?
            (WINDOW windowDefinition (T__3 windowDefinition)*)?

        Each optional clause is resolved by adaptive prediction (the numeric
        second argument to adaptivePredict is the ATN decision index).
        Generated by ANTLR — do not edit state/decision numbers by hand.
        """
        localctx = SqlBaseParser.QuerySpecificationContext(self, self._ctx, self.state)
        self.enterRule(localctx, 40, self.RULE_querySpecification)
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 1099
            self.match(SqlBaseParser.SELECT)
            self.state = 1101
            self._errHandler.sync(self)
            la_ = self._interp.adaptivePredict(self._input,130,self._ctx)
            # Optional ALL/DISTINCT quantifier.
            if la_ == 1:
                self.state = 1100
                self.setQuantifier()
            self.state = 1103
            self.selectItem()
            self.state = 1108
            self._errHandler.sync(self)
            _alt = self._interp.adaptivePredict(self._input,131,self._ctx)
            # Additional comma-separated select items.
            while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
                if _alt==1:
                    self.state = 1104
                    self.match(SqlBaseParser.T__3)
                    self.state = 1105
                    self.selectItem()
                self.state = 1110
                self._errHandler.sync(self)
                _alt = self._interp.adaptivePredict(self._input,131,self._ctx)
            self.state = 1120
            self._errHandler.sync(self)
            la_ = self._interp.adaptivePredict(self._input,133,self._ctx)
            # Optional FROM clause with comma-separated relations.
            if la_ == 1:
                self.state = 1111
                self.match(SqlBaseParser.FROM)
                self.state = 1112
                self.relation(0)
                self.state = 1117
                self._errHandler.sync(self)
                _alt = self._interp.adaptivePredict(self._input,132,self._ctx)
                while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
                    if _alt==1:
                        self.state = 1113
                        self.match(SqlBaseParser.T__3)
                        self.state = 1114
                        self.relation(0)
                    self.state = 1119
                    self._errHandler.sync(self)
                    _alt = self._interp.adaptivePredict(self._input,132,self._ctx)
            self.state = 1124
            self._errHandler.sync(self)
            la_ = self._interp.adaptivePredict(self._input,134,self._ctx)
            # Optional WHERE clause.
            if la_ == 1:
                self.state = 1122
                self.match(SqlBaseParser.WHERE)
                self.state = 1123
                localctx.where = self.booleanExpression(0)
            self.state = 1129
            self._errHandler.sync(self)
            la_ = self._interp.adaptivePredict(self._input,135,self._ctx)
            # Optional GROUP BY clause.
            if la_ == 1:
                self.state = 1126
                self.match(SqlBaseParser.GROUP)
                self.state = 1127
                self.match(SqlBaseParser.BY)
                self.state = 1128
                self.groupBy()
            self.state = 1133
            self._errHandler.sync(self)
            la_ = self._interp.adaptivePredict(self._input,136,self._ctx)
            # Optional HAVING clause.
            if la_ == 1:
                self.state = 1131
                self.match(SqlBaseParser.HAVING)
                self.state = 1132
                localctx.having = self.booleanExpression(0)
            self.state = 1144
            self._errHandler.sync(self)
            la_ = self._interp.adaptivePredict(self._input,138,self._ctx)
            # Optional WINDOW clause with comma-separated window definitions.
            if la_ == 1:
                self.state = 1135
                self.match(SqlBaseParser.WINDOW)
                self.state = 1136
                self.windowDefinition()
                self.state = 1141
                self._errHandler.sync(self)
                _alt = self._interp.adaptivePredict(self._input,137,self._ctx)
                while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
                    if _alt==1:
                        self.state = 1137
                        self.match(SqlBaseParser.T__3)
                        self.state = 1138
                        self.windowDefinition()
                    self.state = 1143
                    self._errHandler.sync(self)
                    _alt = self._interp.adaptivePredict(self._input,137,self._ctx)
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class GroupByContext(ParserRuleContext):
        """Parse-tree node for the ``groupBy`` rule (quantifier + grouping elements)."""
        __slots__ = 'parser'
        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser
        # With i=None returns all grouping elements; with an index returns the i-th.
        def groupingElement(self, i:int=None):
            if i is None:
                return self.getTypedRuleContexts(SqlBaseParser.GroupingElementContext)
            else:
                return self.getTypedRuleContext(SqlBaseParser.GroupingElementContext,i)
        def setQuantifier(self):
            return self.getTypedRuleContext(SqlBaseParser.SetQuantifierContext,0)
        def getRuleIndex(self):
            return SqlBaseParser.RULE_groupBy
        # Listener / visitor dispatch, guarded so partial listeners/visitors work.
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterGroupBy" ):
                listener.enterGroupBy(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitGroupBy" ):
                listener.exitGroupBy(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitGroupBy" ):
                return visitor.visitGroupBy(self)
            else:
                return visitor.visitChildren(self)
    def groupBy(self):
        """Parse the ``groupBy`` rule: setQuantifier? groupingElement (T__3 groupingElement)*."""
        localctx = SqlBaseParser.GroupByContext(self, self._ctx, self.state)
        self.enterRule(localctx, 42, self.RULE_groupBy)
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 1147
            self._errHandler.sync(self)
            la_ = self._interp.adaptivePredict(self._input,139,self._ctx)
            # Optional ALL/DISTINCT quantifier, resolved by adaptive prediction.
            if la_ == 1:
                self.state = 1146
                self.setQuantifier()
            self.state = 1149
            self.groupingElement()
            self.state = 1154
            self._errHandler.sync(self)
            _alt = self._interp.adaptivePredict(self._input,140,self._ctx)
            # Additional comma-separated grouping elements.
            while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
                if _alt==1:
                    self.state = 1150
                    self.match(SqlBaseParser.T__3)
                    self.state = 1151
                    self.groupingElement()
                self.state = 1156
                self._errHandler.sync(self)
                _alt = self._interp.adaptivePredict(self._input,140,self._ctx)
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class GroupingElementContext(ParserRuleContext):
        """Base parse-tree node for the ``groupingElement`` rule.

        Labeled alternatives (MultipleGroupingSets, SingleGroupingSet, Cube, ...)
        subclass this and take over via ``copyFrom``.
        """
        __slots__ = 'parser'
        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser
        def getRuleIndex(self):
            return SqlBaseParser.RULE_groupingElement
        def copyFrom(self, ctx:ParserRuleContext):
            super().copyFrom(ctx)
    class MultipleGroupingSetsContext(GroupingElementContext):
        """``groupingElement`` alternative: GROUPING SETS (...) with multiple grouping sets."""
        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.GroupingElementContext
            super().__init__(parser)
            self.copyFrom(ctx)
        def GROUPING(self):
            return self.getToken(SqlBaseParser.GROUPING, 0)
        def SETS(self):
            return self.getToken(SqlBaseParser.SETS, 0)
        # With i=None returns all groupingSet children; with an index returns the i-th.
        def groupingSet(self, i:int=None):
            if i is None:
                return self.getTypedRuleContexts(SqlBaseParser.GroupingSetContext)
            else:
                return self.getTypedRuleContext(SqlBaseParser.GroupingSetContext,i)
        # Listener / visitor dispatch, guarded so partial listeners/visitors work.
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterMultipleGroupingSets" ):
                listener.enterMultipleGroupingSets(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitMultipleGroupingSets" ):
                listener.exitMultipleGroupingSets(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitMultipleGroupingSets" ):
                return visitor.visitMultipleGroupingSets(self)
            else:
                return visitor.visitChildren(self)
class SingleGroupingSetContext(GroupingElementContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.GroupingElementContext
super().__init__(parser)
self.copyFrom(ctx)
def groupingSet(self):
return self.getTypedRuleContext(SqlBaseParser.GroupingSetContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterSingleGroupingSet" ):
listener.enterSingleGroupingSet(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitSingleGroupingSet" ):
listener.exitSingleGroupingSet(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitSingleGroupingSet" ):
return visitor.visitSingleGroupingSet(self)
else:
return visitor.visitChildren(self)
class CubeContext(GroupingElementContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.GroupingElementContext
super().__init__(parser)
self.copyFrom(ctx)
def CUBE(self):
return self.getToken(SqlBaseParser.CUBE, 0)
def expression(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(SqlBaseParser.ExpressionContext)
else:
return self.getTypedRuleContext(SqlBaseParser.ExpressionContext,i)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterCube" ):
listener.enterCube(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitCube" ):
listener.exitCube(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitCube" ):
return visitor.visitCube(self)
else:
return visitor.visitChildren(self)
class RollupContext(GroupingElementContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.GroupingElementContext
super().__init__(parser)
self.copyFrom(ctx)
def ROLLUP(self):
return self.getToken(SqlBaseParser.ROLLUP, 0)
def expression(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(SqlBaseParser.ExpressionContext)
else:
return self.getTypedRuleContext(SqlBaseParser.ExpressionContext,i)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterRollup" ):
listener.enterRollup(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitRollup" ):
listener.exitRollup(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitRollup" ):
return visitor.visitRollup(self)
else:
return visitor.visitChildren(self)
    def groupingElement(self):
        # groupingElement : groupingSet                                          #singleGroupingSet
        #                 | ROLLUP '(' (expression (',' expression)*)? ')'       #rollup
        #                 | CUBE '(' (expression (',' expression)*)? ')'         #cube
        #                 | GROUPING SETS '(' groupingSet (',' groupingSet)* ')' #multipleGroupingSets
        # ANTLR-generated: the 'self.state' constants index the serialized ATN and
        # the enormous bitmask tests encode the FIRST set of 'expression' (tokens
        # split into 64-wide words); neither may be edited by hand.
        localctx = SqlBaseParser.GroupingElementContext(self, self._ctx, self.state)
        self.enterRule(localctx, 44, self.RULE_groupingElement)
        self._la = 0 # Token type
        try:
            self.state = 1197
            self._errHandler.sync(self)
            # Decision 146 selects which labeled alternative applies.
            la_ = self._interp.adaptivePredict(self._input,146,self._ctx)
            if la_ == 1:
                # Alternative 1: a bare grouping set.
                localctx = SqlBaseParser.SingleGroupingSetContext(self, localctx)
                self.enterOuterAlt(localctx, 1)
                self.state = 1157
                self.groupingSet()
            elif la_ == 2:
                # Alternative 2: ROLLUP '(' expression-list? ')'.
                localctx = SqlBaseParser.RollupContext(self, localctx)
                self.enterOuterAlt(localctx, 2)
                self.state = 1158
                self.match(SqlBaseParser.ROLLUP)
                self.state = 1159
                self.match(SqlBaseParser.T__1)
                self.state = 1168
                self._errHandler.sync(self)
                _la = self._input.LA(1)
                # Membership test: does the lookahead token start an 'expression'?
                if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << SqlBaseParser.T__1) | (1 << SqlBaseParser.ADD) | (1 << SqlBaseParser.ADMIN) | (1 << SqlBaseParser.AFTER) | (1 << SqlBaseParser.ALL) | (1 << SqlBaseParser.ANALYZE) | (1 << SqlBaseParser.ANY) | (1 << SqlBaseParser.ARRAY) | (1 << SqlBaseParser.ASC) | (1 << SqlBaseParser.AT) | (1 << SqlBaseParser.AUTHORIZATION) | (1 << SqlBaseParser.BERNOULLI) | (1 << SqlBaseParser.CALL) | (1 << SqlBaseParser.CASCADE) | (1 << SqlBaseParser.CASE) | (1 << SqlBaseParser.CAST) | (1 << SqlBaseParser.CATALOGS) | (1 << SqlBaseParser.COLUMN) | (1 << SqlBaseParser.COLUMNS) | (1 << SqlBaseParser.COMMENT) | (1 << SqlBaseParser.COMMIT) | (1 << SqlBaseParser.COMMITTED) | (1 << SqlBaseParser.COUNT) | (1 << SqlBaseParser.CURRENT) | (1 << SqlBaseParser.CURRENT_CATALOG) | (1 << SqlBaseParser.CURRENT_DATE) | (1 << SqlBaseParser.CURRENT_PATH) | (1 << SqlBaseParser.CURRENT_SCHEMA) | (1 << SqlBaseParser.CURRENT_TIME) | (1 << SqlBaseParser.CURRENT_TIMESTAMP) | (1 << SqlBaseParser.CURRENT_USER) | (1 << SqlBaseParser.DATA) | (1 << SqlBaseParser.DATE) | (1 << SqlBaseParser.DAY) | (1 << SqlBaseParser.DEFINER) | (1 << SqlBaseParser.DENY))) != 0) or ((((_la - 64)) & ~0x3f) == 0 and ((1 << (_la - 64)) & ((1 << (SqlBaseParser.DESC - 64)) | (1 << (SqlBaseParser.DEFINE - 64)) | (1 << (SqlBaseParser.DISTRIBUTED - 64)) | (1 << (SqlBaseParser.DOUBLE - 64)) | (1 << (SqlBaseParser.EMPTY - 64)) | (1 << (SqlBaseParser.ERROR - 64)) | (1 << (SqlBaseParser.EXCLUDING - 64)) | (1 << (SqlBaseParser.EXISTS - 64)) | (1 << (SqlBaseParser.EXPLAIN - 64)) | (1 << (SqlBaseParser.EXTRACT - 64)) | (1 << (SqlBaseParser.FALSE - 64)) | (1 << (SqlBaseParser.FETCH - 64)) | (1 << (SqlBaseParser.FILTER - 64)) | (1 << (SqlBaseParser.FINAL - 64)) | (1 << (SqlBaseParser.FIRST - 64)) | (1 << (SqlBaseParser.FOLLOWING - 64)) | (1 << (SqlBaseParser.FORMAT - 64)) | (1 << (SqlBaseParser.FUNCTIONS - 64)) | (1 << (SqlBaseParser.GRANT - 64)) | (1 << (SqlBaseParser.GRANTED - 64)) | (1 << (SqlBaseParser.GRANTS 
- 64)) | (1 << (SqlBaseParser.GRAPHVIZ - 64)) | (1 << (SqlBaseParser.GROUPING - 64)) | (1 << (SqlBaseParser.GROUPS - 64)) | (1 << (SqlBaseParser.HOUR - 64)) | (1 << (SqlBaseParser.IF - 64)) | (1 << (SqlBaseParser.IGNORE - 64)) | (1 << (SqlBaseParser.INCLUDING - 64)) | (1 << (SqlBaseParser.INITIAL - 64)) | (1 << (SqlBaseParser.INPUT - 64)) | (1 << (SqlBaseParser.INTERVAL - 64)) | (1 << (SqlBaseParser.INVOKER - 64)) | (1 << (SqlBaseParser.IO - 64)) | (1 << (SqlBaseParser.ISOLATION - 64)) | (1 << (SqlBaseParser.JSON - 64)) | (1 << (SqlBaseParser.LAST - 64)) | (1 << (SqlBaseParser.LATERAL - 64)) | (1 << (SqlBaseParser.LEVEL - 64)) | (1 << (SqlBaseParser.LIMIT - 64)) | (1 << (SqlBaseParser.LISTAGG - 64)) | (1 << (SqlBaseParser.LOCAL - 64)) | (1 << (SqlBaseParser.LOCALTIME - 64)))) != 0) or ((((_la - 128)) & ~0x3f) == 0 and ((1 << (_la - 128)) & ((1 << (SqlBaseParser.LOCALTIMESTAMP - 128)) | (1 << (SqlBaseParser.LOGICAL - 128)) | (1 << (SqlBaseParser.MAP - 128)) | (1 << (SqlBaseParser.MATCH - 128)) | (1 << (SqlBaseParser.MATCHED - 128)) | (1 << (SqlBaseParser.MATCHES - 128)) | (1 << (SqlBaseParser.MATCH_RECOGNIZE - 128)) | (1 << (SqlBaseParser.MATERIALIZED - 128)) | (1 << (SqlBaseParser.MEASURES - 128)) | (1 << (SqlBaseParser.MERGE - 128)) | (1 << (SqlBaseParser.MINUTE - 128)) | (1 << (SqlBaseParser.MONTH - 128)) | (1 << (SqlBaseParser.NEXT - 128)) | (1 << (SqlBaseParser.NFC - 128)) | (1 << (SqlBaseParser.NFD - 128)) | (1 << (SqlBaseParser.NFKC - 128)) | (1 << (SqlBaseParser.NFKD - 128)) | (1 << (SqlBaseParser.NO - 128)) | (1 << (SqlBaseParser.NONE - 128)) | (1 << (SqlBaseParser.NORMALIZE - 128)) | (1 << (SqlBaseParser.NOT - 128)) | (1 << (SqlBaseParser.NULL - 128)) | (1 << (SqlBaseParser.NULLIF - 128)) | (1 << (SqlBaseParser.NULLS - 128)) | (1 << (SqlBaseParser.OFFSET - 128)) | (1 << (SqlBaseParser.OMIT - 128)) | (1 << (SqlBaseParser.OF - 128)) | (1 << (SqlBaseParser.ONE - 128)) | (1 << (SqlBaseParser.ONLY - 128)) | (1 << (SqlBaseParser.OPTION - 128)) | (1 << 
(SqlBaseParser.ORDINALITY - 128)) | (1 << (SqlBaseParser.OUTPUT - 128)) | (1 << (SqlBaseParser.OVER - 128)) | (1 << (SqlBaseParser.OVERFLOW - 128)) | (1 << (SqlBaseParser.PARTITION - 128)) | (1 << (SqlBaseParser.PARTITIONS - 128)) | (1 << (SqlBaseParser.PAST - 128)) | (1 << (SqlBaseParser.PATH - 128)) | (1 << (SqlBaseParser.PATTERN - 128)) | (1 << (SqlBaseParser.PER - 128)) | (1 << (SqlBaseParser.PERMUTE - 128)) | (1 << (SqlBaseParser.POSITION - 128)) | (1 << (SqlBaseParser.PRECEDING - 128)) | (1 << (SqlBaseParser.PRECISION - 128)) | (1 << (SqlBaseParser.PRIVILEGES - 128)) | (1 << (SqlBaseParser.PROPERTIES - 128)) | (1 << (SqlBaseParser.RANGE - 128)) | (1 << (SqlBaseParser.READ - 128)) | (1 << (SqlBaseParser.REFRESH - 128)) | (1 << (SqlBaseParser.RENAME - 128)) | (1 << (SqlBaseParser.REPEATABLE - 128)) | (1 << (SqlBaseParser.REPLACE - 128)) | (1 << (SqlBaseParser.RESET - 128)) | (1 << (SqlBaseParser.RESPECT - 128)) | (1 << (SqlBaseParser.RESTRICT - 128)) | (1 << (SqlBaseParser.REVOKE - 128)))) != 0) or ((((_la - 192)) & ~0x3f) == 0 and ((1 << (_la - 192)) & ((1 << (SqlBaseParser.ROLE - 192)) | (1 << (SqlBaseParser.ROLES - 192)) | (1 << (SqlBaseParser.ROLLBACK - 192)) | (1 << (SqlBaseParser.ROW - 192)) | (1 << (SqlBaseParser.ROWS - 192)) | (1 << (SqlBaseParser.RUNNING - 192)) | (1 << (SqlBaseParser.SCHEMA - 192)) | (1 << (SqlBaseParser.SCHEMAS - 192)) | (1 << (SqlBaseParser.SECOND - 192)) | (1 << (SqlBaseParser.SECURITY - 192)) | (1 << (SqlBaseParser.SEEK - 192)) | (1 << (SqlBaseParser.SERIALIZABLE - 192)) | (1 << (SqlBaseParser.SESSION - 192)) | (1 << (SqlBaseParser.SET - 192)) | (1 << (SqlBaseParser.SETS - 192)) | (1 << (SqlBaseParser.SHOW - 192)) | (1 << (SqlBaseParser.SOME - 192)) | (1 << (SqlBaseParser.START - 192)) | (1 << (SqlBaseParser.STATS - 192)) | (1 << (SqlBaseParser.SUBSET - 192)) | (1 << (SqlBaseParser.SUBSTRING - 192)) | (1 << (SqlBaseParser.SYSTEM - 192)) | (1 << (SqlBaseParser.TABLES - 192)) | (1 << (SqlBaseParser.TABLESAMPLE - 192)) | (1 << 
(SqlBaseParser.TEXT - 192)) | (1 << (SqlBaseParser.TIES - 192)) | (1 << (SqlBaseParser.TIME - 192)) | (1 << (SqlBaseParser.TIMESTAMP - 192)) | (1 << (SqlBaseParser.TO - 192)) | (1 << (SqlBaseParser.TRANSACTION - 192)) | (1 << (SqlBaseParser.TRUE - 192)) | (1 << (SqlBaseParser.TRUNCATE - 192)) | (1 << (SqlBaseParser.TRY_CAST - 192)) | (1 << (SqlBaseParser.TYPE - 192)) | (1 << (SqlBaseParser.UNBOUNDED - 192)) | (1 << (SqlBaseParser.UNCOMMITTED - 192)) | (1 << (SqlBaseParser.UNMATCHED - 192)) | (1 << (SqlBaseParser.UPDATE - 192)) | (1 << (SqlBaseParser.USE - 192)) | (1 << (SqlBaseParser.USER - 192)) | (1 << (SqlBaseParser.VALIDATE - 192)) | (1 << (SqlBaseParser.VERBOSE - 192)) | (1 << (SqlBaseParser.VERSION - 192)) | (1 << (SqlBaseParser.VIEW - 192)) | (1 << (SqlBaseParser.WINDOW - 192)) | (1 << (SqlBaseParser.WITHIN - 192)) | (1 << (SqlBaseParser.WITHOUT - 192)) | (1 << (SqlBaseParser.WORK - 192)) | (1 << (SqlBaseParser.WRITE - 192)) | (1 << (SqlBaseParser.YEAR - 192)) | (1 << (SqlBaseParser.ZONE - 192)))) != 0) or ((((_la - 261)) & ~0x3f) == 0 and ((1 << (_la - 261)) & ((1 << (SqlBaseParser.PLUS - 261)) | (1 << (SqlBaseParser.MINUS - 261)) | (1 << (SqlBaseParser.QUESTION_MARK - 261)) | (1 << (SqlBaseParser.STRING - 261)) | (1 << (SqlBaseParser.UNICODE_STRING - 261)) | (1 << (SqlBaseParser.BINARY_LITERAL - 261)) | (1 << (SqlBaseParser.INTEGER_VALUE - 261)) | (1 << (SqlBaseParser.DECIMAL_VALUE - 261)) | (1 << (SqlBaseParser.DOUBLE_VALUE - 261)) | (1 << (SqlBaseParser.IDENTIFIER - 261)) | (1 << (SqlBaseParser.DIGIT_IDENTIFIER - 261)) | (1 << (SqlBaseParser.QUOTED_IDENTIFIER - 261)) | (1 << (SqlBaseParser.BACKQUOTED_IDENTIFIER - 261)))) != 0):
                    self.state = 1160
                    self.expression()
                    self.state = 1165
                    self._errHandler.sync(self)
                    _la = self._input.LA(1)
                    # Consume ',' expression repetitions inside ROLLUP(...).
                    while _la==SqlBaseParser.T__3:
                        self.state = 1161
                        self.match(SqlBaseParser.T__3)
                        self.state = 1162
                        self.expression()
                        self.state = 1167
                        self._errHandler.sync(self)
                        _la = self._input.LA(1)
                self.state = 1170
                self.match(SqlBaseParser.T__2)
            elif la_ == 3:
                # Alternative 3: CUBE '(' expression-list? ')'.
                localctx = SqlBaseParser.CubeContext(self, localctx)
                self.enterOuterAlt(localctx, 3)
                self.state = 1171
                self.match(SqlBaseParser.CUBE)
                self.state = 1172
                self.match(SqlBaseParser.T__1)
                self.state = 1181
                self._errHandler.sync(self)
                _la = self._input.LA(1)
                # Same FIRST(expression) membership test as the ROLLUP branch.
                if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << SqlBaseParser.T__1) | (1 << SqlBaseParser.ADD) | (1 << SqlBaseParser.ADMIN) | (1 << SqlBaseParser.AFTER) | (1 << SqlBaseParser.ALL) | (1 << SqlBaseParser.ANALYZE) | (1 << SqlBaseParser.ANY) | (1 << SqlBaseParser.ARRAY) | (1 << SqlBaseParser.ASC) | (1 << SqlBaseParser.AT) | (1 << SqlBaseParser.AUTHORIZATION) | (1 << SqlBaseParser.BERNOULLI) | (1 << SqlBaseParser.CALL) | (1 << SqlBaseParser.CASCADE) | (1 << SqlBaseParser.CASE) | (1 << SqlBaseParser.CAST) | (1 << SqlBaseParser.CATALOGS) | (1 << SqlBaseParser.COLUMN) | (1 << SqlBaseParser.COLUMNS) | (1 << SqlBaseParser.COMMENT) | (1 << SqlBaseParser.COMMIT) | (1 << SqlBaseParser.COMMITTED) | (1 << SqlBaseParser.COUNT) | (1 << SqlBaseParser.CURRENT) | (1 << SqlBaseParser.CURRENT_CATALOG) | (1 << SqlBaseParser.CURRENT_DATE) | (1 << SqlBaseParser.CURRENT_PATH) | (1 << SqlBaseParser.CURRENT_SCHEMA) | (1 << SqlBaseParser.CURRENT_TIME) | (1 << SqlBaseParser.CURRENT_TIMESTAMP) | (1 << SqlBaseParser.CURRENT_USER) | (1 << SqlBaseParser.DATA) | (1 << SqlBaseParser.DATE) | (1 << SqlBaseParser.DAY) | (1 << SqlBaseParser.DEFINER) | (1 << SqlBaseParser.DENY))) != 0) or ((((_la - 64)) & ~0x3f) == 0 and ((1 << (_la - 64)) & ((1 << (SqlBaseParser.DESC - 64)) | (1 << (SqlBaseParser.DEFINE - 64)) | (1 << (SqlBaseParser.DISTRIBUTED - 64)) | (1 << (SqlBaseParser.DOUBLE - 64)) | (1 << (SqlBaseParser.EMPTY - 64)) | (1 << (SqlBaseParser.ERROR - 64)) | (1 << (SqlBaseParser.EXCLUDING - 64)) | (1 << (SqlBaseParser.EXISTS - 64)) | (1 << (SqlBaseParser.EXPLAIN - 64)) | (1 << (SqlBaseParser.EXTRACT - 64)) | (1 << (SqlBaseParser.FALSE - 64)) | (1 << (SqlBaseParser.FETCH - 64)) | (1 << (SqlBaseParser.FILTER - 64)) | (1 << (SqlBaseParser.FINAL - 64)) | (1 << (SqlBaseParser.FIRST - 64)) | (1 << (SqlBaseParser.FOLLOWING - 64)) | (1 << (SqlBaseParser.FORMAT - 64)) | (1 << (SqlBaseParser.FUNCTIONS - 64)) | (1 << (SqlBaseParser.GRANT - 64)) | (1 << (SqlBaseParser.GRANTED - 64)) | (1 << (SqlBaseParser.GRANTS 
- 64)) | (1 << (SqlBaseParser.GRAPHVIZ - 64)) | (1 << (SqlBaseParser.GROUPING - 64)) | (1 << (SqlBaseParser.GROUPS - 64)) | (1 << (SqlBaseParser.HOUR - 64)) | (1 << (SqlBaseParser.IF - 64)) | (1 << (SqlBaseParser.IGNORE - 64)) | (1 << (SqlBaseParser.INCLUDING - 64)) | (1 << (SqlBaseParser.INITIAL - 64)) | (1 << (SqlBaseParser.INPUT - 64)) | (1 << (SqlBaseParser.INTERVAL - 64)) | (1 << (SqlBaseParser.INVOKER - 64)) | (1 << (SqlBaseParser.IO - 64)) | (1 << (SqlBaseParser.ISOLATION - 64)) | (1 << (SqlBaseParser.JSON - 64)) | (1 << (SqlBaseParser.LAST - 64)) | (1 << (SqlBaseParser.LATERAL - 64)) | (1 << (SqlBaseParser.LEVEL - 64)) | (1 << (SqlBaseParser.LIMIT - 64)) | (1 << (SqlBaseParser.LISTAGG - 64)) | (1 << (SqlBaseParser.LOCAL - 64)) | (1 << (SqlBaseParser.LOCALTIME - 64)))) != 0) or ((((_la - 128)) & ~0x3f) == 0 and ((1 << (_la - 128)) & ((1 << (SqlBaseParser.LOCALTIMESTAMP - 128)) | (1 << (SqlBaseParser.LOGICAL - 128)) | (1 << (SqlBaseParser.MAP - 128)) | (1 << (SqlBaseParser.MATCH - 128)) | (1 << (SqlBaseParser.MATCHED - 128)) | (1 << (SqlBaseParser.MATCHES - 128)) | (1 << (SqlBaseParser.MATCH_RECOGNIZE - 128)) | (1 << (SqlBaseParser.MATERIALIZED - 128)) | (1 << (SqlBaseParser.MEASURES - 128)) | (1 << (SqlBaseParser.MERGE - 128)) | (1 << (SqlBaseParser.MINUTE - 128)) | (1 << (SqlBaseParser.MONTH - 128)) | (1 << (SqlBaseParser.NEXT - 128)) | (1 << (SqlBaseParser.NFC - 128)) | (1 << (SqlBaseParser.NFD - 128)) | (1 << (SqlBaseParser.NFKC - 128)) | (1 << (SqlBaseParser.NFKD - 128)) | (1 << (SqlBaseParser.NO - 128)) | (1 << (SqlBaseParser.NONE - 128)) | (1 << (SqlBaseParser.NORMALIZE - 128)) | (1 << (SqlBaseParser.NOT - 128)) | (1 << (SqlBaseParser.NULL - 128)) | (1 << (SqlBaseParser.NULLIF - 128)) | (1 << (SqlBaseParser.NULLS - 128)) | (1 << (SqlBaseParser.OFFSET - 128)) | (1 << (SqlBaseParser.OMIT - 128)) | (1 << (SqlBaseParser.OF - 128)) | (1 << (SqlBaseParser.ONE - 128)) | (1 << (SqlBaseParser.ONLY - 128)) | (1 << (SqlBaseParser.OPTION - 128)) | (1 << 
(SqlBaseParser.ORDINALITY - 128)) | (1 << (SqlBaseParser.OUTPUT - 128)) | (1 << (SqlBaseParser.OVER - 128)) | (1 << (SqlBaseParser.OVERFLOW - 128)) | (1 << (SqlBaseParser.PARTITION - 128)) | (1 << (SqlBaseParser.PARTITIONS - 128)) | (1 << (SqlBaseParser.PAST - 128)) | (1 << (SqlBaseParser.PATH - 128)) | (1 << (SqlBaseParser.PATTERN - 128)) | (1 << (SqlBaseParser.PER - 128)) | (1 << (SqlBaseParser.PERMUTE - 128)) | (1 << (SqlBaseParser.POSITION - 128)) | (1 << (SqlBaseParser.PRECEDING - 128)) | (1 << (SqlBaseParser.PRECISION - 128)) | (1 << (SqlBaseParser.PRIVILEGES - 128)) | (1 << (SqlBaseParser.PROPERTIES - 128)) | (1 << (SqlBaseParser.RANGE - 128)) | (1 << (SqlBaseParser.READ - 128)) | (1 << (SqlBaseParser.REFRESH - 128)) | (1 << (SqlBaseParser.RENAME - 128)) | (1 << (SqlBaseParser.REPEATABLE - 128)) | (1 << (SqlBaseParser.REPLACE - 128)) | (1 << (SqlBaseParser.RESET - 128)) | (1 << (SqlBaseParser.RESPECT - 128)) | (1 << (SqlBaseParser.RESTRICT - 128)) | (1 << (SqlBaseParser.REVOKE - 128)))) != 0) or ((((_la - 192)) & ~0x3f) == 0 and ((1 << (_la - 192)) & ((1 << (SqlBaseParser.ROLE - 192)) | (1 << (SqlBaseParser.ROLES - 192)) | (1 << (SqlBaseParser.ROLLBACK - 192)) | (1 << (SqlBaseParser.ROW - 192)) | (1 << (SqlBaseParser.ROWS - 192)) | (1 << (SqlBaseParser.RUNNING - 192)) | (1 << (SqlBaseParser.SCHEMA - 192)) | (1 << (SqlBaseParser.SCHEMAS - 192)) | (1 << (SqlBaseParser.SECOND - 192)) | (1 << (SqlBaseParser.SECURITY - 192)) | (1 << (SqlBaseParser.SEEK - 192)) | (1 << (SqlBaseParser.SERIALIZABLE - 192)) | (1 << (SqlBaseParser.SESSION - 192)) | (1 << (SqlBaseParser.SET - 192)) | (1 << (SqlBaseParser.SETS - 192)) | (1 << (SqlBaseParser.SHOW - 192)) | (1 << (SqlBaseParser.SOME - 192)) | (1 << (SqlBaseParser.START - 192)) | (1 << (SqlBaseParser.STATS - 192)) | (1 << (SqlBaseParser.SUBSET - 192)) | (1 << (SqlBaseParser.SUBSTRING - 192)) | (1 << (SqlBaseParser.SYSTEM - 192)) | (1 << (SqlBaseParser.TABLES - 192)) | (1 << (SqlBaseParser.TABLESAMPLE - 192)) | (1 << 
(SqlBaseParser.TEXT - 192)) | (1 << (SqlBaseParser.TIES - 192)) | (1 << (SqlBaseParser.TIME - 192)) | (1 << (SqlBaseParser.TIMESTAMP - 192)) | (1 << (SqlBaseParser.TO - 192)) | (1 << (SqlBaseParser.TRANSACTION - 192)) | (1 << (SqlBaseParser.TRUE - 192)) | (1 << (SqlBaseParser.TRUNCATE - 192)) | (1 << (SqlBaseParser.TRY_CAST - 192)) | (1 << (SqlBaseParser.TYPE - 192)) | (1 << (SqlBaseParser.UNBOUNDED - 192)) | (1 << (SqlBaseParser.UNCOMMITTED - 192)) | (1 << (SqlBaseParser.UNMATCHED - 192)) | (1 << (SqlBaseParser.UPDATE - 192)) | (1 << (SqlBaseParser.USE - 192)) | (1 << (SqlBaseParser.USER - 192)) | (1 << (SqlBaseParser.VALIDATE - 192)) | (1 << (SqlBaseParser.VERBOSE - 192)) | (1 << (SqlBaseParser.VERSION - 192)) | (1 << (SqlBaseParser.VIEW - 192)) | (1 << (SqlBaseParser.WINDOW - 192)) | (1 << (SqlBaseParser.WITHIN - 192)) | (1 << (SqlBaseParser.WITHOUT - 192)) | (1 << (SqlBaseParser.WORK - 192)) | (1 << (SqlBaseParser.WRITE - 192)) | (1 << (SqlBaseParser.YEAR - 192)) | (1 << (SqlBaseParser.ZONE - 192)))) != 0) or ((((_la - 261)) & ~0x3f) == 0 and ((1 << (_la - 261)) & ((1 << (SqlBaseParser.PLUS - 261)) | (1 << (SqlBaseParser.MINUS - 261)) | (1 << (SqlBaseParser.QUESTION_MARK - 261)) | (1 << (SqlBaseParser.STRING - 261)) | (1 << (SqlBaseParser.UNICODE_STRING - 261)) | (1 << (SqlBaseParser.BINARY_LITERAL - 261)) | (1 << (SqlBaseParser.INTEGER_VALUE - 261)) | (1 << (SqlBaseParser.DECIMAL_VALUE - 261)) | (1 << (SqlBaseParser.DOUBLE_VALUE - 261)) | (1 << (SqlBaseParser.IDENTIFIER - 261)) | (1 << (SqlBaseParser.DIGIT_IDENTIFIER - 261)) | (1 << (SqlBaseParser.QUOTED_IDENTIFIER - 261)) | (1 << (SqlBaseParser.BACKQUOTED_IDENTIFIER - 261)))) != 0):
                    self.state = 1173
                    self.expression()
                    self.state = 1178
                    self._errHandler.sync(self)
                    _la = self._input.LA(1)
                    # Consume ',' expression repetitions inside CUBE(...).
                    while _la==SqlBaseParser.T__3:
                        self.state = 1174
                        self.match(SqlBaseParser.T__3)
                        self.state = 1175
                        self.expression()
                        self.state = 1180
                        self._errHandler.sync(self)
                        _la = self._input.LA(1)
                self.state = 1183
                self.match(SqlBaseParser.T__2)
            elif la_ == 4:
                # Alternative 4: GROUPING SETS '(' groupingSet (',' groupingSet)* ')'.
                localctx = SqlBaseParser.MultipleGroupingSetsContext(self, localctx)
                self.enterOuterAlt(localctx, 4)
                self.state = 1184
                self.match(SqlBaseParser.GROUPING)
                self.state = 1185
                self.match(SqlBaseParser.SETS)
                self.state = 1186
                self.match(SqlBaseParser.T__1)
                self.state = 1187
                self.groupingSet()
                self.state = 1192
                self._errHandler.sync(self)
                _la = self._input.LA(1)
                # Consume ',' groupingSet repetitions.
                while _la==SqlBaseParser.T__3:
                    self.state = 1188
                    self.match(SqlBaseParser.T__3)
                    self.state = 1189
                    self.groupingSet()
                    self.state = 1194
                    self._errHandler.sync(self)
                    _la = self._input.LA(1)
                self.state = 1195
                self.match(SqlBaseParser.T__2)
        except RecognitionException as re:
            # Standard ANTLR recovery: record the error on the context, report, resync.
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
class GroupingSetContext(ParserRuleContext):
__slots__ = 'parser'
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def expression(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(SqlBaseParser.ExpressionContext)
else:
return self.getTypedRuleContext(SqlBaseParser.ExpressionContext,i)
def getRuleIndex(self):
return SqlBaseParser.RULE_groupingSet
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterGroupingSet" ):
listener.enterGroupingSet(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitGroupingSet" ):
listener.exitGroupingSet(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitGroupingSet" ):
return visitor.visitGroupingSet(self)
else:
return visitor.visitChildren(self)
    def groupingSet(self):
        # groupingSet : '(' (expression (',' expression)*)? ')' | expression
        # ANTLR-generated: 'self.state' constants index the serialized ATN; the
        # bitmask test encodes the FIRST set of 'expression' (tokens split into
        # 64-wide words). Do not edit by hand.
        localctx = SqlBaseParser.GroupingSetContext(self, self._ctx, self.state)
        self.enterRule(localctx, 46, self.RULE_groupingSet)
        self._la = 0 # Token type
        try:
            self.state = 1212
            self._errHandler.sync(self)
            # Decision 149: parenthesized expression list vs. bare expression.
            la_ = self._interp.adaptivePredict(self._input,149,self._ctx)
            if la_ == 1:
                self.enterOuterAlt(localctx, 1)
                self.state = 1199
                self.match(SqlBaseParser.T__1)
                self.state = 1208
                self._errHandler.sync(self)
                _la = self._input.LA(1)
                # Membership test: does the lookahead token start an 'expression'?
                if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << SqlBaseParser.T__1) | (1 << SqlBaseParser.ADD) | (1 << SqlBaseParser.ADMIN) | (1 << SqlBaseParser.AFTER) | (1 << SqlBaseParser.ALL) | (1 << SqlBaseParser.ANALYZE) | (1 << SqlBaseParser.ANY) | (1 << SqlBaseParser.ARRAY) | (1 << SqlBaseParser.ASC) | (1 << SqlBaseParser.AT) | (1 << SqlBaseParser.AUTHORIZATION) | (1 << SqlBaseParser.BERNOULLI) | (1 << SqlBaseParser.CALL) | (1 << SqlBaseParser.CASCADE) | (1 << SqlBaseParser.CASE) | (1 << SqlBaseParser.CAST) | (1 << SqlBaseParser.CATALOGS) | (1 << SqlBaseParser.COLUMN) | (1 << SqlBaseParser.COLUMNS) | (1 << SqlBaseParser.COMMENT) | (1 << SqlBaseParser.COMMIT) | (1 << SqlBaseParser.COMMITTED) | (1 << SqlBaseParser.COUNT) | (1 << SqlBaseParser.CURRENT) | (1 << SqlBaseParser.CURRENT_CATALOG) | (1 << SqlBaseParser.CURRENT_DATE) | (1 << SqlBaseParser.CURRENT_PATH) | (1 << SqlBaseParser.CURRENT_SCHEMA) | (1 << SqlBaseParser.CURRENT_TIME) | (1 << SqlBaseParser.CURRENT_TIMESTAMP) | (1 << SqlBaseParser.CURRENT_USER) | (1 << SqlBaseParser.DATA) | (1 << SqlBaseParser.DATE) | (1 << SqlBaseParser.DAY) | (1 << SqlBaseParser.DEFINER) | (1 << SqlBaseParser.DENY))) != 0) or ((((_la - 64)) & ~0x3f) == 0 and ((1 << (_la - 64)) & ((1 << (SqlBaseParser.DESC - 64)) | (1 << (SqlBaseParser.DEFINE - 64)) | (1 << (SqlBaseParser.DISTRIBUTED - 64)) | (1 << (SqlBaseParser.DOUBLE - 64)) | (1 << (SqlBaseParser.EMPTY - 64)) | (1 << (SqlBaseParser.ERROR - 64)) | (1 << (SqlBaseParser.EXCLUDING - 64)) | (1 << (SqlBaseParser.EXISTS - 64)) | (1 << (SqlBaseParser.EXPLAIN - 64)) | (1 << (SqlBaseParser.EXTRACT - 64)) | (1 << (SqlBaseParser.FALSE - 64)) | (1 << (SqlBaseParser.FETCH - 64)) | (1 << (SqlBaseParser.FILTER - 64)) | (1 << (SqlBaseParser.FINAL - 64)) | (1 << (SqlBaseParser.FIRST - 64)) | (1 << (SqlBaseParser.FOLLOWING - 64)) | (1 << (SqlBaseParser.FORMAT - 64)) | (1 << (SqlBaseParser.FUNCTIONS - 64)) | (1 << (SqlBaseParser.GRANT - 64)) | (1 << (SqlBaseParser.GRANTED - 64)) | (1 << (SqlBaseParser.GRANTS 
- 64)) | (1 << (SqlBaseParser.GRAPHVIZ - 64)) | (1 << (SqlBaseParser.GROUPING - 64)) | (1 << (SqlBaseParser.GROUPS - 64)) | (1 << (SqlBaseParser.HOUR - 64)) | (1 << (SqlBaseParser.IF - 64)) | (1 << (SqlBaseParser.IGNORE - 64)) | (1 << (SqlBaseParser.INCLUDING - 64)) | (1 << (SqlBaseParser.INITIAL - 64)) | (1 << (SqlBaseParser.INPUT - 64)) | (1 << (SqlBaseParser.INTERVAL - 64)) | (1 << (SqlBaseParser.INVOKER - 64)) | (1 << (SqlBaseParser.IO - 64)) | (1 << (SqlBaseParser.ISOLATION - 64)) | (1 << (SqlBaseParser.JSON - 64)) | (1 << (SqlBaseParser.LAST - 64)) | (1 << (SqlBaseParser.LATERAL - 64)) | (1 << (SqlBaseParser.LEVEL - 64)) | (1 << (SqlBaseParser.LIMIT - 64)) | (1 << (SqlBaseParser.LISTAGG - 64)) | (1 << (SqlBaseParser.LOCAL - 64)) | (1 << (SqlBaseParser.LOCALTIME - 64)))) != 0) or ((((_la - 128)) & ~0x3f) == 0 and ((1 << (_la - 128)) & ((1 << (SqlBaseParser.LOCALTIMESTAMP - 128)) | (1 << (SqlBaseParser.LOGICAL - 128)) | (1 << (SqlBaseParser.MAP - 128)) | (1 << (SqlBaseParser.MATCH - 128)) | (1 << (SqlBaseParser.MATCHED - 128)) | (1 << (SqlBaseParser.MATCHES - 128)) | (1 << (SqlBaseParser.MATCH_RECOGNIZE - 128)) | (1 << (SqlBaseParser.MATERIALIZED - 128)) | (1 << (SqlBaseParser.MEASURES - 128)) | (1 << (SqlBaseParser.MERGE - 128)) | (1 << (SqlBaseParser.MINUTE - 128)) | (1 << (SqlBaseParser.MONTH - 128)) | (1 << (SqlBaseParser.NEXT - 128)) | (1 << (SqlBaseParser.NFC - 128)) | (1 << (SqlBaseParser.NFD - 128)) | (1 << (SqlBaseParser.NFKC - 128)) | (1 << (SqlBaseParser.NFKD - 128)) | (1 << (SqlBaseParser.NO - 128)) | (1 << (SqlBaseParser.NONE - 128)) | (1 << (SqlBaseParser.NORMALIZE - 128)) | (1 << (SqlBaseParser.NOT - 128)) | (1 << (SqlBaseParser.NULL - 128)) | (1 << (SqlBaseParser.NULLIF - 128)) | (1 << (SqlBaseParser.NULLS - 128)) | (1 << (SqlBaseParser.OFFSET - 128)) | (1 << (SqlBaseParser.OMIT - 128)) | (1 << (SqlBaseParser.OF - 128)) | (1 << (SqlBaseParser.ONE - 128)) | (1 << (SqlBaseParser.ONLY - 128)) | (1 << (SqlBaseParser.OPTION - 128)) | (1 << 
(SqlBaseParser.ORDINALITY - 128)) | (1 << (SqlBaseParser.OUTPUT - 128)) | (1 << (SqlBaseParser.OVER - 128)) | (1 << (SqlBaseParser.OVERFLOW - 128)) | (1 << (SqlBaseParser.PARTITION - 128)) | (1 << (SqlBaseParser.PARTITIONS - 128)) | (1 << (SqlBaseParser.PAST - 128)) | (1 << (SqlBaseParser.PATH - 128)) | (1 << (SqlBaseParser.PATTERN - 128)) | (1 << (SqlBaseParser.PER - 128)) | (1 << (SqlBaseParser.PERMUTE - 128)) | (1 << (SqlBaseParser.POSITION - 128)) | (1 << (SqlBaseParser.PRECEDING - 128)) | (1 << (SqlBaseParser.PRECISION - 128)) | (1 << (SqlBaseParser.PRIVILEGES - 128)) | (1 << (SqlBaseParser.PROPERTIES - 128)) | (1 << (SqlBaseParser.RANGE - 128)) | (1 << (SqlBaseParser.READ - 128)) | (1 << (SqlBaseParser.REFRESH - 128)) | (1 << (SqlBaseParser.RENAME - 128)) | (1 << (SqlBaseParser.REPEATABLE - 128)) | (1 << (SqlBaseParser.REPLACE - 128)) | (1 << (SqlBaseParser.RESET - 128)) | (1 << (SqlBaseParser.RESPECT - 128)) | (1 << (SqlBaseParser.RESTRICT - 128)) | (1 << (SqlBaseParser.REVOKE - 128)))) != 0) or ((((_la - 192)) & ~0x3f) == 0 and ((1 << (_la - 192)) & ((1 << (SqlBaseParser.ROLE - 192)) | (1 << (SqlBaseParser.ROLES - 192)) | (1 << (SqlBaseParser.ROLLBACK - 192)) | (1 << (SqlBaseParser.ROW - 192)) | (1 << (SqlBaseParser.ROWS - 192)) | (1 << (SqlBaseParser.RUNNING - 192)) | (1 << (SqlBaseParser.SCHEMA - 192)) | (1 << (SqlBaseParser.SCHEMAS - 192)) | (1 << (SqlBaseParser.SECOND - 192)) | (1 << (SqlBaseParser.SECURITY - 192)) | (1 << (SqlBaseParser.SEEK - 192)) | (1 << (SqlBaseParser.SERIALIZABLE - 192)) | (1 << (SqlBaseParser.SESSION - 192)) | (1 << (SqlBaseParser.SET - 192)) | (1 << (SqlBaseParser.SETS - 192)) | (1 << (SqlBaseParser.SHOW - 192)) | (1 << (SqlBaseParser.SOME - 192)) | (1 << (SqlBaseParser.START - 192)) | (1 << (SqlBaseParser.STATS - 192)) | (1 << (SqlBaseParser.SUBSET - 192)) | (1 << (SqlBaseParser.SUBSTRING - 192)) | (1 << (SqlBaseParser.SYSTEM - 192)) | (1 << (SqlBaseParser.TABLES - 192)) | (1 << (SqlBaseParser.TABLESAMPLE - 192)) | (1 << 
(SqlBaseParser.TEXT - 192)) | (1 << (SqlBaseParser.TIES - 192)) | (1 << (SqlBaseParser.TIME - 192)) | (1 << (SqlBaseParser.TIMESTAMP - 192)) | (1 << (SqlBaseParser.TO - 192)) | (1 << (SqlBaseParser.TRANSACTION - 192)) | (1 << (SqlBaseParser.TRUE - 192)) | (1 << (SqlBaseParser.TRUNCATE - 192)) | (1 << (SqlBaseParser.TRY_CAST - 192)) | (1 << (SqlBaseParser.TYPE - 192)) | (1 << (SqlBaseParser.UNBOUNDED - 192)) | (1 << (SqlBaseParser.UNCOMMITTED - 192)) | (1 << (SqlBaseParser.UNMATCHED - 192)) | (1 << (SqlBaseParser.UPDATE - 192)) | (1 << (SqlBaseParser.USE - 192)) | (1 << (SqlBaseParser.USER - 192)) | (1 << (SqlBaseParser.VALIDATE - 192)) | (1 << (SqlBaseParser.VERBOSE - 192)) | (1 << (SqlBaseParser.VERSION - 192)) | (1 << (SqlBaseParser.VIEW - 192)) | (1 << (SqlBaseParser.WINDOW - 192)) | (1 << (SqlBaseParser.WITHIN - 192)) | (1 << (SqlBaseParser.WITHOUT - 192)) | (1 << (SqlBaseParser.WORK - 192)) | (1 << (SqlBaseParser.WRITE - 192)) | (1 << (SqlBaseParser.YEAR - 192)) | (1 << (SqlBaseParser.ZONE - 192)))) != 0) or ((((_la - 261)) & ~0x3f) == 0 and ((1 << (_la - 261)) & ((1 << (SqlBaseParser.PLUS - 261)) | (1 << (SqlBaseParser.MINUS - 261)) | (1 << (SqlBaseParser.QUESTION_MARK - 261)) | (1 << (SqlBaseParser.STRING - 261)) | (1 << (SqlBaseParser.UNICODE_STRING - 261)) | (1 << (SqlBaseParser.BINARY_LITERAL - 261)) | (1 << (SqlBaseParser.INTEGER_VALUE - 261)) | (1 << (SqlBaseParser.DECIMAL_VALUE - 261)) | (1 << (SqlBaseParser.DOUBLE_VALUE - 261)) | (1 << (SqlBaseParser.IDENTIFIER - 261)) | (1 << (SqlBaseParser.DIGIT_IDENTIFIER - 261)) | (1 << (SqlBaseParser.QUOTED_IDENTIFIER - 261)) | (1 << (SqlBaseParser.BACKQUOTED_IDENTIFIER - 261)))) != 0):
                    self.state = 1200
                    self.expression()
                    self.state = 1205
                    self._errHandler.sync(self)
                    _la = self._input.LA(1)
                    # Consume ',' expression repetitions inside the parentheses.
                    while _la==SqlBaseParser.T__3:
                        self.state = 1201
                        self.match(SqlBaseParser.T__3)
                        self.state = 1202
                        self.expression()
                        self.state = 1207
                        self._errHandler.sync(self)
                        _la = self._input.LA(1)
                self.state = 1210
                self.match(SqlBaseParser.T__2)
            elif la_ == 2:
                # Alternative 2: a single, unparenthesized expression.
                self.enterOuterAlt(localctx, 2)
                self.state = 1211
                self.expression()
        except RecognitionException as re:
            # Standard ANTLR recovery: record the error on the context, report, resync.
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
class WindowDefinitionContext(ParserRuleContext):
__slots__ = 'parser'
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
self.name = None # IdentifierContext
def AS(self):
return self.getToken(SqlBaseParser.AS, 0)
def windowSpecification(self):
return self.getTypedRuleContext(SqlBaseParser.WindowSpecificationContext,0)
def identifier(self):
return self.getTypedRuleContext(SqlBaseParser.IdentifierContext,0)
def getRuleIndex(self):
return SqlBaseParser.RULE_windowDefinition
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterWindowDefinition" ):
listener.enterWindowDefinition(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitWindowDefinition" ):
listener.exitWindowDefinition(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitWindowDefinition" ):
return visitor.visitWindowDefinition(self)
else:
return visitor.visitChildren(self)
    def windowDefinition(self):
        # windowDefinition : name=identifier AS '(' windowSpecification ')'
        # ANTLR-generated rule method; 'self.state' values index the serialized ATN.
        localctx = SqlBaseParser.WindowDefinitionContext(self, self._ctx, self.state)
        self.enterRule(localctx, 48, self.RULE_windowDefinition)
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 1214
            # Window name, captured into the 'name' label on the context.
            localctx.name = self.identifier()
            self.state = 1215
            self.match(SqlBaseParser.AS)
            self.state = 1216
            self.match(SqlBaseParser.T__1)
            self.state = 1217
            self.windowSpecification()
            self.state = 1218
            self.match(SqlBaseParser.T__2)
        except RecognitionException as re:
            # Standard ANTLR recovery: record the error on the context, report, resync.
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
class WindowSpecificationContext(ParserRuleContext):
    """Parse-tree context for the ``windowSpecification`` rule.

    Holds an optional existing window name, the PARTITION BY expression list,
    ORDER BY sort items, and an optional window frame. Generated by ANTLR.
    """
    __slots__ = 'parser'

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser
        self.existingWindowName = None # IdentifierContext
        self._expression = None # ExpressionContext
        self.partition = list() # of ExpressionContexts

    def PARTITION(self):
        return self.getToken(SqlBaseParser.PARTITION, 0)

    def BY(self, i:int=None):
        # BY appears after both PARTITION and ORDER, hence the indexed accessor.
        if i is None:
            return self.getTokens(SqlBaseParser.BY)
        else:
            return self.getToken(SqlBaseParser.BY, i)

    def ORDER(self):
        return self.getToken(SqlBaseParser.ORDER, 0)

    def sortItem(self, i:int=None):
        if i is None:
            return self.getTypedRuleContexts(SqlBaseParser.SortItemContext)
        else:
            return self.getTypedRuleContext(SqlBaseParser.SortItemContext,i)

    def windowFrame(self):
        return self.getTypedRuleContext(SqlBaseParser.WindowFrameContext,0)

    def identifier(self):
        return self.getTypedRuleContext(SqlBaseParser.IdentifierContext,0)

    def expression(self, i:int=None):
        if i is None:
            return self.getTypedRuleContexts(SqlBaseParser.ExpressionContext)
        else:
            return self.getTypedRuleContext(SqlBaseParser.ExpressionContext,i)

    def getRuleIndex(self):
        return SqlBaseParser.RULE_windowSpecification

    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterWindowSpecification" ):
            listener.enterWindowSpecification(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitWindowSpecification" ):
            listener.exitWindowSpecification(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitWindowSpecification" ):
            return visitor.visitWindowSpecification(self)
        else:
            return visitor.visitChildren(self)
def windowSpecification(self):
    """Parse the ``windowSpecification`` rule.

    Grammar shape (all parts optional): existing window name, then
    PARTITION BY expression (',' expression)*, then ORDER BY sortItem
    (',' sortItem)*, then a window frame. Generated by ANTLR.
    """
    localctx = SqlBaseParser.WindowSpecificationContext(self, self._ctx, self.state)
    self.enterRule(localctx, 50, self.RULE_windowSpecification)
    self._la = 0 # Token type
    try:
        self.enterOuterAlt(localctx, 1)
        # Optional existing window name, disambiguated adaptively.
        self.state = 1221
        self._errHandler.sync(self)
        la_ = self._interp.adaptivePredict(self._input,150,self._ctx)
        if la_ == 1:
            self.state = 1220
            localctx.existingWindowName = self.identifier()

        # Optional PARTITION BY expression list.
        self.state = 1233
        self._errHandler.sync(self)
        _la = self._input.LA(1)
        if _la==SqlBaseParser.PARTITION:
            self.state = 1223
            self.match(SqlBaseParser.PARTITION)
            self.state = 1224
            self.match(SqlBaseParser.BY)
            self.state = 1225
            localctx._expression = self.expression()
            localctx.partition.append(localctx._expression)
            self.state = 1230
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            while _la==SqlBaseParser.T__3:
                self.state = 1226
                self.match(SqlBaseParser.T__3)
                self.state = 1227
                localctx._expression = self.expression()
                localctx.partition.append(localctx._expression)
                self.state = 1232
                self._errHandler.sync(self)
                _la = self._input.LA(1)

        # Optional ORDER BY sort-item list.
        self.state = 1245
        self._errHandler.sync(self)
        _la = self._input.LA(1)
        if _la==SqlBaseParser.ORDER:
            self.state = 1235
            self.match(SqlBaseParser.ORDER)
            self.state = 1236
            self.match(SqlBaseParser.BY)
            self.state = 1237
            self.sortItem()
            self.state = 1242
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            while _la==SqlBaseParser.T__3:
                self.state = 1238
                self.match(SqlBaseParser.T__3)
                self.state = 1239
                self.sortItem()
                self.state = 1244
                self._errHandler.sync(self)
                _la = self._input.LA(1)

        # Optional window frame (GROUPS / MEASURES / RANGE / ROWS).
        self.state = 1248
        self._errHandler.sync(self)
        _la = self._input.LA(1)
        if _la==SqlBaseParser.GROUPS or _la==SqlBaseParser.MEASURES or _la==SqlBaseParser.RANGE or _la==SqlBaseParser.ROWS:
            self.state = 1247
            self.windowFrame()

    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class NamedQueryContext(ParserRuleContext):
    """Parse-tree context for the ``namedQuery`` rule (a WITH-clause entry):
    name columnAliases? AS '(' query ')'. Generated by ANTLR.
    """
    __slots__ = 'parser'

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser
        self.name = None # IdentifierContext

    def AS(self):
        return self.getToken(SqlBaseParser.AS, 0)

    def query(self):
        return self.getTypedRuleContext(SqlBaseParser.QueryContext,0)

    def identifier(self):
        return self.getTypedRuleContext(SqlBaseParser.IdentifierContext,0)

    def columnAliases(self):
        # Present only when the named query declares explicit column aliases.
        return self.getTypedRuleContext(SqlBaseParser.ColumnAliasesContext,0)

    def getRuleIndex(self):
        return SqlBaseParser.RULE_namedQuery

    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterNamedQuery" ):
            listener.enterNamedQuery(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitNamedQuery" ):
            listener.exitNamedQuery(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitNamedQuery" ):
            return visitor.visitNamedQuery(self)
        else:
            return visitor.visitChildren(self)
def namedQuery(self):
    """Parse the ``namedQuery`` rule: identifier columnAliases? AS '(' query ')'.

    Generated by ANTLR; state numbers refer to the serialized ATN.
    """
    localctx = SqlBaseParser.NamedQueryContext(self, self._ctx, self.state)
    self.enterRule(localctx, 52, self.RULE_namedQuery)
    self._la = 0 # Token type
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 1250
        localctx.name = self.identifier()
        # Optional '(' column alias list ')'.
        self.state = 1252
        self._errHandler.sync(self)
        _la = self._input.LA(1)
        if _la==SqlBaseParser.T__1:
            self.state = 1251
            self.columnAliases()

        self.state = 1254
        self.match(SqlBaseParser.AS)
        self.state = 1255
        self.match(SqlBaseParser.T__1)
        self.state = 1256
        self.query()
        self.state = 1257
        self.match(SqlBaseParser.T__2)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class SetQuantifierContext(ParserRuleContext):
    """Parse-tree context for the ``setQuantifier`` rule: DISTINCT | ALL.

    Generated by ANTLR — do not edit by hand.
    """
    __slots__ = 'parser'

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def DISTINCT(self):
        return self.getToken(SqlBaseParser.DISTINCT, 0)

    def ALL(self):
        return self.getToken(SqlBaseParser.ALL, 0)

    def getRuleIndex(self):
        return SqlBaseParser.RULE_setQuantifier

    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterSetQuantifier" ):
            listener.enterSetQuantifier(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitSetQuantifier" ):
            listener.exitSetQuantifier(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitSetQuantifier" ):
            return visitor.visitSetQuantifier(self)
        else:
            return visitor.visitChildren(self)
def setQuantifier(self):
    """Parse the ``setQuantifier`` rule: a single ALL or DISTINCT token."""
    localctx = SqlBaseParser.SetQuantifierContext(self, self._ctx, self.state)
    self.enterRule(localctx, 54, self.RULE_setQuantifier)
    self._la = 0 # Token type
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 1259
        _la = self._input.LA(1)
        # Token-set match: accept exactly ALL or DISTINCT, else recover inline.
        if not(_la==SqlBaseParser.ALL or _la==SqlBaseParser.DISTINCT):
            self._errHandler.recoverInline(self)
        else:
            self._errHandler.reportMatch(self)
            self.consume()
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class SelectItemContext(ParserRuleContext):
    """Parse-tree context for the ``selectItem`` rule.

    Labeled alternatives are modeled as the nested subclasses
    ``SelectSingleContext`` (expression with optional alias) and
    ``SelectAllContext`` (qualified or bare '*'). Generated by ANTLR.
    """
    __slots__ = 'parser'

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def getRuleIndex(self):
        return SqlBaseParser.RULE_selectItem

    def copyFrom(self, ctx:ParserRuleContext):
        super().copyFrom(ctx)


class SelectAllContext(SelectItemContext):
    """Alternative ``#selectAll``: primaryExpression '.' '*' (AS columnAliases)? or a bare '*'."""

    def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.SelectItemContext
        super().__init__(parser)
        self.copyFrom(ctx)

    def primaryExpression(self):
        return self.getTypedRuleContext(SqlBaseParser.PrimaryExpressionContext,0)

    def ASTERISK(self):
        return self.getToken(SqlBaseParser.ASTERISK, 0)

    def AS(self):
        return self.getToken(SqlBaseParser.AS, 0)

    def columnAliases(self):
        return self.getTypedRuleContext(SqlBaseParser.ColumnAliasesContext,0)

    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterSelectAll" ):
            listener.enterSelectAll(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitSelectAll" ):
            listener.exitSelectAll(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitSelectAll" ):
            return visitor.visitSelectAll(self)
        else:
            return visitor.visitChildren(self)


class SelectSingleContext(SelectItemContext):
    """Alternative ``#selectSingle``: expression (AS? identifier)?."""

    def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.SelectItemContext
        super().__init__(parser)
        self.copyFrom(ctx)

    def expression(self):
        return self.getTypedRuleContext(SqlBaseParser.ExpressionContext,0)

    def identifier(self):
        return self.getTypedRuleContext(SqlBaseParser.IdentifierContext,0)

    def AS(self):
        return self.getToken(SqlBaseParser.AS, 0)

    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterSelectSingle" ):
            listener.enterSelectSingle(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitSelectSingle" ):
            listener.exitSelectSingle(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitSelectSingle" ):
            return visitor.visitSelectSingle(self)
        else:
            return visitor.visitChildren(self)
def selectItem(self):
    """Parse the ``selectItem`` rule.

    Three alternatives, chosen adaptively:
      1. expression (AS? identifier)?          -> SelectSingleContext
      2. primaryExpression '.' '*' (AS columnAliases)? -> SelectAllContext
      3. '*'                                   -> SelectAllContext
    Generated by ANTLR.
    """
    localctx = SqlBaseParser.SelectItemContext(self, self._ctx, self.state)
    self.enterRule(localctx, 56, self.RULE_selectItem)
    self._la = 0 # Token type
    try:
        self.state = 1276
        self._errHandler.sync(self)
        la_ = self._interp.adaptivePredict(self._input,160,self._ctx)
        if la_ == 1:
            localctx = SqlBaseParser.SelectSingleContext(self, localctx)
            self.enterOuterAlt(localctx, 1)
            self.state = 1261
            self.expression()
            # Optional alias, itself with an optional AS keyword.
            self.state = 1266
            self._errHandler.sync(self)
            la_ = self._interp.adaptivePredict(self._input,158,self._ctx)
            if la_ == 1:
                self.state = 1263
                self._errHandler.sync(self)
                _la = self._input.LA(1)
                if _la==SqlBaseParser.AS:
                    self.state = 1262
                    self.match(SqlBaseParser.AS)

                self.state = 1265
                self.identifier()

        elif la_ == 2:
            localctx = SqlBaseParser.SelectAllContext(self, localctx)
            self.enterOuterAlt(localctx, 2)
            self.state = 1268
            self.primaryExpression(0)
            self.state = 1269
            self.match(SqlBaseParser.T__0)
            self.state = 1270
            self.match(SqlBaseParser.ASTERISK)
            # Optional AS columnAliases after the qualified '*'.
            self.state = 1273
            self._errHandler.sync(self)
            la_ = self._interp.adaptivePredict(self._input,159,self._ctx)
            if la_ == 1:
                self.state = 1271
                self.match(SqlBaseParser.AS)
                self.state = 1272
                self.columnAliases()

        elif la_ == 3:
            localctx = SqlBaseParser.SelectAllContext(self, localctx)
            self.enterOuterAlt(localctx, 3)
            self.state = 1275
            self.match(SqlBaseParser.ASTERISK)

    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class RelationContext(ParserRuleContext):
    """Parse-tree context for the left-recursive ``relation`` rule.

    Alternatives are the nested subclasses ``RelationDefaultContext`` (a plain
    sampled relation) and ``JoinRelationContext`` (CROSS / typed / NATURAL
    joins). Generated by ANTLR.
    """
    __slots__ = 'parser'

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def getRuleIndex(self):
        return SqlBaseParser.RULE_relation

    def copyFrom(self, ctx:ParserRuleContext):
        super().copyFrom(ctx)


class RelationDefaultContext(RelationContext):
    """Alternative ``#relationDefault``: a single sampledRelation."""

    def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.RelationContext
        super().__init__(parser)
        self.copyFrom(ctx)

    def sampledRelation(self):
        return self.getTypedRuleContext(SqlBaseParser.SampledRelationContext,0)

    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterRelationDefault" ):
            listener.enterRelationDefault(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitRelationDefault" ):
            listener.exitRelationDefault(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitRelationDefault" ):
            return visitor.visitRelationDefault(self)
        else:
            return visitor.visitChildren(self)


class JoinRelationContext(RelationContext):
    """Alternative ``#joinRelation``: left relation joined to a right relation.

    ``right`` is set for CROSS/NATURAL joins; ``rightRelation`` for typed
    joins that carry a joinCriteria.
    """

    def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.RelationContext
        super().__init__(parser)
        self.left = None # RelationContext
        self.right = None # SampledRelationContext
        self.rightRelation = None # RelationContext
        self.copyFrom(ctx)

    def relation(self, i:int=None):
        if i is None:
            return self.getTypedRuleContexts(SqlBaseParser.RelationContext)
        else:
            return self.getTypedRuleContext(SqlBaseParser.RelationContext,i)

    def CROSS(self):
        return self.getToken(SqlBaseParser.CROSS, 0)

    def JOIN(self):
        return self.getToken(SqlBaseParser.JOIN, 0)

    def joinType(self):
        return self.getTypedRuleContext(SqlBaseParser.JoinTypeContext,0)

    def joinCriteria(self):
        return self.getTypedRuleContext(SqlBaseParser.JoinCriteriaContext,0)

    def NATURAL(self):
        return self.getToken(SqlBaseParser.NATURAL, 0)

    def sampledRelation(self):
        return self.getTypedRuleContext(SqlBaseParser.SampledRelationContext,0)

    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterJoinRelation" ):
            listener.enterJoinRelation(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitJoinRelation" ):
            listener.exitJoinRelation(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitJoinRelation" ):
            return visitor.visitJoinRelation(self)
        else:
            return visitor.visitChildren(self)
def relation(self, _p:int=0):
    """Parse the left-recursive ``relation`` rule.

    Starts with a sampledRelation, then loops while lookahead predicts a join
    suffix: CROSS JOIN, a typed join with criteria, or NATURAL join. ``_p`` is
    the precedence threshold used by the generated left-recursion machinery.
    Generated by ANTLR.
    """
    _parentctx = self._ctx
    _parentState = self.state
    localctx = SqlBaseParser.RelationContext(self, self._ctx, _parentState)
    _prevctx = localctx
    _startState = 58
    self.enterRecursionRule(localctx, 58, self.RULE_relation, _p)
    try:
        self.enterOuterAlt(localctx, 1)
        localctx = SqlBaseParser.RelationDefaultContext(self, localctx)
        self._ctx = localctx
        _prevctx = localctx

        self.state = 1279
        self.sampledRelation()
        self._ctx.stop = self._input.LT(-1)
        # Left-recursion loop: keep consuming join suffixes while predicted.
        self.state = 1299
        self._errHandler.sync(self)
        _alt = self._interp.adaptivePredict(self._input,162,self._ctx)
        while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
            if _alt==1:
                if self._parseListeners is not None:
                    self.triggerExitRuleEvent()
                _prevctx = localctx
                localctx = SqlBaseParser.JoinRelationContext(self, SqlBaseParser.RelationContext(self, _parentctx, _parentState))
                localctx.left = _prevctx
                self.pushNewRecursionContext(localctx, _startState, self.RULE_relation)
                self.state = 1281
                if not self.precpred(self._ctx, 2):
                    from antlr4.error.Errors import FailedPredicateException
                    raise FailedPredicateException(self, "self.precpred(self._ctx, 2)")
                self.state = 1295
                self._errHandler.sync(self)
                token = self._input.LA(1)
                if token in [SqlBaseParser.CROSS]:
                    # CROSS JOIN right=sampledRelation
                    self.state = 1282
                    self.match(SqlBaseParser.CROSS)
                    self.state = 1283
                    self.match(SqlBaseParser.JOIN)
                    self.state = 1284
                    localctx.right = self.sampledRelation()
                elif token in [SqlBaseParser.FULL, SqlBaseParser.INNER, SqlBaseParser.JOIN, SqlBaseParser.LEFT, SqlBaseParser.RIGHT]:
                    # joinType JOIN rightRelation=relation joinCriteria
                    self.state = 1285
                    self.joinType()
                    self.state = 1286
                    self.match(SqlBaseParser.JOIN)
                    self.state = 1287
                    localctx.rightRelation = self.relation(0)
                    self.state = 1288
                    self.joinCriteria()
                elif token in [SqlBaseParser.NATURAL]:
                    # NATURAL joinType JOIN right=sampledRelation
                    self.state = 1290
                    self.match(SqlBaseParser.NATURAL)
                    self.state = 1291
                    self.joinType()
                    self.state = 1292
                    self.match(SqlBaseParser.JOIN)
                    self.state = 1293
                    localctx.right = self.sampledRelation()
                else:
                    raise NoViableAltException(self)

            self.state = 1301
            self._errHandler.sync(self)
            _alt = self._interp.adaptivePredict(self._input,162,self._ctx)

    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.unrollRecursionContexts(_parentctx)
    return localctx
class JoinTypeContext(ParserRuleContext):
    """Parse-tree context for the ``joinType`` rule: INNER? | LEFT/RIGHT/FULL OUTER?.

    Generated by ANTLR — do not edit by hand.
    """
    __slots__ = 'parser'

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def INNER(self):
        return self.getToken(SqlBaseParser.INNER, 0)

    def LEFT(self):
        return self.getToken(SqlBaseParser.LEFT, 0)

    def OUTER(self):
        return self.getToken(SqlBaseParser.OUTER, 0)

    def RIGHT(self):
        return self.getToken(SqlBaseParser.RIGHT, 0)

    def FULL(self):
        return self.getToken(SqlBaseParser.FULL, 0)

    def getRuleIndex(self):
        return SqlBaseParser.RULE_joinType

    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterJoinType" ):
            listener.enterJoinType(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitJoinType" ):
            listener.exitJoinType(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitJoinType" ):
            return visitor.visitJoinType(self)
        else:
            return visitor.visitChildren(self)
def joinType(self):
    """Parse the ``joinType`` rule.

    Alternatives: INNER? (when followed by JOIN), LEFT OUTER?, RIGHT OUTER?,
    FULL OUTER?. Generated by ANTLR.
    """
    localctx = SqlBaseParser.JoinTypeContext(self, self._ctx, self.state)
    self.enterRule(localctx, 60, self.RULE_joinType)
    self._la = 0 # Token type
    try:
        self.state = 1317
        self._errHandler.sync(self)
        token = self._input.LA(1)
        if token in [SqlBaseParser.INNER, SqlBaseParser.JOIN]:
            self.enterOuterAlt(localctx, 1)
            # Optional INNER; a bare JOIN means inner join.
            self.state = 1303
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            if _la==SqlBaseParser.INNER:
                self.state = 1302
                self.match(SqlBaseParser.INNER)

        elif token in [SqlBaseParser.LEFT]:
            self.enterOuterAlt(localctx, 2)
            self.state = 1305
            self.match(SqlBaseParser.LEFT)
            self.state = 1307
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            if _la==SqlBaseParser.OUTER:
                self.state = 1306
                self.match(SqlBaseParser.OUTER)

        elif token in [SqlBaseParser.RIGHT]:
            self.enterOuterAlt(localctx, 3)
            self.state = 1309
            self.match(SqlBaseParser.RIGHT)
            self.state = 1311
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            if _la==SqlBaseParser.OUTER:
                self.state = 1310
                self.match(SqlBaseParser.OUTER)

        elif token in [SqlBaseParser.FULL]:
            self.enterOuterAlt(localctx, 4)
            self.state = 1313
            self.match(SqlBaseParser.FULL)
            self.state = 1315
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            if _la==SqlBaseParser.OUTER:
                self.state = 1314
                self.match(SqlBaseParser.OUTER)

        else:
            raise NoViableAltException(self)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class JoinCriteriaContext(ParserRuleContext):
    """Parse-tree context for the ``joinCriteria`` rule:
    ON booleanExpression | USING '(' identifier (',' identifier)* ')'.
    Generated by ANTLR.
    """
    __slots__ = 'parser'

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def ON(self):
        return self.getToken(SqlBaseParser.ON, 0)

    def booleanExpression(self):
        return self.getTypedRuleContext(SqlBaseParser.BooleanExpressionContext,0)

    def USING(self):
        return self.getToken(SqlBaseParser.USING, 0)

    def identifier(self, i:int=None):
        if i is None:
            return self.getTypedRuleContexts(SqlBaseParser.IdentifierContext)
        else:
            return self.getTypedRuleContext(SqlBaseParser.IdentifierContext,i)

    def getRuleIndex(self):
        return SqlBaseParser.RULE_joinCriteria

    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterJoinCriteria" ):
            listener.enterJoinCriteria(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitJoinCriteria" ):
            listener.exitJoinCriteria(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitJoinCriteria" ):
            return visitor.visitJoinCriteria(self)
        else:
            return visitor.visitChildren(self)
def joinCriteria(self):
    """Parse the ``joinCriteria`` rule.

    Either ``ON booleanExpression`` or ``USING '(' identifier (',' identifier)* ')'``.
    Generated by ANTLR.
    """
    localctx = SqlBaseParser.JoinCriteriaContext(self, self._ctx, self.state)
    self.enterRule(localctx, 62, self.RULE_joinCriteria)
    self._la = 0 # Token type
    try:
        self.state = 1333
        self._errHandler.sync(self)
        token = self._input.LA(1)
        if token in [SqlBaseParser.ON]:
            self.enterOuterAlt(localctx, 1)
            self.state = 1319
            self.match(SqlBaseParser.ON)
            self.state = 1320
            self.booleanExpression(0)
        elif token in [SqlBaseParser.USING]:
            self.enterOuterAlt(localctx, 2)
            self.state = 1321
            self.match(SqlBaseParser.USING)
            self.state = 1322
            self.match(SqlBaseParser.T__1)
            self.state = 1323
            self.identifier()
            # Comma-separated identifier list.
            self.state = 1328
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            while _la==SqlBaseParser.T__3:
                self.state = 1324
                self.match(SqlBaseParser.T__3)
                self.state = 1325
                self.identifier()
                self.state = 1330
                self._errHandler.sync(self)
                _la = self._input.LA(1)

            self.state = 1331
            self.match(SqlBaseParser.T__2)
        else:
            raise NoViableAltException(self)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class SampledRelationContext(ParserRuleContext):
    """Parse-tree context for the ``sampledRelation`` rule:
    patternRecognition (TABLESAMPLE sampleType '(' percentage=expression ')')?.
    Generated by ANTLR.
    """
    __slots__ = 'parser'

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser
        self.percentage = None # ExpressionContext

    def patternRecognition(self):
        return self.getTypedRuleContext(SqlBaseParser.PatternRecognitionContext,0)

    def TABLESAMPLE(self):
        return self.getToken(SqlBaseParser.TABLESAMPLE, 0)

    def sampleType(self):
        return self.getTypedRuleContext(SqlBaseParser.SampleTypeContext,0)

    def expression(self):
        return self.getTypedRuleContext(SqlBaseParser.ExpressionContext,0)

    def getRuleIndex(self):
        return SqlBaseParser.RULE_sampledRelation

    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterSampledRelation" ):
            listener.enterSampledRelation(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitSampledRelation" ):
            listener.exitSampledRelation(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitSampledRelation" ):
            return visitor.visitSampledRelation(self)
        else:
            return visitor.visitChildren(self)
def sampledRelation(self):
    """Parse the ``sampledRelation`` rule.

    A patternRecognition relation optionally followed by
    TABLESAMPLE sampleType '(' percentage ')'. Generated by ANTLR.
    """
    localctx = SqlBaseParser.SampledRelationContext(self, self._ctx, self.state)
    self.enterRule(localctx, 64, self.RULE_sampledRelation)
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 1335
        self.patternRecognition()
        # Optional TABLESAMPLE suffix, disambiguated adaptively.
        self.state = 1342
        self._errHandler.sync(self)
        la_ = self._interp.adaptivePredict(self._input,170,self._ctx)
        if la_ == 1:
            self.state = 1336
            self.match(SqlBaseParser.TABLESAMPLE)
            self.state = 1337
            self.sampleType()
            self.state = 1338
            self.match(SqlBaseParser.T__1)
            self.state = 1339
            localctx.percentage = self.expression()
            self.state = 1340
            self.match(SqlBaseParser.T__2)

    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class SampleTypeContext(ParserRuleContext):
    """Parse-tree context for the ``sampleType`` rule: BERNOULLI | SYSTEM.

    Generated by ANTLR — do not edit by hand.
    """
    __slots__ = 'parser'

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def BERNOULLI(self):
        return self.getToken(SqlBaseParser.BERNOULLI, 0)

    def SYSTEM(self):
        return self.getToken(SqlBaseParser.SYSTEM, 0)

    def getRuleIndex(self):
        return SqlBaseParser.RULE_sampleType

    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterSampleType" ):
            listener.enterSampleType(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitSampleType" ):
            listener.exitSampleType(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitSampleType" ):
            return visitor.visitSampleType(self)
        else:
            return visitor.visitChildren(self)
def sampleType(self):
    """Parse the ``sampleType`` rule: a single BERNOULLI or SYSTEM token."""
    localctx = SqlBaseParser.SampleTypeContext(self, self._ctx, self.state)
    self.enterRule(localctx, 66, self.RULE_sampleType)
    self._la = 0 # Token type
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 1344
        _la = self._input.LA(1)
        # Token-set match: accept exactly BERNOULLI or SYSTEM, else recover inline.
        if not(_la==SqlBaseParser.BERNOULLI or _la==SqlBaseParser.SYSTEM):
            self._errHandler.recoverInline(self)
        else:
            self._errHandler.reportMatch(self)
            self.consume()
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class ListAggOverflowBehaviorContext(ParserRuleContext):
    """Parse-tree context for the ``listAggOverflowBehavior`` rule:
    ERROR | TRUNCATE string? listaggCountIndication. Generated by ANTLR.
    """
    __slots__ = 'parser'

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def ERROR(self):
        return self.getToken(SqlBaseParser.ERROR, 0)

    def TRUNCATE(self):
        return self.getToken(SqlBaseParser.TRUNCATE, 0)

    def listaggCountIndication(self):
        return self.getTypedRuleContext(SqlBaseParser.ListaggCountIndicationContext,0)

    def string(self):
        # Optional filler string for the TRUNCATE alternative.
        return self.getTypedRuleContext(SqlBaseParser.StringContext,0)

    def getRuleIndex(self):
        return SqlBaseParser.RULE_listAggOverflowBehavior

    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterListAggOverflowBehavior" ):
            listener.enterListAggOverflowBehavior(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitListAggOverflowBehavior" ):
            listener.exitListAggOverflowBehavior(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitListAggOverflowBehavior" ):
            return visitor.visitListAggOverflowBehavior(self)
        else:
            return visitor.visitChildren(self)
def listAggOverflowBehavior(self):
    """Parse the ``listAggOverflowBehavior`` rule.

    Either the ERROR keyword, or TRUNCATE with an optional filler string
    followed by a listaggCountIndication. Generated by ANTLR.
    """
    localctx = SqlBaseParser.ListAggOverflowBehaviorContext(self, self._ctx, self.state)
    self.enterRule(localctx, 68, self.RULE_listAggOverflowBehavior)
    self._la = 0 # Token type
    try:
        self.state = 1352
        self._errHandler.sync(self)
        token = self._input.LA(1)
        if token in [SqlBaseParser.ERROR]:
            self.enterOuterAlt(localctx, 1)
            self.state = 1346
            self.match(SqlBaseParser.ERROR)
        elif token in [SqlBaseParser.TRUNCATE]:
            self.enterOuterAlt(localctx, 2)
            self.state = 1347
            self.match(SqlBaseParser.TRUNCATE)
            # Optional filler string literal.
            self.state = 1349
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            if _la==SqlBaseParser.STRING or _la==SqlBaseParser.UNICODE_STRING:
                self.state = 1348
                self.string()

            self.state = 1351
            self.listaggCountIndication()
        else:
            raise NoViableAltException(self)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class ListaggCountIndicationContext(ParserRuleContext):
    """Parse-tree context for the ``listaggCountIndication`` rule:
    WITH COUNT | WITHOUT COUNT. Generated by ANTLR.
    """
    __slots__ = 'parser'

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def WITH(self):
        return self.getToken(SqlBaseParser.WITH, 0)

    def COUNT(self):
        return self.getToken(SqlBaseParser.COUNT, 0)

    def WITHOUT(self):
        return self.getToken(SqlBaseParser.WITHOUT, 0)

    def getRuleIndex(self):
        return SqlBaseParser.RULE_listaggCountIndication

    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterListaggCountIndication" ):
            listener.enterListaggCountIndication(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitListaggCountIndication" ):
            listener.exitListaggCountIndication(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitListaggCountIndication" ):
            return visitor.visitListaggCountIndication(self)
        else:
            return visitor.visitChildren(self)
def listaggCountIndication(self):
    """Parse the ``listaggCountIndication`` rule: WITH COUNT or WITHOUT COUNT."""
    localctx = SqlBaseParser.ListaggCountIndicationContext(self, self._ctx, self.state)
    self.enterRule(localctx, 70, self.RULE_listaggCountIndication)
    try:
        self.state = 1358
        self._errHandler.sync(self)
        token = self._input.LA(1)
        if token in [SqlBaseParser.WITH]:
            self.enterOuterAlt(localctx, 1)
            self.state = 1354
            self.match(SqlBaseParser.WITH)
            self.state = 1355
            self.match(SqlBaseParser.COUNT)
        elif token in [SqlBaseParser.WITHOUT]:
            self.enterOuterAlt(localctx, 2)
            self.state = 1356
            self.match(SqlBaseParser.WITHOUT)
            self.state = 1357
            self.match(SqlBaseParser.COUNT)
        else:
            raise NoViableAltException(self)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class PatternRecognitionContext(ParserRuleContext):
    """Parse-tree context for the ``patternRecognition`` rule
    (MATCH_RECOGNIZE over an aliased relation, with optional PARTITION BY,
    ORDER BY, MEASURES, rows-per-match, AFTER MATCH skip, SUBSET, PATTERN
    and DEFINE clauses). Generated by ANTLR.
    """
    __slots__ = 'parser'

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser
        self._expression = None # ExpressionContext
        self.partition = list() # of ExpressionContexts

    def aliasedRelation(self):
        return self.getTypedRuleContext(SqlBaseParser.AliasedRelationContext,0)

    def MATCH_RECOGNIZE(self):
        return self.getToken(SqlBaseParser.MATCH_RECOGNIZE, 0)

    def PATTERN(self):
        return self.getToken(SqlBaseParser.PATTERN, 0)

    def rowPattern(self):
        return self.getTypedRuleContext(SqlBaseParser.RowPatternContext,0)

    def DEFINE(self):
        return self.getToken(SqlBaseParser.DEFINE, 0)

    def variableDefinition(self, i:int=None):
        if i is None:
            return self.getTypedRuleContexts(SqlBaseParser.VariableDefinitionContext)
        else:
            return self.getTypedRuleContext(SqlBaseParser.VariableDefinitionContext,i)

    def PARTITION(self):
        return self.getToken(SqlBaseParser.PARTITION, 0)

    def BY(self, i:int=None):
        # BY appears after both PARTITION and ORDER, hence the indexed accessor.
        if i is None:
            return self.getTokens(SqlBaseParser.BY)
        else:
            return self.getToken(SqlBaseParser.BY, i)

    def ORDER(self):
        return self.getToken(SqlBaseParser.ORDER, 0)

    def sortItem(self, i:int=None):
        if i is None:
            return self.getTypedRuleContexts(SqlBaseParser.SortItemContext)
        else:
            return self.getTypedRuleContext(SqlBaseParser.SortItemContext,i)

    def MEASURES(self):
        return self.getToken(SqlBaseParser.MEASURES, 0)

    def measureDefinition(self, i:int=None):
        if i is None:
            return self.getTypedRuleContexts(SqlBaseParser.MeasureDefinitionContext)
        else:
            return self.getTypedRuleContext(SqlBaseParser.MeasureDefinitionContext,i)

    def rowsPerMatch(self):
        return self.getTypedRuleContext(SqlBaseParser.RowsPerMatchContext,0)

    def AFTER(self):
        return self.getToken(SqlBaseParser.AFTER, 0)

    def MATCH(self):
        return self.getToken(SqlBaseParser.MATCH, 0)

    def skipTo(self):
        return self.getTypedRuleContext(SqlBaseParser.SkipToContext,0)

    def SUBSET(self):
        return self.getToken(SqlBaseParser.SUBSET, 0)

    def subsetDefinition(self, i:int=None):
        if i is None:
            return self.getTypedRuleContexts(SqlBaseParser.SubsetDefinitionContext)
        else:
            return self.getTypedRuleContext(SqlBaseParser.SubsetDefinitionContext,i)

    def identifier(self):
        return self.getTypedRuleContext(SqlBaseParser.IdentifierContext,0)

    def expression(self, i:int=None):
        if i is None:
            return self.getTypedRuleContexts(SqlBaseParser.ExpressionContext)
        else:
            return self.getTypedRuleContext(SqlBaseParser.ExpressionContext,i)

    def INITIAL(self):
        return self.getToken(SqlBaseParser.INITIAL, 0)

    def SEEK(self):
        return self.getToken(SqlBaseParser.SEEK, 0)

    def AS(self):
        return self.getToken(SqlBaseParser.AS, 0)

    def columnAliases(self):
        return self.getTypedRuleContext(SqlBaseParser.ColumnAliasesContext,0)

    def getRuleIndex(self):
        return SqlBaseParser.RULE_patternRecognition

    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterPatternRecognition" ):
            listener.enterPatternRecognition(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitPatternRecognition" ):
            listener.exitPatternRecognition(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitPatternRecognition" ):
            return visitor.visitPatternRecognition(self)
        else:
            return visitor.visitChildren(self)
def patternRecognition(self):
    """Parse the `patternRecognition` rule: an aliasedRelation optionally
    followed by a MATCH_RECOGNIZE '(...)' row-pattern clause and an optional
    (AS)? identifier columnAliases? alias.

    ANTLR-generated; the `self.state = N` assignments and adaptivePredict
    decision numbers index the serialized ATN — do not hand-edit.
    """
    localctx = SqlBaseParser.PatternRecognitionContext(self, self._ctx, self.state)
    self.enterRule(localctx, 72, self.RULE_patternRecognition)
    self._la = 0 # Token type
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 1360
        self.aliasedRelation()
        self.state = 1443
        self._errHandler.sync(self)
        # Decision 189: is a MATCH_RECOGNIZE clause present?
        la_ = self._interp.adaptivePredict(self._input,189,self._ctx)
        if la_ == 1:
            self.state = 1361
            self.match(SqlBaseParser.MATCH_RECOGNIZE)
            self.state = 1362
            self.match(SqlBaseParser.T__1)
            # Optional: PARTITION BY expression (',' expression)*
            self.state = 1373
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            if _la==SqlBaseParser.PARTITION:
                self.state = 1363
                self.match(SqlBaseParser.PARTITION)
                self.state = 1364
                self.match(SqlBaseParser.BY)
                self.state = 1365
                localctx._expression = self.expression()
                localctx.partition.append(localctx._expression)
                self.state = 1370
                self._errHandler.sync(self)
                _la = self._input.LA(1)
                while _la==SqlBaseParser.T__3:
                    self.state = 1366
                    self.match(SqlBaseParser.T__3)
                    self.state = 1367
                    localctx._expression = self.expression()
                    localctx.partition.append(localctx._expression)
                    self.state = 1372
                    self._errHandler.sync(self)
                    _la = self._input.LA(1)
            # Optional: ORDER BY sortItem (',' sortItem)*
            self.state = 1385
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            if _la==SqlBaseParser.ORDER:
                self.state = 1375
                self.match(SqlBaseParser.ORDER)
                self.state = 1376
                self.match(SqlBaseParser.BY)
                self.state = 1377
                self.sortItem()
                self.state = 1382
                self._errHandler.sync(self)
                _la = self._input.LA(1)
                while _la==SqlBaseParser.T__3:
                    self.state = 1378
                    self.match(SqlBaseParser.T__3)
                    self.state = 1379
                    self.sortItem()
                    self.state = 1384
                    self._errHandler.sync(self)
                    _la = self._input.LA(1)
            # Optional: MEASURES measureDefinition (',' measureDefinition)*
            self.state = 1396
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            if _la==SqlBaseParser.MEASURES:
                self.state = 1387
                self.match(SqlBaseParser.MEASURES)
                self.state = 1388
                self.measureDefinition()
                self.state = 1393
                self._errHandler.sync(self)
                _la = self._input.LA(1)
                while _la==SqlBaseParser.T__3:
                    self.state = 1389
                    self.match(SqlBaseParser.T__3)
                    self.state = 1390
                    self.measureDefinition()
                    self.state = 1395
                    self._errHandler.sync(self)
                    _la = self._input.LA(1)
            # Optional: rowsPerMatch (starts with ONE or ALL)
            self.state = 1399
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            if _la==SqlBaseParser.ALL or _la==SqlBaseParser.ONE:
                self.state = 1398
                self.rowsPerMatch()
            # Optional: AFTER MATCH skipTo
            self.state = 1404
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            if _la==SqlBaseParser.AFTER:
                self.state = 1401
                self.match(SqlBaseParser.AFTER)
                self.state = 1402
                self.match(SqlBaseParser.MATCH)
                self.state = 1403
                self.skipTo()
            # Optional: INITIAL | SEEK
            self.state = 1407
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            if _la==SqlBaseParser.INITIAL or _la==SqlBaseParser.SEEK:
                self.state = 1406
                _la = self._input.LA(1)
                if not(_la==SqlBaseParser.INITIAL or _la==SqlBaseParser.SEEK):
                    self._errHandler.recoverInline(self)
                else:
                    self._errHandler.reportMatch(self)
                    self.consume()
            # Mandatory: PATTERN '(' rowPattern ')'
            self.state = 1409
            self.match(SqlBaseParser.PATTERN)
            self.state = 1410
            self.match(SqlBaseParser.T__1)
            self.state = 1411
            self.rowPattern(0)
            self.state = 1412
            self.match(SqlBaseParser.T__2)
            # Optional: SUBSET subsetDefinition (',' subsetDefinition)*
            self.state = 1422
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            if _la==SqlBaseParser.SUBSET:
                self.state = 1413
                self.match(SqlBaseParser.SUBSET)
                self.state = 1414
                self.subsetDefinition()
                self.state = 1419
                self._errHandler.sync(self)
                _la = self._input.LA(1)
                while _la==SqlBaseParser.T__3:
                    self.state = 1415
                    self.match(SqlBaseParser.T__3)
                    self.state = 1416
                    self.subsetDefinition()
                    self.state = 1421
                    self._errHandler.sync(self)
                    _la = self._input.LA(1)
            # Mandatory: DEFINE variableDefinition (',' variableDefinition)* ')'
            self.state = 1424
            self.match(SqlBaseParser.DEFINE)
            self.state = 1425
            self.variableDefinition()
            self.state = 1430
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            while _la==SqlBaseParser.T__3:
                self.state = 1426
                self.match(SqlBaseParser.T__3)
                self.state = 1427
                self.variableDefinition()
                self.state = 1432
                self._errHandler.sync(self)
                _la = self._input.LA(1)
            self.state = 1433
            self.match(SqlBaseParser.T__2)
            # Decision 188: optional trailing (AS)? identifier columnAliases?
            self.state = 1441
            self._errHandler.sync(self)
            la_ = self._interp.adaptivePredict(self._input,188,self._ctx)
            if la_ == 1:
                self.state = 1435
                self._errHandler.sync(self)
                _la = self._input.LA(1)
                if _la==SqlBaseParser.AS:
                    self.state = 1434
                    self.match(SqlBaseParser.AS)
                self.state = 1437
                self.identifier()
                self.state = 1439
                self._errHandler.sync(self)
                la_ = self._interp.adaptivePredict(self._input,187,self._ctx)
                if la_ == 1:
                    self.state = 1438
                    self.columnAliases()
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class MeasureDefinitionContext(ParserRuleContext):
    """Parse-tree node for `measureDefinition`: expression AS identifier."""

    __slots__ = 'parser'

    def __init__(self, parser, parent: ParserRuleContext = None, invokingState: int = -1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def getRuleIndex(self):
        """Index of this rule in the generated rule table."""
        return SqlBaseParser.RULE_measureDefinition

    def expression(self):
        """The measure expression child."""
        return self.getTypedRuleContext(SqlBaseParser.ExpressionContext, 0)

    def AS(self):
        """Terminal node for the AS keyword."""
        return self.getToken(SqlBaseParser.AS, 0)

    def identifier(self):
        """The measure-name identifier child."""
        return self.getTypedRuleContext(SqlBaseParser.IdentifierContext, 0)

    def enterRule(self, listener: ParseTreeListener):
        if hasattr(listener, "enterMeasureDefinition"):
            listener.enterMeasureDefinition(self)

    def exitRule(self, listener: ParseTreeListener):
        if hasattr(listener, "exitMeasureDefinition"):
            listener.exitMeasureDefinition(self)

    def accept(self, visitor: ParseTreeVisitor):
        # Use the rule-specific visitor hook when present; otherwise the generic one.
        if not hasattr(visitor, "visitMeasureDefinition"):
            return visitor.visitChildren(self)
        return visitor.visitMeasureDefinition(self)
def measureDefinition(self):
    """Parse `measureDefinition`: expression AS identifier.  ANTLR-generated."""
    localctx = SqlBaseParser.MeasureDefinitionContext(self, self._ctx, self.state)
    self.enterRule(localctx, 74, self.RULE_measureDefinition)
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 1445
        self.expression()
        self.state = 1446
        self.match(SqlBaseParser.AS)
        self.state = 1447
        self.identifier()
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class RowsPerMatchContext(ParserRuleContext):
    """Parse-tree node for `rowsPerMatch`:
    ONE ROW PER MATCH | ALL ROWS PER MATCH emptyMatchHandling?"""
    __slots__ = 'parser'
    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser
    def ONE(self):
        return self.getToken(SqlBaseParser.ONE, 0)
    def ROW(self):
        return self.getToken(SqlBaseParser.ROW, 0)
    def PER(self):
        return self.getToken(SqlBaseParser.PER, 0)
    def MATCH(self):
        return self.getToken(SqlBaseParser.MATCH, 0)
    def ALL(self):
        return self.getToken(SqlBaseParser.ALL, 0)
    def ROWS(self):
        return self.getToken(SqlBaseParser.ROWS, 0)
    def emptyMatchHandling(self):
        # Present only in the ALL ROWS PER MATCH alternative.
        return self.getTypedRuleContext(SqlBaseParser.EmptyMatchHandlingContext,0)
    def getRuleIndex(self):
        return SqlBaseParser.RULE_rowsPerMatch
    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterRowsPerMatch" ):
            listener.enterRowsPerMatch(self)
    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitRowsPerMatch" ):
            listener.exitRowsPerMatch(self)
    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitRowsPerMatch" ):
            return visitor.visitRowsPerMatch(self)
        else:
            return visitor.visitChildren(self)
def rowsPerMatch(self):
    """Parse `rowsPerMatch`: ONE ROW PER MATCH, or ALL ROWS PER MATCH with an
    optional emptyMatchHandling clause.  ANTLR-generated."""
    localctx = SqlBaseParser.RowsPerMatchContext(self, self._ctx, self.state)
    self.enterRule(localctx, 76, self.RULE_rowsPerMatch)
    self._la = 0 # Token type
    try:
        self.state = 1460
        self._errHandler.sync(self)
        token = self._input.LA(1)
        if token in [SqlBaseParser.ONE]:
            self.enterOuterAlt(localctx, 1)
            self.state = 1449
            self.match(SqlBaseParser.ONE)
            self.state = 1450
            self.match(SqlBaseParser.ROW)
            self.state = 1451
            self.match(SqlBaseParser.PER)
            self.state = 1452
            self.match(SqlBaseParser.MATCH)
        elif token in [SqlBaseParser.ALL]:
            self.enterOuterAlt(localctx, 2)
            self.state = 1453
            self.match(SqlBaseParser.ALL)
            self.state = 1454
            self.match(SqlBaseParser.ROWS)
            self.state = 1455
            self.match(SqlBaseParser.PER)
            self.state = 1456
            self.match(SqlBaseParser.MATCH)
            # Optional emptyMatchHandling (starts with OMIT, SHOW, or WITH).
            self.state = 1458
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            if _la==SqlBaseParser.OMIT or _la==SqlBaseParser.SHOW or _la==SqlBaseParser.WITH:
                self.state = 1457
                self.emptyMatchHandling()
        else:
            raise NoViableAltException(self)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class EmptyMatchHandlingContext(ParserRuleContext):
    """Parse-tree node for `emptyMatchHandling`:
    SHOW EMPTY MATCHES | OMIT EMPTY MATCHES | WITH UNMATCHED ROWS"""
    __slots__ = 'parser'
    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser
    def SHOW(self):
        return self.getToken(SqlBaseParser.SHOW, 0)
    def EMPTY(self):
        return self.getToken(SqlBaseParser.EMPTY, 0)
    def MATCHES(self):
        return self.getToken(SqlBaseParser.MATCHES, 0)
    def OMIT(self):
        return self.getToken(SqlBaseParser.OMIT, 0)
    def WITH(self):
        return self.getToken(SqlBaseParser.WITH, 0)
    def UNMATCHED(self):
        return self.getToken(SqlBaseParser.UNMATCHED, 0)
    def ROWS(self):
        return self.getToken(SqlBaseParser.ROWS, 0)
    def getRuleIndex(self):
        return SqlBaseParser.RULE_emptyMatchHandling
    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterEmptyMatchHandling" ):
            listener.enterEmptyMatchHandling(self)
    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitEmptyMatchHandling" ):
            listener.exitEmptyMatchHandling(self)
    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitEmptyMatchHandling" ):
            return visitor.visitEmptyMatchHandling(self)
        else:
            return visitor.visitChildren(self)
def emptyMatchHandling(self):
    """Parse `emptyMatchHandling`: SHOW EMPTY MATCHES | OMIT EMPTY MATCHES |
    WITH UNMATCHED ROWS.  ANTLR-generated."""
    localctx = SqlBaseParser.EmptyMatchHandlingContext(self, self._ctx, self.state)
    self.enterRule(localctx, 78, self.RULE_emptyMatchHandling)
    try:
        self.state = 1471
        self._errHandler.sync(self)
        # Alternative is selected by one token of lookahead.
        token = self._input.LA(1)
        if token in [SqlBaseParser.SHOW]:
            self.enterOuterAlt(localctx, 1)
            self.state = 1462
            self.match(SqlBaseParser.SHOW)
            self.state = 1463
            self.match(SqlBaseParser.EMPTY)
            self.state = 1464
            self.match(SqlBaseParser.MATCHES)
        elif token in [SqlBaseParser.OMIT]:
            self.enterOuterAlt(localctx, 2)
            self.state = 1465
            self.match(SqlBaseParser.OMIT)
            self.state = 1466
            self.match(SqlBaseParser.EMPTY)
            self.state = 1467
            self.match(SqlBaseParser.MATCHES)
        elif token in [SqlBaseParser.WITH]:
            self.enterOuterAlt(localctx, 3)
            self.state = 1468
            self.match(SqlBaseParser.WITH)
            self.state = 1469
            self.match(SqlBaseParser.UNMATCHED)
            self.state = 1470
            self.match(SqlBaseParser.ROWS)
        else:
            raise NoViableAltException(self)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class SkipToContext(ParserRuleContext):
    """Parse-tree node for `skipTo` (the AFTER MATCH SKIP target):
    SKIP TO NEXT ROW | SKIP PAST LAST ROW | SKIP TO FIRST/LAST identifier |
    SKIP TO identifier."""
    __slots__ = 'parser'
    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser
    def TO(self):
        return self.getToken(SqlBaseParser.TO, 0)
    def NEXT(self):
        return self.getToken(SqlBaseParser.NEXT, 0)
    def ROW(self):
        return self.getToken(SqlBaseParser.ROW, 0)
    def PAST(self):
        return self.getToken(SqlBaseParser.PAST, 0)
    def LAST(self):
        return self.getToken(SqlBaseParser.LAST, 0)
    def FIRST(self):
        return self.getToken(SqlBaseParser.FIRST, 0)
    def identifier(self):
        # Pattern-variable target of SKIP TO [FIRST|LAST] identifier.
        return self.getTypedRuleContext(SqlBaseParser.IdentifierContext,0)
    def getRuleIndex(self):
        return SqlBaseParser.RULE_skipTo
    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterSkipTo" ):
            listener.enterSkipTo(self)
    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitSkipTo" ):
            listener.exitSkipTo(self)
    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitSkipTo" ):
            return visitor.visitSkipTo(self)
        else:
            return visitor.visitChildren(self)
def skipTo(self):
    """Parse `skipTo`: one of five SKIP alternatives chosen by adaptive
    prediction (decision 193).  T__4 is the SKIP-like anonymous token from
    the grammar.  ANTLR-generated."""
    localctx = SqlBaseParser.SkipToContext(self, self._ctx, self.state)
    self.enterRule(localctx, 80, self.RULE_skipTo)
    try:
        self.state = 1492
        self._errHandler.sync(self)
        la_ = self._interp.adaptivePredict(self._input,193,self._ctx)
        if la_ == 1:
            # SKIP TO NEXT ROW
            self.enterOuterAlt(localctx, 1)
            self.state = 1473
            self.match(SqlBaseParser.T__4)
            self.state = 1474
            self.match(SqlBaseParser.TO)
            self.state = 1475
            self.match(SqlBaseParser.NEXT)
            self.state = 1476
            self.match(SqlBaseParser.ROW)
        elif la_ == 2:
            # SKIP PAST LAST ROW
            self.enterOuterAlt(localctx, 2)
            self.state = 1477
            self.match(SqlBaseParser.T__4)
            self.state = 1478
            self.match(SqlBaseParser.PAST)
            self.state = 1479
            self.match(SqlBaseParser.LAST)
            self.state = 1480
            self.match(SqlBaseParser.ROW)
        elif la_ == 3:
            # SKIP TO FIRST identifier
            self.enterOuterAlt(localctx, 3)
            self.state = 1481
            self.match(SqlBaseParser.T__4)
            self.state = 1482
            self.match(SqlBaseParser.TO)
            self.state = 1483
            self.match(SqlBaseParser.FIRST)
            self.state = 1484
            self.identifier()
        elif la_ == 4:
            # SKIP TO LAST identifier
            self.enterOuterAlt(localctx, 4)
            self.state = 1485
            self.match(SqlBaseParser.T__4)
            self.state = 1486
            self.match(SqlBaseParser.TO)
            self.state = 1487
            self.match(SqlBaseParser.LAST)
            self.state = 1488
            self.identifier()
        elif la_ == 5:
            # SKIP TO identifier
            self.enterOuterAlt(localctx, 5)
            self.state = 1489
            self.match(SqlBaseParser.T__4)
            self.state = 1490
            self.match(SqlBaseParser.TO)
            self.state = 1491
            self.identifier()
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class SubsetDefinitionContext(ParserRuleContext):
    """Parse-tree node for `subsetDefinition`:
    name=identifier EQ '(' union+=identifier (',' union+=identifier)* ')'"""
    __slots__ = 'parser'
    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser
        self.name = None # IdentifierContext: subset variable name (grammar label `name`)
        self._identifier = None # IdentifierContext: scratch slot for the last parsed identifier
        self.union = list() # of IdentifierContexts: members of the union (grammar label `union`)
    def EQ(self):
        return self.getToken(SqlBaseParser.EQ, 0)
    def identifier(self, i:int=None):
        # All identifier children when i is None, else the i-th one.
        if i is None:
            return self.getTypedRuleContexts(SqlBaseParser.IdentifierContext)
        else:
            return self.getTypedRuleContext(SqlBaseParser.IdentifierContext,i)
    def getRuleIndex(self):
        return SqlBaseParser.RULE_subsetDefinition
    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterSubsetDefinition" ):
            listener.enterSubsetDefinition(self)
    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitSubsetDefinition" ):
            listener.exitSubsetDefinition(self)
    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitSubsetDefinition" ):
            return visitor.visitSubsetDefinition(self)
        else:
            return visitor.visitChildren(self)
def subsetDefinition(self):
    """Parse `subsetDefinition`: name '=' '(' identifier (',' identifier)* ')',
    collecting the union members into localctx.union.  ANTLR-generated."""
    localctx = SqlBaseParser.SubsetDefinitionContext(self, self._ctx, self.state)
    self.enterRule(localctx, 82, self.RULE_subsetDefinition)
    self._la = 0 # Token type
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 1494
        localctx.name = self.identifier()
        self.state = 1495
        self.match(SqlBaseParser.EQ)
        self.state = 1496
        self.match(SqlBaseParser.T__1)
        self.state = 1497
        localctx._identifier = self.identifier()
        localctx.union.append(localctx._identifier)
        self.state = 1502
        self._errHandler.sync(self)
        _la = self._input.LA(1)
        while _la==SqlBaseParser.T__3:
            self.state = 1498
            self.match(SqlBaseParser.T__3)
            self.state = 1499
            localctx._identifier = self.identifier()
            localctx.union.append(localctx._identifier)
            self.state = 1504
            self._errHandler.sync(self)
            _la = self._input.LA(1)
        self.state = 1505
        self.match(SqlBaseParser.T__2)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class VariableDefinitionContext(ParserRuleContext):
    """Parse-tree node for `variableDefinition`: identifier AS expression."""

    __slots__ = 'parser'

    def __init__(self, parser, parent: ParserRuleContext = None, invokingState: int = -1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def getRuleIndex(self):
        """Index of this rule in the generated rule table."""
        return SqlBaseParser.RULE_variableDefinition

    def identifier(self):
        """The pattern-variable identifier child."""
        return self.getTypedRuleContext(SqlBaseParser.IdentifierContext, 0)

    def AS(self):
        """Terminal node for the AS keyword."""
        return self.getToken(SqlBaseParser.AS, 0)

    def expression(self):
        """The defining expression child."""
        return self.getTypedRuleContext(SqlBaseParser.ExpressionContext, 0)

    def enterRule(self, listener: ParseTreeListener):
        if hasattr(listener, "enterVariableDefinition"):
            listener.enterVariableDefinition(self)

    def exitRule(self, listener: ParseTreeListener):
        if hasattr(listener, "exitVariableDefinition"):
            listener.exitVariableDefinition(self)

    def accept(self, visitor: ParseTreeVisitor):
        # Use the rule-specific visitor hook when present; otherwise the generic one.
        if not hasattr(visitor, "visitVariableDefinition"):
            return visitor.visitChildren(self)
        return visitor.visitVariableDefinition(self)
def variableDefinition(self):
    """Parse `variableDefinition`: identifier AS expression.  ANTLR-generated."""
    localctx = SqlBaseParser.VariableDefinitionContext(self, self._ctx, self.state)
    self.enterRule(localctx, 84, self.RULE_variableDefinition)
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 1507
        self.identifier()
        self.state = 1508
        self.match(SqlBaseParser.AS)
        self.state = 1509
        self.expression()
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class AliasedRelationContext(ParserRuleContext):
    """Parse-tree node for `aliasedRelation`:
    relationPrimary ((AS)? identifier columnAliases?)?"""
    __slots__ = 'parser'
    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser
    def relationPrimary(self):
        return self.getTypedRuleContext(SqlBaseParser.RelationPrimaryContext,0)
    def identifier(self):
        # Alias identifier; present only when the relation is aliased.
        return self.getTypedRuleContext(SqlBaseParser.IdentifierContext,0)
    def AS(self):
        return self.getToken(SqlBaseParser.AS, 0)
    def columnAliases(self):
        return self.getTypedRuleContext(SqlBaseParser.ColumnAliasesContext,0)
    def getRuleIndex(self):
        return SqlBaseParser.RULE_aliasedRelation
    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterAliasedRelation" ):
            listener.enterAliasedRelation(self)
    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitAliasedRelation" ):
            listener.exitAliasedRelation(self)
    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitAliasedRelation" ):
            return visitor.visitAliasedRelation(self)
        else:
            return visitor.visitChildren(self)
def aliasedRelation(self):
    """Parse `aliasedRelation`: relationPrimary optionally followed by
    (AS)? identifier columnAliases? (decision 197).  ANTLR-generated."""
    localctx = SqlBaseParser.AliasedRelationContext(self, self._ctx, self.state)
    self.enterRule(localctx, 86, self.RULE_aliasedRelation)
    self._la = 0 # Token type
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 1511
        self.relationPrimary()
        self.state = 1519
        self._errHandler.sync(self)
        la_ = self._interp.adaptivePredict(self._input,197,self._ctx)
        if la_ == 1:
            # Optional AS keyword before the alias.
            self.state = 1513
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            if _la==SqlBaseParser.AS:
                self.state = 1512
                self.match(SqlBaseParser.AS)
            self.state = 1515
            self.identifier()
            # Optional column-alias list (decision 196).
            self.state = 1517
            self._errHandler.sync(self)
            la_ = self._interp.adaptivePredict(self._input,196,self._ctx)
            if la_ == 1:
                self.state = 1516
                self.columnAliases()
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class ColumnAliasesContext(ParserRuleContext):
    """Parse-tree node for `columnAliases`: '(' identifier (',' identifier)* ')'"""
    __slots__ = 'parser'
    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser
    def identifier(self, i:int=None):
        # All alias identifiers when i is None, else the i-th one.
        if i is None:
            return self.getTypedRuleContexts(SqlBaseParser.IdentifierContext)
        else:
            return self.getTypedRuleContext(SqlBaseParser.IdentifierContext,i)
    def getRuleIndex(self):
        return SqlBaseParser.RULE_columnAliases
    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterColumnAliases" ):
            listener.enterColumnAliases(self)
    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitColumnAliases" ):
            listener.exitColumnAliases(self)
    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitColumnAliases" ):
            return visitor.visitColumnAliases(self)
        else:
            return visitor.visitChildren(self)
def columnAliases(self):
    """Parse `columnAliases`: '(' identifier (',' identifier)* ')'.
    ANTLR-generated."""
    localctx = SqlBaseParser.ColumnAliasesContext(self, self._ctx, self.state)
    self.enterRule(localctx, 88, self.RULE_columnAliases)
    self._la = 0 # Token type
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 1521
        self.match(SqlBaseParser.T__1)
        self.state = 1522
        self.identifier()
        self.state = 1527
        self._errHandler.sync(self)
        _la = self._input.LA(1)
        while _la==SqlBaseParser.T__3:
            self.state = 1523
            self.match(SqlBaseParser.T__3)
            self.state = 1524
            self.identifier()
            self.state = 1529
            self._errHandler.sync(self)
            _la = self._input.LA(1)
        self.state = 1530
        self.match(SqlBaseParser.T__2)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class RelationPrimaryContext(ParserRuleContext):
    """Base parse-tree node for the labelled `relationPrimary` alternatives
    (TableName, SubqueryRelation, Unnest, Lateral, ParenthesizedRelation);
    concrete subclasses copy themselves over this node via copyFrom."""
    __slots__ = 'parser'
    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def getRuleIndex(self):
        return SqlBaseParser.RULE_relationPrimary

    def copyFrom(self, ctx:ParserRuleContext):
        super().copyFrom(ctx)
class SubqueryRelationContext(RelationPrimaryContext):
    """`relationPrimary` alternative: '(' query ')' subquery."""
    def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.RelationPrimaryContext
        super().__init__(parser)
        self.copyFrom(ctx)

    def query(self):
        return self.getTypedRuleContext(SqlBaseParser.QueryContext,0)

    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterSubqueryRelation" ):
            listener.enterSubqueryRelation(self)
    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitSubqueryRelation" ):
            listener.exitSubqueryRelation(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitSubqueryRelation" ):
            return visitor.visitSubqueryRelation(self)
        else:
            return visitor.visitChildren(self)
class ParenthesizedRelationContext(RelationPrimaryContext):
    """`relationPrimary` alternative: '(' relation ')'."""
    def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.RelationPrimaryContext
        super().__init__(parser)
        self.copyFrom(ctx)

    def relation(self):
        return self.getTypedRuleContext(SqlBaseParser.RelationContext,0)

    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterParenthesizedRelation" ):
            listener.enterParenthesizedRelation(self)
    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitParenthesizedRelation" ):
            listener.exitParenthesizedRelation(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitParenthesizedRelation" ):
            return visitor.visitParenthesizedRelation(self)
        else:
            return visitor.visitChildren(self)
class UnnestContext(RelationPrimaryContext):
    """`relationPrimary` alternative:
    UNNEST '(' expression (',' expression)* ')' (WITH ORDINALITY)?"""
    def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.RelationPrimaryContext
        super().__init__(parser)
        self.copyFrom(ctx)

    def UNNEST(self):
        return self.getToken(SqlBaseParser.UNNEST, 0)
    def expression(self, i:int=None):
        # All unnested expressions when i is None, else the i-th one.
        if i is None:
            return self.getTypedRuleContexts(SqlBaseParser.ExpressionContext)
        else:
            return self.getTypedRuleContext(SqlBaseParser.ExpressionContext,i)
    def WITH(self):
        return self.getToken(SqlBaseParser.WITH, 0)
    def ORDINALITY(self):
        return self.getToken(SqlBaseParser.ORDINALITY, 0)

    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterUnnest" ):
            listener.enterUnnest(self)
    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitUnnest" ):
            listener.exitUnnest(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitUnnest" ):
            return visitor.visitUnnest(self)
        else:
            return visitor.visitChildren(self)
class LateralContext(RelationPrimaryContext):
    """`relationPrimary` alternative: LATERAL '(' query ')'."""
    def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.RelationPrimaryContext
        super().__init__(parser)
        self.copyFrom(ctx)

    def LATERAL(self):
        return self.getToken(SqlBaseParser.LATERAL, 0)
    def query(self):
        return self.getTypedRuleContext(SqlBaseParser.QueryContext,0)

    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterLateral" ):
            listener.enterLateral(self)
    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitLateral" ):
            listener.exitLateral(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitLateral" ):
            return visitor.visitLateral(self)
        else:
            return visitor.visitChildren(self)
class TableNameContext(RelationPrimaryContext):
    """`relationPrimary` alternative: qualifiedName queryPeriod?."""
    def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.RelationPrimaryContext
        super().__init__(parser)
        self.copyFrom(ctx)

    def qualifiedName(self):
        return self.getTypedRuleContext(SqlBaseParser.QualifiedNameContext,0)
    def queryPeriod(self):
        # Optional temporal query period (FOR TIMESTAMP/VERSION-style clause).
        return self.getTypedRuleContext(SqlBaseParser.QueryPeriodContext,0)

    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterTableName" ):
            listener.enterTableName(self)
    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitTableName" ):
            listener.exitTableName(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitTableName" ):
            return visitor.visitTableName(self)
        else:
            return visitor.visitChildren(self)
def relationPrimary(self):
    """Parse `relationPrimary`: table name, '(' query ')', UNNEST(...),
    LATERAL '(' query ')', or '(' relation ')'.  The alternative is chosen
    by adaptive prediction (decision 202) and the generic context is
    re-labelled with the matching subclass.  ANTLR-generated."""
    localctx = SqlBaseParser.RelationPrimaryContext(self, self._ctx, self.state)
    self.enterRule(localctx, 90, self.RULE_relationPrimary)
    self._la = 0 # Token type
    try:
        self.state = 1564
        self._errHandler.sync(self)
        la_ = self._interp.adaptivePredict(self._input,202,self._ctx)
        if la_ == 1:
            # qualifiedName queryPeriod?
            localctx = SqlBaseParser.TableNameContext(self, localctx)
            self.enterOuterAlt(localctx, 1)
            self.state = 1532
            self.qualifiedName()
            self.state = 1534
            self._errHandler.sync(self)
            la_ = self._interp.adaptivePredict(self._input,199,self._ctx)
            if la_ == 1:
                self.state = 1533
                self.queryPeriod()
        elif la_ == 2:
            # '(' query ')'
            localctx = SqlBaseParser.SubqueryRelationContext(self, localctx)
            self.enterOuterAlt(localctx, 2)
            self.state = 1536
            self.match(SqlBaseParser.T__1)
            self.state = 1537
            self.query()
            self.state = 1538
            self.match(SqlBaseParser.T__2)
        elif la_ == 3:
            # UNNEST '(' expression (',' expression)* ')' (WITH ORDINALITY)?
            localctx = SqlBaseParser.UnnestContext(self, localctx)
            self.enterOuterAlt(localctx, 3)
            self.state = 1540
            self.match(SqlBaseParser.UNNEST)
            self.state = 1541
            self.match(SqlBaseParser.T__1)
            self.state = 1542
            self.expression()
            self.state = 1547
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            while _la==SqlBaseParser.T__3:
                self.state = 1543
                self.match(SqlBaseParser.T__3)
                self.state = 1544
                self.expression()
                self.state = 1549
                self._errHandler.sync(self)
                _la = self._input.LA(1)
            self.state = 1550
            self.match(SqlBaseParser.T__2)
            self.state = 1553
            self._errHandler.sync(self)
            la_ = self._interp.adaptivePredict(self._input,201,self._ctx)
            if la_ == 1:
                self.state = 1551
                self.match(SqlBaseParser.WITH)
                self.state = 1552
                self.match(SqlBaseParser.ORDINALITY)
        elif la_ == 4:
            # LATERAL '(' query ')'
            localctx = SqlBaseParser.LateralContext(self, localctx)
            self.enterOuterAlt(localctx, 4)
            self.state = 1555
            self.match(SqlBaseParser.LATERAL)
            self.state = 1556
            self.match(SqlBaseParser.T__1)
            self.state = 1557
            self.query()
            self.state = 1558
            self.match(SqlBaseParser.T__2)
        elif la_ == 5:
            # '(' relation ')'
            localctx = SqlBaseParser.ParenthesizedRelationContext(self, localctx)
            self.enterOuterAlt(localctx, 5)
            self.state = 1560
            self.match(SqlBaseParser.T__1)
            self.state = 1561
            self.relation(0)
            self.state = 1562
            self.match(SqlBaseParser.T__2)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class ExpressionContext(ParserRuleContext):
    """Parse-tree node for the `expression` rule, a thin wrapper around a
    single booleanExpression child."""

    __slots__ = 'parser'

    def __init__(self, parser, parent: ParserRuleContext = None, invokingState: int = -1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def getRuleIndex(self):
        """Index of this rule in the generated rule table."""
        return SqlBaseParser.RULE_expression

    def booleanExpression(self):
        """The single booleanExpression child of this node."""
        return self.getTypedRuleContext(SqlBaseParser.BooleanExpressionContext, 0)

    def enterRule(self, listener: ParseTreeListener):
        if hasattr(listener, "enterExpression"):
            listener.enterExpression(self)

    def exitRule(self, listener: ParseTreeListener):
        if hasattr(listener, "exitExpression"):
            listener.exitExpression(self)

    def accept(self, visitor: ParseTreeVisitor):
        # Use the rule-specific visitor hook when present; otherwise the generic one.
        if not hasattr(visitor, "visitExpression"):
            return visitor.visitChildren(self)
        return visitor.visitExpression(self)
def expression(self):
    """Parse `expression`: delegates to booleanExpression(0).  ANTLR-generated."""
    localctx = SqlBaseParser.ExpressionContext(self, self._ctx, self.state)
    self.enterRule(localctx, 92, self.RULE_expression)
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 1566
        self.booleanExpression(0)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class BooleanExpressionContext(ParserRuleContext):
    """Base parse-tree node for the labelled `booleanExpression` alternatives
    (LogicalNot, Predicated, And_, Or_); concrete subclasses copy themselves
    over this node via copyFrom."""
    __slots__ = 'parser'
    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def getRuleIndex(self):
        return SqlBaseParser.RULE_booleanExpression

    def copyFrom(self, ctx:ParserRuleContext):
        super().copyFrom(ctx)
class LogicalNotContext(BooleanExpressionContext):
    """`booleanExpression` alternative: NOT booleanExpression."""
    def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.BooleanExpressionContext
        super().__init__(parser)
        self.copyFrom(ctx)

    def NOT(self):
        return self.getToken(SqlBaseParser.NOT, 0)
    def booleanExpression(self):
        return self.getTypedRuleContext(SqlBaseParser.BooleanExpressionContext,0)

    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterLogicalNot" ):
            listener.enterLogicalNot(self)
    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitLogicalNot" ):
            listener.exitLogicalNot(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitLogicalNot" ):
            return visitor.visitLogicalNot(self)
        else:
            return visitor.visitChildren(self)
class PredicatedContext(BooleanExpressionContext):
    """`booleanExpression` alternative: valueExpression predicate?."""
    def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.BooleanExpressionContext
        super().__init__(parser)
        self._valueExpression = None # ValueExpressionContext: labelled operand of the predicate
        self.copyFrom(ctx)

    def valueExpression(self):
        return self.getTypedRuleContext(SqlBaseParser.ValueExpressionContext,0)
    def predicate(self):
        return self.getTypedRuleContext(SqlBaseParser.PredicateContext,0)

    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterPredicated" ):
            listener.enterPredicated(self)
    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitPredicated" ):
            listener.exitPredicated(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitPredicated" ):
            return visitor.visitPredicated(self)
        else:
            return visitor.visitChildren(self)
class And_Context(BooleanExpressionContext):
    """`booleanExpression` alternative: booleanExpression AND booleanExpression."""
    def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.BooleanExpressionContext
        super().__init__(parser)
        self.copyFrom(ctx)

    def booleanExpression(self, i:int=None):
        # Both conjunct children when i is None, else the i-th one.
        if i is None:
            return self.getTypedRuleContexts(SqlBaseParser.BooleanExpressionContext)
        else:
            return self.getTypedRuleContext(SqlBaseParser.BooleanExpressionContext,i)
    def AND(self):
        return self.getToken(SqlBaseParser.AND, 0)

    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterAnd_" ):
            listener.enterAnd_(self)
    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitAnd_" ):
            listener.exitAnd_(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitAnd_" ):
            return visitor.visitAnd_(self)
        else:
            return visitor.visitChildren(self)
class Or_Context(BooleanExpressionContext):
    """`booleanExpression` alternative: booleanExpression OR booleanExpression."""
    def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.BooleanExpressionContext
        super().__init__(parser)
        self.copyFrom(ctx)

    def booleanExpression(self, i:int=None):
        # Both disjunct children when i is None, else the i-th one.
        if i is None:
            return self.getTypedRuleContexts(SqlBaseParser.BooleanExpressionContext)
        else:
            return self.getTypedRuleContext(SqlBaseParser.BooleanExpressionContext,i)
    def OR(self):
        return self.getToken(SqlBaseParser.OR, 0)

    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterOr_" ):
            listener.enterOr_(self)
    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitOr_" ):
            listener.exitOr_(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitOr_" ):
            return visitor.visitOr_(self)
        else:
            return visitor.visitChildren(self)
def booleanExpression(self, _p:int=0):
    """Parse the left-recursive `booleanExpression` rule (ANTLR precedence climbing).

    Alternatives visible here: `valueExpression predicate?` (Predicated),
    `NOT booleanExpression` (LogicalNot), and the left-recursive
    `... AND ...` / `... OR ...` forms, with AND (precpred 2) binding tighter
    than OR (precpred 1).  `_p` is the minimum precedence the caller accepts.
    NOTE: generated by ANTLR — state numbers and decision indices are tied to
    the serialized ATN; do not hand-edit the logic.
    """
    _parentctx = self._ctx
    _parentState = self.state
    localctx = SqlBaseParser.BooleanExpressionContext(self, self._ctx, _parentState)
    _prevctx = localctx
    _startState = 94
    self.enterRecursionRule(localctx, 94, self.RULE_booleanExpression, _p)
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 1575
        self._errHandler.sync(self)
        token = self._input.LA(1)
        # Any token that can start a valueExpression selects the Predicated
        # alternative; the set enumerates literals, identifiers and every
        # non-reserved keyword of the grammar.
        if token in [SqlBaseParser.T__1, SqlBaseParser.ADD, SqlBaseParser.ADMIN, SqlBaseParser.AFTER, SqlBaseParser.ALL, SqlBaseParser.ANALYZE, SqlBaseParser.ANY, SqlBaseParser.ARRAY, SqlBaseParser.ASC, SqlBaseParser.AT, SqlBaseParser.AUTHORIZATION, SqlBaseParser.BERNOULLI, SqlBaseParser.CALL, SqlBaseParser.CASCADE, SqlBaseParser.CASE, SqlBaseParser.CAST, SqlBaseParser.CATALOGS, SqlBaseParser.COLUMN, SqlBaseParser.COLUMNS, SqlBaseParser.COMMENT, SqlBaseParser.COMMIT, SqlBaseParser.COMMITTED, SqlBaseParser.COUNT, SqlBaseParser.CURRENT, SqlBaseParser.CURRENT_CATALOG, SqlBaseParser.CURRENT_DATE, SqlBaseParser.CURRENT_PATH, SqlBaseParser.CURRENT_SCHEMA, SqlBaseParser.CURRENT_TIME, SqlBaseParser.CURRENT_TIMESTAMP, SqlBaseParser.CURRENT_USER, SqlBaseParser.DATA, SqlBaseParser.DATE, SqlBaseParser.DAY, SqlBaseParser.DEFINER, SqlBaseParser.DENY, SqlBaseParser.DESC, SqlBaseParser.DEFINE, SqlBaseParser.DISTRIBUTED, SqlBaseParser.DOUBLE, SqlBaseParser.EMPTY, SqlBaseParser.ERROR, SqlBaseParser.EXCLUDING, SqlBaseParser.EXISTS, SqlBaseParser.EXPLAIN, SqlBaseParser.EXTRACT, SqlBaseParser.FALSE, SqlBaseParser.FETCH, SqlBaseParser.FILTER, SqlBaseParser.FINAL, SqlBaseParser.FIRST, SqlBaseParser.FOLLOWING, SqlBaseParser.FORMAT, SqlBaseParser.FUNCTIONS, SqlBaseParser.GRANT, SqlBaseParser.GRANTED, SqlBaseParser.GRANTS, SqlBaseParser.GRAPHVIZ, SqlBaseParser.GROUPING, SqlBaseParser.GROUPS, SqlBaseParser.HOUR, SqlBaseParser.IF, SqlBaseParser.IGNORE, SqlBaseParser.INCLUDING, SqlBaseParser.INITIAL, SqlBaseParser.INPUT, SqlBaseParser.INTERVAL, SqlBaseParser.INVOKER, SqlBaseParser.IO, SqlBaseParser.ISOLATION, SqlBaseParser.JSON, SqlBaseParser.LAST, SqlBaseParser.LATERAL, SqlBaseParser.LEVEL, SqlBaseParser.LIMIT, SqlBaseParser.LISTAGG, SqlBaseParser.LOCAL, SqlBaseParser.LOCALTIME, SqlBaseParser.LOCALTIMESTAMP, SqlBaseParser.LOGICAL, SqlBaseParser.MAP, SqlBaseParser.MATCH, SqlBaseParser.MATCHED, SqlBaseParser.MATCHES, SqlBaseParser.MATCH_RECOGNIZE, SqlBaseParser.MATERIALIZED, SqlBaseParser.MEASURES, 
            SqlBaseParser.MERGE, SqlBaseParser.MINUTE, SqlBaseParser.MONTH, SqlBaseParser.NEXT, SqlBaseParser.NFC, SqlBaseParser.NFD, SqlBaseParser.NFKC, SqlBaseParser.NFKD, SqlBaseParser.NO, SqlBaseParser.NONE, SqlBaseParser.NORMALIZE, SqlBaseParser.NULL, SqlBaseParser.NULLIF, SqlBaseParser.NULLS, SqlBaseParser.OFFSET, SqlBaseParser.OMIT, SqlBaseParser.OF, SqlBaseParser.ONE, SqlBaseParser.ONLY, SqlBaseParser.OPTION, SqlBaseParser.ORDINALITY, SqlBaseParser.OUTPUT, SqlBaseParser.OVER, SqlBaseParser.OVERFLOW, SqlBaseParser.PARTITION, SqlBaseParser.PARTITIONS, SqlBaseParser.PAST, SqlBaseParser.PATH, SqlBaseParser.PATTERN, SqlBaseParser.PER, SqlBaseParser.PERMUTE, SqlBaseParser.POSITION, SqlBaseParser.PRECEDING, SqlBaseParser.PRECISION, SqlBaseParser.PRIVILEGES, SqlBaseParser.PROPERTIES, SqlBaseParser.RANGE, SqlBaseParser.READ, SqlBaseParser.REFRESH, SqlBaseParser.RENAME, SqlBaseParser.REPEATABLE, SqlBaseParser.REPLACE, SqlBaseParser.RESET, SqlBaseParser.RESPECT, SqlBaseParser.RESTRICT, SqlBaseParser.REVOKE, SqlBaseParser.ROLE, SqlBaseParser.ROLES, SqlBaseParser.ROLLBACK, SqlBaseParser.ROW, SqlBaseParser.ROWS, SqlBaseParser.RUNNING, SqlBaseParser.SCHEMA, SqlBaseParser.SCHEMAS, SqlBaseParser.SECOND, SqlBaseParser.SECURITY, SqlBaseParser.SEEK, SqlBaseParser.SERIALIZABLE, SqlBaseParser.SESSION, SqlBaseParser.SET, SqlBaseParser.SETS, SqlBaseParser.SHOW, SqlBaseParser.SOME, SqlBaseParser.START, SqlBaseParser.STATS, SqlBaseParser.SUBSET, SqlBaseParser.SUBSTRING, SqlBaseParser.SYSTEM, SqlBaseParser.TABLES, SqlBaseParser.TABLESAMPLE, SqlBaseParser.TEXT, SqlBaseParser.TIES, SqlBaseParser.TIME, SqlBaseParser.TIMESTAMP, SqlBaseParser.TO, SqlBaseParser.TRANSACTION, SqlBaseParser.TRUE, SqlBaseParser.TRUNCATE, SqlBaseParser.TRY_CAST, SqlBaseParser.TYPE, SqlBaseParser.UNBOUNDED, SqlBaseParser.UNCOMMITTED, SqlBaseParser.UNMATCHED, SqlBaseParser.UPDATE, SqlBaseParser.USE, SqlBaseParser.USER, SqlBaseParser.VALIDATE, SqlBaseParser.VERBOSE, SqlBaseParser.VERSION, SqlBaseParser.VIEW, 
            SqlBaseParser.WINDOW, SqlBaseParser.WITHIN, SqlBaseParser.WITHOUT, SqlBaseParser.WORK, SqlBaseParser.WRITE, SqlBaseParser.YEAR, SqlBaseParser.ZONE, SqlBaseParser.PLUS, SqlBaseParser.MINUS, SqlBaseParser.QUESTION_MARK, SqlBaseParser.STRING, SqlBaseParser.UNICODE_STRING, SqlBaseParser.BINARY_LITERAL, SqlBaseParser.INTEGER_VALUE, SqlBaseParser.DECIMAL_VALUE, SqlBaseParser.DOUBLE_VALUE, SqlBaseParser.IDENTIFIER, SqlBaseParser.DIGIT_IDENTIFIER, SqlBaseParser.QUOTED_IDENTIFIER, SqlBaseParser.BACKQUOTED_IDENTIFIER]:
        localctx = SqlBaseParser.PredicatedContext(self, localctx)
        self._ctx = localctx
        _prevctx = localctx
        self.state = 1569
        localctx._valueExpression = self.valueExpression(0)
        self.state = 1571
        self._errHandler.sync(self)
        # Decision 203: is the valueExpression followed by an optional predicate?
        la_ = self._interp.adaptivePredict(self._input,203,self._ctx)
        if la_ == 1:
            self.state = 1570
            self.predicate(localctx._valueExpression)
    elif token in [SqlBaseParser.NOT]:
        # LogicalNot: NOT booleanExpression (prefix, binds at precedence 3).
        localctx = SqlBaseParser.LogicalNotContext(self, localctx)
        self._ctx = localctx
        _prevctx = localctx
        self.state = 1573
        self.match(SqlBaseParser.NOT)
        self.state = 1574
        self.booleanExpression(3)
    else:
        raise NoViableAltException(self)
    self._ctx.stop = self._input.LT(-1)
    self.state = 1585
    self._errHandler.sync(self)
    # Decision 206: keep consuming trailing AND/OR operators while predicted.
    _alt = self._interp.adaptivePredict(self._input,206,self._ctx)
    while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
        if _alt==1:
            if self._parseListeners is not None:
                self.triggerExitRuleEvent()
            _prevctx = localctx
            self.state = 1583
            self._errHandler.sync(self)
            # Decision 205: alt 1 = AND, alt 2 = OR.
            la_ = self._interp.adaptivePredict(self._input,205,self._ctx)
            if la_ == 1:
                # booleanExpression AND booleanExpression (left-assoc, prec 2)
                localctx = SqlBaseParser.And_Context(self, SqlBaseParser.BooleanExpressionContext(self, _parentctx, _parentState))
                self.pushNewRecursionContext(localctx, _startState, self.RULE_booleanExpression)
                self.state = 1577
                if not self.precpred(self._ctx, 2):
                    from antlr4.error.Errors import (
                        FailedPredicateException,
                    )
                    raise FailedPredicateException(self, "self.precpred(self._ctx, 2)")
                self.state = 1578
                self.match(SqlBaseParser.AND)
                self.state = 1579
                self.booleanExpression(3)
            elif la_ == 2:
                # booleanExpression OR booleanExpression (left-assoc, prec 1)
                localctx = SqlBaseParser.Or_Context(self, SqlBaseParser.BooleanExpressionContext(self, _parentctx, _parentState))
                self.pushNewRecursionContext(localctx, _startState, self.RULE_booleanExpression)
                self.state = 1580
                if not self.precpred(self._ctx, 1):
                    from antlr4.error.Errors import (
                        FailedPredicateException,
                    )
                    raise FailedPredicateException(self, "self.precpred(self._ctx, 1)")
                self.state = 1581
                self.match(SqlBaseParser.OR)
                self.state = 1582
                self.booleanExpression(2)
        self.state = 1587
        self._errHandler.sync(self)
        _alt = self._interp.adaptivePredict(self._input,206,self._ctx)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.unrollRecursionContexts(_parentctx)
    return localctx
class PredicateContext(ParserRuleContext):
    """Generic context for the `predicate` rule.

    `value` holds the left-hand operand context handed in by the caller
    (see `booleanExpression`, which passes its parsed valueExpression).
    """

    __slots__ = 'parser'

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1, value:ParserRuleContext=None):
        super().__init__(parent, invokingState)
        self.parser = parser
        self.value = value  # left-hand-side context supplied by the caller

    def getRuleIndex(self):
        return SqlBaseParser.RULE_predicate

    def copyFrom(self, ctx:ParserRuleContext):
        super().copyFrom(ctx)
        self.value = ctx.value  # propagate the LHS when re-labeling the context
class ComparisonContext(PredicateContext):
    """Labeled alternative `comparison`: comparisonOperator right=valueExpression."""

    def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.PredicateContext
        super().__init__(parser)
        self.right = None # ValueExpressionContext
        self.copyFrom(ctx)

    def comparisonOperator(self):
        return self.getTypedRuleContext(SqlBaseParser.ComparisonOperatorContext, 0)

    def valueExpression(self):
        return self.getTypedRuleContext(SqlBaseParser.ValueExpressionContext, 0)

    def enterRule(self, listener:ParseTreeListener):
        # Invoke the hook only when the listener implements it.
        handler = getattr(listener, "enterComparison", None)
        if handler is not None:
            handler(self)

    def exitRule(self, listener:ParseTreeListener):
        handler = getattr(listener, "exitComparison", None)
        if handler is not None:
            handler(self)

    def accept(self, visitor:ParseTreeVisitor):
        handler = getattr(visitor, "visitComparison", None)
        if handler is not None:
            return handler(self)
        return visitor.visitChildren(self)
class LikeContext(PredicateContext):
    """Labeled alternative `like`: NOT? LIKE pattern (ESCAPE escape)?."""

    def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.PredicateContext
        super().__init__(parser)
        self.pattern = None # ValueExpressionContext
        self.escape = None # ValueExpressionContext
        self.copyFrom(ctx)

    def LIKE(self):
        return self.getToken(SqlBaseParser.LIKE, 0)

    def valueExpression(self, i:int=None):
        # i=None -> pattern and escape subtrees; otherwise the i-th one.
        if i is not None:
            return self.getTypedRuleContext(SqlBaseParser.ValueExpressionContext, i)
        return self.getTypedRuleContexts(SqlBaseParser.ValueExpressionContext)

    def NOT(self):
        return self.getToken(SqlBaseParser.NOT, 0)

    def ESCAPE(self):
        return self.getToken(SqlBaseParser.ESCAPE, 0)

    def enterRule(self, listener:ParseTreeListener):
        handler = getattr(listener, "enterLike", None)
        if handler is not None:
            handler(self)

    def exitRule(self, listener:ParseTreeListener):
        handler = getattr(listener, "exitLike", None)
        if handler is not None:
            handler(self)

    def accept(self, visitor:ParseTreeVisitor):
        handler = getattr(visitor, "visitLike", None)
        if handler is not None:
            return handler(self)
        return visitor.visitChildren(self)
class InSubqueryContext(PredicateContext):
    """Labeled alternative `inSubquery`: NOT? IN '(' query ')'."""

    def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.PredicateContext
        super().__init__(parser)
        self.copyFrom(ctx)

    def IN(self):
        return self.getToken(SqlBaseParser.IN, 0)

    def query(self):
        return self.getTypedRuleContext(SqlBaseParser.QueryContext, 0)

    def NOT(self):
        return self.getToken(SqlBaseParser.NOT, 0)

    def enterRule(self, listener:ParseTreeListener):
        handler = getattr(listener, "enterInSubquery", None)
        if handler is not None:
            handler(self)

    def exitRule(self, listener:ParseTreeListener):
        handler = getattr(listener, "exitInSubquery", None)
        if handler is not None:
            handler(self)

    def accept(self, visitor:ParseTreeVisitor):
        handler = getattr(visitor, "visitInSubquery", None)
        if handler is not None:
            return handler(self)
        return visitor.visitChildren(self)
class DistinctFromContext(PredicateContext):
    """Labeled alternative `distinctFrom`: IS NOT? DISTINCT FROM right=valueExpression."""

    def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.PredicateContext
        super().__init__(parser)
        self.right = None # ValueExpressionContext
        self.copyFrom(ctx)

    def IS(self):
        return self.getToken(SqlBaseParser.IS, 0)

    def DISTINCT(self):
        return self.getToken(SqlBaseParser.DISTINCT, 0)

    def FROM(self):
        return self.getToken(SqlBaseParser.FROM, 0)

    def valueExpression(self):
        return self.getTypedRuleContext(SqlBaseParser.ValueExpressionContext, 0)

    def NOT(self):
        return self.getToken(SqlBaseParser.NOT, 0)

    def enterRule(self, listener:ParseTreeListener):
        handler = getattr(listener, "enterDistinctFrom", None)
        if handler is not None:
            handler(self)

    def exitRule(self, listener:ParseTreeListener):
        handler = getattr(listener, "exitDistinctFrom", None)
        if handler is not None:
            handler(self)

    def accept(self, visitor:ParseTreeVisitor):
        handler = getattr(visitor, "visitDistinctFrom", None)
        if handler is not None:
            return handler(self)
        return visitor.visitChildren(self)
class InListContext(PredicateContext):
    """Labeled alternative `inList`: NOT? IN '(' expression (',' expression)* ')'."""

    def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.PredicateContext
        super().__init__(parser)
        self.copyFrom(ctx)

    def IN(self):
        return self.getToken(SqlBaseParser.IN, 0)

    def expression(self, i:int=None):
        # i=None -> every list element; otherwise the i-th one.
        if i is not None:
            return self.getTypedRuleContext(SqlBaseParser.ExpressionContext, i)
        return self.getTypedRuleContexts(SqlBaseParser.ExpressionContext)

    def NOT(self):
        return self.getToken(SqlBaseParser.NOT, 0)

    def enterRule(self, listener:ParseTreeListener):
        handler = getattr(listener, "enterInList", None)
        if handler is not None:
            handler(self)

    def exitRule(self, listener:ParseTreeListener):
        handler = getattr(listener, "exitInList", None)
        if handler is not None:
            handler(self)

    def accept(self, visitor:ParseTreeVisitor):
        handler = getattr(visitor, "visitInList", None)
        if handler is not None:
            return handler(self)
        return visitor.visitChildren(self)
class NullPredicateContext(PredicateContext):
    """Labeled alternative `nullPredicate`: IS NOT? NULL."""

    def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.PredicateContext
        super().__init__(parser)
        self.copyFrom(ctx)

    def IS(self):
        return self.getToken(SqlBaseParser.IS, 0)

    def NULL(self):
        return self.getToken(SqlBaseParser.NULL, 0)

    def NOT(self):
        return self.getToken(SqlBaseParser.NOT, 0)

    def enterRule(self, listener:ParseTreeListener):
        handler = getattr(listener, "enterNullPredicate", None)
        if handler is not None:
            handler(self)

    def exitRule(self, listener:ParseTreeListener):
        handler = getattr(listener, "exitNullPredicate", None)
        if handler is not None:
            handler(self)

    def accept(self, visitor:ParseTreeVisitor):
        handler = getattr(visitor, "visitNullPredicate", None)
        if handler is not None:
            return handler(self)
        return visitor.visitChildren(self)
class BetweenContext(PredicateContext):
    """Labeled alternative `between`: NOT? BETWEEN lower AND upper."""

    def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.PredicateContext
        super().__init__(parser)
        self.lower = None # ValueExpressionContext
        self.upper = None # ValueExpressionContext
        self.copyFrom(ctx)

    def BETWEEN(self):
        return self.getToken(SqlBaseParser.BETWEEN, 0)

    def AND(self):
        return self.getToken(SqlBaseParser.AND, 0)

    def valueExpression(self, i:int=None):
        # i=None -> both bound subtrees; otherwise the i-th one.
        if i is not None:
            return self.getTypedRuleContext(SqlBaseParser.ValueExpressionContext, i)
        return self.getTypedRuleContexts(SqlBaseParser.ValueExpressionContext)

    def NOT(self):
        return self.getToken(SqlBaseParser.NOT, 0)

    def enterRule(self, listener:ParseTreeListener):
        handler = getattr(listener, "enterBetween", None)
        if handler is not None:
            handler(self)

    def exitRule(self, listener:ParseTreeListener):
        handler = getattr(listener, "exitBetween", None)
        if handler is not None:
            handler(self)

    def accept(self, visitor:ParseTreeVisitor):
        handler = getattr(visitor, "visitBetween", None)
        if handler is not None:
            return handler(self)
        return visitor.visitChildren(self)
class QuantifiedComparisonContext(PredicateContext):
    """Labeled alternative `quantifiedComparison`: comparisonOperator comparisonQuantifier '(' query ')'."""

    def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.PredicateContext
        super().__init__(parser)
        self.copyFrom(ctx)

    def comparisonOperator(self):
        return self.getTypedRuleContext(SqlBaseParser.ComparisonOperatorContext, 0)

    def comparisonQuantifier(self):
        return self.getTypedRuleContext(SqlBaseParser.ComparisonQuantifierContext, 0)

    def query(self):
        return self.getTypedRuleContext(SqlBaseParser.QueryContext, 0)

    def enterRule(self, listener:ParseTreeListener):
        handler = getattr(listener, "enterQuantifiedComparison", None)
        if handler is not None:
            handler(self)

    def exitRule(self, listener:ParseTreeListener):
        handler = getattr(listener, "exitQuantifiedComparison", None)
        if handler is not None:
            handler(self)

    def accept(self, visitor:ParseTreeVisitor):
        handler = getattr(visitor, "visitQuantifiedComparison", None)
        if handler is not None:
            return handler(self)
        return visitor.visitChildren(self)
def predicate(self, value:ParserRuleContext):
    """Parse the `predicate` rule applied to an already-parsed LHS `value`.

    Decision 215 chooses among: 1 comparison, 2 quantified comparison,
    3 [NOT] BETWEEN, 4 [NOT] IN (list), 5 [NOT] IN (subquery), 6 [NOT] LIKE,
    7 IS [NOT] NULL, 8 IS [NOT] DISTINCT FROM.
    NOTE: generated by ANTLR — state numbers and decision indices are tied to
    the serialized ATN; do not hand-edit the logic.
    """
    localctx = SqlBaseParser.PredicateContext(self, self._ctx, self.state, value)
    self.enterRule(localctx, 96, self.RULE_predicate)
    self._la = 0 # Token type
    try:
        self.state = 1649
        self._errHandler.sync(self)
        la_ = self._interp.adaptivePredict(self._input,215,self._ctx)
        if la_ == 1:
            # comparisonOperator right=valueExpression
            localctx = SqlBaseParser.ComparisonContext(self, localctx)
            self.enterOuterAlt(localctx, 1)
            self.state = 1588
            self.comparisonOperator()
            self.state = 1589
            localctx.right = self.valueExpression(0)
        elif la_ == 2:
            # comparisonOperator comparisonQuantifier '(' query ')'
            localctx = SqlBaseParser.QuantifiedComparisonContext(self, localctx)
            self.enterOuterAlt(localctx, 2)
            self.state = 1591
            self.comparisonOperator()
            self.state = 1592
            self.comparisonQuantifier()
            self.state = 1593
            self.match(SqlBaseParser.T__1)
            self.state = 1594
            self.query()
            self.state = 1595
            self.match(SqlBaseParser.T__2)
        elif la_ == 3:
            # NOT? BETWEEN lower=valueExpression AND upper=valueExpression
            localctx = SqlBaseParser.BetweenContext(self, localctx)
            self.enterOuterAlt(localctx, 3)
            self.state = 1598
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            if _la==SqlBaseParser.NOT:
                self.state = 1597
                self.match(SqlBaseParser.NOT)
            self.state = 1600
            self.match(SqlBaseParser.BETWEEN)
            self.state = 1601
            localctx.lower = self.valueExpression(0)
            self.state = 1602
            self.match(SqlBaseParser.AND)
            self.state = 1603
            localctx.upper = self.valueExpression(0)
        elif la_ == 4:
            # NOT? IN '(' expression (',' expression)* ')'
            localctx = SqlBaseParser.InListContext(self, localctx)
            self.enterOuterAlt(localctx, 4)
            self.state = 1606
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            if _la==SqlBaseParser.NOT:
                self.state = 1605
                self.match(SqlBaseParser.NOT)
            self.state = 1608
            self.match(SqlBaseParser.IN)
            self.state = 1609
            self.match(SqlBaseParser.T__1)
            self.state = 1610
            self.expression()
            self.state = 1615
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            # Consume the remaining comma-separated list elements.
            while _la==SqlBaseParser.T__3:
                self.state = 1611
                self.match(SqlBaseParser.T__3)
                self.state = 1612
                self.expression()
                self.state = 1617
                self._errHandler.sync(self)
                _la = self._input.LA(1)
            self.state = 1618
            self.match(SqlBaseParser.T__2)
        elif la_ == 5:
            # NOT? IN '(' query ')'
            localctx = SqlBaseParser.InSubqueryContext(self, localctx)
            self.enterOuterAlt(localctx, 5)
            self.state = 1621
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            if _la==SqlBaseParser.NOT:
                self.state = 1620
                self.match(SqlBaseParser.NOT)
            self.state = 1623
            self.match(SqlBaseParser.IN)
            self.state = 1624
            self.match(SqlBaseParser.T__1)
            self.state = 1625
            self.query()
            self.state = 1626
            self.match(SqlBaseParser.T__2)
        elif la_ == 6:
            # NOT? LIKE pattern=valueExpression (ESCAPE escape=valueExpression)?
            localctx = SqlBaseParser.LikeContext(self, localctx)
            self.enterOuterAlt(localctx, 6)
            self.state = 1629
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            if _la==SqlBaseParser.NOT:
                self.state = 1628
                self.match(SqlBaseParser.NOT)
            self.state = 1631
            self.match(SqlBaseParser.LIKE)
            self.state = 1632
            localctx.pattern = self.valueExpression(0)
            self.state = 1635
            self._errHandler.sync(self)
            # Decision 212: optional ESCAPE clause.
            la_ = self._interp.adaptivePredict(self._input,212,self._ctx)
            if la_ == 1:
                self.state = 1633
                self.match(SqlBaseParser.ESCAPE)
                self.state = 1634
                localctx.escape = self.valueExpression(0)
        elif la_ == 7:
            # IS NOT? NULL
            localctx = SqlBaseParser.NullPredicateContext(self, localctx)
            self.enterOuterAlt(localctx, 7)
            self.state = 1637
            self.match(SqlBaseParser.IS)
            self.state = 1639
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            if _la==SqlBaseParser.NOT:
                self.state = 1638
                self.match(SqlBaseParser.NOT)
            self.state = 1641
            self.match(SqlBaseParser.NULL)
        elif la_ == 8:
            # IS NOT? DISTINCT FROM right=valueExpression
            localctx = SqlBaseParser.DistinctFromContext(self, localctx)
            self.enterOuterAlt(localctx, 8)
            self.state = 1642
            self.match(SqlBaseParser.IS)
            self.state = 1644
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            if _la==SqlBaseParser.NOT:
                self.state = 1643
                self.match(SqlBaseParser.NOT)
            self.state = 1646
            self.match(SqlBaseParser.DISTINCT)
            self.state = 1647
            self.match(SqlBaseParser.FROM)
            self.state = 1648
            localctx.right = self.valueExpression(0)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class ValueExpressionContext(ParserRuleContext):
    """Base context for the `valueExpression` rule; concrete parses are
    re-labeled to one of the subclasses below (Default, Concatenation,
    ArithmeticBinary, ArithmeticUnary, AtTimeZone)."""
    __slots__ = 'parser'
    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser
    def getRuleIndex(self):
        return SqlBaseParser.RULE_valueExpression
    def copyFrom(self, ctx:ParserRuleContext):
        super().copyFrom(ctx)
class ValueExpressionDefaultContext(ValueExpressionContext):
    """Labeled alternative `valueExpressionDefault`: a bare primaryExpression."""

    def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.ValueExpressionContext
        super().__init__(parser)
        self.copyFrom(ctx)

    def primaryExpression(self):
        return self.getTypedRuleContext(SqlBaseParser.PrimaryExpressionContext, 0)

    def enterRule(self, listener:ParseTreeListener):
        handler = getattr(listener, "enterValueExpressionDefault", None)
        if handler is not None:
            handler(self)

    def exitRule(self, listener:ParseTreeListener):
        handler = getattr(listener, "exitValueExpressionDefault", None)
        if handler is not None:
            handler(self)

    def accept(self, visitor:ParseTreeVisitor):
        handler = getattr(visitor, "visitValueExpressionDefault", None)
        if handler is not None:
            return handler(self)
        return visitor.visitChildren(self)
class ConcatenationContext(ValueExpressionContext):
    """Labeled alternative `concatenation`: left=valueExpression CONCAT right=valueExpression."""

    def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.ValueExpressionContext
        super().__init__(parser)
        self.left = None # ValueExpressionContext
        self.right = None # ValueExpressionContext
        self.copyFrom(ctx)

    def CONCAT(self):
        return self.getToken(SqlBaseParser.CONCAT, 0)

    def valueExpression(self, i:int=None):
        # i=None -> both operand subtrees; otherwise the i-th one.
        if i is not None:
            return self.getTypedRuleContext(SqlBaseParser.ValueExpressionContext, i)
        return self.getTypedRuleContexts(SqlBaseParser.ValueExpressionContext)

    def enterRule(self, listener:ParseTreeListener):
        handler = getattr(listener, "enterConcatenation", None)
        if handler is not None:
            handler(self)

    def exitRule(self, listener:ParseTreeListener):
        handler = getattr(listener, "exitConcatenation", None)
        if handler is not None:
            handler(self)

    def accept(self, visitor:ParseTreeVisitor):
        handler = getattr(visitor, "visitConcatenation", None)
        if handler is not None:
            return handler(self)
        return visitor.visitChildren(self)
class ArithmeticBinaryContext(ValueExpressionContext):
    """Labeled alternative `arithmeticBinary`: left (* / % + -) right."""

    def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.ValueExpressionContext
        super().__init__(parser)
        self.left = None # ValueExpressionContext
        self.operator = None # Token
        self.right = None # ValueExpressionContext
        self.copyFrom(ctx)

    def valueExpression(self, i:int=None):
        # i=None -> both operand subtrees; otherwise the i-th one.
        if i is not None:
            return self.getTypedRuleContext(SqlBaseParser.ValueExpressionContext, i)
        return self.getTypedRuleContexts(SqlBaseParser.ValueExpressionContext)

    def ASTERISK(self):
        return self.getToken(SqlBaseParser.ASTERISK, 0)

    def SLASH(self):
        return self.getToken(SqlBaseParser.SLASH, 0)

    def PERCENT(self):
        return self.getToken(SqlBaseParser.PERCENT, 0)

    def PLUS(self):
        return self.getToken(SqlBaseParser.PLUS, 0)

    def MINUS(self):
        return self.getToken(SqlBaseParser.MINUS, 0)

    def enterRule(self, listener:ParseTreeListener):
        handler = getattr(listener, "enterArithmeticBinary", None)
        if handler is not None:
            handler(self)

    def exitRule(self, listener:ParseTreeListener):
        handler = getattr(listener, "exitArithmeticBinary", None)
        if handler is not None:
            handler(self)

    def accept(self, visitor:ParseTreeVisitor):
        handler = getattr(visitor, "visitArithmeticBinary", None)
        if handler is not None:
            return handler(self)
        return visitor.visitChildren(self)
class ArithmeticUnaryContext(ValueExpressionContext):
    """Labeled alternative `arithmeticUnary`: (+ | -) valueExpression."""

    def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.ValueExpressionContext
        super().__init__(parser)
        self.operator = None # Token
        self.copyFrom(ctx)

    def valueExpression(self):
        return self.getTypedRuleContext(SqlBaseParser.ValueExpressionContext, 0)

    def MINUS(self):
        return self.getToken(SqlBaseParser.MINUS, 0)

    def PLUS(self):
        return self.getToken(SqlBaseParser.PLUS, 0)

    def enterRule(self, listener:ParseTreeListener):
        handler = getattr(listener, "enterArithmeticUnary", None)
        if handler is not None:
            handler(self)

    def exitRule(self, listener:ParseTreeListener):
        handler = getattr(listener, "exitArithmeticUnary", None)
        if handler is not None:
            handler(self)

    def accept(self, visitor:ParseTreeVisitor):
        handler = getattr(visitor, "visitArithmeticUnary", None)
        if handler is not None:
            return handler(self)
        return visitor.visitChildren(self)
class AtTimeZoneContext(ValueExpressionContext):
    """Labeled alternative `atTimeZone`: valueExpression AT timeZoneSpecifier."""

    def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.ValueExpressionContext
        super().__init__(parser)
        self.copyFrom(ctx)

    def valueExpression(self):
        return self.getTypedRuleContext(SqlBaseParser.ValueExpressionContext, 0)

    def AT(self):
        return self.getToken(SqlBaseParser.AT, 0)

    def timeZoneSpecifier(self):
        return self.getTypedRuleContext(SqlBaseParser.TimeZoneSpecifierContext, 0)

    def enterRule(self, listener:ParseTreeListener):
        handler = getattr(listener, "enterAtTimeZone", None)
        if handler is not None:
            handler(self)

    def exitRule(self, listener:ParseTreeListener):
        handler = getattr(listener, "exitAtTimeZone", None)
        if handler is not None:
            handler(self)

    def accept(self, visitor:ParseTreeVisitor):
        handler = getattr(visitor, "visitAtTimeZone", None)
        if handler is not None:
            return handler(self)
        return visitor.visitChildren(self)
def valueExpression(self, _p:int=0):
    """Parse the left-recursive `valueExpression` rule (ANTLR precedence climbing).

    Primary alternatives (decision 216): 1 primaryExpression, 2 unary +/-.
    Left-recursive suffixes (decision 217): 1 `* / %` (prec 3), 2 `+ -`
    (prec 2), 3 `||` concatenation (prec 1), 4 `AT timeZoneSpecifier`
    (prec 5).  `_p` is the minimum precedence the caller accepts.
    NOTE: generated by ANTLR — state numbers and decision indices are tied to
    the serialized ATN; do not hand-edit the logic.
    """
    _parentctx = self._ctx
    _parentState = self.state
    localctx = SqlBaseParser.ValueExpressionContext(self, self._ctx, _parentState)
    _prevctx = localctx
    _startState = 98
    self.enterRecursionRule(localctx, 98, self.RULE_valueExpression, _p)
    self._la = 0 # Token type
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 1655
        self._errHandler.sync(self)
        la_ = self._interp.adaptivePredict(self._input,216,self._ctx)
        if la_ == 1:
            # primaryExpression
            localctx = SqlBaseParser.ValueExpressionDefaultContext(self, localctx)
            self._ctx = localctx
            _prevctx = localctx
            self.state = 1652
            self.primaryExpression(0)
        elif la_ == 2:
            # unary (+ | -) valueExpression
            localctx = SqlBaseParser.ArithmeticUnaryContext(self, localctx)
            self._ctx = localctx
            _prevctx = localctx
            self.state = 1653
            localctx.operator = self._input.LT(1)
            _la = self._input.LA(1)
            if not(_la==SqlBaseParser.PLUS or _la==SqlBaseParser.MINUS):
                localctx.operator = self._errHandler.recoverInline(self)
            else:
                self._errHandler.reportMatch(self)
                self.consume()
            self.state = 1654
            self.valueExpression(4)
        self._ctx.stop = self._input.LT(-1)
        self.state = 1671
        self._errHandler.sync(self)
        # Decision 218: keep consuming left-recursive operator suffixes.
        _alt = self._interp.adaptivePredict(self._input,218,self._ctx)
        while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
            if _alt==1:
                if self._parseListeners is not None:
                    self.triggerExitRuleEvent()
                _prevctx = localctx
                self.state = 1669
                self._errHandler.sync(self)
                la_ = self._interp.adaptivePredict(self._input,217,self._ctx)
                if la_ == 1:
                    # left (* | / | %) right — multiplicative, precedence 3
                    localctx = SqlBaseParser.ArithmeticBinaryContext(self, SqlBaseParser.ValueExpressionContext(self, _parentctx, _parentState))
                    localctx.left = _prevctx
                    self.pushNewRecursionContext(localctx, _startState, self.RULE_valueExpression)
                    self.state = 1657
                    if not self.precpred(self._ctx, 3):
                        from antlr4.error.Errors import (
                            FailedPredicateException,
                        )
                        raise FailedPredicateException(self, "self.precpred(self._ctx, 3)")
                    self.state = 1658
                    localctx.operator = self._input.LT(1)
                    _la = self._input.LA(1)
                    # Bitset test over tokens offset by 263: ASTERISK, SLASH, PERCENT.
                    if not(((((_la - 263)) & ~0x3f) == 0 and ((1 << (_la - 263)) & ((1 << (SqlBaseParser.ASTERISK - 263)) | (1 << (SqlBaseParser.SLASH - 263)) | (1 << (SqlBaseParser.PERCENT - 263)))) != 0)):
                        localctx.operator = self._errHandler.recoverInline(self)
                    else:
                        self._errHandler.reportMatch(self)
                        self.consume()
                    self.state = 1659
                    localctx.right = self.valueExpression(4)
                elif la_ == 2:
                    # left (+ | -) right — additive, precedence 2
                    localctx = SqlBaseParser.ArithmeticBinaryContext(self, SqlBaseParser.ValueExpressionContext(self, _parentctx, _parentState))
                    localctx.left = _prevctx
                    self.pushNewRecursionContext(localctx, _startState, self.RULE_valueExpression)
                    self.state = 1660
                    if not self.precpred(self._ctx, 2):
                        from antlr4.error.Errors import (
                            FailedPredicateException,
                        )
                        raise FailedPredicateException(self, "self.precpred(self._ctx, 2)")
                    self.state = 1661
                    localctx.operator = self._input.LT(1)
                    _la = self._input.LA(1)
                    if not(_la==SqlBaseParser.PLUS or _la==SqlBaseParser.MINUS):
                        localctx.operator = self._errHandler.recoverInline(self)
                    else:
                        self._errHandler.reportMatch(self)
                        self.consume()
                    self.state = 1662
                    localctx.right = self.valueExpression(3)
                elif la_ == 3:
                    # left || right — concatenation, precedence 1
                    localctx = SqlBaseParser.ConcatenationContext(self, SqlBaseParser.ValueExpressionContext(self, _parentctx, _parentState))
                    localctx.left = _prevctx
                    self.pushNewRecursionContext(localctx, _startState, self.RULE_valueExpression)
                    self.state = 1663
                    if not self.precpred(self._ctx, 1):
                        from antlr4.error.Errors import (
                            FailedPredicateException,
                        )
                        raise FailedPredicateException(self, "self.precpred(self._ctx, 1)")
                    self.state = 1664
                    self.match(SqlBaseParser.CONCAT)
                    self.state = 1665
                    localctx.right = self.valueExpression(2)
                elif la_ == 4:
                    # valueExpression AT timeZoneSpecifier — precedence 5
                    localctx = SqlBaseParser.AtTimeZoneContext(self, SqlBaseParser.ValueExpressionContext(self, _parentctx, _parentState))
                    self.pushNewRecursionContext(localctx, _startState, self.RULE_valueExpression)
                    self.state = 1666
                    if not self.precpred(self._ctx, 5):
                        from antlr4.error.Errors import (
                            FailedPredicateException,
                        )
                        raise FailedPredicateException(self, "self.precpred(self._ctx, 5)")
                    self.state = 1667
                    self.match(SqlBaseParser.AT)
                    self.state = 1668
                    self.timeZoneSpecifier()
            self.state = 1673
            self._errHandler.sync(self)
            _alt = self._interp.adaptivePredict(self._input,218,self._ctx)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.unrollRecursionContexts(_parentctx)
    return localctx
class PrimaryExpressionContext(ParserRuleContext):
    """Base context for the `primaryExpression` rule; concrete parses are
    re-labeled to one of the subclasses below (Dereference, TypeConstructor,
    SpecialDateTimeFunction, Substring, Cast, ParenthesizedExpression, ...)."""
    __slots__ = 'parser'
    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser
    def getRuleIndex(self):
        return SqlBaseParser.RULE_primaryExpression
    def copyFrom(self, ctx:ParserRuleContext):
        super().copyFrom(ctx)
class DereferenceContext(PrimaryExpressionContext):
    """Labeled alternative `dereference`: base=primaryExpression '.' fieldName=identifier."""

    def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.PrimaryExpressionContext
        super().__init__(parser)
        self.base = None # PrimaryExpressionContext
        self.fieldName = None # IdentifierContext
        self.copyFrom(ctx)

    def primaryExpression(self):
        return self.getTypedRuleContext(SqlBaseParser.PrimaryExpressionContext, 0)

    def identifier(self):
        return self.getTypedRuleContext(SqlBaseParser.IdentifierContext, 0)

    def enterRule(self, listener:ParseTreeListener):
        handler = getattr(listener, "enterDereference", None)
        if handler is not None:
            handler(self)

    def exitRule(self, listener:ParseTreeListener):
        handler = getattr(listener, "exitDereference", None)
        if handler is not None:
            handler(self)

    def accept(self, visitor:ParseTreeVisitor):
        handler = getattr(visitor, "visitDereference", None)
        if handler is not None:
            return handler(self)
        return visitor.visitChildren(self)
class TypeConstructorContext(PrimaryExpressionContext):
    """Labeled alternative `typeConstructor`: identifier string | DOUBLE PRECISION string."""

    def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.PrimaryExpressionContext
        super().__init__(parser)
        self.copyFrom(ctx)

    def identifier(self):
        return self.getTypedRuleContext(SqlBaseParser.IdentifierContext, 0)

    def string(self):
        return self.getTypedRuleContext(SqlBaseParser.StringContext, 0)

    def DOUBLE(self):
        return self.getToken(SqlBaseParser.DOUBLE, 0)

    def PRECISION(self):
        return self.getToken(SqlBaseParser.PRECISION, 0)

    def enterRule(self, listener:ParseTreeListener):
        handler = getattr(listener, "enterTypeConstructor", None)
        if handler is not None:
            handler(self)

    def exitRule(self, listener:ParseTreeListener):
        handler = getattr(listener, "exitTypeConstructor", None)
        if handler is not None:
            handler(self)

    def accept(self, visitor:ParseTreeVisitor):
        handler = getattr(visitor, "visitTypeConstructor", None)
        if handler is not None:
            return handler(self)
        return visitor.visitChildren(self)
class SpecialDateTimeFunctionContext(PrimaryExpressionContext):
    """Labeled alternative `specialDateTimeFunction`: CURRENT_DATE /
    CURRENT_TIME / CURRENT_TIMESTAMP / LOCALTIME / LOCALTIMESTAMP, with an
    optional integer precision argument."""

    def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.PrimaryExpressionContext
        super().__init__(parser)
        self.name = None # Token
        self.precision = None # Token
        self.copyFrom(ctx)

    def CURRENT_DATE(self):
        return self.getToken(SqlBaseParser.CURRENT_DATE, 0)

    def CURRENT_TIME(self):
        return self.getToken(SqlBaseParser.CURRENT_TIME, 0)

    def INTEGER_VALUE(self):
        return self.getToken(SqlBaseParser.INTEGER_VALUE, 0)

    def CURRENT_TIMESTAMP(self):
        return self.getToken(SqlBaseParser.CURRENT_TIMESTAMP, 0)

    def LOCALTIME(self):
        return self.getToken(SqlBaseParser.LOCALTIME, 0)

    def LOCALTIMESTAMP(self):
        return self.getToken(SqlBaseParser.LOCALTIMESTAMP, 0)

    def enterRule(self, listener:ParseTreeListener):
        handler = getattr(listener, "enterSpecialDateTimeFunction", None)
        if handler is not None:
            handler(self)

    def exitRule(self, listener:ParseTreeListener):
        handler = getattr(listener, "exitSpecialDateTimeFunction", None)
        if handler is not None:
            handler(self)

    def accept(self, visitor:ParseTreeVisitor):
        handler = getattr(visitor, "visitSpecialDateTimeFunction", None)
        if handler is not None:
            return handler(self)
        return visitor.visitChildren(self)
class SubstringContext(PrimaryExpressionContext):
    """Labeled alternative `substring`: SUBSTRING '(' value FROM start (FOR length)? ')'."""

    def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.PrimaryExpressionContext
        super().__init__(parser)
        self.copyFrom(ctx)

    def SUBSTRING(self):
        return self.getToken(SqlBaseParser.SUBSTRING, 0)

    def valueExpression(self, i:int=None):
        # i=None -> every operand subtree; otherwise the i-th one.
        if i is not None:
            return self.getTypedRuleContext(SqlBaseParser.ValueExpressionContext, i)
        return self.getTypedRuleContexts(SqlBaseParser.ValueExpressionContext)

    def FROM(self):
        return self.getToken(SqlBaseParser.FROM, 0)

    def FOR(self):
        return self.getToken(SqlBaseParser.FOR, 0)

    def enterRule(self, listener:ParseTreeListener):
        handler = getattr(listener, "enterSubstring", None)
        if handler is not None:
            handler(self)

    def exitRule(self, listener:ParseTreeListener):
        handler = getattr(listener, "exitSubstring", None)
        if handler is not None:
            handler(self)

    def accept(self, visitor:ParseTreeVisitor):
        handler = getattr(visitor, "visitSubstring", None)
        if handler is not None:
            return handler(self)
        return visitor.visitChildren(self)
    class CastContext(PrimaryExpressionContext):
        """Parse-tree node for CAST(expression AS type) / TRY_CAST(expression AS type).
        Exactly one of CAST()/TRY_CAST() returns a token for a given parse.
        (ANTLR-generated labeled-alternative context; do not edit by hand.)"""

        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.PrimaryExpressionContext
            super().__init__(parser)
            self.copyFrom(ctx)
        def CAST(self):
            return self.getToken(SqlBaseParser.CAST, 0)
        def expression(self):
            # The value being cast.
            return self.getTypedRuleContext(SqlBaseParser.ExpressionContext,0)
        def AS(self):
            return self.getToken(SqlBaseParser.AS, 0)
        def type_(self):
            # The target type of the cast.
            return self.getTypedRuleContext(SqlBaseParser.Type_Context,0)
        def TRY_CAST(self):
            return self.getToken(SqlBaseParser.TRY_CAST, 0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterCast" ):
                listener.enterCast(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitCast" ):
                listener.exitCast(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitCast" ):
                return visitor.visitCast(self)
            else:
                return visitor.visitChildren(self)
    class ParenthesizedExpressionContext(PrimaryExpressionContext):
        """Parse-tree node for a parenthesized expression: '(' expression ')'.
        (ANTLR-generated labeled-alternative context; do not edit by hand.)"""

        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.PrimaryExpressionContext
            super().__init__(parser)
            self.copyFrom(ctx)
        def expression(self):
            # The single wrapped expression.
            return self.getTypedRuleContext(SqlBaseParser.ExpressionContext,0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterParenthesizedExpression" ):
                listener.enterParenthesizedExpression(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitParenthesizedExpression" ):
                listener.exitParenthesizedExpression(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitParenthesizedExpression" ):
                return visitor.visitParenthesizedExpression(self)
            else:
                return visitor.visitChildren(self)
    class ParameterContext(PrimaryExpressionContext):
        """Parse-tree node for a positional bind parameter: the '?' placeholder.
        (ANTLR-generated labeled-alternative context; do not edit by hand.)"""

        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.PrimaryExpressionContext
            super().__init__(parser)
            self.copyFrom(ctx)
        def QUESTION_MARK(self):
            return self.getToken(SqlBaseParser.QUESTION_MARK, 0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterParameter" ):
                listener.enterParameter(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitParameter" ):
                listener.exitParameter(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitParameter" ):
                return visitor.visitParameter(self)
            else:
                return visitor.visitChildren(self)
    class NormalizeContext(PrimaryExpressionContext):
        """Parse-tree node for NORMALIZE(valueExpression [, normalForm]).
        normalForm() returns None when the optional form argument was omitted.
        (ANTLR-generated labeled-alternative context; do not edit by hand.)"""

        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.PrimaryExpressionContext
            super().__init__(parser)
            self.copyFrom(ctx)
        def NORMALIZE(self):
            return self.getToken(SqlBaseParser.NORMALIZE, 0)
        def valueExpression(self):
            # The string value to normalize.
            return self.getTypedRuleContext(SqlBaseParser.ValueExpressionContext,0)
        def normalForm(self):
            # Optional normalization form (e.g. NFC/NFD per the normalForm rule).
            return self.getTypedRuleContext(SqlBaseParser.NormalFormContext,0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterNormalize" ):
                listener.enterNormalize(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitNormalize" ):
                listener.exitNormalize(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitNormalize" ):
                return visitor.visitNormalize(self)
            else:
                return visitor.visitChildren(self)
    class IntervalLiteralContext(PrimaryExpressionContext):
        """Parse-tree node for an INTERVAL literal; details live in the nested
        interval() sub-rule context.
        (ANTLR-generated labeled-alternative context; do not edit by hand.)"""

        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.PrimaryExpressionContext
            super().__init__(parser)
            self.copyFrom(ctx)
        def interval(self):
            return self.getTypedRuleContext(SqlBaseParser.IntervalContext,0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterIntervalLiteral" ):
                listener.enterIntervalLiteral(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitIntervalLiteral" ):
                listener.exitIntervalLiteral(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitIntervalLiteral" ):
                return visitor.visitIntervalLiteral(self)
            else:
                return visitor.visitChildren(self)
    class NumericLiteralContext(PrimaryExpressionContext):
        """Parse-tree node for a numeric literal; the concrete value is in the
        nested number() sub-rule context.
        (ANTLR-generated labeled-alternative context; do not edit by hand.)"""

        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.PrimaryExpressionContext
            super().__init__(parser)
            self.copyFrom(ctx)
        def number(self):
            return self.getTypedRuleContext(SqlBaseParser.NumberContext,0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterNumericLiteral" ):
                listener.enterNumericLiteral(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitNumericLiteral" ):
                listener.exitNumericLiteral(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitNumericLiteral" ):
                return visitor.visitNumericLiteral(self)
            else:
                return visitor.visitChildren(self)
    class BooleanLiteralContext(PrimaryExpressionContext):
        """Parse-tree node for a boolean literal; the value is in the nested
        booleanValue() sub-rule context.
        (ANTLR-generated labeled-alternative context; do not edit by hand.)"""

        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.PrimaryExpressionContext
            super().__init__(parser)
            self.copyFrom(ctx)
        def booleanValue(self):
            return self.getTypedRuleContext(SqlBaseParser.BooleanValueContext,0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterBooleanLiteral" ):
                listener.enterBooleanLiteral(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitBooleanLiteral" ):
                listener.exitBooleanLiteral(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitBooleanLiteral" ):
                return visitor.visitBooleanLiteral(self)
            else:
                return visitor.visitChildren(self)
    class SimpleCaseContext(PrimaryExpressionContext):
        """Parse-tree node for a simple CASE expression:
        CASE operand WHEN ... THEN ... [ELSE elseExpression] END.
        (ANTLR-generated labeled-alternative context; do not edit by hand.)"""

        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.PrimaryExpressionContext
            super().__init__(parser)
            self.operand = None # ExpressionContext: the expression tested against each WHEN
            self.elseExpression = None # ExpressionContext: optional ELSE result
            self.copyFrom(ctx)
        def CASE(self):
            return self.getToken(SqlBaseParser.CASE, 0)
        def END(self):
            return self.getToken(SqlBaseParser.END, 0)
        def expression(self, i:int=None):
            # i is None -> all expression children (operand and optional ELSE);
            # otherwise the i-th expression child.
            if i is None:
                return self.getTypedRuleContexts(SqlBaseParser.ExpressionContext)
            else:
                return self.getTypedRuleContext(SqlBaseParser.ExpressionContext,i)
        def whenClause(self, i:int=None):
            # One context per WHEN ... THEN ... arm, in source order.
            if i is None:
                return self.getTypedRuleContexts(SqlBaseParser.WhenClauseContext)
            else:
                return self.getTypedRuleContext(SqlBaseParser.WhenClauseContext,i)
        def ELSE(self):
            return self.getToken(SqlBaseParser.ELSE, 0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterSimpleCase" ):
                listener.enterSimpleCase(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitSimpleCase" ):
                listener.exitSimpleCase(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitSimpleCase" ):
                return visitor.visitSimpleCase(self)
            else:
                return visitor.visitChildren(self)
    class ColumnReferenceContext(PrimaryExpressionContext):
        """Parse-tree node for a bare identifier used as a column reference.
        (ANTLR-generated labeled-alternative context; do not edit by hand.)"""

        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.PrimaryExpressionContext
            super().__init__(parser)
            self.copyFrom(ctx)
        def identifier(self):
            return self.getTypedRuleContext(SqlBaseParser.IdentifierContext,0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterColumnReference" ):
                listener.enterColumnReference(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitColumnReference" ):
                listener.exitColumnReference(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitColumnReference" ):
                return visitor.visitColumnReference(self)
            else:
                return visitor.visitChildren(self)
    class NullLiteralContext(PrimaryExpressionContext):
        """Parse-tree node for the NULL literal.
        (ANTLR-generated labeled-alternative context; do not edit by hand.)"""

        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.PrimaryExpressionContext
            super().__init__(parser)
            self.copyFrom(ctx)
        def NULL(self):
            return self.getToken(SqlBaseParser.NULL, 0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterNullLiteral" ):
                listener.enterNullLiteral(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitNullLiteral" ):
                listener.exitNullLiteral(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitNullLiteral" ):
                return visitor.visitNullLiteral(self)
            else:
                return visitor.visitChildren(self)
    class RowConstructorContext(PrimaryExpressionContext):
        """Parse-tree node for a row constructor: ROW(expr, ...) or the implicit
        form (expr, expr, ...). ROW() returns None for the implicit form.
        (ANTLR-generated labeled-alternative context; do not edit by hand.)"""

        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.PrimaryExpressionContext
            super().__init__(parser)
            self.copyFrom(ctx)
        def expression(self, i:int=None):
            # i is None -> all field expressions in order; otherwise the i-th field.
            if i is None:
                return self.getTypedRuleContexts(SqlBaseParser.ExpressionContext)
            else:
                return self.getTypedRuleContext(SqlBaseParser.ExpressionContext,i)
        def ROW(self):
            return self.getToken(SqlBaseParser.ROW, 0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterRowConstructor" ):
                listener.enterRowConstructor(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitRowConstructor" ):
                listener.exitRowConstructor(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitRowConstructor" ):
                return visitor.visitRowConstructor(self)
            else:
                return visitor.visitChildren(self)
    class SubscriptContext(PrimaryExpressionContext):
        """Parse-tree node for a subscript access: value[index].
        (ANTLR-generated labeled-alternative context; do not edit by hand.)"""

        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.PrimaryExpressionContext
            super().__init__(parser)
            self.value = None # PrimaryExpressionContext: the expression being indexed
            self.index = None # ValueExpressionContext: the index expression
            self.copyFrom(ctx)
        def primaryExpression(self):
            return self.getTypedRuleContext(SqlBaseParser.PrimaryExpressionContext,0)
        def valueExpression(self):
            return self.getTypedRuleContext(SqlBaseParser.ValueExpressionContext,0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterSubscript" ):
                listener.enterSubscript(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitSubscript" ):
                listener.exitSubscript(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitSubscript" ):
                return visitor.visitSubscript(self)
            else:
                return visitor.visitChildren(self)
    class CurrentPathContext(PrimaryExpressionContext):
        """Parse-tree node for the CURRENT_PATH niladic function.
        (ANTLR-generated labeled-alternative context; do not edit by hand.)"""

        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.PrimaryExpressionContext
            super().__init__(parser)
            self.name = None # Token: the CURRENT_PATH keyword token
            self.copyFrom(ctx)
        def CURRENT_PATH(self):
            return self.getToken(SqlBaseParser.CURRENT_PATH, 0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterCurrentPath" ):
                listener.enterCurrentPath(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitCurrentPath" ):
                listener.exitCurrentPath(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitCurrentPath" ):
                return visitor.visitCurrentPath(self)
            else:
                return visitor.visitChildren(self)
    class SubqueryExpressionContext(PrimaryExpressionContext):
        """Parse-tree node for a scalar subquery expression wrapping a full query.
        (ANTLR-generated labeled-alternative context; do not edit by hand.)"""

        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.PrimaryExpressionContext
            super().__init__(parser)
            self.copyFrom(ctx)
        def query(self):
            return self.getTypedRuleContext(SqlBaseParser.QueryContext,0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterSubqueryExpression" ):
                listener.enterSubqueryExpression(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitSubqueryExpression" ):
                listener.exitSubqueryExpression(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitSubqueryExpression" ):
                return visitor.visitSubqueryExpression(self)
            else:
                return visitor.visitChildren(self)
    class Lambda_Context(PrimaryExpressionContext):
        """Parse-tree node for a lambda expression: zero or more parameter
        identifiers and a body expression. (Trailing underscore avoids clashing
        with the Python keyword; ANTLR-generated, do not edit by hand.)"""

        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.PrimaryExpressionContext
            super().__init__(parser)
            self.copyFrom(ctx)
        def identifier(self, i:int=None):
            # i is None -> all lambda parameter names; otherwise the i-th parameter.
            if i is None:
                return self.getTypedRuleContexts(SqlBaseParser.IdentifierContext)
            else:
                return self.getTypedRuleContext(SqlBaseParser.IdentifierContext,i)
        def expression(self):
            # The lambda body.
            return self.getTypedRuleContext(SqlBaseParser.ExpressionContext,0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterLambda_" ):
                listener.enterLambda_(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitLambda_" ):
                listener.exitLambda_(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitLambda_" ):
                return visitor.visitLambda_(self)
            else:
                return visitor.visitChildren(self)
    class BinaryLiteralContext(PrimaryExpressionContext):
        """Parse-tree node for a binary (varbinary) literal token.
        (ANTLR-generated labeled-alternative context; do not edit by hand.)"""

        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.PrimaryExpressionContext
            super().__init__(parser)
            self.copyFrom(ctx)
        def BINARY_LITERAL(self):
            return self.getToken(SqlBaseParser.BINARY_LITERAL, 0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterBinaryLiteral" ):
                listener.enterBinaryLiteral(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitBinaryLiteral" ):
                listener.exitBinaryLiteral(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitBinaryLiteral" ):
                return visitor.visitBinaryLiteral(self)
            else:
                return visitor.visitChildren(self)
    class CurrentUserContext(PrimaryExpressionContext):
        """Parse-tree node for the CURRENT_USER niladic function.
        (ANTLR-generated labeled-alternative context; do not edit by hand.)"""

        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.PrimaryExpressionContext
            super().__init__(parser)
            self.name = None # Token: the CURRENT_USER keyword token
            self.copyFrom(ctx)
        def CURRENT_USER(self):
            return self.getToken(SqlBaseParser.CURRENT_USER, 0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterCurrentUser" ):
                listener.enterCurrentUser(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitCurrentUser" ):
                listener.exitCurrentUser(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitCurrentUser" ):
                return visitor.visitCurrentUser(self)
            else:
                return visitor.visitChildren(self)
    class MeasureContext(PrimaryExpressionContext):
        """Parse-tree node for a measure reference: identifier combined with an
        OVER window specification (presumably for row-pattern measures — confirm
        against the grammar). (ANTLR-generated; do not edit by hand.)"""

        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.PrimaryExpressionContext
            super().__init__(parser)
            self.copyFrom(ctx)
        def identifier(self):
            return self.getTypedRuleContext(SqlBaseParser.IdentifierContext,0)
        def over(self):
            # The OVER (...) window clause context.
            return self.getTypedRuleContext(SqlBaseParser.OverContext,0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterMeasure" ):
                listener.enterMeasure(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitMeasure" ):
                listener.exitMeasure(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitMeasure" ):
                return visitor.visitMeasure(self)
            else:
                return visitor.visitChildren(self)
    class ExtractContext(PrimaryExpressionContext):
        """Parse-tree node for EXTRACT(field FROM valueExpression), where the
        field is an identifier. (ANTLR-generated; do not edit by hand.)"""

        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.PrimaryExpressionContext
            super().__init__(parser)
            self.copyFrom(ctx)
        def EXTRACT(self):
            return self.getToken(SqlBaseParser.EXTRACT, 0)
        def identifier(self):
            # The field being extracted (e.g. a date/time part identifier).
            return self.getTypedRuleContext(SqlBaseParser.IdentifierContext,0)
        def FROM(self):
            return self.getToken(SqlBaseParser.FROM, 0)
        def valueExpression(self):
            # The source value to extract from.
            return self.getTypedRuleContext(SqlBaseParser.ValueExpressionContext,0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterExtract" ):
                listener.enterExtract(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitExtract" ):
                listener.exitExtract(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitExtract" ):
                return visitor.visitExtract(self)
            else:
                return visitor.visitChildren(self)
    class StringLiteralContext(PrimaryExpressionContext):
        """Parse-tree node for a string literal; the text is in the nested
        string() sub-rule context.
        (ANTLR-generated labeled-alternative context; do not edit by hand.)"""

        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.PrimaryExpressionContext
            super().__init__(parser)
            self.copyFrom(ctx)
        def string(self):
            return self.getTypedRuleContext(SqlBaseParser.StringContext,0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterStringLiteral" ):
                listener.enterStringLiteral(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitStringLiteral" ):
                listener.exitStringLiteral(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitStringLiteral" ):
                return visitor.visitStringLiteral(self)
            else:
                return visitor.visitChildren(self)
    class ArrayConstructorContext(PrimaryExpressionContext):
        """Parse-tree node for an ARRAY constructor with zero or more element
        expressions. (ANTLR-generated; do not edit by hand.)"""

        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.PrimaryExpressionContext
            super().__init__(parser)
            self.copyFrom(ctx)
        def ARRAY(self):
            return self.getToken(SqlBaseParser.ARRAY, 0)
        def expression(self, i:int=None):
            # i is None -> all element expressions in order; otherwise the i-th element.
            if i is None:
                return self.getTypedRuleContexts(SqlBaseParser.ExpressionContext)
            else:
                return self.getTypedRuleContext(SqlBaseParser.ExpressionContext,i)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterArrayConstructor" ):
                listener.enterArrayConstructor(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitArrayConstructor" ):
                listener.exitArrayConstructor(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitArrayConstructor" ):
                return visitor.visitArrayConstructor(self)
            else:
                return visitor.visitChildren(self)
    class FunctionCallContext(PrimaryExpressionContext):
        """Parse-tree node for a general function call. Accessors cover the
        optional pieces the grammar allows: processing mode, set quantifier
        (e.g. DISTINCT), argument expressions or '*', ORDER BY sort items,
        null treatment, FILTER clause, and OVER window.
        Any accessor returns None (or an empty list) when that piece was absent.
        (ANTLR-generated labeled-alternative context; do not edit by hand.)"""

        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.PrimaryExpressionContext
            super().__init__(parser)
            self.label = None # IdentifierContext: optional label captured by the grammar
            self.copyFrom(ctx)
        def qualifiedName(self):
            # The (possibly qualified) function name.
            return self.getTypedRuleContext(SqlBaseParser.QualifiedNameContext,0)
        def ASTERISK(self):
            # Present for calls of the form fn(*), e.g. count(*).
            return self.getToken(SqlBaseParser.ASTERISK, 0)
        def processingMode(self):
            return self.getTypedRuleContext(SqlBaseParser.ProcessingModeContext,0)
        def filter_(self):
            # Optional FILTER clause (trailing underscore avoids the builtin name).
            return self.getTypedRuleContext(SqlBaseParser.Filter_Context,0)
        def over(self):
            # Optional OVER window clause (window-function invocation).
            return self.getTypedRuleContext(SqlBaseParser.OverContext,0)
        def identifier(self):
            return self.getTypedRuleContext(SqlBaseParser.IdentifierContext,0)
        def expression(self, i:int=None):
            # i is None -> all argument expressions; otherwise the i-th argument.
            if i is None:
                return self.getTypedRuleContexts(SqlBaseParser.ExpressionContext)
            else:
                return self.getTypedRuleContext(SqlBaseParser.ExpressionContext,i)
        def ORDER(self):
            return self.getToken(SqlBaseParser.ORDER, 0)
        def BY(self):
            return self.getToken(SqlBaseParser.BY, 0)
        def sortItem(self, i:int=None):
            # Sort items of an in-call ORDER BY (aggregate ordering).
            if i is None:
                return self.getTypedRuleContexts(SqlBaseParser.SortItemContext)
            else:
                return self.getTypedRuleContext(SqlBaseParser.SortItemContext,i)
        def setQuantifier(self):
            return self.getTypedRuleContext(SqlBaseParser.SetQuantifierContext,0)
        def nullTreatment(self):
            return self.getTypedRuleContext(SqlBaseParser.NullTreatmentContext,0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterFunctionCall" ):
                listener.enterFunctionCall(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitFunctionCall" ):
                listener.exitFunctionCall(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitFunctionCall" ):
                return visitor.visitFunctionCall(self)
            else:
                return visitor.visitChildren(self)
    class CurrentSchemaContext(PrimaryExpressionContext):
        """Parse-tree node for the CURRENT_SCHEMA niladic function.
        (ANTLR-generated labeled-alternative context; do not edit by hand.)"""

        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.PrimaryExpressionContext
            super().__init__(parser)
            self.name = None # Token: the CURRENT_SCHEMA keyword token
            self.copyFrom(ctx)
        def CURRENT_SCHEMA(self):
            return self.getToken(SqlBaseParser.CURRENT_SCHEMA, 0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterCurrentSchema" ):
                listener.enterCurrentSchema(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitCurrentSchema" ):
                listener.exitCurrentSchema(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitCurrentSchema" ):
                return visitor.visitCurrentSchema(self)
            else:
                return visitor.visitChildren(self)
    class ExistsContext(PrimaryExpressionContext):
        """Parse-tree node for an EXISTS (query) predicate.
        (ANTLR-generated labeled-alternative context; do not edit by hand.)"""

        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.PrimaryExpressionContext
            super().__init__(parser)
            self.copyFrom(ctx)
        def EXISTS(self):
            return self.getToken(SqlBaseParser.EXISTS, 0)
        def query(self):
            # The subquery whose non-emptiness is being tested.
            return self.getTypedRuleContext(SqlBaseParser.QueryContext,0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterExists" ):
                listener.enterExists(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitExists" ):
                listener.exitExists(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitExists" ):
                return visitor.visitExists(self)
            else:
                return visitor.visitChildren(self)
    class PositionContext(PrimaryExpressionContext):
        """Parse-tree node for POSITION(substring IN value).
        (ANTLR-generated labeled-alternative context; do not edit by hand.)"""

        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.PrimaryExpressionContext
            super().__init__(parser)
            self.copyFrom(ctx)
        def POSITION(self):
            return self.getToken(SqlBaseParser.POSITION, 0)
        def valueExpression(self, i:int=None):
            # i is None -> both operands; 0 = needle, 1 = haystack (source order).
            if i is None:
                return self.getTypedRuleContexts(SqlBaseParser.ValueExpressionContext)
            else:
                return self.getTypedRuleContext(SqlBaseParser.ValueExpressionContext,i)
        def IN(self):
            return self.getToken(SqlBaseParser.IN, 0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterPosition" ):
                listener.enterPosition(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitPosition" ):
                listener.exitPosition(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitPosition" ):
                return visitor.visitPosition(self)
            else:
                return visitor.visitChildren(self)
    class ListaggContext(PrimaryExpressionContext):
        """Parse-tree node for the LISTAGG aggregate:
        LISTAGG([setQuantifier] expr [, separator-string] [ON OVERFLOW behavior])
        WITHIN GROUP (ORDER BY sortItem, ...).
        Optional-piece accessors return None when the clause was absent.
        (ANTLR-generated labeled-alternative context; do not edit by hand.)"""

        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.PrimaryExpressionContext
            super().__init__(parser)
            self.name = None # Token: the LISTAGG keyword token
            self.copyFrom(ctx)
        def expression(self):
            # The value being aggregated.
            return self.getTypedRuleContext(SqlBaseParser.ExpressionContext,0)
        def LISTAGG(self):
            return self.getToken(SqlBaseParser.LISTAGG, 0)
        def WITHIN(self):
            return self.getToken(SqlBaseParser.WITHIN, 0)
        def GROUP(self):
            return self.getToken(SqlBaseParser.GROUP, 0)
        def ORDER(self):
            return self.getToken(SqlBaseParser.ORDER, 0)
        def BY(self):
            return self.getToken(SqlBaseParser.BY, 0)
        def sortItem(self, i:int=None):
            # Sort items of the mandatory WITHIN GROUP (ORDER BY ...) clause.
            if i is None:
                return self.getTypedRuleContexts(SqlBaseParser.SortItemContext)
            else:
                return self.getTypedRuleContext(SqlBaseParser.SortItemContext,i)
        def setQuantifier(self):
            return self.getTypedRuleContext(SqlBaseParser.SetQuantifierContext,0)
        def string(self):
            # Optional separator string literal.
            return self.getTypedRuleContext(SqlBaseParser.StringContext,0)
        def ON(self):
            return self.getToken(SqlBaseParser.ON, 0)
        def OVERFLOW(self):
            return self.getToken(SqlBaseParser.OVERFLOW, 0)
        def listAggOverflowBehavior(self):
            # Optional ON OVERFLOW behavior sub-rule.
            return self.getTypedRuleContext(SqlBaseParser.ListAggOverflowBehaviorContext,0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterListagg" ):
                listener.enterListagg(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitListagg" ):
                listener.exitListagg(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitListagg" ):
                return visitor.visitListagg(self)
            else:
                return visitor.visitChildren(self)
    class SearchedCaseContext(PrimaryExpressionContext):
        """Parse-tree node for a searched CASE expression:
        CASE WHEN cond THEN result ... [ELSE elseExpression] END.
        Unlike SimpleCaseContext there is no tested operand.
        (ANTLR-generated labeled-alternative context; do not edit by hand.)"""

        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.PrimaryExpressionContext
            super().__init__(parser)
            self.elseExpression = None # ExpressionContext: optional ELSE result
            self.copyFrom(ctx)
        def CASE(self):
            return self.getToken(SqlBaseParser.CASE, 0)
        def END(self):
            return self.getToken(SqlBaseParser.END, 0)
        def whenClause(self, i:int=None):
            # One context per WHEN ... THEN ... arm, in source order.
            if i is None:
                return self.getTypedRuleContexts(SqlBaseParser.WhenClauseContext)
            else:
                return self.getTypedRuleContext(SqlBaseParser.WhenClauseContext,i)
        def ELSE(self):
            return self.getToken(SqlBaseParser.ELSE, 0)
        def expression(self):
            # The ELSE expression, when present.
            return self.getTypedRuleContext(SqlBaseParser.ExpressionContext,0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterSearchedCase" ):
                listener.enterSearchedCase(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitSearchedCase" ):
                listener.exitSearchedCase(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitSearchedCase" ):
                return visitor.visitSearchedCase(self)
            else:
                return visitor.visitChildren(self)
    class CurrentCatalogContext(PrimaryExpressionContext):
        """Parse-tree node for the CURRENT_CATALOG niladic function.
        (ANTLR-generated labeled-alternative context; do not edit by hand.)"""

        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.PrimaryExpressionContext
            super().__init__(parser)
            self.name = None # Token: the CURRENT_CATALOG keyword token
            self.copyFrom(ctx)
        def CURRENT_CATALOG(self):
            return self.getToken(SqlBaseParser.CURRENT_CATALOG, 0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterCurrentCatalog" ):
                listener.enterCurrentCatalog(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitCurrentCatalog" ):
                listener.exitCurrentCatalog(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitCurrentCatalog" ):
                return visitor.visitCurrentCatalog(self)
            else:
                return visitor.visitChildren(self)
    class GroupingOperationContext(PrimaryExpressionContext):
        """Parse-tree node for GROUPING(col, ...) over one or more qualified
        column names. (ANTLR-generated; do not edit by hand.)"""

        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.PrimaryExpressionContext
            super().__init__(parser)
            self.copyFrom(ctx)
        def GROUPING(self):
            return self.getToken(SqlBaseParser.GROUPING, 0)
        def qualifiedName(self, i:int=None):
            # i is None -> all column-name arguments; otherwise the i-th one.
            if i is None:
                return self.getTypedRuleContexts(SqlBaseParser.QualifiedNameContext)
            else:
                return self.getTypedRuleContext(SqlBaseParser.QualifiedNameContext,i)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterGroupingOperation" ):
                listener.enterGroupingOperation(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitGroupingOperation" ):
                listener.exitGroupingOperation(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitGroupingOperation" ):
                return visitor.visitGroupingOperation(self)
            else:
                return visitor.visitChildren(self)
def primaryExpression(self, _p:int=0):
_parentctx = self._ctx
_parentState = self.state
localctx = SqlBaseParser.PrimaryExpressionContext(self, self._ctx, _parentState)
_prevctx = localctx
_startState = 100
self.enterRecursionRule(localctx, 100, self.RULE_primaryExpression, _p)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 1962
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,254,self._ctx)
if la_ == 1:
localctx = SqlBaseParser.NullLiteralContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 1675
self.match(SqlBaseParser.NULL)
elif la_ == 2:
localctx = SqlBaseParser.IntervalLiteralContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 1676
self.interval()
elif la_ == 3:
localctx = SqlBaseParser.TypeConstructorContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 1677
self.identifier()
self.state = 1678
self.string()
elif la_ == 4:
localctx = SqlBaseParser.TypeConstructorContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 1680
self.match(SqlBaseParser.DOUBLE)
self.state = 1681
self.match(SqlBaseParser.PRECISION)
self.state = 1682
self.string()
elif la_ == 5:
localctx = SqlBaseParser.NumericLiteralContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 1683
self.number()
elif la_ == 6:
localctx = SqlBaseParser.BooleanLiteralContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 1684
self.booleanValue()
elif la_ == 7:
localctx = SqlBaseParser.StringLiteralContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 1685
self.string()
elif la_ == 8:
localctx = SqlBaseParser.BinaryLiteralContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 1686
self.match(SqlBaseParser.BINARY_LITERAL)
elif la_ == 9:
localctx = SqlBaseParser.ParameterContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 1687
self.match(SqlBaseParser.QUESTION_MARK)
elif la_ == 10:
localctx = SqlBaseParser.PositionContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 1688
self.match(SqlBaseParser.POSITION)
self.state = 1689
self.match(SqlBaseParser.T__1)
self.state = 1690
self.valueExpression(0)
self.state = 1691
self.match(SqlBaseParser.IN)
self.state = 1692
self.valueExpression(0)
self.state = 1693
self.match(SqlBaseParser.T__2)
elif la_ == 11:
localctx = SqlBaseParser.RowConstructorContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 1695
self.match(SqlBaseParser.T__1)
self.state = 1696
self.expression()
self.state = 1699
self._errHandler.sync(self)
_la = self._input.LA(1)
while True:
self.state = 1697
self.match(SqlBaseParser.T__3)
self.state = 1698
self.expression()
self.state = 1701
self._errHandler.sync(self)
_la = self._input.LA(1)
if not (_la==SqlBaseParser.T__3):
break
self.state = 1703
self.match(SqlBaseParser.T__2)
elif la_ == 12:
localctx = SqlBaseParser.RowConstructorContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 1705
self.match(SqlBaseParser.ROW)
self.state = 1706
self.match(SqlBaseParser.T__1)
self.state = 1707
self.expression()
self.state = 1712
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la==SqlBaseParser.T__3:
self.state = 1708
self.match(SqlBaseParser.T__3)
self.state = 1709
self.expression()
self.state = 1714
self._errHandler.sync(self)
_la = self._input.LA(1)
self.state = 1715
self.match(SqlBaseParser.T__2)
elif la_ == 13:
localctx = SqlBaseParser.ListaggContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 1717
localctx.name = self.match(SqlBaseParser.LISTAGG)
self.state = 1718
self.match(SqlBaseParser.T__1)
self.state = 1720
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,221,self._ctx)
if la_ == 1:
self.state = 1719
self.setQuantifier()
self.state = 1722
self.expression()
self.state = 1725
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==SqlBaseParser.T__3:
self.state = 1723
self.match(SqlBaseParser.T__3)
self.state = 1724
self.string()
self.state = 1730
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==SqlBaseParser.ON:
self.state = 1727
self.match(SqlBaseParser.ON)
self.state = 1728
self.match(SqlBaseParser.OVERFLOW)
self.state = 1729
self.listAggOverflowBehavior()
self.state = 1732
self.match(SqlBaseParser.T__2)
self.state = 1733
self.match(SqlBaseParser.WITHIN)
self.state = 1734
self.match(SqlBaseParser.GROUP)
self.state = 1735
self.match(SqlBaseParser.T__1)
self.state = 1736
self.match(SqlBaseParser.ORDER)
self.state = 1737
self.match(SqlBaseParser.BY)
self.state = 1738
self.sortItem()
self.state = 1743
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la==SqlBaseParser.T__3:
self.state = 1739
self.match(SqlBaseParser.T__3)
self.state = 1740
self.sortItem()
self.state = 1745
self._errHandler.sync(self)
_la = self._input.LA(1)
self.state = 1746
self.match(SqlBaseParser.T__2)
elif la_ == 14:
localctx = SqlBaseParser.FunctionCallContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 1749
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,225,self._ctx)
if la_ == 1:
self.state = 1748
self.processingMode()
self.state = 1751
self.qualifiedName()
self.state = 1752
self.match(SqlBaseParser.T__1)
self.state = 1756
self._errHandler.sync(self)
_la = self._input.LA(1)
if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << SqlBaseParser.ADD) | (1 << SqlBaseParser.ADMIN) | (1 << SqlBaseParser.AFTER) | (1 << SqlBaseParser.ALL) | (1 << SqlBaseParser.ANALYZE) | (1 << SqlBaseParser.ANY) | (1 << SqlBaseParser.ARRAY) | (1 << SqlBaseParser.ASC) | (1 << SqlBaseParser.AT) | (1 << SqlBaseParser.AUTHORIZATION) | (1 << SqlBaseParser.BERNOULLI) | (1 << SqlBaseParser.CALL) | (1 << SqlBaseParser.CASCADE) | (1 << SqlBaseParser.CATALOGS) | (1 << SqlBaseParser.COLUMN) | (1 << SqlBaseParser.COLUMNS) | (1 << SqlBaseParser.COMMENT) | (1 << SqlBaseParser.COMMIT) | (1 << SqlBaseParser.COMMITTED) | (1 << SqlBaseParser.COUNT) | (1 << SqlBaseParser.CURRENT) | (1 << SqlBaseParser.DATA) | (1 << SqlBaseParser.DATE) | (1 << SqlBaseParser.DAY) | (1 << SqlBaseParser.DEFINER) | (1 << SqlBaseParser.DENY))) != 0) or ((((_la - 64)) & ~0x3f) == 0 and ((1 << (_la - 64)) & ((1 << (SqlBaseParser.DESC - 64)) | (1 << (SqlBaseParser.DEFINE - 64)) | (1 << (SqlBaseParser.DISTRIBUTED - 64)) | (1 << (SqlBaseParser.DOUBLE - 64)) | (1 << (SqlBaseParser.EMPTY - 64)) | (1 << (SqlBaseParser.ERROR - 64)) | (1 << (SqlBaseParser.EXCLUDING - 64)) | (1 << (SqlBaseParser.EXPLAIN - 64)) | (1 << (SqlBaseParser.FETCH - 64)) | (1 << (SqlBaseParser.FILTER - 64)) | (1 << (SqlBaseParser.FINAL - 64)) | (1 << (SqlBaseParser.FIRST - 64)) | (1 << (SqlBaseParser.FOLLOWING - 64)) | (1 << (SqlBaseParser.FORMAT - 64)) | (1 << (SqlBaseParser.FUNCTIONS - 64)) | (1 << (SqlBaseParser.GRANT - 64)) | (1 << (SqlBaseParser.GRANTED - 64)) | (1 << (SqlBaseParser.GRANTS - 64)) | (1 << (SqlBaseParser.GRAPHVIZ - 64)) | (1 << (SqlBaseParser.GROUPS - 64)) | (1 << (SqlBaseParser.HOUR - 64)) | (1 << (SqlBaseParser.IF - 64)) | (1 << (SqlBaseParser.IGNORE - 64)) | (1 << (SqlBaseParser.INCLUDING - 64)) | (1 << (SqlBaseParser.INITIAL - 64)) | (1 << (SqlBaseParser.INPUT - 64)) | (1 << (SqlBaseParser.INTERVAL - 64)) | (1 << (SqlBaseParser.INVOKER - 64)) | (1 << (SqlBaseParser.IO - 64)) | (1 << (SqlBaseParser.ISOLATION - 64)) | (1 
<< (SqlBaseParser.JSON - 64)) | (1 << (SqlBaseParser.LAST - 64)) | (1 << (SqlBaseParser.LATERAL - 64)) | (1 << (SqlBaseParser.LEVEL - 64)) | (1 << (SqlBaseParser.LIMIT - 64)) | (1 << (SqlBaseParser.LOCAL - 64)))) != 0) or ((((_la - 129)) & ~0x3f) == 0 and ((1 << (_la - 129)) & ((1 << (SqlBaseParser.LOGICAL - 129)) | (1 << (SqlBaseParser.MAP - 129)) | (1 << (SqlBaseParser.MATCH - 129)) | (1 << (SqlBaseParser.MATCHED - 129)) | (1 << (SqlBaseParser.MATCHES - 129)) | (1 << (SqlBaseParser.MATCH_RECOGNIZE - 129)) | (1 << (SqlBaseParser.MATERIALIZED - 129)) | (1 << (SqlBaseParser.MEASURES - 129)) | (1 << (SqlBaseParser.MERGE - 129)) | (1 << (SqlBaseParser.MINUTE - 129)) | (1 << (SqlBaseParser.MONTH - 129)) | (1 << (SqlBaseParser.NEXT - 129)) | (1 << (SqlBaseParser.NFC - 129)) | (1 << (SqlBaseParser.NFD - 129)) | (1 << (SqlBaseParser.NFKC - 129)) | (1 << (SqlBaseParser.NFKD - 129)) | (1 << (SqlBaseParser.NO - 129)) | (1 << (SqlBaseParser.NONE - 129)) | (1 << (SqlBaseParser.NULLIF - 129)) | (1 << (SqlBaseParser.NULLS - 129)) | (1 << (SqlBaseParser.OFFSET - 129)) | (1 << (SqlBaseParser.OMIT - 129)) | (1 << (SqlBaseParser.OF - 129)) | (1 << (SqlBaseParser.ONE - 129)) | (1 << (SqlBaseParser.ONLY - 129)) | (1 << (SqlBaseParser.OPTION - 129)) | (1 << (SqlBaseParser.ORDINALITY - 129)) | (1 << (SqlBaseParser.OUTPUT - 129)) | (1 << (SqlBaseParser.OVER - 129)) | (1 << (SqlBaseParser.OVERFLOW - 129)) | (1 << (SqlBaseParser.PARTITION - 129)) | (1 << (SqlBaseParser.PARTITIONS - 129)) | (1 << (SqlBaseParser.PAST - 129)) | (1 << (SqlBaseParser.PATH - 129)) | (1 << (SqlBaseParser.PATTERN - 129)) | (1 << (SqlBaseParser.PER - 129)) | (1 << (SqlBaseParser.PERMUTE - 129)) | (1 << (SqlBaseParser.POSITION - 129)) | (1 << (SqlBaseParser.PRECEDING - 129)) | (1 << (SqlBaseParser.PRECISION - 129)) | (1 << (SqlBaseParser.PRIVILEGES - 129)) | (1 << (SqlBaseParser.PROPERTIES - 129)) | (1 << (SqlBaseParser.RANGE - 129)) | (1 << (SqlBaseParser.READ - 129)) | (1 << (SqlBaseParser.REFRESH - 129)) | (1 << 
(SqlBaseParser.RENAME - 129)) | (1 << (SqlBaseParser.REPEATABLE - 129)) | (1 << (SqlBaseParser.REPLACE - 129)) | (1 << (SqlBaseParser.RESET - 129)) | (1 << (SqlBaseParser.RESPECT - 129)) | (1 << (SqlBaseParser.RESTRICT - 129)) | (1 << (SqlBaseParser.REVOKE - 129)) | (1 << (SqlBaseParser.ROLE - 129)))) != 0) or ((((_la - 193)) & ~0x3f) == 0 and ((1 << (_la - 193)) & ((1 << (SqlBaseParser.ROLES - 193)) | (1 << (SqlBaseParser.ROLLBACK - 193)) | (1 << (SqlBaseParser.ROW - 193)) | (1 << (SqlBaseParser.ROWS - 193)) | (1 << (SqlBaseParser.RUNNING - 193)) | (1 << (SqlBaseParser.SCHEMA - 193)) | (1 << (SqlBaseParser.SCHEMAS - 193)) | (1 << (SqlBaseParser.SECOND - 193)) | (1 << (SqlBaseParser.SECURITY - 193)) | (1 << (SqlBaseParser.SEEK - 193)) | (1 << (SqlBaseParser.SERIALIZABLE - 193)) | (1 << (SqlBaseParser.SESSION - 193)) | (1 << (SqlBaseParser.SET - 193)) | (1 << (SqlBaseParser.SETS - 193)) | (1 << (SqlBaseParser.SHOW - 193)) | (1 << (SqlBaseParser.SOME - 193)) | (1 << (SqlBaseParser.START - 193)) | (1 << (SqlBaseParser.STATS - 193)) | (1 << (SqlBaseParser.SUBSET - 193)) | (1 << (SqlBaseParser.SUBSTRING - 193)) | (1 << (SqlBaseParser.SYSTEM - 193)) | (1 << (SqlBaseParser.TABLES - 193)) | (1 << (SqlBaseParser.TABLESAMPLE - 193)) | (1 << (SqlBaseParser.TEXT - 193)) | (1 << (SqlBaseParser.TIES - 193)) | (1 << (SqlBaseParser.TIME - 193)) | (1 << (SqlBaseParser.TIMESTAMP - 193)) | (1 << (SqlBaseParser.TO - 193)) | (1 << (SqlBaseParser.TRANSACTION - 193)) | (1 << (SqlBaseParser.TRUNCATE - 193)) | (1 << (SqlBaseParser.TRY_CAST - 193)) | (1 << (SqlBaseParser.TYPE - 193)) | (1 << (SqlBaseParser.UNBOUNDED - 193)) | (1 << (SqlBaseParser.UNCOMMITTED - 193)) | (1 << (SqlBaseParser.UNMATCHED - 193)) | (1 << (SqlBaseParser.UPDATE - 193)) | (1 << (SqlBaseParser.USE - 193)) | (1 << (SqlBaseParser.USER - 193)) | (1 << (SqlBaseParser.VALIDATE - 193)) | (1 << (SqlBaseParser.VERBOSE - 193)) | (1 << (SqlBaseParser.VERSION - 193)) | (1 << (SqlBaseParser.VIEW - 193)) | (1 << 
(SqlBaseParser.WINDOW - 193)) | (1 << (SqlBaseParser.WITHIN - 193)) | (1 << (SqlBaseParser.WITHOUT - 193)) | (1 << (SqlBaseParser.WORK - 193)) | (1 << (SqlBaseParser.WRITE - 193)) | (1 << (SqlBaseParser.YEAR - 193)) | (1 << (SqlBaseParser.ZONE - 193)))) != 0) or ((((_la - 274)) & ~0x3f) == 0 and ((1 << (_la - 274)) & ((1 << (SqlBaseParser.IDENTIFIER - 274)) | (1 << (SqlBaseParser.DIGIT_IDENTIFIER - 274)) | (1 << (SqlBaseParser.QUOTED_IDENTIFIER - 274)) | (1 << (SqlBaseParser.BACKQUOTED_IDENTIFIER - 274)))) != 0):
self.state = 1753
localctx.label = self.identifier()
self.state = 1754
self.match(SqlBaseParser.T__0)
self.state = 1758
self.match(SqlBaseParser.ASTERISK)
self.state = 1759
self.match(SqlBaseParser.T__2)
self.state = 1761
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,227,self._ctx)
if la_ == 1:
self.state = 1760
self.filter_()
self.state = 1764
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,228,self._ctx)
if la_ == 1:
self.state = 1763
self.over()
elif la_ == 15:
localctx = SqlBaseParser.FunctionCallContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 1767
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,229,self._ctx)
if la_ == 1:
self.state = 1766
self.processingMode()
self.state = 1769
self.qualifiedName()
self.state = 1770
self.match(SqlBaseParser.T__1)
self.state = 1782
self._errHandler.sync(self)
_la = self._input.LA(1)
if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << SqlBaseParser.T__1) | (1 << SqlBaseParser.ADD) | (1 << SqlBaseParser.ADMIN) | (1 << SqlBaseParser.AFTER) | (1 << SqlBaseParser.ALL) | (1 << SqlBaseParser.ANALYZE) | (1 << SqlBaseParser.ANY) | (1 << SqlBaseParser.ARRAY) | (1 << SqlBaseParser.ASC) | (1 << SqlBaseParser.AT) | (1 << SqlBaseParser.AUTHORIZATION) | (1 << SqlBaseParser.BERNOULLI) | (1 << SqlBaseParser.CALL) | (1 << SqlBaseParser.CASCADE) | (1 << SqlBaseParser.CASE) | (1 << SqlBaseParser.CAST) | (1 << SqlBaseParser.CATALOGS) | (1 << SqlBaseParser.COLUMN) | (1 << SqlBaseParser.COLUMNS) | (1 << SqlBaseParser.COMMENT) | (1 << SqlBaseParser.COMMIT) | (1 << SqlBaseParser.COMMITTED) | (1 << SqlBaseParser.COUNT) | (1 << SqlBaseParser.CURRENT) | (1 << SqlBaseParser.CURRENT_CATALOG) | (1 << SqlBaseParser.CURRENT_DATE) | (1 << SqlBaseParser.CURRENT_PATH) | (1 << SqlBaseParser.CURRENT_SCHEMA) | (1 << SqlBaseParser.CURRENT_TIME) | (1 << SqlBaseParser.CURRENT_TIMESTAMP) | (1 << SqlBaseParser.CURRENT_USER) | (1 << SqlBaseParser.DATA) | (1 << SqlBaseParser.DATE) | (1 << SqlBaseParser.DAY) | (1 << SqlBaseParser.DEFINER) | (1 << SqlBaseParser.DENY))) != 0) or ((((_la - 64)) & ~0x3f) == 0 and ((1 << (_la - 64)) & ((1 << (SqlBaseParser.DESC - 64)) | (1 << (SqlBaseParser.DEFINE - 64)) | (1 << (SqlBaseParser.DISTINCT - 64)) | (1 << (SqlBaseParser.DISTRIBUTED - 64)) | (1 << (SqlBaseParser.DOUBLE - 64)) | (1 << (SqlBaseParser.EMPTY - 64)) | (1 << (SqlBaseParser.ERROR - 64)) | (1 << (SqlBaseParser.EXCLUDING - 64)) | (1 << (SqlBaseParser.EXISTS - 64)) | (1 << (SqlBaseParser.EXPLAIN - 64)) | (1 << (SqlBaseParser.EXTRACT - 64)) | (1 << (SqlBaseParser.FALSE - 64)) | (1 << (SqlBaseParser.FETCH - 64)) | (1 << (SqlBaseParser.FILTER - 64)) | (1 << (SqlBaseParser.FINAL - 64)) | (1 << (SqlBaseParser.FIRST - 64)) | (1 << (SqlBaseParser.FOLLOWING - 64)) | (1 << (SqlBaseParser.FORMAT - 64)) | (1 << (SqlBaseParser.FUNCTIONS - 64)) | (1 << (SqlBaseParser.GRANT - 64)) | (1 << 
(SqlBaseParser.GRANTED - 64)) | (1 << (SqlBaseParser.GRANTS - 64)) | (1 << (SqlBaseParser.GRAPHVIZ - 64)) | (1 << (SqlBaseParser.GROUPING - 64)) | (1 << (SqlBaseParser.GROUPS - 64)) | (1 << (SqlBaseParser.HOUR - 64)) | (1 << (SqlBaseParser.IF - 64)) | (1 << (SqlBaseParser.IGNORE - 64)) | (1 << (SqlBaseParser.INCLUDING - 64)) | (1 << (SqlBaseParser.INITIAL - 64)) | (1 << (SqlBaseParser.INPUT - 64)) | (1 << (SqlBaseParser.INTERVAL - 64)) | (1 << (SqlBaseParser.INVOKER - 64)) | (1 << (SqlBaseParser.IO - 64)) | (1 << (SqlBaseParser.ISOLATION - 64)) | (1 << (SqlBaseParser.JSON - 64)) | (1 << (SqlBaseParser.LAST - 64)) | (1 << (SqlBaseParser.LATERAL - 64)) | (1 << (SqlBaseParser.LEVEL - 64)) | (1 << (SqlBaseParser.LIMIT - 64)) | (1 << (SqlBaseParser.LISTAGG - 64)) | (1 << (SqlBaseParser.LOCAL - 64)) | (1 << (SqlBaseParser.LOCALTIME - 64)))) != 0) or ((((_la - 128)) & ~0x3f) == 0 and ((1 << (_la - 128)) & ((1 << (SqlBaseParser.LOCALTIMESTAMP - 128)) | (1 << (SqlBaseParser.LOGICAL - 128)) | (1 << (SqlBaseParser.MAP - 128)) | (1 << (SqlBaseParser.MATCH - 128)) | (1 << (SqlBaseParser.MATCHED - 128)) | (1 << (SqlBaseParser.MATCHES - 128)) | (1 << (SqlBaseParser.MATCH_RECOGNIZE - 128)) | (1 << (SqlBaseParser.MATERIALIZED - 128)) | (1 << (SqlBaseParser.MEASURES - 128)) | (1 << (SqlBaseParser.MERGE - 128)) | (1 << (SqlBaseParser.MINUTE - 128)) | (1 << (SqlBaseParser.MONTH - 128)) | (1 << (SqlBaseParser.NEXT - 128)) | (1 << (SqlBaseParser.NFC - 128)) | (1 << (SqlBaseParser.NFD - 128)) | (1 << (SqlBaseParser.NFKC - 128)) | (1 << (SqlBaseParser.NFKD - 128)) | (1 << (SqlBaseParser.NO - 128)) | (1 << (SqlBaseParser.NONE - 128)) | (1 << (SqlBaseParser.NORMALIZE - 128)) | (1 << (SqlBaseParser.NOT - 128)) | (1 << (SqlBaseParser.NULL - 128)) | (1 << (SqlBaseParser.NULLIF - 128)) | (1 << (SqlBaseParser.NULLS - 128)) | (1 << (SqlBaseParser.OFFSET - 128)) | (1 << (SqlBaseParser.OMIT - 128)) | (1 << (SqlBaseParser.OF - 128)) | (1 << (SqlBaseParser.ONE - 128)) | (1 << (SqlBaseParser.ONLY - 
128)) | (1 << (SqlBaseParser.OPTION - 128)) | (1 << (SqlBaseParser.ORDINALITY - 128)) | (1 << (SqlBaseParser.OUTPUT - 128)) | (1 << (SqlBaseParser.OVER - 128)) | (1 << (SqlBaseParser.OVERFLOW - 128)) | (1 << (SqlBaseParser.PARTITION - 128)) | (1 << (SqlBaseParser.PARTITIONS - 128)) | (1 << (SqlBaseParser.PAST - 128)) | (1 << (SqlBaseParser.PATH - 128)) | (1 << (SqlBaseParser.PATTERN - 128)) | (1 << (SqlBaseParser.PER - 128)) | (1 << (SqlBaseParser.PERMUTE - 128)) | (1 << (SqlBaseParser.POSITION - 128)) | (1 << (SqlBaseParser.PRECEDING - 128)) | (1 << (SqlBaseParser.PRECISION - 128)) | (1 << (SqlBaseParser.PRIVILEGES - 128)) | (1 << (SqlBaseParser.PROPERTIES - 128)) | (1 << (SqlBaseParser.RANGE - 128)) | (1 << (SqlBaseParser.READ - 128)) | (1 << (SqlBaseParser.REFRESH - 128)) | (1 << (SqlBaseParser.RENAME - 128)) | (1 << (SqlBaseParser.REPEATABLE - 128)) | (1 << (SqlBaseParser.REPLACE - 128)) | (1 << (SqlBaseParser.RESET - 128)) | (1 << (SqlBaseParser.RESPECT - 128)) | (1 << (SqlBaseParser.RESTRICT - 128)) | (1 << (SqlBaseParser.REVOKE - 128)))) != 0) or ((((_la - 192)) & ~0x3f) == 0 and ((1 << (_la - 192)) & ((1 << (SqlBaseParser.ROLE - 192)) | (1 << (SqlBaseParser.ROLES - 192)) | (1 << (SqlBaseParser.ROLLBACK - 192)) | (1 << (SqlBaseParser.ROW - 192)) | (1 << (SqlBaseParser.ROWS - 192)) | (1 << (SqlBaseParser.RUNNING - 192)) | (1 << (SqlBaseParser.SCHEMA - 192)) | (1 << (SqlBaseParser.SCHEMAS - 192)) | (1 << (SqlBaseParser.SECOND - 192)) | (1 << (SqlBaseParser.SECURITY - 192)) | (1 << (SqlBaseParser.SEEK - 192)) | (1 << (SqlBaseParser.SERIALIZABLE - 192)) | (1 << (SqlBaseParser.SESSION - 192)) | (1 << (SqlBaseParser.SET - 192)) | (1 << (SqlBaseParser.SETS - 192)) | (1 << (SqlBaseParser.SHOW - 192)) | (1 << (SqlBaseParser.SOME - 192)) | (1 << (SqlBaseParser.START - 192)) | (1 << (SqlBaseParser.STATS - 192)) | (1 << (SqlBaseParser.SUBSET - 192)) | (1 << (SqlBaseParser.SUBSTRING - 192)) | (1 << (SqlBaseParser.SYSTEM - 192)) | (1 << (SqlBaseParser.TABLES - 192)) | (1 
<< (SqlBaseParser.TABLESAMPLE - 192)) | (1 << (SqlBaseParser.TEXT - 192)) | (1 << (SqlBaseParser.TIES - 192)) | (1 << (SqlBaseParser.TIME - 192)) | (1 << (SqlBaseParser.TIMESTAMP - 192)) | (1 << (SqlBaseParser.TO - 192)) | (1 << (SqlBaseParser.TRANSACTION - 192)) | (1 << (SqlBaseParser.TRUE - 192)) | (1 << (SqlBaseParser.TRUNCATE - 192)) | (1 << (SqlBaseParser.TRY_CAST - 192)) | (1 << (SqlBaseParser.TYPE - 192)) | (1 << (SqlBaseParser.UNBOUNDED - 192)) | (1 << (SqlBaseParser.UNCOMMITTED - 192)) | (1 << (SqlBaseParser.UNMATCHED - 192)) | (1 << (SqlBaseParser.UPDATE - 192)) | (1 << (SqlBaseParser.USE - 192)) | (1 << (SqlBaseParser.USER - 192)) | (1 << (SqlBaseParser.VALIDATE - 192)) | (1 << (SqlBaseParser.VERBOSE - 192)) | (1 << (SqlBaseParser.VERSION - 192)) | (1 << (SqlBaseParser.VIEW - 192)) | (1 << (SqlBaseParser.WINDOW - 192)) | (1 << (SqlBaseParser.WITHIN - 192)) | (1 << (SqlBaseParser.WITHOUT - 192)) | (1 << (SqlBaseParser.WORK - 192)) | (1 << (SqlBaseParser.WRITE - 192)) | (1 << (SqlBaseParser.YEAR - 192)) | (1 << (SqlBaseParser.ZONE - 192)))) != 0) or ((((_la - 261)) & ~0x3f) == 0 and ((1 << (_la - 261)) & ((1 << (SqlBaseParser.PLUS - 261)) | (1 << (SqlBaseParser.MINUS - 261)) | (1 << (SqlBaseParser.QUESTION_MARK - 261)) | (1 << (SqlBaseParser.STRING - 261)) | (1 << (SqlBaseParser.UNICODE_STRING - 261)) | (1 << (SqlBaseParser.BINARY_LITERAL - 261)) | (1 << (SqlBaseParser.INTEGER_VALUE - 261)) | (1 << (SqlBaseParser.DECIMAL_VALUE - 261)) | (1 << (SqlBaseParser.DOUBLE_VALUE - 261)) | (1 << (SqlBaseParser.IDENTIFIER - 261)) | (1 << (SqlBaseParser.DIGIT_IDENTIFIER - 261)) | (1 << (SqlBaseParser.QUOTED_IDENTIFIER - 261)) | (1 << (SqlBaseParser.BACKQUOTED_IDENTIFIER - 261)))) != 0):
self.state = 1772
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,230,self._ctx)
if la_ == 1:
self.state = 1771
self.setQuantifier()
self.state = 1774
self.expression()
self.state = 1779
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la==SqlBaseParser.T__3:
self.state = 1775
self.match(SqlBaseParser.T__3)
self.state = 1776
self.expression()
self.state = 1781
self._errHandler.sync(self)
_la = self._input.LA(1)
self.state = 1794
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==SqlBaseParser.ORDER:
self.state = 1784
self.match(SqlBaseParser.ORDER)
self.state = 1785
self.match(SqlBaseParser.BY)
self.state = 1786
self.sortItem()
self.state = 1791
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la==SqlBaseParser.T__3:
self.state = 1787
self.match(SqlBaseParser.T__3)
self.state = 1788
self.sortItem()
self.state = 1793
self._errHandler.sync(self)
_la = self._input.LA(1)
self.state = 1796
self.match(SqlBaseParser.T__2)
self.state = 1798
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,235,self._ctx)
if la_ == 1:
self.state = 1797
self.filter_()
self.state = 1804
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,237,self._ctx)
if la_ == 1:
self.state = 1801
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==SqlBaseParser.IGNORE or _la==SqlBaseParser.RESPECT:
self.state = 1800
self.nullTreatment()
self.state = 1803
self.over()
elif la_ == 16:
localctx = SqlBaseParser.MeasureContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 1806
self.identifier()
self.state = 1807
self.over()
elif la_ == 17:
localctx = SqlBaseParser.Lambda_Context(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 1809
self.identifier()
self.state = 1810
self.match(SqlBaseParser.T__5)
self.state = 1811
self.expression()
elif la_ == 18:
localctx = SqlBaseParser.Lambda_Context(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 1813
self.match(SqlBaseParser.T__1)
self.state = 1822
self._errHandler.sync(self)
_la = self._input.LA(1)
if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << SqlBaseParser.ADD) | (1 << SqlBaseParser.ADMIN) | (1 << SqlBaseParser.AFTER) | (1 << SqlBaseParser.ALL) | (1 << SqlBaseParser.ANALYZE) | (1 << SqlBaseParser.ANY) | (1 << SqlBaseParser.ARRAY) | (1 << SqlBaseParser.ASC) | (1 << SqlBaseParser.AT) | (1 << SqlBaseParser.AUTHORIZATION) | (1 << SqlBaseParser.BERNOULLI) | (1 << SqlBaseParser.CALL) | (1 << SqlBaseParser.CASCADE) | (1 << SqlBaseParser.CATALOGS) | (1 << SqlBaseParser.COLUMN) | (1 << SqlBaseParser.COLUMNS) | (1 << SqlBaseParser.COMMENT) | (1 << SqlBaseParser.COMMIT) | (1 << SqlBaseParser.COMMITTED) | (1 << SqlBaseParser.COUNT) | (1 << SqlBaseParser.CURRENT) | (1 << SqlBaseParser.DATA) | (1 << SqlBaseParser.DATE) | (1 << SqlBaseParser.DAY) | (1 << SqlBaseParser.DEFINER) | (1 << SqlBaseParser.DENY))) != 0) or ((((_la - 64)) & ~0x3f) == 0 and ((1 << (_la - 64)) & ((1 << (SqlBaseParser.DESC - 64)) | (1 << (SqlBaseParser.DEFINE - 64)) | (1 << (SqlBaseParser.DISTRIBUTED - 64)) | (1 << (SqlBaseParser.DOUBLE - 64)) | (1 << (SqlBaseParser.EMPTY - 64)) | (1 << (SqlBaseParser.ERROR - 64)) | (1 << (SqlBaseParser.EXCLUDING - 64)) | (1 << (SqlBaseParser.EXPLAIN - 64)) | (1 << (SqlBaseParser.FETCH - 64)) | (1 << (SqlBaseParser.FILTER - 64)) | (1 << (SqlBaseParser.FINAL - 64)) | (1 << (SqlBaseParser.FIRST - 64)) | (1 << (SqlBaseParser.FOLLOWING - 64)) | (1 << (SqlBaseParser.FORMAT - 64)) | (1 << (SqlBaseParser.FUNCTIONS - 64)) | (1 << (SqlBaseParser.GRANT - 64)) | (1 << (SqlBaseParser.GRANTED - 64)) | (1 << (SqlBaseParser.GRANTS - 64)) | (1 << (SqlBaseParser.GRAPHVIZ - 64)) | (1 << (SqlBaseParser.GROUPS - 64)) | (1 << (SqlBaseParser.HOUR - 64)) | (1 << (SqlBaseParser.IF - 64)) | (1 << (SqlBaseParser.IGNORE - 64)) | (1 << (SqlBaseParser.INCLUDING - 64)) | (1 << (SqlBaseParser.INITIAL - 64)) | (1 << (SqlBaseParser.INPUT - 64)) | (1 << (SqlBaseParser.INTERVAL - 64)) | (1 << (SqlBaseParser.INVOKER - 64)) | (1 << (SqlBaseParser.IO - 64)) | (1 << (SqlBaseParser.ISOLATION - 64)) | (1 
<< (SqlBaseParser.JSON - 64)) | (1 << (SqlBaseParser.LAST - 64)) | (1 << (SqlBaseParser.LATERAL - 64)) | (1 << (SqlBaseParser.LEVEL - 64)) | (1 << (SqlBaseParser.LIMIT - 64)) | (1 << (SqlBaseParser.LOCAL - 64)))) != 0) or ((((_la - 129)) & ~0x3f) == 0 and ((1 << (_la - 129)) & ((1 << (SqlBaseParser.LOGICAL - 129)) | (1 << (SqlBaseParser.MAP - 129)) | (1 << (SqlBaseParser.MATCH - 129)) | (1 << (SqlBaseParser.MATCHED - 129)) | (1 << (SqlBaseParser.MATCHES - 129)) | (1 << (SqlBaseParser.MATCH_RECOGNIZE - 129)) | (1 << (SqlBaseParser.MATERIALIZED - 129)) | (1 << (SqlBaseParser.MEASURES - 129)) | (1 << (SqlBaseParser.MERGE - 129)) | (1 << (SqlBaseParser.MINUTE - 129)) | (1 << (SqlBaseParser.MONTH - 129)) | (1 << (SqlBaseParser.NEXT - 129)) | (1 << (SqlBaseParser.NFC - 129)) | (1 << (SqlBaseParser.NFD - 129)) | (1 << (SqlBaseParser.NFKC - 129)) | (1 << (SqlBaseParser.NFKD - 129)) | (1 << (SqlBaseParser.NO - 129)) | (1 << (SqlBaseParser.NONE - 129)) | (1 << (SqlBaseParser.NULLIF - 129)) | (1 << (SqlBaseParser.NULLS - 129)) | (1 << (SqlBaseParser.OFFSET - 129)) | (1 << (SqlBaseParser.OMIT - 129)) | (1 << (SqlBaseParser.OF - 129)) | (1 << (SqlBaseParser.ONE - 129)) | (1 << (SqlBaseParser.ONLY - 129)) | (1 << (SqlBaseParser.OPTION - 129)) | (1 << (SqlBaseParser.ORDINALITY - 129)) | (1 << (SqlBaseParser.OUTPUT - 129)) | (1 << (SqlBaseParser.OVER - 129)) | (1 << (SqlBaseParser.OVERFLOW - 129)) | (1 << (SqlBaseParser.PARTITION - 129)) | (1 << (SqlBaseParser.PARTITIONS - 129)) | (1 << (SqlBaseParser.PAST - 129)) | (1 << (SqlBaseParser.PATH - 129)) | (1 << (SqlBaseParser.PATTERN - 129)) | (1 << (SqlBaseParser.PER - 129)) | (1 << (SqlBaseParser.PERMUTE - 129)) | (1 << (SqlBaseParser.POSITION - 129)) | (1 << (SqlBaseParser.PRECEDING - 129)) | (1 << (SqlBaseParser.PRECISION - 129)) | (1 << (SqlBaseParser.PRIVILEGES - 129)) | (1 << (SqlBaseParser.PROPERTIES - 129)) | (1 << (SqlBaseParser.RANGE - 129)) | (1 << (SqlBaseParser.READ - 129)) | (1 << (SqlBaseParser.REFRESH - 129)) | (1 << 
(SqlBaseParser.RENAME - 129)) | (1 << (SqlBaseParser.REPEATABLE - 129)) | (1 << (SqlBaseParser.REPLACE - 129)) | (1 << (SqlBaseParser.RESET - 129)) | (1 << (SqlBaseParser.RESPECT - 129)) | (1 << (SqlBaseParser.RESTRICT - 129)) | (1 << (SqlBaseParser.REVOKE - 129)) | (1 << (SqlBaseParser.ROLE - 129)))) != 0) or ((((_la - 193)) & ~0x3f) == 0 and ((1 << (_la - 193)) & ((1 << (SqlBaseParser.ROLES - 193)) | (1 << (SqlBaseParser.ROLLBACK - 193)) | (1 << (SqlBaseParser.ROW - 193)) | (1 << (SqlBaseParser.ROWS - 193)) | (1 << (SqlBaseParser.RUNNING - 193)) | (1 << (SqlBaseParser.SCHEMA - 193)) | (1 << (SqlBaseParser.SCHEMAS - 193)) | (1 << (SqlBaseParser.SECOND - 193)) | (1 << (SqlBaseParser.SECURITY - 193)) | (1 << (SqlBaseParser.SEEK - 193)) | (1 << (SqlBaseParser.SERIALIZABLE - 193)) | (1 << (SqlBaseParser.SESSION - 193)) | (1 << (SqlBaseParser.SET - 193)) | (1 << (SqlBaseParser.SETS - 193)) | (1 << (SqlBaseParser.SHOW - 193)) | (1 << (SqlBaseParser.SOME - 193)) | (1 << (SqlBaseParser.START - 193)) | (1 << (SqlBaseParser.STATS - 193)) | (1 << (SqlBaseParser.SUBSET - 193)) | (1 << (SqlBaseParser.SUBSTRING - 193)) | (1 << (SqlBaseParser.SYSTEM - 193)) | (1 << (SqlBaseParser.TABLES - 193)) | (1 << (SqlBaseParser.TABLESAMPLE - 193)) | (1 << (SqlBaseParser.TEXT - 193)) | (1 << (SqlBaseParser.TIES - 193)) | (1 << (SqlBaseParser.TIME - 193)) | (1 << (SqlBaseParser.TIMESTAMP - 193)) | (1 << (SqlBaseParser.TO - 193)) | (1 << (SqlBaseParser.TRANSACTION - 193)) | (1 << (SqlBaseParser.TRUNCATE - 193)) | (1 << (SqlBaseParser.TRY_CAST - 193)) | (1 << (SqlBaseParser.TYPE - 193)) | (1 << (SqlBaseParser.UNBOUNDED - 193)) | (1 << (SqlBaseParser.UNCOMMITTED - 193)) | (1 << (SqlBaseParser.UNMATCHED - 193)) | (1 << (SqlBaseParser.UPDATE - 193)) | (1 << (SqlBaseParser.USE - 193)) | (1 << (SqlBaseParser.USER - 193)) | (1 << (SqlBaseParser.VALIDATE - 193)) | (1 << (SqlBaseParser.VERBOSE - 193)) | (1 << (SqlBaseParser.VERSION - 193)) | (1 << (SqlBaseParser.VIEW - 193)) | (1 << 
(SqlBaseParser.WINDOW - 193)) | (1 << (SqlBaseParser.WITHIN - 193)) | (1 << (SqlBaseParser.WITHOUT - 193)) | (1 << (SqlBaseParser.WORK - 193)) | (1 << (SqlBaseParser.WRITE - 193)) | (1 << (SqlBaseParser.YEAR - 193)) | (1 << (SqlBaseParser.ZONE - 193)))) != 0) or ((((_la - 274)) & ~0x3f) == 0 and ((1 << (_la - 274)) & ((1 << (SqlBaseParser.IDENTIFIER - 274)) | (1 << (SqlBaseParser.DIGIT_IDENTIFIER - 274)) | (1 << (SqlBaseParser.QUOTED_IDENTIFIER - 274)) | (1 << (SqlBaseParser.BACKQUOTED_IDENTIFIER - 274)))) != 0):
self.state = 1814
self.identifier()
self.state = 1819
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la==SqlBaseParser.T__3:
self.state = 1815
self.match(SqlBaseParser.T__3)
self.state = 1816
self.identifier()
self.state = 1821
self._errHandler.sync(self)
_la = self._input.LA(1)
self.state = 1824
self.match(SqlBaseParser.T__2)
self.state = 1825
self.match(SqlBaseParser.T__5)
self.state = 1826
self.expression()
elif la_ == 19:
localctx = SqlBaseParser.SubqueryExpressionContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 1827
self.match(SqlBaseParser.T__1)
self.state = 1828
self.query()
self.state = 1829
self.match(SqlBaseParser.T__2)
elif la_ == 20:
localctx = SqlBaseParser.ExistsContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 1831
self.match(SqlBaseParser.EXISTS)
self.state = 1832
self.match(SqlBaseParser.T__1)
self.state = 1833
self.query()
self.state = 1834
self.match(SqlBaseParser.T__2)
elif la_ == 21:
localctx = SqlBaseParser.SimpleCaseContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 1836
self.match(SqlBaseParser.CASE)
self.state = 1837
localctx.operand = self.expression()
self.state = 1839
self._errHandler.sync(self)
_la = self._input.LA(1)
while True:
self.state = 1838
self.whenClause()
self.state = 1841
self._errHandler.sync(self)
_la = self._input.LA(1)
if not (_la==SqlBaseParser.WHEN):
break
self.state = 1845
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==SqlBaseParser.ELSE:
self.state = 1843
self.match(SqlBaseParser.ELSE)
self.state = 1844
localctx.elseExpression = self.expression()
self.state = 1847
self.match(SqlBaseParser.END)
elif la_ == 22:
localctx = SqlBaseParser.SearchedCaseContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 1849
self.match(SqlBaseParser.CASE)
self.state = 1851
self._errHandler.sync(self)
_la = self._input.LA(1)
while True:
self.state = 1850
self.whenClause()
self.state = 1853
self._errHandler.sync(self)
_la = self._input.LA(1)
if not (_la==SqlBaseParser.WHEN):
break
self.state = 1857
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==SqlBaseParser.ELSE:
self.state = 1855
self.match(SqlBaseParser.ELSE)
self.state = 1856
localctx.elseExpression = self.expression()
self.state = 1859
self.match(SqlBaseParser.END)
elif la_ == 23:
localctx = SqlBaseParser.CastContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 1861
self.match(SqlBaseParser.CAST)
self.state = 1862
self.match(SqlBaseParser.T__1)
self.state = 1863
self.expression()
self.state = 1864
self.match(SqlBaseParser.AS)
self.state = 1865
self.type_(0)
self.state = 1866
self.match(SqlBaseParser.T__2)
elif la_ == 24:
localctx = SqlBaseParser.CastContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 1868
self.match(SqlBaseParser.TRY_CAST)
self.state = 1869
self.match(SqlBaseParser.T__1)
self.state = 1870
self.expression()
self.state = 1871
self.match(SqlBaseParser.AS)
self.state = 1872
self.type_(0)
self.state = 1873
self.match(SqlBaseParser.T__2)
elif la_ == 25:
localctx = SqlBaseParser.ArrayConstructorContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 1875
self.match(SqlBaseParser.ARRAY)
self.state = 1876
self.match(SqlBaseParser.T__6)
self.state = 1885
self._errHandler.sync(self)
_la = self._input.LA(1)
if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << SqlBaseParser.T__1) | (1 << SqlBaseParser.ADD) | (1 << SqlBaseParser.ADMIN) | (1 << SqlBaseParser.AFTER) | (1 << SqlBaseParser.ALL) | (1 << SqlBaseParser.ANALYZE) | (1 << SqlBaseParser.ANY) | (1 << SqlBaseParser.ARRAY) | (1 << SqlBaseParser.ASC) | (1 << SqlBaseParser.AT) | (1 << SqlBaseParser.AUTHORIZATION) | (1 << SqlBaseParser.BERNOULLI) | (1 << SqlBaseParser.CALL) | (1 << SqlBaseParser.CASCADE) | (1 << SqlBaseParser.CASE) | (1 << SqlBaseParser.CAST) | (1 << SqlBaseParser.CATALOGS) | (1 << SqlBaseParser.COLUMN) | (1 << SqlBaseParser.COLUMNS) | (1 << SqlBaseParser.COMMENT) | (1 << SqlBaseParser.COMMIT) | (1 << SqlBaseParser.COMMITTED) | (1 << SqlBaseParser.COUNT) | (1 << SqlBaseParser.CURRENT) | (1 << SqlBaseParser.CURRENT_CATALOG) | (1 << SqlBaseParser.CURRENT_DATE) | (1 << SqlBaseParser.CURRENT_PATH) | (1 << SqlBaseParser.CURRENT_SCHEMA) | (1 << SqlBaseParser.CURRENT_TIME) | (1 << SqlBaseParser.CURRENT_TIMESTAMP) | (1 << SqlBaseParser.CURRENT_USER) | (1 << SqlBaseParser.DATA) | (1 << SqlBaseParser.DATE) | (1 << SqlBaseParser.DAY) | (1 << SqlBaseParser.DEFINER) | (1 << SqlBaseParser.DENY))) != 0) or ((((_la - 64)) & ~0x3f) == 0 and ((1 << (_la - 64)) & ((1 << (SqlBaseParser.DESC - 64)) | (1 << (SqlBaseParser.DEFINE - 64)) | (1 << (SqlBaseParser.DISTRIBUTED - 64)) | (1 << (SqlBaseParser.DOUBLE - 64)) | (1 << (SqlBaseParser.EMPTY - 64)) | (1 << (SqlBaseParser.ERROR - 64)) | (1 << (SqlBaseParser.EXCLUDING - 64)) | (1 << (SqlBaseParser.EXISTS - 64)) | (1 << (SqlBaseParser.EXPLAIN - 64)) | (1 << (SqlBaseParser.EXTRACT - 64)) | (1 << (SqlBaseParser.FALSE - 64)) | (1 << (SqlBaseParser.FETCH - 64)) | (1 << (SqlBaseParser.FILTER - 64)) | (1 << (SqlBaseParser.FINAL - 64)) | (1 << (SqlBaseParser.FIRST - 64)) | (1 << (SqlBaseParser.FOLLOWING - 64)) | (1 << (SqlBaseParser.FORMAT - 64)) | (1 << (SqlBaseParser.FUNCTIONS - 64)) | (1 << (SqlBaseParser.GRANT - 64)) | (1 << (SqlBaseParser.GRANTED - 64)) | (1 << (SqlBaseParser.GRANTS 
- 64)) | (1 << (SqlBaseParser.GRAPHVIZ - 64)) | (1 << (SqlBaseParser.GROUPING - 64)) | (1 << (SqlBaseParser.GROUPS - 64)) | (1 << (SqlBaseParser.HOUR - 64)) | (1 << (SqlBaseParser.IF - 64)) | (1 << (SqlBaseParser.IGNORE - 64)) | (1 << (SqlBaseParser.INCLUDING - 64)) | (1 << (SqlBaseParser.INITIAL - 64)) | (1 << (SqlBaseParser.INPUT - 64)) | (1 << (SqlBaseParser.INTERVAL - 64)) | (1 << (SqlBaseParser.INVOKER - 64)) | (1 << (SqlBaseParser.IO - 64)) | (1 << (SqlBaseParser.ISOLATION - 64)) | (1 << (SqlBaseParser.JSON - 64)) | (1 << (SqlBaseParser.LAST - 64)) | (1 << (SqlBaseParser.LATERAL - 64)) | (1 << (SqlBaseParser.LEVEL - 64)) | (1 << (SqlBaseParser.LIMIT - 64)) | (1 << (SqlBaseParser.LISTAGG - 64)) | (1 << (SqlBaseParser.LOCAL - 64)) | (1 << (SqlBaseParser.LOCALTIME - 64)))) != 0) or ((((_la - 128)) & ~0x3f) == 0 and ((1 << (_la - 128)) & ((1 << (SqlBaseParser.LOCALTIMESTAMP - 128)) | (1 << (SqlBaseParser.LOGICAL - 128)) | (1 << (SqlBaseParser.MAP - 128)) | (1 << (SqlBaseParser.MATCH - 128)) | (1 << (SqlBaseParser.MATCHED - 128)) | (1 << (SqlBaseParser.MATCHES - 128)) | (1 << (SqlBaseParser.MATCH_RECOGNIZE - 128)) | (1 << (SqlBaseParser.MATERIALIZED - 128)) | (1 << (SqlBaseParser.MEASURES - 128)) | (1 << (SqlBaseParser.MERGE - 128)) | (1 << (SqlBaseParser.MINUTE - 128)) | (1 << (SqlBaseParser.MONTH - 128)) | (1 << (SqlBaseParser.NEXT - 128)) | (1 << (SqlBaseParser.NFC - 128)) | (1 << (SqlBaseParser.NFD - 128)) | (1 << (SqlBaseParser.NFKC - 128)) | (1 << (SqlBaseParser.NFKD - 128)) | (1 << (SqlBaseParser.NO - 128)) | (1 << (SqlBaseParser.NONE - 128)) | (1 << (SqlBaseParser.NORMALIZE - 128)) | (1 << (SqlBaseParser.NOT - 128)) | (1 << (SqlBaseParser.NULL - 128)) | (1 << (SqlBaseParser.NULLIF - 128)) | (1 << (SqlBaseParser.NULLS - 128)) | (1 << (SqlBaseParser.OFFSET - 128)) | (1 << (SqlBaseParser.OMIT - 128)) | (1 << (SqlBaseParser.OF - 128)) | (1 << (SqlBaseParser.ONE - 128)) | (1 << (SqlBaseParser.ONLY - 128)) | (1 << (SqlBaseParser.OPTION - 128)) | (1 << 
(SqlBaseParser.ORDINALITY - 128)) | (1 << (SqlBaseParser.OUTPUT - 128)) | (1 << (SqlBaseParser.OVER - 128)) | (1 << (SqlBaseParser.OVERFLOW - 128)) | (1 << (SqlBaseParser.PARTITION - 128)) | (1 << (SqlBaseParser.PARTITIONS - 128)) | (1 << (SqlBaseParser.PAST - 128)) | (1 << (SqlBaseParser.PATH - 128)) | (1 << (SqlBaseParser.PATTERN - 128)) | (1 << (SqlBaseParser.PER - 128)) | (1 << (SqlBaseParser.PERMUTE - 128)) | (1 << (SqlBaseParser.POSITION - 128)) | (1 << (SqlBaseParser.PRECEDING - 128)) | (1 << (SqlBaseParser.PRECISION - 128)) | (1 << (SqlBaseParser.PRIVILEGES - 128)) | (1 << (SqlBaseParser.PROPERTIES - 128)) | (1 << (SqlBaseParser.RANGE - 128)) | (1 << (SqlBaseParser.READ - 128)) | (1 << (SqlBaseParser.REFRESH - 128)) | (1 << (SqlBaseParser.RENAME - 128)) | (1 << (SqlBaseParser.REPEATABLE - 128)) | (1 << (SqlBaseParser.REPLACE - 128)) | (1 << (SqlBaseParser.RESET - 128)) | (1 << (SqlBaseParser.RESPECT - 128)) | (1 << (SqlBaseParser.RESTRICT - 128)) | (1 << (SqlBaseParser.REVOKE - 128)))) != 0) or ((((_la - 192)) & ~0x3f) == 0 and ((1 << (_la - 192)) & ((1 << (SqlBaseParser.ROLE - 192)) | (1 << (SqlBaseParser.ROLES - 192)) | (1 << (SqlBaseParser.ROLLBACK - 192)) | (1 << (SqlBaseParser.ROW - 192)) | (1 << (SqlBaseParser.ROWS - 192)) | (1 << (SqlBaseParser.RUNNING - 192)) | (1 << (SqlBaseParser.SCHEMA - 192)) | (1 << (SqlBaseParser.SCHEMAS - 192)) | (1 << (SqlBaseParser.SECOND - 192)) | (1 << (SqlBaseParser.SECURITY - 192)) | (1 << (SqlBaseParser.SEEK - 192)) | (1 << (SqlBaseParser.SERIALIZABLE - 192)) | (1 << (SqlBaseParser.SESSION - 192)) | (1 << (SqlBaseParser.SET - 192)) | (1 << (SqlBaseParser.SETS - 192)) | (1 << (SqlBaseParser.SHOW - 192)) | (1 << (SqlBaseParser.SOME - 192)) | (1 << (SqlBaseParser.START - 192)) | (1 << (SqlBaseParser.STATS - 192)) | (1 << (SqlBaseParser.SUBSET - 192)) | (1 << (SqlBaseParser.SUBSTRING - 192)) | (1 << (SqlBaseParser.SYSTEM - 192)) | (1 << (SqlBaseParser.TABLES - 192)) | (1 << (SqlBaseParser.TABLESAMPLE - 192)) | (1 << 
(SqlBaseParser.TEXT - 192)) | (1 << (SqlBaseParser.TIES - 192)) | (1 << (SqlBaseParser.TIME - 192)) | (1 << (SqlBaseParser.TIMESTAMP - 192)) | (1 << (SqlBaseParser.TO - 192)) | (1 << (SqlBaseParser.TRANSACTION - 192)) | (1 << (SqlBaseParser.TRUE - 192)) | (1 << (SqlBaseParser.TRUNCATE - 192)) | (1 << (SqlBaseParser.TRY_CAST - 192)) | (1 << (SqlBaseParser.TYPE - 192)) | (1 << (SqlBaseParser.UNBOUNDED - 192)) | (1 << (SqlBaseParser.UNCOMMITTED - 192)) | (1 << (SqlBaseParser.UNMATCHED - 192)) | (1 << (SqlBaseParser.UPDATE - 192)) | (1 << (SqlBaseParser.USE - 192)) | (1 << (SqlBaseParser.USER - 192)) | (1 << (SqlBaseParser.VALIDATE - 192)) | (1 << (SqlBaseParser.VERBOSE - 192)) | (1 << (SqlBaseParser.VERSION - 192)) | (1 << (SqlBaseParser.VIEW - 192)) | (1 << (SqlBaseParser.WINDOW - 192)) | (1 << (SqlBaseParser.WITHIN - 192)) | (1 << (SqlBaseParser.WITHOUT - 192)) | (1 << (SqlBaseParser.WORK - 192)) | (1 << (SqlBaseParser.WRITE - 192)) | (1 << (SqlBaseParser.YEAR - 192)) | (1 << (SqlBaseParser.ZONE - 192)))) != 0) or ((((_la - 261)) & ~0x3f) == 0 and ((1 << (_la - 261)) & ((1 << (SqlBaseParser.PLUS - 261)) | (1 << (SqlBaseParser.MINUS - 261)) | (1 << (SqlBaseParser.QUESTION_MARK - 261)) | (1 << (SqlBaseParser.STRING - 261)) | (1 << (SqlBaseParser.UNICODE_STRING - 261)) | (1 << (SqlBaseParser.BINARY_LITERAL - 261)) | (1 << (SqlBaseParser.INTEGER_VALUE - 261)) | (1 << (SqlBaseParser.DECIMAL_VALUE - 261)) | (1 << (SqlBaseParser.DOUBLE_VALUE - 261)) | (1 << (SqlBaseParser.IDENTIFIER - 261)) | (1 << (SqlBaseParser.DIGIT_IDENTIFIER - 261)) | (1 << (SqlBaseParser.QUOTED_IDENTIFIER - 261)) | (1 << (SqlBaseParser.BACKQUOTED_IDENTIFIER - 261)))) != 0):
self.state = 1877
self.expression()
self.state = 1882
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la==SqlBaseParser.T__3:
self.state = 1878
self.match(SqlBaseParser.T__3)
self.state = 1879
self.expression()
self.state = 1884
self._errHandler.sync(self)
_la = self._input.LA(1)
self.state = 1887
self.match(SqlBaseParser.T__7)
elif la_ == 26:
localctx = SqlBaseParser.ColumnReferenceContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 1888
self.identifier()
elif la_ == 27:
localctx = SqlBaseParser.SpecialDateTimeFunctionContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 1889
localctx.name = self.match(SqlBaseParser.CURRENT_DATE)
elif la_ == 28:
localctx = SqlBaseParser.SpecialDateTimeFunctionContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 1890
localctx.name = self.match(SqlBaseParser.CURRENT_TIME)
self.state = 1894
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,246,self._ctx)
if la_ == 1:
self.state = 1891
self.match(SqlBaseParser.T__1)
self.state = 1892
localctx.precision = self.match(SqlBaseParser.INTEGER_VALUE)
self.state = 1893
self.match(SqlBaseParser.T__2)
elif la_ == 29:
localctx = SqlBaseParser.SpecialDateTimeFunctionContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 1896
localctx.name = self.match(SqlBaseParser.CURRENT_TIMESTAMP)
self.state = 1900
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,247,self._ctx)
if la_ == 1:
self.state = 1897
self.match(SqlBaseParser.T__1)
self.state = 1898
localctx.precision = self.match(SqlBaseParser.INTEGER_VALUE)
self.state = 1899
self.match(SqlBaseParser.T__2)
elif la_ == 30:
localctx = SqlBaseParser.SpecialDateTimeFunctionContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 1902
localctx.name = self.match(SqlBaseParser.LOCALTIME)
self.state = 1906
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,248,self._ctx)
if la_ == 1:
self.state = 1903
self.match(SqlBaseParser.T__1)
self.state = 1904
localctx.precision = self.match(SqlBaseParser.INTEGER_VALUE)
self.state = 1905
self.match(SqlBaseParser.T__2)
elif la_ == 31:
localctx = SqlBaseParser.SpecialDateTimeFunctionContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 1908
localctx.name = self.match(SqlBaseParser.LOCALTIMESTAMP)
self.state = 1912
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,249,self._ctx)
if la_ == 1:
self.state = 1909
self.match(SqlBaseParser.T__1)
self.state = 1910
localctx.precision = self.match(SqlBaseParser.INTEGER_VALUE)
self.state = 1911
self.match(SqlBaseParser.T__2)
elif la_ == 32:
localctx = SqlBaseParser.CurrentUserContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 1914
localctx.name = self.match(SqlBaseParser.CURRENT_USER)
elif la_ == 33:
localctx = SqlBaseParser.CurrentCatalogContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 1915
localctx.name = self.match(SqlBaseParser.CURRENT_CATALOG)
elif la_ == 34:
localctx = SqlBaseParser.CurrentSchemaContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 1916
localctx.name = self.match(SqlBaseParser.CURRENT_SCHEMA)
elif la_ == 35:
localctx = SqlBaseParser.CurrentPathContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 1917
localctx.name = self.match(SqlBaseParser.CURRENT_PATH)
elif la_ == 36:
localctx = SqlBaseParser.SubstringContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 1918
self.match(SqlBaseParser.SUBSTRING)
self.state = 1919
self.match(SqlBaseParser.T__1)
self.state = 1920
self.valueExpression(0)
self.state = 1921
self.match(SqlBaseParser.FROM)
self.state = 1922
self.valueExpression(0)
self.state = 1925
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==SqlBaseParser.FOR:
self.state = 1923
self.match(SqlBaseParser.FOR)
self.state = 1924
self.valueExpression(0)
self.state = 1927
self.match(SqlBaseParser.T__2)
elif la_ == 37:
localctx = SqlBaseParser.NormalizeContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 1929
self.match(SqlBaseParser.NORMALIZE)
self.state = 1930
self.match(SqlBaseParser.T__1)
self.state = 1931
self.valueExpression(0)
self.state = 1934
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==SqlBaseParser.T__3:
self.state = 1932
self.match(SqlBaseParser.T__3)
self.state = 1933
self.normalForm()
self.state = 1936
self.match(SqlBaseParser.T__2)
elif la_ == 38:
localctx = SqlBaseParser.ExtractContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 1938
self.match(SqlBaseParser.EXTRACT)
self.state = 1939
self.match(SqlBaseParser.T__1)
self.state = 1940
self.identifier()
self.state = 1941
self.match(SqlBaseParser.FROM)
self.state = 1942
self.valueExpression(0)
self.state = 1943
self.match(SqlBaseParser.T__2)
elif la_ == 39:
localctx = SqlBaseParser.ParenthesizedExpressionContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 1945
self.match(SqlBaseParser.T__1)
self.state = 1946
self.expression()
self.state = 1947
self.match(SqlBaseParser.T__2)
elif la_ == 40:
localctx = SqlBaseParser.GroupingOperationContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 1949
self.match(SqlBaseParser.GROUPING)
self.state = 1950
self.match(SqlBaseParser.T__1)
self.state = 1959
self._errHandler.sync(self)
_la = self._input.LA(1)
if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << SqlBaseParser.ADD) | (1 << SqlBaseParser.ADMIN) | (1 << SqlBaseParser.AFTER) | (1 << SqlBaseParser.ALL) | (1 << SqlBaseParser.ANALYZE) | (1 << SqlBaseParser.ANY) | (1 << SqlBaseParser.ARRAY) | (1 << SqlBaseParser.ASC) | (1 << SqlBaseParser.AT) | (1 << SqlBaseParser.AUTHORIZATION) | (1 << SqlBaseParser.BERNOULLI) | (1 << SqlBaseParser.CALL) | (1 << SqlBaseParser.CASCADE) | (1 << SqlBaseParser.CATALOGS) | (1 << SqlBaseParser.COLUMN) | (1 << SqlBaseParser.COLUMNS) | (1 << SqlBaseParser.COMMENT) | (1 << SqlBaseParser.COMMIT) | (1 << SqlBaseParser.COMMITTED) | (1 << SqlBaseParser.COUNT) | (1 << SqlBaseParser.CURRENT) | (1 << SqlBaseParser.DATA) | (1 << SqlBaseParser.DATE) | (1 << SqlBaseParser.DAY) | (1 << SqlBaseParser.DEFINER) | (1 << SqlBaseParser.DENY))) != 0) or ((((_la - 64)) & ~0x3f) == 0 and ((1 << (_la - 64)) & ((1 << (SqlBaseParser.DESC - 64)) | (1 << (SqlBaseParser.DEFINE - 64)) | (1 << (SqlBaseParser.DISTRIBUTED - 64)) | (1 << (SqlBaseParser.DOUBLE - 64)) | (1 << (SqlBaseParser.EMPTY - 64)) | (1 << (SqlBaseParser.ERROR - 64)) | (1 << (SqlBaseParser.EXCLUDING - 64)) | (1 << (SqlBaseParser.EXPLAIN - 64)) | (1 << (SqlBaseParser.FETCH - 64)) | (1 << (SqlBaseParser.FILTER - 64)) | (1 << (SqlBaseParser.FINAL - 64)) | (1 << (SqlBaseParser.FIRST - 64)) | (1 << (SqlBaseParser.FOLLOWING - 64)) | (1 << (SqlBaseParser.FORMAT - 64)) | (1 << (SqlBaseParser.FUNCTIONS - 64)) | (1 << (SqlBaseParser.GRANT - 64)) | (1 << (SqlBaseParser.GRANTED - 64)) | (1 << (SqlBaseParser.GRANTS - 64)) | (1 << (SqlBaseParser.GRAPHVIZ - 64)) | (1 << (SqlBaseParser.GROUPS - 64)) | (1 << (SqlBaseParser.HOUR - 64)) | (1 << (SqlBaseParser.IF - 64)) | (1 << (SqlBaseParser.IGNORE - 64)) | (1 << (SqlBaseParser.INCLUDING - 64)) | (1 << (SqlBaseParser.INITIAL - 64)) | (1 << (SqlBaseParser.INPUT - 64)) | (1 << (SqlBaseParser.INTERVAL - 64)) | (1 << (SqlBaseParser.INVOKER - 64)) | (1 << (SqlBaseParser.IO - 64)) | (1 << (SqlBaseParser.ISOLATION - 64)) | (1 
<< (SqlBaseParser.JSON - 64)) | (1 << (SqlBaseParser.LAST - 64)) | (1 << (SqlBaseParser.LATERAL - 64)) | (1 << (SqlBaseParser.LEVEL - 64)) | (1 << (SqlBaseParser.LIMIT - 64)) | (1 << (SqlBaseParser.LOCAL - 64)))) != 0) or ((((_la - 129)) & ~0x3f) == 0 and ((1 << (_la - 129)) & ((1 << (SqlBaseParser.LOGICAL - 129)) | (1 << (SqlBaseParser.MAP - 129)) | (1 << (SqlBaseParser.MATCH - 129)) | (1 << (SqlBaseParser.MATCHED - 129)) | (1 << (SqlBaseParser.MATCHES - 129)) | (1 << (SqlBaseParser.MATCH_RECOGNIZE - 129)) | (1 << (SqlBaseParser.MATERIALIZED - 129)) | (1 << (SqlBaseParser.MEASURES - 129)) | (1 << (SqlBaseParser.MERGE - 129)) | (1 << (SqlBaseParser.MINUTE - 129)) | (1 << (SqlBaseParser.MONTH - 129)) | (1 << (SqlBaseParser.NEXT - 129)) | (1 << (SqlBaseParser.NFC - 129)) | (1 << (SqlBaseParser.NFD - 129)) | (1 << (SqlBaseParser.NFKC - 129)) | (1 << (SqlBaseParser.NFKD - 129)) | (1 << (SqlBaseParser.NO - 129)) | (1 << (SqlBaseParser.NONE - 129)) | (1 << (SqlBaseParser.NULLIF - 129)) | (1 << (SqlBaseParser.NULLS - 129)) | (1 << (SqlBaseParser.OFFSET - 129)) | (1 << (SqlBaseParser.OMIT - 129)) | (1 << (SqlBaseParser.OF - 129)) | (1 << (SqlBaseParser.ONE - 129)) | (1 << (SqlBaseParser.ONLY - 129)) | (1 << (SqlBaseParser.OPTION - 129)) | (1 << (SqlBaseParser.ORDINALITY - 129)) | (1 << (SqlBaseParser.OUTPUT - 129)) | (1 << (SqlBaseParser.OVER - 129)) | (1 << (SqlBaseParser.OVERFLOW - 129)) | (1 << (SqlBaseParser.PARTITION - 129)) | (1 << (SqlBaseParser.PARTITIONS - 129)) | (1 << (SqlBaseParser.PAST - 129)) | (1 << (SqlBaseParser.PATH - 129)) | (1 << (SqlBaseParser.PATTERN - 129)) | (1 << (SqlBaseParser.PER - 129)) | (1 << (SqlBaseParser.PERMUTE - 129)) | (1 << (SqlBaseParser.POSITION - 129)) | (1 << (SqlBaseParser.PRECEDING - 129)) | (1 << (SqlBaseParser.PRECISION - 129)) | (1 << (SqlBaseParser.PRIVILEGES - 129)) | (1 << (SqlBaseParser.PROPERTIES - 129)) | (1 << (SqlBaseParser.RANGE - 129)) | (1 << (SqlBaseParser.READ - 129)) | (1 << (SqlBaseParser.REFRESH - 129)) | (1 << 
(SqlBaseParser.RENAME - 129)) | (1 << (SqlBaseParser.REPEATABLE - 129)) | (1 << (SqlBaseParser.REPLACE - 129)) | (1 << (SqlBaseParser.RESET - 129)) | (1 << (SqlBaseParser.RESPECT - 129)) | (1 << (SqlBaseParser.RESTRICT - 129)) | (1 << (SqlBaseParser.REVOKE - 129)) | (1 << (SqlBaseParser.ROLE - 129)))) != 0) or ((((_la - 193)) & ~0x3f) == 0 and ((1 << (_la - 193)) & ((1 << (SqlBaseParser.ROLES - 193)) | (1 << (SqlBaseParser.ROLLBACK - 193)) | (1 << (SqlBaseParser.ROW - 193)) | (1 << (SqlBaseParser.ROWS - 193)) | (1 << (SqlBaseParser.RUNNING - 193)) | (1 << (SqlBaseParser.SCHEMA - 193)) | (1 << (SqlBaseParser.SCHEMAS - 193)) | (1 << (SqlBaseParser.SECOND - 193)) | (1 << (SqlBaseParser.SECURITY - 193)) | (1 << (SqlBaseParser.SEEK - 193)) | (1 << (SqlBaseParser.SERIALIZABLE - 193)) | (1 << (SqlBaseParser.SESSION - 193)) | (1 << (SqlBaseParser.SET - 193)) | (1 << (SqlBaseParser.SETS - 193)) | (1 << (SqlBaseParser.SHOW - 193)) | (1 << (SqlBaseParser.SOME - 193)) | (1 << (SqlBaseParser.START - 193)) | (1 << (SqlBaseParser.STATS - 193)) | (1 << (SqlBaseParser.SUBSET - 193)) | (1 << (SqlBaseParser.SUBSTRING - 193)) | (1 << (SqlBaseParser.SYSTEM - 193)) | (1 << (SqlBaseParser.TABLES - 193)) | (1 << (SqlBaseParser.TABLESAMPLE - 193)) | (1 << (SqlBaseParser.TEXT - 193)) | (1 << (SqlBaseParser.TIES - 193)) | (1 << (SqlBaseParser.TIME - 193)) | (1 << (SqlBaseParser.TIMESTAMP - 193)) | (1 << (SqlBaseParser.TO - 193)) | (1 << (SqlBaseParser.TRANSACTION - 193)) | (1 << (SqlBaseParser.TRUNCATE - 193)) | (1 << (SqlBaseParser.TRY_CAST - 193)) | (1 << (SqlBaseParser.TYPE - 193)) | (1 << (SqlBaseParser.UNBOUNDED - 193)) | (1 << (SqlBaseParser.UNCOMMITTED - 193)) | (1 << (SqlBaseParser.UNMATCHED - 193)) | (1 << (SqlBaseParser.UPDATE - 193)) | (1 << (SqlBaseParser.USE - 193)) | (1 << (SqlBaseParser.USER - 193)) | (1 << (SqlBaseParser.VALIDATE - 193)) | (1 << (SqlBaseParser.VERBOSE - 193)) | (1 << (SqlBaseParser.VERSION - 193)) | (1 << (SqlBaseParser.VIEW - 193)) | (1 << 
(SqlBaseParser.WINDOW - 193)) | (1 << (SqlBaseParser.WITHIN - 193)) | (1 << (SqlBaseParser.WITHOUT - 193)) | (1 << (SqlBaseParser.WORK - 193)) | (1 << (SqlBaseParser.WRITE - 193)) | (1 << (SqlBaseParser.YEAR - 193)) | (1 << (SqlBaseParser.ZONE - 193)))) != 0) or ((((_la - 274)) & ~0x3f) == 0 and ((1 << (_la - 274)) & ((1 << (SqlBaseParser.IDENTIFIER - 274)) | (1 << (SqlBaseParser.DIGIT_IDENTIFIER - 274)) | (1 << (SqlBaseParser.QUOTED_IDENTIFIER - 274)) | (1 << (SqlBaseParser.BACKQUOTED_IDENTIFIER - 274)))) != 0):
self.state = 1951
self.qualifiedName()
self.state = 1956
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la==SqlBaseParser.T__3:
self.state = 1952
self.match(SqlBaseParser.T__3)
self.state = 1953
self.qualifiedName()
self.state = 1958
self._errHandler.sync(self)
_la = self._input.LA(1)
self.state = 1961
self.match(SqlBaseParser.T__2)
self._ctx.stop = self._input.LT(-1)
self.state = 1974
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,256,self._ctx)
while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
if _alt==1:
if self._parseListeners is not None:
self.triggerExitRuleEvent()
_prevctx = localctx
self.state = 1972
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,255,self._ctx)
if la_ == 1:
localctx = SqlBaseParser.SubscriptContext(self, SqlBaseParser.PrimaryExpressionContext(self, _parentctx, _parentState))
localctx.value = _prevctx
self.pushNewRecursionContext(localctx, _startState, self.RULE_primaryExpression)
self.state = 1964
if not self.precpred(self._ctx, 17):
from antlr4.error.Errors import (
FailedPredicateException,
)
raise FailedPredicateException(self, "self.precpred(self._ctx, 17)")
self.state = 1965
self.match(SqlBaseParser.T__6)
self.state = 1966
localctx.index = self.valueExpression(0)
self.state = 1967
self.match(SqlBaseParser.T__7)
elif la_ == 2:
localctx = SqlBaseParser.DereferenceContext(self, SqlBaseParser.PrimaryExpressionContext(self, _parentctx, _parentState))
localctx.base = _prevctx
self.pushNewRecursionContext(localctx, _startState, self.RULE_primaryExpression)
self.state = 1969
if not self.precpred(self._ctx, 15):
from antlr4.error.Errors import (
FailedPredicateException,
)
raise FailedPredicateException(self, "self.precpred(self._ctx, 15)")
self.state = 1970
self.match(SqlBaseParser.T__0)
self.state = 1971
localctx.fieldName = self.identifier()
self.state = 1976
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,256,self._ctx)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.unrollRecursionContexts(_parentctx)
return localctx
class ProcessingModeContext(ParserRuleContext):
    """Parse-tree node for the ``processingMode`` rule (RUNNING | FINAL)."""

    __slots__ = 'parser'

    def __init__(self, parser, parent: ParserRuleContext = None, invokingState: int = -1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def getRuleIndex(self):
        return SqlBaseParser.RULE_processingMode

    # --- token accessors -------------------------------------------------
    def RUNNING(self):
        return self.getToken(SqlBaseParser.RUNNING, 0)

    def FINAL(self):
        return self.getToken(SqlBaseParser.FINAL, 0)

    # --- listener / visitor dispatch -------------------------------------
    def enterRule(self, listener: ParseTreeListener):
        # Only notify listeners that implement the hook.
        if hasattr(listener, "enterProcessingMode"):
            listener.enterProcessingMode(self)

    def exitRule(self, listener: ParseTreeListener):
        if hasattr(listener, "exitProcessingMode"):
            listener.exitProcessingMode(self)

    def accept(self, visitor: ParseTreeVisitor):
        # Fall back to the generic child visitor when no specific visit exists.
        if not hasattr(visitor, "visitProcessingMode"):
            return visitor.visitChildren(self)
        return visitor.visitProcessingMode(self)
def processingMode(self):
    """Parse the ``processingMode`` rule: a single RUNNING or FINAL keyword.

    Generated (ANTLR-style) rule function; the ``self.state`` assignments
    are ATN state numbers and must not be altered by hand.
    """

    localctx = SqlBaseParser.ProcessingModeContext(self, self._ctx, self.state)
    self.enterRule(localctx, 102, self.RULE_processingMode)
    self._la = 0 # Token type
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 1977
        _la = self._input.LA(1)
        # Exactly one of the two keyword alternatives is accepted; any other
        # token is reported and recovered in-line.
        if not(_la==SqlBaseParser.FINAL or _la==SqlBaseParser.RUNNING):
            self._errHandler.recoverInline(self)
        else:
            self._errHandler.reportMatch(self)
            self.consume()
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class NullTreatmentContext(ParserRuleContext):
    """Parse-tree node for the ``nullTreatment`` rule (IGNORE NULLS | RESPECT NULLS)."""

    __slots__ = 'parser'

    def __init__(self, parser, parent: ParserRuleContext = None, invokingState: int = -1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def getRuleIndex(self):
        return SqlBaseParser.RULE_nullTreatment

    # --- token accessors -------------------------------------------------
    def IGNORE(self):
        return self.getToken(SqlBaseParser.IGNORE, 0)

    def NULLS(self):
        return self.getToken(SqlBaseParser.NULLS, 0)

    def RESPECT(self):
        return self.getToken(SqlBaseParser.RESPECT, 0)

    # --- listener / visitor dispatch -------------------------------------
    def enterRule(self, listener: ParseTreeListener):
        if hasattr(listener, "enterNullTreatment"):
            listener.enterNullTreatment(self)

    def exitRule(self, listener: ParseTreeListener):
        if hasattr(listener, "exitNullTreatment"):
            listener.exitNullTreatment(self)

    def accept(self, visitor: ParseTreeVisitor):
        if not hasattr(visitor, "visitNullTreatment"):
            return visitor.visitChildren(self)
        return visitor.visitNullTreatment(self)
def nullTreatment(self):
    """Parse the ``nullTreatment`` rule: ``IGNORE NULLS`` or ``RESPECT NULLS``.

    Generated rule function; alternative is chosen on one token of lookahead.
    """

    localctx = SqlBaseParser.NullTreatmentContext(self, self._ctx, self.state)
    self.enterRule(localctx, 104, self.RULE_nullTreatment)
    try:
        self.state = 1983
        self._errHandler.sync(self)
        token = self._input.LA(1)
        if token in [SqlBaseParser.IGNORE]:
            # Alternative 1: IGNORE NULLS
            self.enterOuterAlt(localctx, 1)
            self.state = 1979
            self.match(SqlBaseParser.IGNORE)
            self.state = 1980
            self.match(SqlBaseParser.NULLS)
        elif token in [SqlBaseParser.RESPECT]:
            # Alternative 2: RESPECT NULLS
            self.enterOuterAlt(localctx, 2)
            self.state = 1981
            self.match(SqlBaseParser.RESPECT)
            self.state = 1982
            self.match(SqlBaseParser.NULLS)
        else:
            raise NoViableAltException(self)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class StringContext(ParserRuleContext):
    """Base parse-tree node for the ``string`` rule; concrete alternatives
    are BasicStringLiteralContext and UnicodeStringLiteralContext."""

    __slots__ = 'parser'

    def __init__(self, parser, parent: ParserRuleContext = None, invokingState: int = -1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def getRuleIndex(self):
        return SqlBaseParser.RULE_string

    def copyFrom(self, ctx: ParserRuleContext):
        # Re-parent children when a labeled alternative replaces this node.
        super().copyFrom(ctx)


class UnicodeStringLiteralContext(StringContext):
    """``string`` alternative carrying a UNICODE_STRING token with an
    optional UESCAPE STRING suffix."""

    def __init__(self, parser, ctx: ParserRuleContext):  # actually a SqlBaseParser.StringContext
        super().__init__(parser)
        self.copyFrom(ctx)

    def UNICODE_STRING(self):
        return self.getToken(SqlBaseParser.UNICODE_STRING, 0)

    def UESCAPE(self):
        return self.getToken(SqlBaseParser.UESCAPE, 0)

    def STRING(self):
        return self.getToken(SqlBaseParser.STRING, 0)

    def enterRule(self, listener: ParseTreeListener):
        if hasattr(listener, "enterUnicodeStringLiteral"):
            listener.enterUnicodeStringLiteral(self)

    def exitRule(self, listener: ParseTreeListener):
        if hasattr(listener, "exitUnicodeStringLiteral"):
            listener.exitUnicodeStringLiteral(self)

    def accept(self, visitor: ParseTreeVisitor):
        if not hasattr(visitor, "visitUnicodeStringLiteral"):
            return visitor.visitChildren(self)
        return visitor.visitUnicodeStringLiteral(self)


class BasicStringLiteralContext(StringContext):
    """``string`` alternative carrying a plain STRING token."""

    def __init__(self, parser, ctx: ParserRuleContext):  # actually a SqlBaseParser.StringContext
        super().__init__(parser)
        self.copyFrom(ctx)

    def STRING(self):
        return self.getToken(SqlBaseParser.STRING, 0)

    def enterRule(self, listener: ParseTreeListener):
        if hasattr(listener, "enterBasicStringLiteral"):
            listener.enterBasicStringLiteral(self)

    def exitRule(self, listener: ParseTreeListener):
        if hasattr(listener, "exitBasicStringLiteral"):
            listener.exitBasicStringLiteral(self)

    def accept(self, visitor: ParseTreeVisitor):
        if not hasattr(visitor, "visitBasicStringLiteral"):
            return visitor.visitChildren(self)
        return visitor.visitBasicStringLiteral(self)
def string(self):
    """Parse the ``string`` rule: a plain STRING literal, or a
    UNICODE_STRING literal optionally followed by ``UESCAPE STRING``.

    The context object is replaced by the labeled-alternative subclass
    matching the chosen branch.
    """

    localctx = SqlBaseParser.StringContext(self, self._ctx, self.state)
    self.enterRule(localctx, 106, self.RULE_string)
    try:
        self.state = 1991
        self._errHandler.sync(self)
        token = self._input.LA(1)
        if token in [SqlBaseParser.STRING]:
            # Alternative 1: basic string literal.
            localctx = SqlBaseParser.BasicStringLiteralContext(self, localctx)
            self.enterOuterAlt(localctx, 1)
            self.state = 1985
            self.match(SqlBaseParser.STRING)
        elif token in [SqlBaseParser.UNICODE_STRING]:
            # Alternative 2: unicode string literal, optional UESCAPE clause.
            localctx = SqlBaseParser.UnicodeStringLiteralContext(self, localctx)
            self.enterOuterAlt(localctx, 2)
            self.state = 1986
            self.match(SqlBaseParser.UNICODE_STRING)
            self.state = 1989
            self._errHandler.sync(self)
            # Adaptive prediction decides whether a UESCAPE clause follows.
            la_ = self._interp.adaptivePredict(self._input,258,self._ctx)
            if la_ == 1:
                self.state = 1987
                self.match(SqlBaseParser.UESCAPE)
                self.state = 1988
                self.match(SqlBaseParser.STRING)
        else:
            raise NoViableAltException(self)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class TimeZoneSpecifierContext(ParserRuleContext):
    """Base parse-tree node for the ``timeZoneSpecifier`` rule; concrete
    alternatives are TimeZoneIntervalContext and TimeZoneStringContext."""

    __slots__ = 'parser'

    def __init__(self, parser, parent: ParserRuleContext = None, invokingState: int = -1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def getRuleIndex(self):
        return SqlBaseParser.RULE_timeZoneSpecifier

    def copyFrom(self, ctx: ParserRuleContext):
        # Re-parent children when a labeled alternative replaces this node.
        super().copyFrom(ctx)


class TimeZoneIntervalContext(TimeZoneSpecifierContext):
    """``TIME ZONE`` followed by an interval expression."""

    def __init__(self, parser, ctx: ParserRuleContext):  # actually a SqlBaseParser.TimeZoneSpecifierContext
        super().__init__(parser)
        self.copyFrom(ctx)

    def TIME(self):
        return self.getToken(SqlBaseParser.TIME, 0)

    def ZONE(self):
        return self.getToken(SqlBaseParser.ZONE, 0)

    def interval(self):
        return self.getTypedRuleContext(SqlBaseParser.IntervalContext, 0)

    def enterRule(self, listener: ParseTreeListener):
        if hasattr(listener, "enterTimeZoneInterval"):
            listener.enterTimeZoneInterval(self)

    def exitRule(self, listener: ParseTreeListener):
        if hasattr(listener, "exitTimeZoneInterval"):
            listener.exitTimeZoneInterval(self)

    def accept(self, visitor: ParseTreeVisitor):
        if not hasattr(visitor, "visitTimeZoneInterval"):
            return visitor.visitChildren(self)
        return visitor.visitTimeZoneInterval(self)


class TimeZoneStringContext(TimeZoneSpecifierContext):
    """``TIME ZONE`` followed by a string literal."""

    def __init__(self, parser, ctx: ParserRuleContext):  # actually a SqlBaseParser.TimeZoneSpecifierContext
        super().__init__(parser)
        self.copyFrom(ctx)

    def TIME(self):
        return self.getToken(SqlBaseParser.TIME, 0)

    def ZONE(self):
        return self.getToken(SqlBaseParser.ZONE, 0)

    def string(self):
        return self.getTypedRuleContext(SqlBaseParser.StringContext, 0)

    def enterRule(self, listener: ParseTreeListener):
        if hasattr(listener, "enterTimeZoneString"):
            listener.enterTimeZoneString(self)

    def exitRule(self, listener: ParseTreeListener):
        if hasattr(listener, "exitTimeZoneString"):
            listener.exitTimeZoneString(self)

    def accept(self, visitor: ParseTreeVisitor):
        if not hasattr(visitor, "visitTimeZoneString"):
            return visitor.visitChildren(self)
        return visitor.visitTimeZoneString(self)
def timeZoneSpecifier(self):
    """Parse the ``timeZoneSpecifier`` rule: ``TIME ZONE`` followed by
    either an interval or a string literal.

    The two alternatives share the TIME ZONE prefix, so adaptive
    prediction (decision 260) selects the branch.
    """

    localctx = SqlBaseParser.TimeZoneSpecifierContext(self, self._ctx, self.state)
    self.enterRule(localctx, 108, self.RULE_timeZoneSpecifier)
    try:
        self.state = 1999
        self._errHandler.sync(self)
        la_ = self._interp.adaptivePredict(self._input,260,self._ctx)
        if la_ == 1:
            # Alternative 1: TIME ZONE interval
            localctx = SqlBaseParser.TimeZoneIntervalContext(self, localctx)
            self.enterOuterAlt(localctx, 1)
            self.state = 1993
            self.match(SqlBaseParser.TIME)
            self.state = 1994
            self.match(SqlBaseParser.ZONE)
            self.state = 1995
            self.interval()
        elif la_ == 2:
            # Alternative 2: TIME ZONE string
            localctx = SqlBaseParser.TimeZoneStringContext(self, localctx)
            self.enterOuterAlt(localctx, 2)
            self.state = 1996
            self.match(SqlBaseParser.TIME)
            self.state = 1997
            self.match(SqlBaseParser.ZONE)
            self.state = 1998
            self.string()
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class ComparisonOperatorContext(ParserRuleContext):
    """Parse-tree node for the ``comparisonOperator`` rule
    (EQ | NEQ | LT | LTE | GT | GTE)."""

    __slots__ = 'parser'

    def __init__(self, parser, parent: ParserRuleContext = None, invokingState: int = -1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def getRuleIndex(self):
        return SqlBaseParser.RULE_comparisonOperator

    # --- token accessors -------------------------------------------------
    def EQ(self):
        return self.getToken(SqlBaseParser.EQ, 0)

    def NEQ(self):
        return self.getToken(SqlBaseParser.NEQ, 0)

    def LT(self):
        return self.getToken(SqlBaseParser.LT, 0)

    def LTE(self):
        return self.getToken(SqlBaseParser.LTE, 0)

    def GT(self):
        return self.getToken(SqlBaseParser.GT, 0)

    def GTE(self):
        return self.getToken(SqlBaseParser.GTE, 0)

    # --- listener / visitor dispatch -------------------------------------
    def enterRule(self, listener: ParseTreeListener):
        if hasattr(listener, "enterComparisonOperator"):
            listener.enterComparisonOperator(self)

    def exitRule(self, listener: ParseTreeListener):
        if hasattr(listener, "exitComparisonOperator"):
            listener.exitComparisonOperator(self)

    def accept(self, visitor: ParseTreeVisitor):
        if not hasattr(visitor, "visitComparisonOperator"):
            return visitor.visitChildren(self)
        return visitor.visitComparisonOperator(self)
def comparisonOperator(self):
    """Parse the ``comparisonOperator`` rule: one of EQ, NEQ, LT, LTE, GT, GTE.

    The membership test below is a generated 64-bit token-set bitmask
    (tokens offset by 255); do not edit it by hand.
    """

    localctx = SqlBaseParser.ComparisonOperatorContext(self, self._ctx, self.state)
    self.enterRule(localctx, 110, self.RULE_comparisonOperator)
    self._la = 0 # Token type
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 2001
        _la = self._input.LA(1)
        if not(((((_la - 255)) & ~0x3f) == 0 and ((1 << (_la - 255)) & ((1 << (SqlBaseParser.EQ - 255)) | (1 << (SqlBaseParser.NEQ - 255)) | (1 << (SqlBaseParser.LT - 255)) | (1 << (SqlBaseParser.LTE - 255)) | (1 << (SqlBaseParser.GT - 255)) | (1 << (SqlBaseParser.GTE - 255)))) != 0)):
            self._errHandler.recoverInline(self)
        else:
            self._errHandler.reportMatch(self)
            self.consume()
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class ComparisonQuantifierContext(ParserRuleContext):
    """Parse-tree node for the ``comparisonQuantifier`` rule (ALL | SOME | ANY)."""

    __slots__ = 'parser'

    def __init__(self, parser, parent: ParserRuleContext = None, invokingState: int = -1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def getRuleIndex(self):
        return SqlBaseParser.RULE_comparisonQuantifier

    # --- token accessors -------------------------------------------------
    def ALL(self):
        return self.getToken(SqlBaseParser.ALL, 0)

    def SOME(self):
        return self.getToken(SqlBaseParser.SOME, 0)

    def ANY(self):
        return self.getToken(SqlBaseParser.ANY, 0)

    # --- listener / visitor dispatch -------------------------------------
    def enterRule(self, listener: ParseTreeListener):
        if hasattr(listener, "enterComparisonQuantifier"):
            listener.enterComparisonQuantifier(self)

    def exitRule(self, listener: ParseTreeListener):
        if hasattr(listener, "exitComparisonQuantifier"):
            listener.exitComparisonQuantifier(self)

    def accept(self, visitor: ParseTreeVisitor):
        if not hasattr(visitor, "visitComparisonQuantifier"):
            return visitor.visitChildren(self)
        return visitor.visitComparisonQuantifier(self)
def comparisonQuantifier(self):
    """Parse the ``comparisonQuantifier`` rule: one of ALL, ANY, SOME."""

    localctx = SqlBaseParser.ComparisonQuantifierContext(self, self._ctx, self.state)
    self.enterRule(localctx, 112, self.RULE_comparisonQuantifier)
    self._la = 0 # Token type
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 2003
        _la = self._input.LA(1)
        # One-token alternative set; mismatches are recovered in-line.
        if not(_la==SqlBaseParser.ALL or _la==SqlBaseParser.ANY or _la==SqlBaseParser.SOME):
            self._errHandler.recoverInline(self)
        else:
            self._errHandler.reportMatch(self)
            self.consume()
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class BooleanValueContext(ParserRuleContext):
    """Parse-tree node for the ``booleanValue`` rule (TRUE | FALSE)."""

    __slots__ = 'parser'

    def __init__(self, parser, parent: ParserRuleContext = None, invokingState: int = -1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def getRuleIndex(self):
        return SqlBaseParser.RULE_booleanValue

    # --- token accessors -------------------------------------------------
    def TRUE(self):
        return self.getToken(SqlBaseParser.TRUE, 0)

    def FALSE(self):
        return self.getToken(SqlBaseParser.FALSE, 0)

    # --- listener / visitor dispatch -------------------------------------
    def enterRule(self, listener: ParseTreeListener):
        if hasattr(listener, "enterBooleanValue"):
            listener.enterBooleanValue(self)

    def exitRule(self, listener: ParseTreeListener):
        if hasattr(listener, "exitBooleanValue"):
            listener.exitBooleanValue(self)

    def accept(self, visitor: ParseTreeVisitor):
        if not hasattr(visitor, "visitBooleanValue"):
            return visitor.visitChildren(self)
        return visitor.visitBooleanValue(self)
def booleanValue(self):
    """Parse the ``booleanValue`` rule: TRUE or FALSE."""

    localctx = SqlBaseParser.BooleanValueContext(self, self._ctx, self.state)
    self.enterRule(localctx, 114, self.RULE_booleanValue)
    self._la = 0 # Token type
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 2005
        _la = self._input.LA(1)
        # One-token alternative set; mismatches are recovered in-line.
        if not(_la==SqlBaseParser.FALSE or _la==SqlBaseParser.TRUE):
            self._errHandler.recoverInline(self)
        else:
            self._errHandler.reportMatch(self)
            self.consume()
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class IntervalContext(ParserRuleContext):
    """Parse-tree node for the ``interval`` rule:
    INTERVAL [sign] string intervalField [TO intervalField]."""

    __slots__ = 'parser'

    def __init__(self, parser, parent: ParserRuleContext = None, invokingState: int = -1):
        super().__init__(parent, invokingState)
        self.parser = parser
        self.sign = None   # Token: optional leading PLUS/MINUS
        self.from_ = None  # IntervalFieldContext: starting field
        self.to = None     # IntervalFieldContext: optional end field of TO clause

    def getRuleIndex(self):
        return SqlBaseParser.RULE_interval

    # --- token / child accessors -----------------------------------------
    def INTERVAL(self):
        return self.getToken(SqlBaseParser.INTERVAL, 0)

    def TO(self):
        return self.getToken(SqlBaseParser.TO, 0)

    def PLUS(self):
        return self.getToken(SqlBaseParser.PLUS, 0)

    def MINUS(self):
        return self.getToken(SqlBaseParser.MINUS, 0)

    def string(self):
        return self.getTypedRuleContext(SqlBaseParser.StringContext, 0)

    def intervalField(self, i: int = None):
        # No index: return every intervalField child; otherwise the i-th one.
        if i is None:
            return self.getTypedRuleContexts(SqlBaseParser.IntervalFieldContext)
        return self.getTypedRuleContext(SqlBaseParser.IntervalFieldContext, i)

    # --- listener / visitor dispatch -------------------------------------
    def enterRule(self, listener: ParseTreeListener):
        if hasattr(listener, "enterInterval"):
            listener.enterInterval(self)

    def exitRule(self, listener: ParseTreeListener):
        if hasattr(listener, "exitInterval"):
            listener.exitInterval(self)

    def accept(self, visitor: ParseTreeVisitor):
        if not hasattr(visitor, "visitInterval"):
            return visitor.visitChildren(self)
        return visitor.visitInterval(self)
def interval(self):
    """Parse the ``interval`` rule:
    ``INTERVAL [+|-] string intervalField [TO intervalField]``.

    Captures the optional sign token in ``localctx.sign`` and the field
    bounds in ``localctx.from_`` / ``localctx.to``.
    """

    localctx = SqlBaseParser.IntervalContext(self, self._ctx, self.state)
    self.enterRule(localctx, 116, self.RULE_interval)
    self._la = 0 # Token type
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 2007
        self.match(SqlBaseParser.INTERVAL)
        self.state = 2009
        self._errHandler.sync(self)
        _la = self._input.LA(1)
        # Optional sign: only consumed when the next token is PLUS or MINUS.
        if _la==SqlBaseParser.PLUS or _la==SqlBaseParser.MINUS:
            self.state = 2008
            localctx.sign = self._input.LT(1)
            _la = self._input.LA(1)
            if not(_la==SqlBaseParser.PLUS or _la==SqlBaseParser.MINUS):
                localctx.sign = self._errHandler.recoverInline(self)
            else:
                self._errHandler.reportMatch(self)
                self.consume()
        self.state = 2011
        self.string()
        self.state = 2012
        localctx.from_ = self.intervalField()
        self.state = 2015
        self._errHandler.sync(self)
        # Adaptive prediction decides whether a TO clause follows.
        la_ = self._interp.adaptivePredict(self._input,262,self._ctx)
        if la_ == 1:
            self.state = 2013
            self.match(SqlBaseParser.TO)
            self.state = 2014
            localctx.to = self.intervalField()
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class IntervalFieldContext(ParserRuleContext):
    """Parse-tree node for the ``intervalField`` rule
    (YEAR | MONTH | DAY | HOUR | MINUTE | SECOND)."""

    __slots__ = 'parser'

    def __init__(self, parser, parent: ParserRuleContext = None, invokingState: int = -1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def getRuleIndex(self):
        return SqlBaseParser.RULE_intervalField

    # --- token accessors -------------------------------------------------
    def YEAR(self):
        return self.getToken(SqlBaseParser.YEAR, 0)

    def MONTH(self):
        return self.getToken(SqlBaseParser.MONTH, 0)

    def DAY(self):
        return self.getToken(SqlBaseParser.DAY, 0)

    def HOUR(self):
        return self.getToken(SqlBaseParser.HOUR, 0)

    def MINUTE(self):
        return self.getToken(SqlBaseParser.MINUTE, 0)

    def SECOND(self):
        return self.getToken(SqlBaseParser.SECOND, 0)

    # --- listener / visitor dispatch -------------------------------------
    def enterRule(self, listener: ParseTreeListener):
        if hasattr(listener, "enterIntervalField"):
            listener.enterIntervalField(self)

    def exitRule(self, listener: ParseTreeListener):
        if hasattr(listener, "exitIntervalField"):
            listener.exitIntervalField(self)

    def accept(self, visitor: ParseTreeVisitor):
        if not hasattr(visitor, "visitIntervalField"):
            return visitor.visitChildren(self)
        return visitor.visitIntervalField(self)
def intervalField(self):
    """intervalField : YEAR | MONTH | DAY | HOUR | MINUTE | SECOND

    Auto-generated ANTLR rule method — do not hand-edit the state numbers.
    Returns the populated IntervalFieldContext.
    """
    localctx = SqlBaseParser.IntervalFieldContext(self, self._ctx, self.state)
    self.enterRule(localctx, 118, self.RULE_intervalField)
    self._la = 0 # Token type
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 2017
        _la = self._input.LA(1)
        # Bit-set membership test over the interval-unit keywords; a
        # non-matching token is handed to the error handler for in-line
        # recovery instead of being consumed.
        if not(_la==SqlBaseParser.DAY or ((((_la - 101)) & ~0x3f) == 0 and ((1 << (_la - 101)) & ((1 << (SqlBaseParser.HOUR - 101)) | (1 << (SqlBaseParser.MINUTE - 101)) | (1 << (SqlBaseParser.MONTH - 101)))) != 0) or _la==SqlBaseParser.SECOND or _la==SqlBaseParser.YEAR):
            self._errHandler.recoverInline(self)
        else:
            self._errHandler.reportMatch(self)
            self.consume()
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class NormalFormContext(ParserRuleContext):
    """Parse-tree node for the ``normalForm`` rule: NFD | NFC | NFKD | NFKC."""

    __slots__ = 'parser'

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    # --- token accessors ---------------------------------------------------
    def NFD(self):
        return self.getToken(SqlBaseParser.NFD, 0)

    def NFC(self):
        return self.getToken(SqlBaseParser.NFC, 0)

    def NFKD(self):
        return self.getToken(SqlBaseParser.NFKD, 0)

    def NFKC(self):
        return self.getToken(SqlBaseParser.NFKC, 0)

    def getRuleIndex(self):
        return SqlBaseParser.RULE_normalForm

    # --- listener / visitor dispatch --------------------------------------
    def enterRule(self, listener:ParseTreeListener):
        # Notify only listeners that implement the hook (EAFP).
        try:
            hook = listener.enterNormalForm
        except AttributeError:
            return
        hook(self)

    def exitRule(self, listener:ParseTreeListener):
        try:
            hook = listener.exitNormalForm
        except AttributeError:
            return
        hook(self)

    def accept(self, visitor:ParseTreeVisitor):
        visit = getattr(visitor, "visitNormalForm", visitor.visitChildren)
        return visit(self)
def normalForm(self):
    """normalForm : NFD | NFC | NFKD | NFKC

    Auto-generated ANTLR rule method — do not hand-edit the state numbers.
    Returns the populated NormalFormContext.
    """
    localctx = SqlBaseParser.NormalFormContext(self, self._ctx, self.state)
    self.enterRule(localctx, 120, self.RULE_normalForm)
    self._la = 0 # Token type
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 2019
        _la = self._input.LA(1)
        # Bit-set membership test over the four Unicode normal-form
        # keywords; on mismatch the error handler recovers in line.
        if not(((((_la - 142)) & ~0x3f) == 0 and ((1 << (_la - 142)) & ((1 << (SqlBaseParser.NFC - 142)) | (1 << (SqlBaseParser.NFD - 142)) | (1 << (SqlBaseParser.NFKC - 142)) | (1 << (SqlBaseParser.NFKD - 142)))) != 0)):
            self._errHandler.recoverInline(self)
        else:
            self._errHandler.reportMatch(self)
            self.consume()
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class Type_Context(ParserRuleContext):
    """Base parse-tree node for the ``type_`` rule.

    Concrete alternatives (RowTypeContext, ArrayTypeContext, ...) subclass
    this and take over its children via ``copyFrom``.
    """

    __slots__ = 'parser'

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def getRuleIndex(self):
        return SqlBaseParser.RULE_type_

    def copyFrom(self, ctx:ParserRuleContext):
        super().copyFrom(ctx)
class RowTypeContext(Type_Context):
    """``type_`` alternative: ROW '(' rowField (',' rowField)* ')'."""

    def __init__(self, parser, ctx:ParserRuleContext):  # ctx is a SqlBaseParser.Type_Context
        super().__init__(parser)
        self.copyFrom(ctx)

    def ROW(self):
        return self.getToken(SqlBaseParser.ROW, 0)

    def rowField(self, i:int=None):
        # i is None -> all row fields; otherwise the i-th one.
        if i is None:
            return self.getTypedRuleContexts(SqlBaseParser.RowFieldContext)
        return self.getTypedRuleContext(SqlBaseParser.RowFieldContext,i)

    def enterRule(self, listener:ParseTreeListener):
        try:
            hook = listener.enterRowType
        except AttributeError:
            return
        hook(self)

    def exitRule(self, listener:ParseTreeListener):
        try:
            hook = listener.exitRowType
        except AttributeError:
            return
        hook(self)

    def accept(self, visitor:ParseTreeVisitor):
        visit = getattr(visitor, "visitRowType", visitor.visitChildren)
        return visit(self)
class IntervalTypeContext(Type_Context):
    """``type_`` alternative: INTERVAL from=intervalField (TO to=intervalField)?."""

    def __init__(self, parser, ctx:ParserRuleContext):  # ctx is a SqlBaseParser.Type_Context
        super().__init__(parser)
        self.from_ = None  # IntervalFieldContext (labelled 'from' in the grammar)
        self.to = None     # IntervalFieldContext, present only with a TO clause
        self.copyFrom(ctx)

    def INTERVAL(self):
        return self.getToken(SqlBaseParser.INTERVAL, 0)

    def intervalField(self, i:int=None):
        if i is None:
            return self.getTypedRuleContexts(SqlBaseParser.IntervalFieldContext)
        return self.getTypedRuleContext(SqlBaseParser.IntervalFieldContext,i)

    def TO(self):
        return self.getToken(SqlBaseParser.TO, 0)

    def enterRule(self, listener:ParseTreeListener):
        try:
            hook = listener.enterIntervalType
        except AttributeError:
            return
        hook(self)

    def exitRule(self, listener:ParseTreeListener):
        try:
            hook = listener.exitIntervalType
        except AttributeError:
            return
        hook(self)

    def accept(self, visitor:ParseTreeVisitor):
        visit = getattr(visitor, "visitIntervalType", visitor.visitChildren)
        return visit(self)
class ArrayTypeContext(Type_Context):
    """``type_`` alternative: the postfix ARRAY suffix (left-recursive)."""

    def __init__(self, parser, ctx:ParserRuleContext):  # ctx is a SqlBaseParser.Type_Context
        super().__init__(parser)
        self.copyFrom(ctx)

    def type_(self):
        return self.getTypedRuleContext(SqlBaseParser.Type_Context,0)

    def ARRAY(self):
        return self.getToken(SqlBaseParser.ARRAY, 0)

    def INTEGER_VALUE(self):
        return self.getToken(SqlBaseParser.INTEGER_VALUE, 0)

    def enterRule(self, listener:ParseTreeListener):
        try:
            hook = listener.enterArrayType
        except AttributeError:
            return
        hook(self)

    def exitRule(self, listener:ParseTreeListener):
        try:
            hook = listener.exitArrayType
        except AttributeError:
            return
        hook(self)

    def accept(self, visitor:ParseTreeVisitor):
        visit = getattr(visitor, "visitArrayType", visitor.visitChildren)
        return visit(self)
class DoublePrecisionTypeContext(Type_Context):
    """``type_`` alternative: the two-keyword DOUBLE PRECISION type."""

    def __init__(self, parser, ctx:ParserRuleContext):  # ctx is a SqlBaseParser.Type_Context
        super().__init__(parser)
        self.copyFrom(ctx)

    def DOUBLE(self):
        return self.getToken(SqlBaseParser.DOUBLE, 0)

    def PRECISION(self):
        return self.getToken(SqlBaseParser.PRECISION, 0)

    def enterRule(self, listener:ParseTreeListener):
        try:
            hook = listener.enterDoublePrecisionType
        except AttributeError:
            return
        hook(self)

    def exitRule(self, listener:ParseTreeListener):
        try:
            hook = listener.exitDoublePrecisionType
        except AttributeError:
            return
        hook(self)

    def accept(self, visitor:ParseTreeVisitor):
        visit = getattr(visitor, "visitDoublePrecisionType", visitor.visitChildren)
        return visit(self)
class LegacyArrayTypeContext(Type_Context):
    """``type_`` alternative: legacy angle-bracket syntax ARRAY '<' type_ '>'."""

    def __init__(self, parser, ctx:ParserRuleContext):  # ctx is a SqlBaseParser.Type_Context
        super().__init__(parser)
        self.copyFrom(ctx)

    def ARRAY(self):
        return self.getToken(SqlBaseParser.ARRAY, 0)

    def LT(self):
        return self.getToken(SqlBaseParser.LT, 0)

    def type_(self):
        return self.getTypedRuleContext(SqlBaseParser.Type_Context,0)

    def GT(self):
        return self.getToken(SqlBaseParser.GT, 0)

    def enterRule(self, listener:ParseTreeListener):
        try:
            hook = listener.enterLegacyArrayType
        except AttributeError:
            return
        hook(self)

    def exitRule(self, listener:ParseTreeListener):
        try:
            hook = listener.exitLegacyArrayType
        except AttributeError:
            return
        hook(self)

    def accept(self, visitor:ParseTreeVisitor):
        visit = getattr(visitor, "visitLegacyArrayType", visitor.visitChildren)
        return visit(self)
class GenericTypeContext(Type_Context):
    """``type_`` alternative: identifier ('(' typeParameter (',' typeParameter)* ')')?."""

    def __init__(self, parser, ctx:ParserRuleContext):  # ctx is a SqlBaseParser.Type_Context
        super().__init__(parser)
        self.copyFrom(ctx)

    def identifier(self):
        return self.getTypedRuleContext(SqlBaseParser.IdentifierContext,0)

    def typeParameter(self, i:int=None):
        # i is None -> all parameters; otherwise the i-th one.
        if i is None:
            return self.getTypedRuleContexts(SqlBaseParser.TypeParameterContext)
        return self.getTypedRuleContext(SqlBaseParser.TypeParameterContext,i)

    def enterRule(self, listener:ParseTreeListener):
        try:
            hook = listener.enterGenericType
        except AttributeError:
            return
        hook(self)

    def exitRule(self, listener:ParseTreeListener):
        try:
            hook = listener.exitGenericType
        except AttributeError:
            return
        hook(self)

    def accept(self, visitor:ParseTreeVisitor):
        visit = getattr(visitor, "visitGenericType", visitor.visitChildren)
        return visit(self)
class DateTimeTypeContext(Type_Context):
    """``type_`` alternatives for TIME/TIMESTAMP with optional precision and
    WITH/WITHOUT TIME ZONE qualifiers."""

    def __init__(self, parser, ctx:ParserRuleContext):  # ctx is a SqlBaseParser.Type_Context
        super().__init__(parser)
        self.base = None       # Token: TIME or TIMESTAMP keyword
        self.precision = None  # TypeParameterContext, present only with '(p)'
        self.copyFrom(ctx)

    def TIMESTAMP(self):
        return self.getToken(SqlBaseParser.TIMESTAMP, 0)

    def WITHOUT(self):
        return self.getToken(SqlBaseParser.WITHOUT, 0)

    def TIME(self, i:int=None):
        # TIME can occur twice (as the base type and inside "TIME ZONE").
        if i is None:
            return self.getTokens(SqlBaseParser.TIME)
        return self.getToken(SqlBaseParser.TIME, i)

    def ZONE(self):
        return self.getToken(SqlBaseParser.ZONE, 0)

    def typeParameter(self):
        return self.getTypedRuleContext(SqlBaseParser.TypeParameterContext,0)

    def WITH(self):
        return self.getToken(SqlBaseParser.WITH, 0)

    def enterRule(self, listener:ParseTreeListener):
        try:
            hook = listener.enterDateTimeType
        except AttributeError:
            return
        hook(self)

    def exitRule(self, listener:ParseTreeListener):
        try:
            hook = listener.exitDateTimeType
        except AttributeError:
            return
        hook(self)

    def accept(self, visitor:ParseTreeVisitor):
        visit = getattr(visitor, "visitDateTimeType", visitor.visitChildren)
        return visit(self)
class LegacyMapTypeContext(Type_Context):
    """``type_`` alternative: legacy syntax MAP '<' keyType ',' valueType '>'."""

    def __init__(self, parser, ctx:ParserRuleContext):  # ctx is a SqlBaseParser.Type_Context
        super().__init__(parser)
        self.keyType = None    # Type_Context
        self.valueType = None  # Type_Context
        self.copyFrom(ctx)

    def MAP(self):
        return self.getToken(SqlBaseParser.MAP, 0)

    def LT(self):
        return self.getToken(SqlBaseParser.LT, 0)

    def GT(self):
        return self.getToken(SqlBaseParser.GT, 0)

    def type_(self, i:int=None):
        # i is None -> [keyType, valueType]; otherwise one of them.
        if i is None:
            return self.getTypedRuleContexts(SqlBaseParser.Type_Context)
        return self.getTypedRuleContext(SqlBaseParser.Type_Context,i)

    def enterRule(self, listener:ParseTreeListener):
        try:
            hook = listener.enterLegacyMapType
        except AttributeError:
            return
        hook(self)

    def exitRule(self, listener:ParseTreeListener):
        try:
            hook = listener.exitLegacyMapType
        except AttributeError:
            return
        hook(self)

    def accept(self, visitor:ParseTreeVisitor):
        visit = getattr(visitor, "visitLegacyMapType", visitor.visitChildren)
        return visit(self)
def type_(self, _p:int=0):
    """Left-recursive ``type_`` rule (ANTLR precedence-climbing form).

    Auto-generated — the `self.state` assignments and decision numbers
    mirror the serialized ATN and must not be hand-edited.

    :param _p: minimum precedence for the left-recursion loop.
    :returns: the Type_Context subclass matching the chosen alternative.
    """
    _parentctx = self._ctx
    _parentState = self.state
    localctx = SqlBaseParser.Type_Context(self, self._ctx, _parentState)
    _prevctx = localctx
    _startState = 122
    self.enterRecursionRule(localctx, 122, self.RULE_type_, _p)
    self._la = 0 # Token type
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 2112
        self._errHandler.sync(self)
        la_ = self._interp.adaptivePredict(self._input,273,self._ctx)
        if la_ == 1:
            # ROW '(' rowField (',' rowField)* ')'
            localctx = SqlBaseParser.RowTypeContext(self, localctx)
            self._ctx = localctx
            _prevctx = localctx
            self.state = 2022
            self.match(SqlBaseParser.ROW)
            self.state = 2023
            self.match(SqlBaseParser.T__1)
            self.state = 2024
            self.rowField()
            self.state = 2029
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            while _la==SqlBaseParser.T__3:
                self.state = 2025
                self.match(SqlBaseParser.T__3)
                self.state = 2026
                self.rowField()
                self.state = 2031
                self._errHandler.sync(self)
                _la = self._input.LA(1)
            self.state = 2032
            self.match(SqlBaseParser.T__2)
        elif la_ == 2:
            # INTERVAL from=intervalField (TO to=intervalField)?
            localctx = SqlBaseParser.IntervalTypeContext(self, localctx)
            self._ctx = localctx
            _prevctx = localctx
            self.state = 2034
            self.match(SqlBaseParser.INTERVAL)
            self.state = 2035
            localctx.from_ = self.intervalField()
            self.state = 2038
            self._errHandler.sync(self)
            la_ = self._interp.adaptivePredict(self._input,264,self._ctx)
            if la_ == 1:
                self.state = 2036
                self.match(SqlBaseParser.TO)
                self.state = 2037
                localctx.to = self.intervalField()
        elif la_ == 3:
            # TIMESTAMP ('(' precision ')')? (WITHOUT TIME ZONE)?
            localctx = SqlBaseParser.DateTimeTypeContext(self, localctx)
            self._ctx = localctx
            _prevctx = localctx
            self.state = 2040
            localctx.base = self.match(SqlBaseParser.TIMESTAMP)
            self.state = 2045
            self._errHandler.sync(self)
            la_ = self._interp.adaptivePredict(self._input,265,self._ctx)
            if la_ == 1:
                self.state = 2041
                self.match(SqlBaseParser.T__1)
                self.state = 2042
                localctx.precision = self.typeParameter()
                self.state = 2043
                self.match(SqlBaseParser.T__2)
            self.state = 2050
            self._errHandler.sync(self)
            la_ = self._interp.adaptivePredict(self._input,266,self._ctx)
            if la_ == 1:
                self.state = 2047
                self.match(SqlBaseParser.WITHOUT)
                self.state = 2048
                self.match(SqlBaseParser.TIME)
                self.state = 2049
                self.match(SqlBaseParser.ZONE)
        elif la_ == 4:
            # TIMESTAMP ('(' precision ')')? WITH TIME ZONE
            localctx = SqlBaseParser.DateTimeTypeContext(self, localctx)
            self._ctx = localctx
            _prevctx = localctx
            self.state = 2052
            localctx.base = self.match(SqlBaseParser.TIMESTAMP)
            self.state = 2057
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            if _la==SqlBaseParser.T__1:
                self.state = 2053
                self.match(SqlBaseParser.T__1)
                self.state = 2054
                localctx.precision = self.typeParameter()
                self.state = 2055
                self.match(SqlBaseParser.T__2)
            self.state = 2059
            self.match(SqlBaseParser.WITH)
            self.state = 2060
            self.match(SqlBaseParser.TIME)
            self.state = 2061
            self.match(SqlBaseParser.ZONE)
        elif la_ == 5:
            # TIME ('(' precision ')')? (WITHOUT TIME ZONE)?
            localctx = SqlBaseParser.DateTimeTypeContext(self, localctx)
            self._ctx = localctx
            _prevctx = localctx
            self.state = 2062
            localctx.base = self.match(SqlBaseParser.TIME)
            self.state = 2067
            self._errHandler.sync(self)
            la_ = self._interp.adaptivePredict(self._input,268,self._ctx)
            if la_ == 1:
                self.state = 2063
                self.match(SqlBaseParser.T__1)
                self.state = 2064
                localctx.precision = self.typeParameter()
                self.state = 2065
                self.match(SqlBaseParser.T__2)
            self.state = 2072
            self._errHandler.sync(self)
            la_ = self._interp.adaptivePredict(self._input,269,self._ctx)
            if la_ == 1:
                self.state = 2069
                self.match(SqlBaseParser.WITHOUT)
                self.state = 2070
                self.match(SqlBaseParser.TIME)
                self.state = 2071
                self.match(SqlBaseParser.ZONE)
        elif la_ == 6:
            # TIME ('(' precision ')')? WITH TIME ZONE
            localctx = SqlBaseParser.DateTimeTypeContext(self, localctx)
            self._ctx = localctx
            _prevctx = localctx
            self.state = 2074
            localctx.base = self.match(SqlBaseParser.TIME)
            self.state = 2079
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            if _la==SqlBaseParser.T__1:
                self.state = 2075
                self.match(SqlBaseParser.T__1)
                self.state = 2076
                localctx.precision = self.typeParameter()
                self.state = 2077
                self.match(SqlBaseParser.T__2)
            self.state = 2081
            self.match(SqlBaseParser.WITH)
            self.state = 2082
            self.match(SqlBaseParser.TIME)
            self.state = 2083
            self.match(SqlBaseParser.ZONE)
        elif la_ == 7:
            # DOUBLE PRECISION
            localctx = SqlBaseParser.DoublePrecisionTypeContext(self, localctx)
            self._ctx = localctx
            _prevctx = localctx
            self.state = 2084
            self.match(SqlBaseParser.DOUBLE)
            self.state = 2085
            self.match(SqlBaseParser.PRECISION)
        elif la_ == 8:
            # ARRAY '<' type_ '>'  (legacy angle-bracket syntax)
            localctx = SqlBaseParser.LegacyArrayTypeContext(self, localctx)
            self._ctx = localctx
            _prevctx = localctx
            self.state = 2086
            self.match(SqlBaseParser.ARRAY)
            self.state = 2087
            self.match(SqlBaseParser.LT)
            self.state = 2088
            self.type_(0)
            self.state = 2089
            self.match(SqlBaseParser.GT)
        elif la_ == 9:
            # MAP '<' keyType ',' valueType '>'  (legacy angle-bracket syntax)
            localctx = SqlBaseParser.LegacyMapTypeContext(self, localctx)
            self._ctx = localctx
            _prevctx = localctx
            self.state = 2091
            self.match(SqlBaseParser.MAP)
            self.state = 2092
            self.match(SqlBaseParser.LT)
            self.state = 2093
            localctx.keyType = self.type_(0)
            self.state = 2094
            self.match(SqlBaseParser.T__3)
            self.state = 2095
            localctx.valueType = self.type_(0)
            self.state = 2096
            self.match(SqlBaseParser.GT)
        elif la_ == 10:
            # identifier ('(' typeParameter (',' typeParameter)* ')')?
            localctx = SqlBaseParser.GenericTypeContext(self, localctx)
            self._ctx = localctx
            _prevctx = localctx
            self.state = 2098
            self.identifier()
            self.state = 2110
            self._errHandler.sync(self)
            la_ = self._interp.adaptivePredict(self._input,272,self._ctx)
            if la_ == 1:
                self.state = 2099
                self.match(SqlBaseParser.T__1)
                self.state = 2100
                self.typeParameter()
                self.state = 2105
                self._errHandler.sync(self)
                _la = self._input.LA(1)
                while _la==SqlBaseParser.T__3:
                    self.state = 2101
                    self.match(SqlBaseParser.T__3)
                    self.state = 2102
                    self.typeParameter()
                    self.state = 2107
                    self._errHandler.sync(self)
                    _la = self._input.LA(1)
                self.state = 2108
                self.match(SqlBaseParser.T__2)
        self._ctx.stop = self._input.LT(-1)
        # Left-recursion loop: consume postfix ARRAY suffixes, each
        # wrapping the type parsed so far in an ArrayTypeContext.
        self.state = 2123
        self._errHandler.sync(self)
        _alt = self._interp.adaptivePredict(self._input,275,self._ctx)
        while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
            if _alt==1:
                if self._parseListeners is not None:
                    self.triggerExitRuleEvent()
                _prevctx = localctx
                localctx = SqlBaseParser.ArrayTypeContext(self, SqlBaseParser.Type_Context(self, _parentctx, _parentState))
                self.pushNewRecursionContext(localctx, _startState, self.RULE_type_)
                self.state = 2114
                if not self.precpred(self._ctx, 2):
                    from antlr4.error.Errors import FailedPredicateException
                    raise FailedPredicateException(self, "self.precpred(self._ctx, 2)")
                self.state = 2115
                self.match(SqlBaseParser.ARRAY)
                self.state = 2119
                self._errHandler.sync(self)
                la_ = self._interp.adaptivePredict(self._input,274,self._ctx)
                if la_ == 1:
                    # Optional bracketed size: T__6 INTEGER_VALUE T__7
                    # (presumably '[' and ']' — confirm against the grammar).
                    self.state = 2116
                    self.match(SqlBaseParser.T__6)
                    self.state = 2117
                    self.match(SqlBaseParser.INTEGER_VALUE)
                    self.state = 2118
                    self.match(SqlBaseParser.T__7)
            self.state = 2125
            self._errHandler.sync(self)
            _alt = self._interp.adaptivePredict(self._input,275,self._ctx)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.unrollRecursionContexts(_parentctx)
    return localctx
class RowFieldContext(ParserRuleContext):
    """Parse-tree node for the ``rowField`` rule: type_ | identifier type_."""

    __slots__ = 'parser'

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def type_(self):
        return self.getTypedRuleContext(SqlBaseParser.Type_Context,0)

    def identifier(self):
        return self.getTypedRuleContext(SqlBaseParser.IdentifierContext,0)

    def getRuleIndex(self):
        return SqlBaseParser.RULE_rowField

    def enterRule(self, listener:ParseTreeListener):
        try:
            hook = listener.enterRowField
        except AttributeError:
            return
        hook(self)

    def exitRule(self, listener:ParseTreeListener):
        try:
            hook = listener.exitRowField
        except AttributeError:
            return
        hook(self)

    def accept(self, visitor:ParseTreeVisitor):
        visit = getattr(visitor, "visitRowField", visitor.visitChildren)
        return visit(self)
def rowField(self):
    """rowField : type_ | identifier type_

    Auto-generated ANTLR rule method. Returns the populated RowFieldContext.
    """
    localctx = SqlBaseParser.RowFieldContext(self, self._ctx, self.state)
    self.enterRule(localctx, 124, self.RULE_rowField)
    try:
        self.state = 2130
        self._errHandler.sync(self)
        la_ = self._interp.adaptivePredict(self._input,276,self._ctx)
        if la_ == 1:
            # Anonymous field: bare type.
            self.enterOuterAlt(localctx, 1)
            self.state = 2126
            self.type_(0)
        elif la_ == 2:
            # Named field: identifier followed by its type.
            self.enterOuterAlt(localctx, 2)
            self.state = 2127
            self.identifier()
            self.state = 2128
            self.type_(0)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class TypeParameterContext(ParserRuleContext):
    """Parse-tree node for the ``typeParameter`` rule: INTEGER_VALUE | type_."""

    __slots__ = 'parser'

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def INTEGER_VALUE(self):
        return self.getToken(SqlBaseParser.INTEGER_VALUE, 0)

    def type_(self):
        return self.getTypedRuleContext(SqlBaseParser.Type_Context,0)

    def getRuleIndex(self):
        return SqlBaseParser.RULE_typeParameter

    def enterRule(self, listener:ParseTreeListener):
        try:
            hook = listener.enterTypeParameter
        except AttributeError:
            return
        hook(self)

    def exitRule(self, listener:ParseTreeListener):
        try:
            hook = listener.exitTypeParameter
        except AttributeError:
            return
        hook(self)

    def accept(self, visitor:ParseTreeVisitor):
        visit = getattr(visitor, "visitTypeParameter", visitor.visitChildren)
        return visit(self)
def typeParameter(self):
    """typeParameter : INTEGER_VALUE | type_

    Auto-generated ANTLR rule method. The large token list below is the
    FIRST set of ``type_`` (every non-reserved keyword plus identifiers);
    do not edit it by hand. Returns the populated TypeParameterContext.
    """
    localctx = SqlBaseParser.TypeParameterContext(self, self._ctx, self.state)
    self.enterRule(localctx, 126, self.RULE_typeParameter)
    try:
        self.state = 2134
        self._errHandler.sync(self)
        token = self._input.LA(1)
        if token in [SqlBaseParser.INTEGER_VALUE]:
            # Numeric parameter, e.g. VARCHAR(32).
            self.enterOuterAlt(localctx, 1)
            self.state = 2132
            self.match(SqlBaseParser.INTEGER_VALUE)
        elif token in [SqlBaseParser.ADD, SqlBaseParser.ADMIN, SqlBaseParser.AFTER, SqlBaseParser.ALL, SqlBaseParser.ANALYZE, SqlBaseParser.ANY, SqlBaseParser.ARRAY, SqlBaseParser.ASC, SqlBaseParser.AT, SqlBaseParser.AUTHORIZATION, SqlBaseParser.BERNOULLI, SqlBaseParser.CALL, SqlBaseParser.CASCADE, SqlBaseParser.CATALOGS, SqlBaseParser.COLUMN, SqlBaseParser.COLUMNS, SqlBaseParser.COMMENT, SqlBaseParser.COMMIT, SqlBaseParser.COMMITTED, SqlBaseParser.COUNT, SqlBaseParser.CURRENT, SqlBaseParser.DATA, SqlBaseParser.DATE, SqlBaseParser.DAY, SqlBaseParser.DEFINER, SqlBaseParser.DENY, SqlBaseParser.DESC, SqlBaseParser.DEFINE, SqlBaseParser.DISTRIBUTED, SqlBaseParser.DOUBLE, SqlBaseParser.EMPTY, SqlBaseParser.ERROR, SqlBaseParser.EXCLUDING, SqlBaseParser.EXPLAIN, SqlBaseParser.FETCH, SqlBaseParser.FILTER, SqlBaseParser.FINAL, SqlBaseParser.FIRST, SqlBaseParser.FOLLOWING, SqlBaseParser.FORMAT, SqlBaseParser.FUNCTIONS, SqlBaseParser.GRANT, SqlBaseParser.GRANTED, SqlBaseParser.GRANTS, SqlBaseParser.GRAPHVIZ, SqlBaseParser.GROUPS, SqlBaseParser.HOUR, SqlBaseParser.IF, SqlBaseParser.IGNORE, SqlBaseParser.INCLUDING, SqlBaseParser.INITIAL, SqlBaseParser.INPUT, SqlBaseParser.INTERVAL, SqlBaseParser.INVOKER, SqlBaseParser.IO, SqlBaseParser.ISOLATION, SqlBaseParser.JSON, SqlBaseParser.LAST, SqlBaseParser.LATERAL, SqlBaseParser.LEVEL, SqlBaseParser.LIMIT, SqlBaseParser.LOCAL, SqlBaseParser.LOGICAL, SqlBaseParser.MAP, SqlBaseParser.MATCH, SqlBaseParser.MATCHED, SqlBaseParser.MATCHES, SqlBaseParser.MATCH_RECOGNIZE, SqlBaseParser.MATERIALIZED, SqlBaseParser.MEASURES, SqlBaseParser.MERGE, SqlBaseParser.MINUTE, SqlBaseParser.MONTH, SqlBaseParser.NEXT, SqlBaseParser.NFC, SqlBaseParser.NFD, SqlBaseParser.NFKC, SqlBaseParser.NFKD, SqlBaseParser.NO, SqlBaseParser.NONE, SqlBaseParser.NULLIF, SqlBaseParser.NULLS, SqlBaseParser.OFFSET, SqlBaseParser.OMIT, SqlBaseParser.OF, SqlBaseParser.ONE, SqlBaseParser.ONLY, SqlBaseParser.OPTION, SqlBaseParser.ORDINALITY, SqlBaseParser.OUTPUT, SqlBaseParser.OVER, 
            SqlBaseParser.OVERFLOW, SqlBaseParser.PARTITION, SqlBaseParser.PARTITIONS, SqlBaseParser.PAST, SqlBaseParser.PATH, SqlBaseParser.PATTERN, SqlBaseParser.PER, SqlBaseParser.PERMUTE, SqlBaseParser.POSITION, SqlBaseParser.PRECEDING, SqlBaseParser.PRECISION, SqlBaseParser.PRIVILEGES, SqlBaseParser.PROPERTIES, SqlBaseParser.RANGE, SqlBaseParser.READ, SqlBaseParser.REFRESH, SqlBaseParser.RENAME, SqlBaseParser.REPEATABLE, SqlBaseParser.REPLACE, SqlBaseParser.RESET, SqlBaseParser.RESPECT, SqlBaseParser.RESTRICT, SqlBaseParser.REVOKE, SqlBaseParser.ROLE, SqlBaseParser.ROLES, SqlBaseParser.ROLLBACK, SqlBaseParser.ROW, SqlBaseParser.ROWS, SqlBaseParser.RUNNING, SqlBaseParser.SCHEMA, SqlBaseParser.SCHEMAS, SqlBaseParser.SECOND, SqlBaseParser.SECURITY, SqlBaseParser.SEEK, SqlBaseParser.SERIALIZABLE, SqlBaseParser.SESSION, SqlBaseParser.SET, SqlBaseParser.SETS, SqlBaseParser.SHOW, SqlBaseParser.SOME, SqlBaseParser.START, SqlBaseParser.STATS, SqlBaseParser.SUBSET, SqlBaseParser.SUBSTRING, SqlBaseParser.SYSTEM, SqlBaseParser.TABLES, SqlBaseParser.TABLESAMPLE, SqlBaseParser.TEXT, SqlBaseParser.TIES, SqlBaseParser.TIME, SqlBaseParser.TIMESTAMP, SqlBaseParser.TO, SqlBaseParser.TRANSACTION, SqlBaseParser.TRUNCATE, SqlBaseParser.TRY_CAST, SqlBaseParser.TYPE, SqlBaseParser.UNBOUNDED, SqlBaseParser.UNCOMMITTED, SqlBaseParser.UNMATCHED, SqlBaseParser.UPDATE, SqlBaseParser.USE, SqlBaseParser.USER, SqlBaseParser.VALIDATE, SqlBaseParser.VERBOSE, SqlBaseParser.VERSION, SqlBaseParser.VIEW, SqlBaseParser.WINDOW, SqlBaseParser.WITHIN, SqlBaseParser.WITHOUT, SqlBaseParser.WORK, SqlBaseParser.WRITE, SqlBaseParser.YEAR, SqlBaseParser.ZONE, SqlBaseParser.IDENTIFIER, SqlBaseParser.DIGIT_IDENTIFIER, SqlBaseParser.QUOTED_IDENTIFIER, SqlBaseParser.BACKQUOTED_IDENTIFIER]:
            # Type parameter, e.g. MAP(VARCHAR, BIGINT).
            self.enterOuterAlt(localctx, 2)
            self.state = 2133
            self.type_(0)
        else:
            raise NoViableAltException(self)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class WhenClauseContext(ParserRuleContext):
    """Parse-tree node for the ``whenClause`` rule:
    WHEN condition=expression THEN result=expression."""

    __slots__ = 'parser'

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser
        self.condition = None  # ExpressionContext after WHEN
        self.result = None     # ExpressionContext after THEN

    def WHEN(self):
        return self.getToken(SqlBaseParser.WHEN, 0)

    def THEN(self):
        return self.getToken(SqlBaseParser.THEN, 0)

    def expression(self, i:int=None):
        if i is None:
            return self.getTypedRuleContexts(SqlBaseParser.ExpressionContext)
        return self.getTypedRuleContext(SqlBaseParser.ExpressionContext,i)

    def getRuleIndex(self):
        return SqlBaseParser.RULE_whenClause

    def enterRule(self, listener:ParseTreeListener):
        try:
            hook = listener.enterWhenClause
        except AttributeError:
            return
        hook(self)

    def exitRule(self, listener:ParseTreeListener):
        try:
            hook = listener.exitWhenClause
        except AttributeError:
            return
        hook(self)

    def accept(self, visitor:ParseTreeVisitor):
        visit = getattr(visitor, "visitWhenClause", visitor.visitChildren)
        return visit(self)
def whenClause(self):
    """whenClause : WHEN condition=expression THEN result=expression

    Auto-generated ANTLR rule method. Returns the populated WhenClauseContext.
    """
    localctx = SqlBaseParser.WhenClauseContext(self, self._ctx, self.state)
    self.enterRule(localctx, 128, self.RULE_whenClause)
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 2136
        self.match(SqlBaseParser.WHEN)
        self.state = 2137
        localctx.condition = self.expression()
        self.state = 2138
        self.match(SqlBaseParser.THEN)
        self.state = 2139
        localctx.result = self.expression()
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class Filter_Context(ParserRuleContext):
    """Parse-tree node for the ``filter_`` rule:
    FILTER '(' WHERE booleanExpression ')'."""

    __slots__ = 'parser'

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def FILTER(self):
        return self.getToken(SqlBaseParser.FILTER, 0)

    def WHERE(self):
        return self.getToken(SqlBaseParser.WHERE, 0)

    def booleanExpression(self):
        return self.getTypedRuleContext(SqlBaseParser.BooleanExpressionContext,0)

    def getRuleIndex(self):
        return SqlBaseParser.RULE_filter_

    def enterRule(self, listener:ParseTreeListener):
        try:
            hook = listener.enterFilter_
        except AttributeError:
            return
        hook(self)

    def exitRule(self, listener:ParseTreeListener):
        try:
            hook = listener.exitFilter_
        except AttributeError:
            return
        hook(self)

    def accept(self, visitor:ParseTreeVisitor):
        visit = getattr(visitor, "visitFilter_", visitor.visitChildren)
        return visit(self)
def filter_(self):
    """filter_ : FILTER '(' WHERE booleanExpression ')'

    Auto-generated ANTLR rule method. Returns the populated Filter_Context.
    """
    localctx = SqlBaseParser.Filter_Context(self, self._ctx, self.state)
    self.enterRule(localctx, 130, self.RULE_filter_)
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 2141
        self.match(SqlBaseParser.FILTER)
        self.state = 2142
        self.match(SqlBaseParser.T__1)
        self.state = 2143
        self.match(SqlBaseParser.WHERE)
        self.state = 2144
        self.booleanExpression(0)
        self.state = 2145
        self.match(SqlBaseParser.T__2)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class MergeCaseContext(ParserRuleContext):
    """Base parse-tree node for the ``mergeCase`` rule.

    Concrete alternatives (MergeInsertContext, MergeUpdateContext,
    MergeDeleteContext) subclass this and take over its children via
    ``copyFrom``.
    """

    __slots__ = 'parser'

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def getRuleIndex(self):
        return SqlBaseParser.RULE_mergeCase

    def copyFrom(self, ctx:ParserRuleContext):
        super().copyFrom(ctx)
class MergeInsertContext(MergeCaseContext):
    """``mergeCase`` alternative: WHEN NOT MATCHED (AND condition)? THEN
    INSERT ('(' targets ')')? VALUES '(' values ')'."""

    def __init__(self, parser, ctx:ParserRuleContext):  # ctx is a SqlBaseParser.MergeCaseContext
        super().__init__(parser)
        self.condition = None        # ExpressionContext after optional AND
        self._identifier = None      # IdentifierContext (current target column)
        self.targets = list()        # all target IdentifierContexts
        self._expression = None      # ExpressionContext (current value)
        self.values = list()         # all value ExpressionContexts
        self.copyFrom(ctx)

    # --- token accessors ---------------------------------------------------
    def WHEN(self):
        return self.getToken(SqlBaseParser.WHEN, 0)

    def NOT(self):
        return self.getToken(SqlBaseParser.NOT, 0)

    def MATCHED(self):
        return self.getToken(SqlBaseParser.MATCHED, 0)

    def THEN(self):
        return self.getToken(SqlBaseParser.THEN, 0)

    def INSERT(self):
        return self.getToken(SqlBaseParser.INSERT, 0)

    def VALUES(self):
        return self.getToken(SqlBaseParser.VALUES, 0)

    def AND(self):
        return self.getToken(SqlBaseParser.AND, 0)

    def expression(self, i:int=None):
        if i is None:
            return self.getTypedRuleContexts(SqlBaseParser.ExpressionContext)
        return self.getTypedRuleContext(SqlBaseParser.ExpressionContext,i)

    def identifier(self, i:int=None):
        if i is None:
            return self.getTypedRuleContexts(SqlBaseParser.IdentifierContext)
        return self.getTypedRuleContext(SqlBaseParser.IdentifierContext,i)

    # --- listener / visitor dispatch --------------------------------------
    def enterRule(self, listener:ParseTreeListener):
        try:
            hook = listener.enterMergeInsert
        except AttributeError:
            return
        hook(self)

    def exitRule(self, listener:ParseTreeListener):
        try:
            hook = listener.exitMergeInsert
        except AttributeError:
            return
        hook(self)

    def accept(self, visitor:ParseTreeVisitor):
        visit = getattr(visitor, "visitMergeInsert", visitor.visitChildren)
        return visit(self)
class MergeUpdateContext(MergeCaseContext):
    """``mergeCase`` alternative: WHEN MATCHED (AND condition)? THEN
    UPDATE SET target '=' value (',' target '=' value)*."""

    def __init__(self, parser, ctx:ParserRuleContext):  # ctx is a SqlBaseParser.MergeCaseContext
        super().__init__(parser)
        self.condition = None        # ExpressionContext after optional AND
        self._identifier = None      # IdentifierContext (current target column)
        self.targets = list()        # all target IdentifierContexts
        self._expression = None      # ExpressionContext (current assigned value)
        self.values = list()         # all assigned ExpressionContexts
        self.copyFrom(ctx)

    # --- token accessors ---------------------------------------------------
    def WHEN(self):
        return self.getToken(SqlBaseParser.WHEN, 0)

    def MATCHED(self):
        return self.getToken(SqlBaseParser.MATCHED, 0)

    def THEN(self):
        return self.getToken(SqlBaseParser.THEN, 0)

    def UPDATE(self):
        return self.getToken(SqlBaseParser.UPDATE, 0)

    def SET(self):
        return self.getToken(SqlBaseParser.SET, 0)

    def AND(self):
        return self.getToken(SqlBaseParser.AND, 0)

    def EQ(self, i:int=None):
        # One EQ token per SET assignment.
        if i is None:
            return self.getTokens(SqlBaseParser.EQ)
        return self.getToken(SqlBaseParser.EQ, i)

    def identifier(self, i:int=None):
        if i is None:
            return self.getTypedRuleContexts(SqlBaseParser.IdentifierContext)
        return self.getTypedRuleContext(SqlBaseParser.IdentifierContext,i)

    def expression(self, i:int=None):
        if i is None:
            return self.getTypedRuleContexts(SqlBaseParser.ExpressionContext)
        return self.getTypedRuleContext(SqlBaseParser.ExpressionContext,i)

    # --- listener / visitor dispatch --------------------------------------
    def enterRule(self, listener:ParseTreeListener):
        try:
            hook = listener.enterMergeUpdate
        except AttributeError:
            return
        hook(self)

    def exitRule(self, listener:ParseTreeListener):
        try:
            hook = listener.exitMergeUpdate
        except AttributeError:
            return
        hook(self)

    def accept(self, visitor:ParseTreeVisitor):
        visit = getattr(visitor, "visitMergeUpdate", visitor.visitChildren)
        return visit(self)
class MergeDeleteContext(MergeCaseContext):
    """``mergeCase`` alternative: WHEN MATCHED (AND condition)? THEN DELETE."""

    def __init__(self, parser, ctx:ParserRuleContext):  # ctx is a SqlBaseParser.MergeCaseContext
        super().__init__(parser)
        self.condition = None  # ExpressionContext after optional AND
        self.copyFrom(ctx)

    def WHEN(self):
        return self.getToken(SqlBaseParser.WHEN, 0)

    def MATCHED(self):
        return self.getToken(SqlBaseParser.MATCHED, 0)

    def THEN(self):
        return self.getToken(SqlBaseParser.THEN, 0)

    def DELETE(self):
        return self.getToken(SqlBaseParser.DELETE, 0)

    def AND(self):
        return self.getToken(SqlBaseParser.AND, 0)

    def expression(self):
        return self.getTypedRuleContext(SqlBaseParser.ExpressionContext,0)

    def enterRule(self, listener:ParseTreeListener):
        try:
            hook = listener.enterMergeDelete
        except AttributeError:
            return
        hook(self)

    def exitRule(self, listener:ParseTreeListener):
        try:
            hook = listener.exitMergeDelete
        except AttributeError:
            return
        hook(self)

    def accept(self, visitor:ParseTreeVisitor):
        visit = getattr(visitor, "visitMergeDelete", visitor.visitChildren)
        return visit(self)
def mergeCase(self):
    """Parse one ``mergeCase`` alternative of a MERGE statement.

    Three alternatives, chosen by adaptive prediction (decision 285):
      1. MergeUpdate: ``WHEN MATCHED [AND cond] THEN UPDATE SET id = expr (, id = expr)*``
      2. MergeDelete: ``WHEN MATCHED [AND cond] THEN DELETE``
      3. MergeInsert: ``WHEN NOT MATCHED [AND cond] THEN INSERT [(cols)] VALUES (exprs)``

    Returns the matched MergeCaseContext subclass instance.
    NOTE: generated by ANTLR — state numbers mirror the grammar's ATN; do not hand-edit.
    """
    localctx = SqlBaseParser.MergeCaseContext(self, self._ctx, self.state)
    self.enterRule(localctx, 132, self.RULE_mergeCase)
    self._la = 0 # Token type
    try:
        self.state = 2211
        self._errHandler.sync(self)
        la_ = self._interp.adaptivePredict(self._input,285,self._ctx)
        if la_ == 1:
            # Alternative 1: WHEN MATCHED ... THEN UPDATE SET ...
            localctx = SqlBaseParser.MergeUpdateContext(self, localctx)
            self.enterOuterAlt(localctx, 1)
            self.state = 2147
            self.match(SqlBaseParser.WHEN)
            self.state = 2148
            self.match(SqlBaseParser.MATCHED)
            self.state = 2151
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            if _la==SqlBaseParser.AND:
                # Optional guard: AND <expression>
                self.state = 2149
                self.match(SqlBaseParser.AND)
                self.state = 2150
                localctx.condition = self.expression()
            self.state = 2153
            self.match(SqlBaseParser.THEN)
            self.state = 2154
            self.match(SqlBaseParser.UPDATE)
            self.state = 2155
            self.match(SqlBaseParser.SET)
            # First assignment: <identifier> = <expression>
            self.state = 2156
            localctx._identifier = self.identifier()
            localctx.targets.append(localctx._identifier)
            self.state = 2157
            self.match(SqlBaseParser.EQ)
            self.state = 2158
            localctx._expression = self.expression()
            localctx.values.append(localctx._expression)
            self.state = 2166
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            while _la==SqlBaseParser.T__3:
                # Further comma-separated assignments (T__3 is ',')
                self.state = 2159
                self.match(SqlBaseParser.T__3)
                self.state = 2160
                localctx._identifier = self.identifier()
                localctx.targets.append(localctx._identifier)
                self.state = 2161
                self.match(SqlBaseParser.EQ)
                self.state = 2162
                localctx._expression = self.expression()
                localctx.values.append(localctx._expression)
                self.state = 2168
                self._errHandler.sync(self)
                _la = self._input.LA(1)
        elif la_ == 2:
            # Alternative 2: WHEN MATCHED ... THEN DELETE
            localctx = SqlBaseParser.MergeDeleteContext(self, localctx)
            self.enterOuterAlt(localctx, 2)
            self.state = 2169
            self.match(SqlBaseParser.WHEN)
            self.state = 2170
            self.match(SqlBaseParser.MATCHED)
            self.state = 2173
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            if _la==SqlBaseParser.AND:
                # Optional guard: AND <expression>
                self.state = 2171
                self.match(SqlBaseParser.AND)
                self.state = 2172
                localctx.condition = self.expression()
            self.state = 2175
            self.match(SqlBaseParser.THEN)
            self.state = 2176
            self.match(SqlBaseParser.DELETE)
        elif la_ == 3:
            # Alternative 3: WHEN NOT MATCHED ... THEN INSERT [(cols)] VALUES (exprs)
            localctx = SqlBaseParser.MergeInsertContext(self, localctx)
            self.enterOuterAlt(localctx, 3)
            self.state = 2177
            self.match(SqlBaseParser.WHEN)
            self.state = 2178
            self.match(SqlBaseParser.NOT)
            self.state = 2179
            self.match(SqlBaseParser.MATCHED)
            self.state = 2182
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            if _la==SqlBaseParser.AND:
                # Optional guard: AND <expression>
                self.state = 2180
                self.match(SqlBaseParser.AND)
                self.state = 2181
                localctx.condition = self.expression()
            self.state = 2184
            self.match(SqlBaseParser.THEN)
            self.state = 2185
            self.match(SqlBaseParser.INSERT)
            self.state = 2197
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            if _la==SqlBaseParser.T__1:
                # Optional column list: '(' identifier (, identifier)* ')'  (T__1/T__2 are parens)
                self.state = 2186
                self.match(SqlBaseParser.T__1)
                self.state = 2187
                localctx._identifier = self.identifier()
                localctx.targets.append(localctx._identifier)
                self.state = 2192
                self._errHandler.sync(self)
                _la = self._input.LA(1)
                while _la==SqlBaseParser.T__3:
                    self.state = 2188
                    self.match(SqlBaseParser.T__3)
                    self.state = 2189
                    localctx._identifier = self.identifier()
                    localctx.targets.append(localctx._identifier)
                    self.state = 2194
                    self._errHandler.sync(self)
                    _la = self._input.LA(1)
                self.state = 2195
                self.match(SqlBaseParser.T__2)
            # VALUES '(' expression (, expression)* ')'
            self.state = 2199
            self.match(SqlBaseParser.VALUES)
            self.state = 2200
            self.match(SqlBaseParser.T__1)
            self.state = 2201
            localctx._expression = self.expression()
            localctx.values.append(localctx._expression)
            self.state = 2206
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            while _la==SqlBaseParser.T__3:
                self.state = 2202
                self.match(SqlBaseParser.T__3)
                self.state = 2203
                localctx._expression = self.expression()
                localctx.values.append(localctx._expression)
                self.state = 2208
                self._errHandler.sync(self)
                _la = self._input.LA(1)
            self.state = 2209
            self.match(SqlBaseParser.T__2)
    except RecognitionException as re:
        # Standard ANTLR recovery: record, report, resynchronize.
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class OverContext(ParserRuleContext):
    """Parse-tree context for the ``OVER`` clause of a window function call.

    Holds either a reference to a named window (``windowName``) or an
    inline window specification.
    """
    __slots__ = 'parser'

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser
        self.windowName = None # IdentifierContext — set when a named window is referenced

    def OVER(self):
        """OVER keyword token."""
        return self.getToken(SqlBaseParser.OVER, 0)

    def windowSpecification(self):
        """Inline window specification context, if present."""
        return self.getTypedRuleContext(SqlBaseParser.WindowSpecificationContext, 0)

    def identifier(self):
        """Named-window identifier context, if present."""
        return self.getTypedRuleContext(SqlBaseParser.IdentifierContext, 0)

    def getRuleIndex(self):
        return SqlBaseParser.RULE_over

    def enterRule(self, listener:ParseTreeListener):
        if not hasattr(listener, "enterOver"):
            return
        listener.enterOver(self)

    def exitRule(self, listener:ParseTreeListener):
        if not hasattr(listener, "exitOver"):
            return
        listener.exitOver(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr(visitor, "visitOver"):
            return visitor.visitOver(self)
        return visitor.visitChildren(self)
def over(self):
    """Parse an ``over`` clause: ``OVER`` followed by either a window name or
    a parenthesized window specification.

    The first branch's membership list enumerates every token that can start
    an identifier (non-reserved keywords plus the identifier token types);
    T__1/T__2 are '(' / ')'.
    NOTE: generated by ANTLR — do not hand-edit.
    """
    localctx = SqlBaseParser.OverContext(self, self._ctx, self.state)
    self.enterRule(localctx, 134, self.RULE_over)
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 2213
        self.match(SqlBaseParser.OVER)
        self.state = 2219
        self._errHandler.sync(self)
        token = self._input.LA(1)
        if token in [SqlBaseParser.ADD, SqlBaseParser.ADMIN, SqlBaseParser.AFTER, SqlBaseParser.ALL, SqlBaseParser.ANALYZE, SqlBaseParser.ANY, SqlBaseParser.ARRAY, SqlBaseParser.ASC, SqlBaseParser.AT, SqlBaseParser.AUTHORIZATION, SqlBaseParser.BERNOULLI, SqlBaseParser.CALL, SqlBaseParser.CASCADE, SqlBaseParser.CATALOGS, SqlBaseParser.COLUMN, SqlBaseParser.COLUMNS, SqlBaseParser.COMMENT, SqlBaseParser.COMMIT, SqlBaseParser.COMMITTED, SqlBaseParser.COUNT, SqlBaseParser.CURRENT, SqlBaseParser.DATA, SqlBaseParser.DATE, SqlBaseParser.DAY, SqlBaseParser.DEFINER, SqlBaseParser.DENY, SqlBaseParser.DESC, SqlBaseParser.DEFINE, SqlBaseParser.DISTRIBUTED, SqlBaseParser.DOUBLE, SqlBaseParser.EMPTY, SqlBaseParser.ERROR, SqlBaseParser.EXCLUDING, SqlBaseParser.EXPLAIN, SqlBaseParser.FETCH, SqlBaseParser.FILTER, SqlBaseParser.FINAL, SqlBaseParser.FIRST, SqlBaseParser.FOLLOWING, SqlBaseParser.FORMAT, SqlBaseParser.FUNCTIONS, SqlBaseParser.GRANT, SqlBaseParser.GRANTED, SqlBaseParser.GRANTS, SqlBaseParser.GRAPHVIZ, SqlBaseParser.GROUPS, SqlBaseParser.HOUR, SqlBaseParser.IF, SqlBaseParser.IGNORE, SqlBaseParser.INCLUDING, SqlBaseParser.INITIAL, SqlBaseParser.INPUT, SqlBaseParser.INTERVAL, SqlBaseParser.INVOKER, SqlBaseParser.IO, SqlBaseParser.ISOLATION, SqlBaseParser.JSON, SqlBaseParser.LAST, SqlBaseParser.LATERAL, SqlBaseParser.LEVEL, SqlBaseParser.LIMIT, SqlBaseParser.LOCAL, SqlBaseParser.LOGICAL, SqlBaseParser.MAP, SqlBaseParser.MATCH, SqlBaseParser.MATCHED, SqlBaseParser.MATCHES, SqlBaseParser.MATCH_RECOGNIZE, SqlBaseParser.MATERIALIZED, SqlBaseParser.MEASURES, SqlBaseParser.MERGE, SqlBaseParser.MINUTE, SqlBaseParser.MONTH, SqlBaseParser.NEXT, SqlBaseParser.NFC, SqlBaseParser.NFD, SqlBaseParser.NFKC, SqlBaseParser.NFKD, SqlBaseParser.NO, SqlBaseParser.NONE, SqlBaseParser.NULLIF, SqlBaseParser.NULLS, SqlBaseParser.OFFSET, SqlBaseParser.OMIT, SqlBaseParser.OF, SqlBaseParser.ONE, SqlBaseParser.ONLY, SqlBaseParser.OPTION, SqlBaseParser.ORDINALITY, SqlBaseParser.OUTPUT, SqlBaseParser.OVER, 
            SqlBaseParser.OVERFLOW, SqlBaseParser.PARTITION, SqlBaseParser.PARTITIONS, SqlBaseParser.PAST, SqlBaseParser.PATH, SqlBaseParser.PATTERN, SqlBaseParser.PER, SqlBaseParser.PERMUTE, SqlBaseParser.POSITION, SqlBaseParser.PRECEDING, SqlBaseParser.PRECISION, SqlBaseParser.PRIVILEGES, SqlBaseParser.PROPERTIES, SqlBaseParser.RANGE, SqlBaseParser.READ, SqlBaseParser.REFRESH, SqlBaseParser.RENAME, SqlBaseParser.REPEATABLE, SqlBaseParser.REPLACE, SqlBaseParser.RESET, SqlBaseParser.RESPECT, SqlBaseParser.RESTRICT, SqlBaseParser.REVOKE, SqlBaseParser.ROLE, SqlBaseParser.ROLES, SqlBaseParser.ROLLBACK, SqlBaseParser.ROW, SqlBaseParser.ROWS, SqlBaseParser.RUNNING, SqlBaseParser.SCHEMA, SqlBaseParser.SCHEMAS, SqlBaseParser.SECOND, SqlBaseParser.SECURITY, SqlBaseParser.SEEK, SqlBaseParser.SERIALIZABLE, SqlBaseParser.SESSION, SqlBaseParser.SET, SqlBaseParser.SETS, SqlBaseParser.SHOW, SqlBaseParser.SOME, SqlBaseParser.START, SqlBaseParser.STATS, SqlBaseParser.SUBSET, SqlBaseParser.SUBSTRING, SqlBaseParser.SYSTEM, SqlBaseParser.TABLES, SqlBaseParser.TABLESAMPLE, SqlBaseParser.TEXT, SqlBaseParser.TIES, SqlBaseParser.TIME, SqlBaseParser.TIMESTAMP, SqlBaseParser.TO, SqlBaseParser.TRANSACTION, SqlBaseParser.TRUNCATE, SqlBaseParser.TRY_CAST, SqlBaseParser.TYPE, SqlBaseParser.UNBOUNDED, SqlBaseParser.UNCOMMITTED, SqlBaseParser.UNMATCHED, SqlBaseParser.UPDATE, SqlBaseParser.USE, SqlBaseParser.USER, SqlBaseParser.VALIDATE, SqlBaseParser.VERBOSE, SqlBaseParser.VERSION, SqlBaseParser.VIEW, SqlBaseParser.WINDOW, SqlBaseParser.WITHIN, SqlBaseParser.WITHOUT, SqlBaseParser.WORK, SqlBaseParser.WRITE, SqlBaseParser.YEAR, SqlBaseParser.ZONE, SqlBaseParser.IDENTIFIER, SqlBaseParser.DIGIT_IDENTIFIER, SqlBaseParser.QUOTED_IDENTIFIER, SqlBaseParser.BACKQUOTED_IDENTIFIER]:
            # OVER <windowName>
            self.state = 2214
            localctx.windowName = self.identifier()
        elif token in [SqlBaseParser.T__1]:
            # OVER ( <windowSpecification> )
            self.state = 2215
            self.match(SqlBaseParser.T__1)
            self.state = 2216
            self.windowSpecification()
            self.state = 2217
            self.match(SqlBaseParser.T__2)
        else:
            raise NoViableAltException(self)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class WindowFrameContext(ParserRuleContext):
    """Parse-tree context for a window frame, including the MATCH_RECOGNIZE
    extensions (MEASURES / AFTER MATCH / PATTERN / SUBSET / DEFINE)."""
    __slots__ = 'parser'

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def frameExtent(self):
        """Mandatory frame extent (RANGE/ROWS/GROUPS bounds)."""
        return self.getTypedRuleContext(SqlBaseParser.FrameExtentContext, 0)

    def MEASURES(self):
        """MEASURES keyword token, if present."""
        return self.getToken(SqlBaseParser.MEASURES, 0)

    def measureDefinition(self, i:int=None):
        """All measure definitions, or the *i*-th one when given."""
        return (
            self.getTypedRuleContexts(SqlBaseParser.MeasureDefinitionContext)
            if i is None
            else self.getTypedRuleContext(SqlBaseParser.MeasureDefinitionContext, i)
        )

    def AFTER(self):
        """AFTER keyword token (AFTER MATCH clause), if present."""
        return self.getToken(SqlBaseParser.AFTER, 0)

    def MATCH(self):
        """MATCH keyword token (AFTER MATCH clause), if present."""
        return self.getToken(SqlBaseParser.MATCH, 0)

    def skipTo(self):
        """Skip-to clause following AFTER MATCH, if present."""
        return self.getTypedRuleContext(SqlBaseParser.SkipToContext, 0)

    def PATTERN(self):
        """PATTERN keyword token, if present."""
        return self.getToken(SqlBaseParser.PATTERN, 0)

    def rowPattern(self):
        """Row pattern inside PATTERN ( ... ), if present."""
        return self.getTypedRuleContext(SqlBaseParser.RowPatternContext, 0)

    def SUBSET(self):
        """SUBSET keyword token, if present."""
        return self.getToken(SqlBaseParser.SUBSET, 0)

    def subsetDefinition(self, i:int=None):
        """All subset definitions, or the *i*-th one when given."""
        return (
            self.getTypedRuleContexts(SqlBaseParser.SubsetDefinitionContext)
            if i is None
            else self.getTypedRuleContext(SqlBaseParser.SubsetDefinitionContext, i)
        )

    def DEFINE(self):
        """DEFINE keyword token, if present."""
        return self.getToken(SqlBaseParser.DEFINE, 0)

    def variableDefinition(self, i:int=None):
        """All variable definitions, or the *i*-th one when given."""
        return (
            self.getTypedRuleContexts(SqlBaseParser.VariableDefinitionContext)
            if i is None
            else self.getTypedRuleContext(SqlBaseParser.VariableDefinitionContext, i)
        )

    def INITIAL(self):
        """INITIAL keyword token, if present."""
        return self.getToken(SqlBaseParser.INITIAL, 0)

    def SEEK(self):
        """SEEK keyword token, if present."""
        return self.getToken(SqlBaseParser.SEEK, 0)

    def getRuleIndex(self):
        return SqlBaseParser.RULE_windowFrame

    def enterRule(self, listener:ParseTreeListener):
        if not hasattr(listener, "enterWindowFrame"):
            return
        listener.enterWindowFrame(self)

    def exitRule(self, listener:ParseTreeListener):
        if not hasattr(listener, "exitWindowFrame"):
            return
        listener.exitWindowFrame(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr(visitor, "visitWindowFrame"):
            return visitor.visitWindowFrame(self)
        return visitor.visitChildren(self)
def windowFrame(self):
    """Parse a ``windowFrame``: optional MEASURES list, mandatory frame extent,
    then optional AFTER MATCH skip-to, INITIAL|SEEK, PATTERN, SUBSET and
    DEFINE clauses (row-pattern-recognition extensions).

    T__3 is ',' and T__1/T__2 are '(' / ')'.
    NOTE: generated by ANTLR — do not hand-edit.
    """
    localctx = SqlBaseParser.WindowFrameContext(self, self._ctx, self.state)
    self.enterRule(localctx, 136, self.RULE_windowFrame)
    self._la = 0 # Token type
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 2230
        self._errHandler.sync(self)
        _la = self._input.LA(1)
        if _la==SqlBaseParser.MEASURES:
            # Optional: MEASURES measureDefinition (, measureDefinition)*
            self.state = 2221
            self.match(SqlBaseParser.MEASURES)
            self.state = 2222
            self.measureDefinition()
            self.state = 2227
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            while _la==SqlBaseParser.T__3:
                self.state = 2223
                self.match(SqlBaseParser.T__3)
                self.state = 2224
                self.measureDefinition()
                self.state = 2229
                self._errHandler.sync(self)
                _la = self._input.LA(1)
        # Mandatory frame extent (RANGE/ROWS/GROUPS bounds).
        self.state = 2232
        self.frameExtent()
        self.state = 2236
        self._errHandler.sync(self)
        _la = self._input.LA(1)
        if _la==SqlBaseParser.AFTER:
            # Optional: AFTER MATCH skipTo
            self.state = 2233
            self.match(SqlBaseParser.AFTER)
            self.state = 2234
            self.match(SqlBaseParser.MATCH)
            self.state = 2235
            self.skipTo()
        self.state = 2239
        self._errHandler.sync(self)
        _la = self._input.LA(1)
        if _la==SqlBaseParser.INITIAL or _la==SqlBaseParser.SEEK:
            # Optional: INITIAL | SEEK (token-set match with inline recovery)
            self.state = 2238
            _la = self._input.LA(1)
            if not(_la==SqlBaseParser.INITIAL or _la==SqlBaseParser.SEEK):
                self._errHandler.recoverInline(self)
            else:
                self._errHandler.reportMatch(self)
                self.consume()
        self.state = 2246
        self._errHandler.sync(self)
        _la = self._input.LA(1)
        if _la==SqlBaseParser.PATTERN:
            # Optional: PATTERN ( rowPattern )
            self.state = 2241
            self.match(SqlBaseParser.PATTERN)
            self.state = 2242
            self.match(SqlBaseParser.T__1)
            self.state = 2243
            self.rowPattern(0)
            self.state = 2244
            self.match(SqlBaseParser.T__2)
        self.state = 2257
        self._errHandler.sync(self)
        _la = self._input.LA(1)
        if _la==SqlBaseParser.SUBSET:
            # Optional: SUBSET subsetDefinition (, subsetDefinition)*
            self.state = 2248
            self.match(SqlBaseParser.SUBSET)
            self.state = 2249
            self.subsetDefinition()
            self.state = 2254
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            while _la==SqlBaseParser.T__3:
                self.state = 2250
                self.match(SqlBaseParser.T__3)
                self.state = 2251
                self.subsetDefinition()
                self.state = 2256
                self._errHandler.sync(self)
                _la = self._input.LA(1)
        self.state = 2268
        self._errHandler.sync(self)
        _la = self._input.LA(1)
        if _la==SqlBaseParser.DEFINE:
            # Optional: DEFINE variableDefinition (, variableDefinition)*
            self.state = 2259
            self.match(SqlBaseParser.DEFINE)
            self.state = 2260
            self.variableDefinition()
            self.state = 2265
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            while _la==SqlBaseParser.T__3:
                self.state = 2261
                self.match(SqlBaseParser.T__3)
                self.state = 2262
                self.variableDefinition()
                self.state = 2267
                self._errHandler.sync(self)
                _la = self._input.LA(1)
    except RecognitionException as re:
        # Standard ANTLR recovery: record, report, resynchronize.
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class FrameExtentContext(ParserRuleContext):
    """Parse-tree context for a frame extent: RANGE|ROWS|GROUPS with one bound
    or a BETWEEN ... AND ... pair of bounds."""
    __slots__ = 'parser'

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser
        self.frameType = None # Token — RANGE, ROWS or GROUPS keyword
        self.start = None # FrameBoundContext — lower bound
        self.end = None # FrameBoundContext — upper bound (BETWEEN form only)

    def RANGE(self):
        """RANGE keyword token, if this extent uses RANGE."""
        return self.getToken(SqlBaseParser.RANGE, 0)

    def frameBound(self, i:int=None):
        """All frame bounds, or the *i*-th one when given."""
        return (
            self.getTypedRuleContexts(SqlBaseParser.FrameBoundContext)
            if i is None
            else self.getTypedRuleContext(SqlBaseParser.FrameBoundContext, i)
        )

    def ROWS(self):
        """ROWS keyword token, if this extent uses ROWS."""
        return self.getToken(SqlBaseParser.ROWS, 0)

    def GROUPS(self):
        """GROUPS keyword token, if this extent uses GROUPS."""
        return self.getToken(SqlBaseParser.GROUPS, 0)

    def BETWEEN(self):
        """BETWEEN keyword token (two-bound form only)."""
        return self.getToken(SqlBaseParser.BETWEEN, 0)

    def AND(self):
        """AND keyword token (two-bound form only)."""
        return self.getToken(SqlBaseParser.AND, 0)

    def getRuleIndex(self):
        return SqlBaseParser.RULE_frameExtent

    def enterRule(self, listener:ParseTreeListener):
        if not hasattr(listener, "enterFrameExtent"):
            return
        listener.enterFrameExtent(self)

    def exitRule(self, listener:ParseTreeListener):
        if not hasattr(listener, "exitFrameExtent"):
            return
        listener.exitFrameExtent(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr(visitor, "visitFrameExtent"):
            return visitor.visitFrameExtent(self)
        return visitor.visitChildren(self)
def frameExtent(self):
    """Parse a ``frameExtent``: six alternatives chosen by adaptive prediction
    (decision 296) — {RANGE|ROWS|GROUPS} frameBound, or
    {RANGE|ROWS|GROUPS} BETWEEN frameBound AND frameBound.

    ``localctx.frameType`` records the keyword token; ``start``/``end`` the bounds.
    NOTE: generated by ANTLR — do not hand-edit.
    """
    localctx = SqlBaseParser.FrameExtentContext(self, self._ctx, self.state)
    self.enterRule(localctx, 138, self.RULE_frameExtent)
    try:
        self.state = 2294
        self._errHandler.sync(self)
        la_ = self._interp.adaptivePredict(self._input,296,self._ctx)
        if la_ == 1:
            # RANGE frameBound
            self.enterOuterAlt(localctx, 1)
            self.state = 2270
            localctx.frameType = self.match(SqlBaseParser.RANGE)
            self.state = 2271
            localctx.start = self.frameBound()
        elif la_ == 2:
            # ROWS frameBound
            self.enterOuterAlt(localctx, 2)
            self.state = 2272
            localctx.frameType = self.match(SqlBaseParser.ROWS)
            self.state = 2273
            localctx.start = self.frameBound()
        elif la_ == 3:
            # GROUPS frameBound
            self.enterOuterAlt(localctx, 3)
            self.state = 2274
            localctx.frameType = self.match(SqlBaseParser.GROUPS)
            self.state = 2275
            localctx.start = self.frameBound()
        elif la_ == 4:
            # RANGE BETWEEN frameBound AND frameBound
            self.enterOuterAlt(localctx, 4)
            self.state = 2276
            localctx.frameType = self.match(SqlBaseParser.RANGE)
            self.state = 2277
            self.match(SqlBaseParser.BETWEEN)
            self.state = 2278
            localctx.start = self.frameBound()
            self.state = 2279
            self.match(SqlBaseParser.AND)
            self.state = 2280
            localctx.end = self.frameBound()
        elif la_ == 5:
            # ROWS BETWEEN frameBound AND frameBound
            self.enterOuterAlt(localctx, 5)
            self.state = 2282
            localctx.frameType = self.match(SqlBaseParser.ROWS)
            self.state = 2283
            self.match(SqlBaseParser.BETWEEN)
            self.state = 2284
            localctx.start = self.frameBound()
            self.state = 2285
            self.match(SqlBaseParser.AND)
            self.state = 2286
            localctx.end = self.frameBound()
        elif la_ == 6:
            # GROUPS BETWEEN frameBound AND frameBound
            self.enterOuterAlt(localctx, 6)
            self.state = 2288
            localctx.frameType = self.match(SqlBaseParser.GROUPS)
            self.state = 2289
            self.match(SqlBaseParser.BETWEEN)
            self.state = 2290
            localctx.start = self.frameBound()
            self.state = 2291
            self.match(SqlBaseParser.AND)
            self.state = 2292
            localctx.end = self.frameBound()
    except RecognitionException as re:
        # Standard ANTLR recovery: record, report, resynchronize.
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class FrameBoundContext(ParserRuleContext):
    """Base parse-tree context for one bound of a frame extent; concrete
    alternatives are the labelled subclasses below."""
    __slots__ = 'parser'

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def getRuleIndex(self):
        return SqlBaseParser.RULE_frameBound

    def copyFrom(self, ctx:ParserRuleContext):
        super().copyFrom(ctx)


class BoundedFrameContext(FrameBoundContext):
    """Labelled alternative: ``expression PRECEDING`` or ``expression FOLLOWING``."""

    def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.FrameBoundContext
        super().__init__(parser)
        self.boundType = None # Token — PRECEDING or FOLLOWING keyword
        self.copyFrom(ctx)

    def expression(self):
        """Offset expression of the bound."""
        return self.getTypedRuleContext(SqlBaseParser.ExpressionContext, 0)

    def PRECEDING(self):
        """PRECEDING keyword token, if this is a preceding bound."""
        return self.getToken(SqlBaseParser.PRECEDING, 0)

    def FOLLOWING(self):
        """FOLLOWING keyword token, if this is a following bound."""
        return self.getToken(SqlBaseParser.FOLLOWING, 0)

    def enterRule(self, listener:ParseTreeListener):
        if not hasattr(listener, "enterBoundedFrame"):
            return
        listener.enterBoundedFrame(self)

    def exitRule(self, listener:ParseTreeListener):
        if not hasattr(listener, "exitBoundedFrame"):
            return
        listener.exitBoundedFrame(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr(visitor, "visitBoundedFrame"):
            return visitor.visitBoundedFrame(self)
        return visitor.visitChildren(self)


class UnboundedFrameContext(FrameBoundContext):
    """Labelled alternative: ``UNBOUNDED PRECEDING`` or ``UNBOUNDED FOLLOWING``."""

    def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.FrameBoundContext
        super().__init__(parser)
        self.boundType = None # Token — PRECEDING or FOLLOWING keyword
        self.copyFrom(ctx)

    def UNBOUNDED(self):
        """UNBOUNDED keyword token."""
        return self.getToken(SqlBaseParser.UNBOUNDED, 0)

    def PRECEDING(self):
        """PRECEDING keyword token, if this is a preceding bound."""
        return self.getToken(SqlBaseParser.PRECEDING, 0)

    def FOLLOWING(self):
        """FOLLOWING keyword token, if this is a following bound."""
        return self.getToken(SqlBaseParser.FOLLOWING, 0)

    def enterRule(self, listener:ParseTreeListener):
        if not hasattr(listener, "enterUnboundedFrame"):
            return
        listener.enterUnboundedFrame(self)

    def exitRule(self, listener:ParseTreeListener):
        if not hasattr(listener, "exitUnboundedFrame"):
            return
        listener.exitUnboundedFrame(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr(visitor, "visitUnboundedFrame"):
            return visitor.visitUnboundedFrame(self)
        return visitor.visitChildren(self)


class CurrentRowBoundContext(FrameBoundContext):
    """Labelled alternative: ``CURRENT ROW``."""

    def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.FrameBoundContext
        super().__init__(parser)
        self.copyFrom(ctx)

    def CURRENT(self):
        """CURRENT keyword token."""
        return self.getToken(SqlBaseParser.CURRENT, 0)

    def ROW(self):
        """ROW keyword token."""
        return self.getToken(SqlBaseParser.ROW, 0)

    def enterRule(self, listener:ParseTreeListener):
        if not hasattr(listener, "enterCurrentRowBound"):
            return
        listener.enterCurrentRowBound(self)

    def exitRule(self, listener:ParseTreeListener):
        if not hasattr(listener, "exitCurrentRowBound"):
            return
        listener.exitCurrentRowBound(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr(visitor, "visitCurrentRowBound"):
            return visitor.visitCurrentRowBound(self)
        return visitor.visitChildren(self)
def frameBound(self):
    """Parse a ``frameBound``: four alternatives chosen by adaptive prediction
    (decision 297) — UNBOUNDED PRECEDING, UNBOUNDED FOLLOWING, CURRENT ROW,
    or ``expression {PRECEDING|FOLLOWING}``.

    NOTE: generated by ANTLR — do not hand-edit.
    """
    localctx = SqlBaseParser.FrameBoundContext(self, self._ctx, self.state)
    self.enterRule(localctx, 140, self.RULE_frameBound)
    self._la = 0 # Token type
    try:
        self.state = 2305
        self._errHandler.sync(self)
        la_ = self._interp.adaptivePredict(self._input,297,self._ctx)
        if la_ == 1:
            # UNBOUNDED PRECEDING
            localctx = SqlBaseParser.UnboundedFrameContext(self, localctx)
            self.enterOuterAlt(localctx, 1)
            self.state = 2296
            self.match(SqlBaseParser.UNBOUNDED)
            self.state = 2297
            localctx.boundType = self.match(SqlBaseParser.PRECEDING)
        elif la_ == 2:
            # UNBOUNDED FOLLOWING
            localctx = SqlBaseParser.UnboundedFrameContext(self, localctx)
            self.enterOuterAlt(localctx, 2)
            self.state = 2298
            self.match(SqlBaseParser.UNBOUNDED)
            self.state = 2299
            localctx.boundType = self.match(SqlBaseParser.FOLLOWING)
        elif la_ == 3:
            # CURRENT ROW
            localctx = SqlBaseParser.CurrentRowBoundContext(self, localctx)
            self.enterOuterAlt(localctx, 3)
            self.state = 2300
            self.match(SqlBaseParser.CURRENT)
            self.state = 2301
            self.match(SqlBaseParser.ROW)
        elif la_ == 4:
            # expression {PRECEDING | FOLLOWING} (token-set match with inline recovery)
            localctx = SqlBaseParser.BoundedFrameContext(self, localctx)
            self.enterOuterAlt(localctx, 4)
            self.state = 2302
            self.expression()
            self.state = 2303
            localctx.boundType = self._input.LT(1)
            _la = self._input.LA(1)
            if not(_la==SqlBaseParser.FOLLOWING or _la==SqlBaseParser.PRECEDING):
                localctx.boundType = self._errHandler.recoverInline(self)
            else:
                self._errHandler.reportMatch(self)
                self.consume()
    except RecognitionException as re:
        # Standard ANTLR recovery: record, report, resynchronize.
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class RowPatternContext(ParserRuleContext):
    """Base parse-tree context for a MATCH_RECOGNIZE row pattern; concrete
    alternatives are the labelled subclasses below."""
    __slots__ = 'parser'

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def getRuleIndex(self):
        return SqlBaseParser.RULE_rowPattern

    def copyFrom(self, ctx:ParserRuleContext):
        super().copyFrom(ctx)


class QuantifiedPrimaryContext(RowPatternContext):
    """Labelled alternative: a pattern primary with an optional quantifier."""

    def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.RowPatternContext
        super().__init__(parser)
        self.copyFrom(ctx)

    def patternPrimary(self):
        """The quantified pattern primary."""
        return self.getTypedRuleContext(SqlBaseParser.PatternPrimaryContext, 0)

    def patternQuantifier(self):
        """Optional quantifier (e.g. *, +, ?, {n,m})."""
        return self.getTypedRuleContext(SqlBaseParser.PatternQuantifierContext, 0)

    def enterRule(self, listener:ParseTreeListener):
        if not hasattr(listener, "enterQuantifiedPrimary"):
            return
        listener.enterQuantifiedPrimary(self)

    def exitRule(self, listener:ParseTreeListener):
        if not hasattr(listener, "exitQuantifiedPrimary"):
            return
        listener.exitQuantifiedPrimary(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr(visitor, "visitQuantifiedPrimary"):
            return visitor.visitQuantifiedPrimary(self)
        return visitor.visitChildren(self)


class PatternConcatenationContext(RowPatternContext):
    """Labelled alternative: two row patterns in sequence."""

    def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.RowPatternContext
        super().__init__(parser)
        self.copyFrom(ctx)

    def rowPattern(self, i:int=None):
        """Both operand patterns, or the *i*-th one when given."""
        return (
            self.getTypedRuleContexts(SqlBaseParser.RowPatternContext)
            if i is None
            else self.getTypedRuleContext(SqlBaseParser.RowPatternContext, i)
        )

    def enterRule(self, listener:ParseTreeListener):
        if not hasattr(listener, "enterPatternConcatenation"):
            return
        listener.enterPatternConcatenation(self)

    def exitRule(self, listener:ParseTreeListener):
        if not hasattr(listener, "exitPatternConcatenation"):
            return
        listener.exitPatternConcatenation(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr(visitor, "visitPatternConcatenation"):
            return visitor.visitPatternConcatenation(self)
        return visitor.visitChildren(self)


class PatternAlternationContext(RowPatternContext):
    """Labelled alternative: two row patterns separated by '|'."""

    def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.RowPatternContext
        super().__init__(parser)
        self.copyFrom(ctx)

    def rowPattern(self, i:int=None):
        """Both operand patterns, or the *i*-th one when given."""
        return (
            self.getTypedRuleContexts(SqlBaseParser.RowPatternContext)
            if i is None
            else self.getTypedRuleContext(SqlBaseParser.RowPatternContext, i)
        )

    def enterRule(self, listener:ParseTreeListener):
        if not hasattr(listener, "enterPatternAlternation"):
            return
        listener.enterPatternAlternation(self)

    def exitRule(self, listener:ParseTreeListener):
        if not hasattr(listener, "exitPatternAlternation"):
            return
        listener.exitPatternAlternation(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr(visitor, "visitPatternAlternation"):
            return visitor.visitPatternAlternation(self)
        return visitor.visitChildren(self)
def rowPattern(self, _p:int=0):
    """Parse a left-recursive ``rowPattern``.

    Starts with a quantified primary, then repeatedly extends the parse with
    concatenation (precedence 2) or '|' alternation (precedence 1) while the
    precedence predicate against *_p* holds.
    NOTE: generated by ANTLR — do not hand-edit.
    """
    _parentctx = self._ctx
    _parentState = self.state
    localctx = SqlBaseParser.RowPatternContext(self, self._ctx, _parentState)
    _prevctx = localctx
    _startState = 142
    self.enterRecursionRule(localctx, 142, self.RULE_rowPattern, _p)
    try:
        self.enterOuterAlt(localctx, 1)
        # Seed of the left-recursion: patternPrimary [patternQuantifier]
        localctx = SqlBaseParser.QuantifiedPrimaryContext(self, localctx)
        self._ctx = localctx
        _prevctx = localctx
        self.state = 2308
        self.patternPrimary()
        self.state = 2310
        self._errHandler.sync(self)
        la_ = self._interp.adaptivePredict(self._input,298,self._ctx)
        if la_ == 1:
            self.state = 2309
            self.patternQuantifier()
        self._ctx.stop = self._input.LT(-1)
        # Loop of the left-recursion: extend with concatenation or alternation.
        self.state = 2319
        self._errHandler.sync(self)
        _alt = self._interp.adaptivePredict(self._input,300,self._ctx)
        while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
            if _alt==1:
                if self._parseListeners is not None:
                    self.triggerExitRuleEvent()
                _prevctx = localctx
                self.state = 2317
                self._errHandler.sync(self)
                la_ = self._interp.adaptivePredict(self._input,299,self._ctx)
                if la_ == 1:
                    # rowPattern rowPattern (concatenation, precedence 2)
                    localctx = SqlBaseParser.PatternConcatenationContext(self, SqlBaseParser.RowPatternContext(self, _parentctx, _parentState))
                    self.pushNewRecursionContext(localctx, _startState, self.RULE_rowPattern)
                    self.state = 2312
                    if not self.precpred(self._ctx, 2):
                        from antlr4.error.Errors import (
                            FailedPredicateException,
                        )
                        raise FailedPredicateException(self, "self.precpred(self._ctx, 2)")
                    self.state = 2313
                    self.rowPattern(3)
                elif la_ == 2:
                    # rowPattern '|' rowPattern (alternation, precedence 1; T__8 is '|')
                    localctx = SqlBaseParser.PatternAlternationContext(self, SqlBaseParser.RowPatternContext(self, _parentctx, _parentState))
                    self.pushNewRecursionContext(localctx, _startState, self.RULE_rowPattern)
                    self.state = 2314
                    if not self.precpred(self._ctx, 1):
                        from antlr4.error.Errors import (
                            FailedPredicateException,
                        )
                        raise FailedPredicateException(self, "self.precpred(self._ctx, 1)")
                    self.state = 2315
                    self.match(SqlBaseParser.T__8)
                    self.state = 2316
                    self.rowPattern(2)
            self.state = 2321
            self._errHandler.sync(self)
            _alt = self._interp.adaptivePredict(self._input,300,self._ctx)
    except RecognitionException as re:
        # Standard ANTLR recovery: record, report, resynchronize.
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.unrollRecursionContexts(_parentctx)
    return localctx
class PatternPrimaryContext(ParserRuleContext):
    """Base parse-tree context for a row-pattern primary; concrete alternatives
    are the labelled subclasses below."""
    __slots__ = 'parser'

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def getRuleIndex(self):
        return SqlBaseParser.RULE_patternPrimary

    def copyFrom(self, ctx:ParserRuleContext):
        super().copyFrom(ctx)


class PatternPermutationContext(PatternPrimaryContext):
    """Labelled alternative: ``PERMUTE ( rowPattern (, rowPattern)* )``."""

    def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.PatternPrimaryContext
        super().__init__(parser)
        self.copyFrom(ctx)

    def PERMUTE(self):
        """PERMUTE keyword token."""
        return self.getToken(SqlBaseParser.PERMUTE, 0)

    def rowPattern(self, i:int=None):
        """All permuted patterns, or the *i*-th one when given."""
        return (
            self.getTypedRuleContexts(SqlBaseParser.RowPatternContext)
            if i is None
            else self.getTypedRuleContext(SqlBaseParser.RowPatternContext, i)
        )

    def enterRule(self, listener:ParseTreeListener):
        if not hasattr(listener, "enterPatternPermutation"):
            return
        listener.enterPatternPermutation(self)

    def exitRule(self, listener:ParseTreeListener):
        if not hasattr(listener, "exitPatternPermutation"):
            return
        listener.exitPatternPermutation(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr(visitor, "visitPatternPermutation"):
            return visitor.visitPatternPermutation(self)
        return visitor.visitChildren(self)


class PartitionEndAnchorContext(PatternPrimaryContext):
    """Labelled alternative: partition end anchor."""

    def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.PatternPrimaryContext
        super().__init__(parser)
        self.copyFrom(ctx)

    def enterRule(self, listener:ParseTreeListener):
        if not hasattr(listener, "enterPartitionEndAnchor"):
            return
        listener.enterPartitionEndAnchor(self)

    def exitRule(self, listener:ParseTreeListener):
        if not hasattr(listener, "exitPartitionEndAnchor"):
            return
        listener.exitPartitionEndAnchor(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr(visitor, "visitPartitionEndAnchor"):
            return visitor.visitPartitionEndAnchor(self)
        return visitor.visitChildren(self)


class PatternVariableContext(PatternPrimaryContext):
    """Labelled alternative: a bare pattern-variable identifier."""

    def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.PatternPrimaryContext
        super().__init__(parser)
        self.copyFrom(ctx)

    def identifier(self):
        """Pattern-variable identifier context."""
        return self.getTypedRuleContext(SqlBaseParser.IdentifierContext, 0)

    def enterRule(self, listener:ParseTreeListener):
        if not hasattr(listener, "enterPatternVariable"):
            return
        listener.enterPatternVariable(self)

    def exitRule(self, listener:ParseTreeListener):
        if not hasattr(listener, "exitPatternVariable"):
            return
        listener.exitPatternVariable(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr(visitor, "visitPatternVariable"):
            return visitor.visitPatternVariable(self)
        return visitor.visitChildren(self)


class ExcludedPatternContext(PatternPrimaryContext):
    """Labelled alternative: a pattern excluded from the output."""

    def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.PatternPrimaryContext
        super().__init__(parser)
        self.copyFrom(ctx)

    def rowPattern(self):
        """The excluded pattern."""
        return self.getTypedRuleContext(SqlBaseParser.RowPatternContext, 0)

    def enterRule(self, listener:ParseTreeListener):
        if not hasattr(listener, "enterExcludedPattern"):
            return
        listener.enterExcludedPattern(self)

    def exitRule(self, listener:ParseTreeListener):
        if not hasattr(listener, "exitExcludedPattern"):
            return
        listener.exitExcludedPattern(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr(visitor, "visitExcludedPattern"):
            return visitor.visitExcludedPattern(self)
        return visitor.visitChildren(self)


class PartitionStartAnchorContext(PatternPrimaryContext):
    """Labelled alternative: partition start anchor."""

    def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.PatternPrimaryContext
        super().__init__(parser)
        self.copyFrom(ctx)

    def enterRule(self, listener:ParseTreeListener):
        if not hasattr(listener, "enterPartitionStartAnchor"):
            return
        listener.enterPartitionStartAnchor(self)

    def exitRule(self, listener:ParseTreeListener):
        if not hasattr(listener, "exitPartitionStartAnchor"):
            return
        listener.exitPartitionStartAnchor(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr(visitor, "visitPartitionStartAnchor"):
            return visitor.visitPartitionStartAnchor(self)
        return visitor.visitChildren(self)


class EmptyPatternContext(PatternPrimaryContext):
    """Labelled alternative: the empty pattern ``()``."""

    def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.PatternPrimaryContext
        super().__init__(parser)
        self.copyFrom(ctx)

    def enterRule(self, listener:ParseTreeListener):
        if not hasattr(listener, "enterEmptyPattern"):
            return
        listener.enterEmptyPattern(self)

    def exitRule(self, listener:ParseTreeListener):
        if not hasattr(listener, "exitEmptyPattern"):
            return
        listener.exitEmptyPattern(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr(visitor, "visitEmptyPattern"):
            return visitor.visitEmptyPattern(self)
        return visitor.visitChildren(self)


class GroupedPatternContext(PatternPrimaryContext):
    """Labelled alternative: a parenthesized pattern ``( rowPattern )``."""

    def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.PatternPrimaryContext
        super().__init__(parser)
        self.copyFrom(ctx)

    def rowPattern(self):
        """The grouped pattern."""
        return self.getTypedRuleContext(SqlBaseParser.RowPatternContext, 0)

    def enterRule(self, listener:ParseTreeListener):
        if not hasattr(listener, "enterGroupedPattern"):
            return
        listener.enterGroupedPattern(self)

    def exitRule(self, listener:ParseTreeListener):
        if not hasattr(listener, "exitGroupedPattern"):
            return
        listener.exitGroupedPattern(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr(visitor, "visitGroupedPattern"):
            return visitor.visitGroupedPattern(self)
        return visitor.visitChildren(self)
def patternPrimary(self):
    """Parse the patternPrimary grammar rule (rule index 144).

    Seven labeled alternatives, chosen by ATN decision 302: a pattern
    variable (bare identifier), an empty pattern, a PERMUTE(...) list,
    a grouped (parenthesized) pattern, the partition start/end anchors,
    and an excluded pattern.  T__N are anonymous literal tokens from the
    grammar (their exact text must be confirmed against the .g4 file).
    """
    localctx = SqlBaseParser.PatternPrimaryContext(self, self._ctx, self.state)
    self.enterRule(localctx, 144, self.RULE_patternPrimary)
    self._la = 0 # Token type
    try:
        self.state = 2347
        self._errHandler.sync(self)
        # Adaptive prediction picks the alternative for this input.
        la_ = self._interp.adaptivePredict(self._input,302,self._ctx)
        if la_ == 1:
            # patternVariable: a bare identifier.
            localctx = SqlBaseParser.PatternVariableContext(self, localctx)
            self.enterOuterAlt(localctx, 1)
            self.state = 2322
            self.identifier()

        elif la_ == 2:
            # emptyPattern: two literal tokens (presumably '(' ')').
            localctx = SqlBaseParser.EmptyPatternContext(self, localctx)
            self.enterOuterAlt(localctx, 2)
            self.state = 2323
            self.match(SqlBaseParser.T__1)
            self.state = 2324
            self.match(SqlBaseParser.T__2)

        elif la_ == 3:
            # patternPermutation: PERMUTE followed by one or more
            # rowPattern operands separated by the T__3 literal.
            localctx = SqlBaseParser.PatternPermutationContext(self, localctx)
            self.enterOuterAlt(localctx, 3)
            self.state = 2325
            self.match(SqlBaseParser.PERMUTE)
            self.state = 2326
            self.match(SqlBaseParser.T__1)
            self.state = 2327
            self.rowPattern(0)
            self.state = 2332
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            while _la==SqlBaseParser.T__3:
                self.state = 2328
                self.match(SqlBaseParser.T__3)
                self.state = 2329
                self.rowPattern(0)
                self.state = 2334
                self._errHandler.sync(self)
                _la = self._input.LA(1)

            self.state = 2335
            self.match(SqlBaseParser.T__2)

        elif la_ == 4:
            # groupedPattern: a rowPattern wrapped in literal delimiters.
            localctx = SqlBaseParser.GroupedPatternContext(self, localctx)
            self.enterOuterAlt(localctx, 4)
            self.state = 2337
            self.match(SqlBaseParser.T__1)
            self.state = 2338
            self.rowPattern(0)
            self.state = 2339
            self.match(SqlBaseParser.T__2)

        elif la_ == 5:
            # partitionStartAnchor: single literal token.
            localctx = SqlBaseParser.PartitionStartAnchorContext(self, localctx)
            self.enterOuterAlt(localctx, 5)
            self.state = 2341
            self.match(SqlBaseParser.T__9)

        elif la_ == 6:
            # partitionEndAnchor: single literal token.
            localctx = SqlBaseParser.PartitionEndAnchorContext(self, localctx)
            self.enterOuterAlt(localctx, 6)
            self.state = 2342
            self.match(SqlBaseParser.T__10)

        elif la_ == 7:
            # excludedPattern: rowPattern wrapped in exclusion delimiters.
            localctx = SqlBaseParser.ExcludedPatternContext(self, localctx)
            self.enterOuterAlt(localctx, 7)
            self.state = 2343
            self.match(SqlBaseParser.T__11)
            self.state = 2344
            self.rowPattern(0)
            self.state = 2345
            self.match(SqlBaseParser.T__12)

    except RecognitionException as re:
        # Standard ANTLR recovery: record, report, resynchronize.
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class PatternQuantifierContext(ParserRuleContext):
    """Base context for the patternQuantifier rule; actual parses use one of the labeled subclasses."""
    __slots__ = 'parser'

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def getRuleIndex(self):
        return SqlBaseParser.RULE_patternQuantifier

    def copyFrom(self, ctx:ParserRuleContext):
        super().copyFrom(ctx)


class ZeroOrMoreQuantifierContext(PatternQuantifierContext):
    """ASTERISK quantifier; `reluctant` holds the optional trailing QUESTION_MARK token."""

    def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.PatternQuantifierContext
        super().__init__(parser)
        self.reluctant = None # Token
        self.copyFrom(ctx)

    def ASTERISK(self):
        return self.getToken(SqlBaseParser.ASTERISK, 0)

    def QUESTION_MARK(self):
        return self.getToken(SqlBaseParser.QUESTION_MARK, 0)

    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterZeroOrMoreQuantifier" ):
            listener.enterZeroOrMoreQuantifier(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitZeroOrMoreQuantifier" ):
            listener.exitZeroOrMoreQuantifier(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitZeroOrMoreQuantifier" ):
            return visitor.visitZeroOrMoreQuantifier(self)
        else:
            return visitor.visitChildren(self)


class OneOrMoreQuantifierContext(PatternQuantifierContext):
    """PLUS quantifier; `reluctant` holds the optional trailing QUESTION_MARK token."""

    def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.PatternQuantifierContext
        super().__init__(parser)
        self.reluctant = None # Token
        self.copyFrom(ctx)

    def PLUS(self):
        return self.getToken(SqlBaseParser.PLUS, 0)

    def QUESTION_MARK(self):
        return self.getToken(SqlBaseParser.QUESTION_MARK, 0)

    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterOneOrMoreQuantifier" ):
            listener.enterOneOrMoreQuantifier(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitOneOrMoreQuantifier" ):
            listener.exitOneOrMoreQuantifier(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitOneOrMoreQuantifier" ):
            return visitor.visitOneOrMoreQuantifier(self)
        else:
            return visitor.visitChildren(self)


class ZeroOrOneQuantifierContext(PatternQuantifierContext):
    """QUESTION_MARK quantifier; `reluctant` holds an optional second QUESTION_MARK token."""

    def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.PatternQuantifierContext
        super().__init__(parser)
        self.reluctant = None # Token
        self.copyFrom(ctx)

    def QUESTION_MARK(self, i:int=None):
        # With no index, return all QUESTION_MARK tokens; otherwise the i-th.
        if i is None:
            return self.getTokens(SqlBaseParser.QUESTION_MARK)
        else:
            return self.getToken(SqlBaseParser.QUESTION_MARK, i)

    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterZeroOrOneQuantifier" ):
            listener.enterZeroOrOneQuantifier(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitZeroOrOneQuantifier" ):
            listener.exitZeroOrOneQuantifier(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitZeroOrOneQuantifier" ):
            return visitor.visitZeroOrOneQuantifier(self)
        else:
            return visitor.visitChildren(self)


class RangeQuantifierContext(PatternQuantifierContext):
    """Bounded quantifier; either `exactly`, or an `atLeast`/`atMost` pair (each optional),
    plus the optional `reluctant` QUESTION_MARK token."""

    def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.PatternQuantifierContext
        super().__init__(parser)
        self.exactly = None # Token
        self.reluctant = None # Token
        self.atLeast = None # Token
        self.atMost = None # Token
        self.copyFrom(ctx)

    def INTEGER_VALUE(self, i:int=None):
        # With no index, return all INTEGER_VALUE tokens; otherwise the i-th.
        if i is None:
            return self.getTokens(SqlBaseParser.INTEGER_VALUE)
        else:
            return self.getToken(SqlBaseParser.INTEGER_VALUE, i)

    def QUESTION_MARK(self):
        return self.getToken(SqlBaseParser.QUESTION_MARK, 0)

    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterRangeQuantifier" ):
            listener.enterRangeQuantifier(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitRangeQuantifier" ):
            listener.exitRangeQuantifier(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitRangeQuantifier" ):
            return visitor.visitRangeQuantifier(self)
        else:
            return visitor.visitChildren(self)
def patternQuantifier(self):
    """Parse the patternQuantifier grammar rule (rule index 146).

    Alternatives (ATN decision 310): '*', '+', '?' — each with an
    optional reluctant '?' suffix — an exact {n} count, and an
    {atLeast, atMost} range where both bounds are optional.  T__13 and
    T__14 are the anonymous brace literals; confirm against the .g4.
    """
    localctx = SqlBaseParser.PatternQuantifierContext(self, self._ctx, self.state)
    self.enterRule(localctx, 146, self.RULE_patternQuantifier)
    self._la = 0 # Token type
    try:
        self.state = 2379
        self._errHandler.sync(self)
        la_ = self._interp.adaptivePredict(self._input,310,self._ctx)
        if la_ == 1:
            # zeroOrMoreQuantifier: ASTERISK with optional reluctant marker.
            localctx = SqlBaseParser.ZeroOrMoreQuantifierContext(self, localctx)
            self.enterOuterAlt(localctx, 1)
            self.state = 2349
            self.match(SqlBaseParser.ASTERISK)
            self.state = 2351
            self._errHandler.sync(self)
            la_ = self._interp.adaptivePredict(self._input,303,self._ctx)
            if la_ == 1:
                self.state = 2350
                localctx.reluctant = self.match(SqlBaseParser.QUESTION_MARK)

        elif la_ == 2:
            # oneOrMoreQuantifier: PLUS with optional reluctant marker.
            localctx = SqlBaseParser.OneOrMoreQuantifierContext(self, localctx)
            self.enterOuterAlt(localctx, 2)
            self.state = 2353
            self.match(SqlBaseParser.PLUS)
            self.state = 2355
            self._errHandler.sync(self)
            la_ = self._interp.adaptivePredict(self._input,304,self._ctx)
            if la_ == 1:
                self.state = 2354
                localctx.reluctant = self.match(SqlBaseParser.QUESTION_MARK)

        elif la_ == 3:
            # zeroOrOneQuantifier: QUESTION_MARK with optional second '?'.
            localctx = SqlBaseParser.ZeroOrOneQuantifierContext(self, localctx)
            self.enterOuterAlt(localctx, 3)
            self.state = 2357
            self.match(SqlBaseParser.QUESTION_MARK)
            self.state = 2359
            self._errHandler.sync(self)
            la_ = self._interp.adaptivePredict(self._input,305,self._ctx)
            if la_ == 1:
                self.state = 2358
                localctx.reluctant = self.match(SqlBaseParser.QUESTION_MARK)

        elif la_ == 4:
            # rangeQuantifier, exact form: braces around a single count.
            localctx = SqlBaseParser.RangeQuantifierContext(self, localctx)
            self.enterOuterAlt(localctx, 4)
            self.state = 2361
            self.match(SqlBaseParser.T__13)
            self.state = 2362
            localctx.exactly = self.match(SqlBaseParser.INTEGER_VALUE)
            self.state = 2363
            self.match(SqlBaseParser.T__14)
            self.state = 2365
            self._errHandler.sync(self)
            la_ = self._interp.adaptivePredict(self._input,306,self._ctx)
            if la_ == 1:
                self.state = 2364
                localctx.reluctant = self.match(SqlBaseParser.QUESTION_MARK)

        elif la_ == 5:
            # rangeQuantifier, bounded form: both bounds independently optional.
            localctx = SqlBaseParser.RangeQuantifierContext(self, localctx)
            self.enterOuterAlt(localctx, 5)
            self.state = 2367
            self.match(SqlBaseParser.T__13)
            self.state = 2369
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            if _la==SqlBaseParser.INTEGER_VALUE:
                self.state = 2368
                localctx.atLeast = self.match(SqlBaseParser.INTEGER_VALUE)

            self.state = 2371
            self.match(SqlBaseParser.T__3)
            self.state = 2373
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            if _la==SqlBaseParser.INTEGER_VALUE:
                self.state = 2372
                localctx.atMost = self.match(SqlBaseParser.INTEGER_VALUE)

            self.state = 2375
            self.match(SqlBaseParser.T__14)
            self.state = 2377
            self._errHandler.sync(self)
            la_ = self._interp.adaptivePredict(self._input,309,self._ctx)
            if la_ == 1:
                self.state = 2376
                localctx.reluctant = self.match(SqlBaseParser.QUESTION_MARK)

    except RecognitionException as re:
        # Standard ANTLR recovery: record, report, resynchronize.
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class UpdateAssignmentContext(ParserRuleContext):
    """Parse-tree context for the updateAssignment rule: identifier EQ expression."""
    __slots__ = 'parser'

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def identifier(self):
        # Target column/identifier being assigned.
        return self.getTypedRuleContext(SqlBaseParser.IdentifierContext,0)

    def EQ(self):
        return self.getToken(SqlBaseParser.EQ, 0)

    def expression(self):
        # Value expression on the right-hand side of the assignment.
        return self.getTypedRuleContext(SqlBaseParser.ExpressionContext,0)

    def getRuleIndex(self):
        return SqlBaseParser.RULE_updateAssignment

    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterUpdateAssignment" ):
            listener.enterUpdateAssignment(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitUpdateAssignment" ):
            listener.exitUpdateAssignment(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitUpdateAssignment" ):
            return visitor.visitUpdateAssignment(self)
        else:
            return visitor.visitChildren(self)
def updateAssignment(self):
    """Parse the updateAssignment rule (rule index 148): identifier EQ expression."""
    localctx = SqlBaseParser.UpdateAssignmentContext(self, self._ctx, self.state)
    self.enterRule(localctx, 148, self.RULE_updateAssignment)
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 2381
        self.identifier()
        self.state = 2382
        self.match(SqlBaseParser.EQ)
        self.state = 2383
        self.expression()
    except RecognitionException as re:
        # Standard ANTLR recovery: record, report, resynchronize.
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class ExplainOptionContext(ParserRuleContext):
    """Base context for the explainOption rule; parses use the labeled subclasses below."""
    __slots__ = 'parser'

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def getRuleIndex(self):
        return SqlBaseParser.RULE_explainOption

    def copyFrom(self, ctx:ParserRuleContext):
        super().copyFrom(ctx)


class ExplainFormatContext(ExplainOptionContext):
    """FORMAT option; `value` is one of the TEXT / GRAPHVIZ / JSON tokens."""

    def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.ExplainOptionContext
        super().__init__(parser)
        self.value = None # Token
        self.copyFrom(ctx)

    def FORMAT(self):
        return self.getToken(SqlBaseParser.FORMAT, 0)

    def TEXT(self):
        return self.getToken(SqlBaseParser.TEXT, 0)

    def GRAPHVIZ(self):
        return self.getToken(SqlBaseParser.GRAPHVIZ, 0)

    def JSON(self):
        return self.getToken(SqlBaseParser.JSON, 0)

    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterExplainFormat" ):
            listener.enterExplainFormat(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitExplainFormat" ):
            listener.exitExplainFormat(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitExplainFormat" ):
            return visitor.visitExplainFormat(self)
        else:
            return visitor.visitChildren(self)


class ExplainTypeContext(ExplainOptionContext):
    """TYPE option; `value` is one of the LOGICAL / DISTRIBUTED / VALIDATE / IO tokens."""

    def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.ExplainOptionContext
        super().__init__(parser)
        self.value = None # Token
        self.copyFrom(ctx)

    def TYPE(self):
        return self.getToken(SqlBaseParser.TYPE, 0)

    def LOGICAL(self):
        return self.getToken(SqlBaseParser.LOGICAL, 0)

    def DISTRIBUTED(self):
        return self.getToken(SqlBaseParser.DISTRIBUTED, 0)

    def VALIDATE(self):
        return self.getToken(SqlBaseParser.VALIDATE, 0)

    def IO(self):
        return self.getToken(SqlBaseParser.IO, 0)

    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterExplainType" ):
            listener.enterExplainType(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitExplainType" ):
            listener.exitExplainType(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitExplainType" ):
            return visitor.visitExplainType(self)
        else:
            return visitor.visitChildren(self)
def explainOption(self):
    """Parse the explainOption rule (rule index 150).

    Two LL(1) alternatives keyed off the next token: FORMAT followed by
    TEXT|GRAPHVIZ|JSON, or TYPE followed by LOGICAL|DISTRIBUTED|VALIDATE|IO.
    The chosen keyword token is captured in localctx.value.
    """
    localctx = SqlBaseParser.ExplainOptionContext(self, self._ctx, self.state)
    self.enterRule(localctx, 150, self.RULE_explainOption)
    self._la = 0 # Token type
    try:
        self.state = 2389
        self._errHandler.sync(self)
        token = self._input.LA(1)
        if token in [SqlBaseParser.FORMAT]:
            localctx = SqlBaseParser.ExplainFormatContext(self, localctx)
            self.enterOuterAlt(localctx, 1)
            self.state = 2385
            self.match(SqlBaseParser.FORMAT)
            self.state = 2386
            localctx.value = self._input.LT(1)
            _la = self._input.LA(1)
            # Inline token-set match: accept GRAPHVIZ, JSON, or TEXT.
            if not(_la==SqlBaseParser.GRAPHVIZ or _la==SqlBaseParser.JSON or _la==SqlBaseParser.TEXT):
                localctx.value = self._errHandler.recoverInline(self)
            else:
                self._errHandler.reportMatch(self)
                self.consume()
        elif token in [SqlBaseParser.TYPE]:
            localctx = SqlBaseParser.ExplainTypeContext(self, localctx)
            self.enterOuterAlt(localctx, 2)
            self.state = 2387
            self.match(SqlBaseParser.TYPE)
            self.state = 2388
            localctx.value = self._input.LT(1)
            _la = self._input.LA(1)
            # Bitset test (tokens offset by 68) for DISTRIBUTED/IO/LOGICAL, plus VALIDATE.
            if not(((((_la - 68)) & ~0x3f) == 0 and ((1 << (_la - 68)) & ((1 << (SqlBaseParser.DISTRIBUTED - 68)) | (1 << (SqlBaseParser.IO - 68)) | (1 << (SqlBaseParser.LOGICAL - 68)))) != 0) or _la==SqlBaseParser.VALIDATE):
                localctx.value = self._errHandler.recoverInline(self)
            else:
                self._errHandler.reportMatch(self)
                self.consume()
        else:
            raise NoViableAltException(self)
    except RecognitionException as re:
        # Standard ANTLR recovery: record, report, resynchronize.
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class TransactionModeContext(ParserRuleContext):
    """Base context for the transactionMode rule; parses use the labeled subclasses below."""
    __slots__ = 'parser'

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def getRuleIndex(self):
        return SqlBaseParser.RULE_transactionMode

    def copyFrom(self, ctx:ParserRuleContext):
        super().copyFrom(ctx)


class TransactionAccessModeContext(TransactionModeContext):
    """READ ONLY / READ WRITE mode; `accessMode` holds the ONLY or WRITE token."""

    def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.TransactionModeContext
        super().__init__(parser)
        self.accessMode = None # Token
        self.copyFrom(ctx)

    def READ(self):
        return self.getToken(SqlBaseParser.READ, 0)

    def ONLY(self):
        return self.getToken(SqlBaseParser.ONLY, 0)

    def WRITE(self):
        return self.getToken(SqlBaseParser.WRITE, 0)

    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterTransactionAccessMode" ):
            listener.enterTransactionAccessMode(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitTransactionAccessMode" ):
            listener.exitTransactionAccessMode(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitTransactionAccessMode" ):
            return visitor.visitTransactionAccessMode(self)
        else:
            return visitor.visitChildren(self)


class IsolationLevelContext(TransactionModeContext):
    """ISOLATION LEVEL mode; the actual level is the nested levelOfIsolation subtree."""

    def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.TransactionModeContext
        super().__init__(parser)
        self.copyFrom(ctx)

    def ISOLATION(self):
        return self.getToken(SqlBaseParser.ISOLATION, 0)

    def LEVEL(self):
        return self.getToken(SqlBaseParser.LEVEL, 0)

    def levelOfIsolation(self):
        return self.getTypedRuleContext(SqlBaseParser.LevelOfIsolationContext,0)

    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterIsolationLevel" ):
            listener.enterIsolationLevel(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitIsolationLevel" ):
            listener.exitIsolationLevel(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitIsolationLevel" ):
            return visitor.visitIsolationLevel(self)
        else:
            return visitor.visitChildren(self)
def transactionMode(self):
    """Parse the transactionMode rule (rule index 152).

    Two LL(1) alternatives keyed off the next token: ISOLATION LEVEL
    levelOfIsolation, or READ followed by ONLY|WRITE (captured in
    localctx.accessMode).
    """
    localctx = SqlBaseParser.TransactionModeContext(self, self._ctx, self.state)
    self.enterRule(localctx, 152, self.RULE_transactionMode)
    self._la = 0 # Token type
    try:
        self.state = 2396
        self._errHandler.sync(self)
        token = self._input.LA(1)
        if token in [SqlBaseParser.ISOLATION]:
            localctx = SqlBaseParser.IsolationLevelContext(self, localctx)
            self.enterOuterAlt(localctx, 1)
            self.state = 2391
            self.match(SqlBaseParser.ISOLATION)
            self.state = 2392
            self.match(SqlBaseParser.LEVEL)
            self.state = 2393
            self.levelOfIsolation()
        elif token in [SqlBaseParser.READ]:
            localctx = SqlBaseParser.TransactionAccessModeContext(self, localctx)
            self.enterOuterAlt(localctx, 2)
            self.state = 2394
            self.match(SqlBaseParser.READ)
            self.state = 2395
            localctx.accessMode = self._input.LT(1)
            _la = self._input.LA(1)
            # Inline token-set match: accept ONLY or WRITE.
            if not(_la==SqlBaseParser.ONLY or _la==SqlBaseParser.WRITE):
                localctx.accessMode = self._errHandler.recoverInline(self)
            else:
                self._errHandler.reportMatch(self)
                self.consume()
        else:
            raise NoViableAltException(self)
    except RecognitionException as re:
        # Standard ANTLR recovery: record, report, resynchronize.
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class LevelOfIsolationContext(ParserRuleContext):
    """Base context for the levelOfIsolation rule; parses use the labeled subclasses below."""
    __slots__ = 'parser'

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def getRuleIndex(self):
        return SqlBaseParser.RULE_levelOfIsolation

    def copyFrom(self, ctx:ParserRuleContext):
        super().copyFrom(ctx)


class ReadUncommittedContext(LevelOfIsolationContext):
    """READ UNCOMMITTED isolation level."""

    def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.LevelOfIsolationContext
        super().__init__(parser)
        self.copyFrom(ctx)

    def READ(self):
        return self.getToken(SqlBaseParser.READ, 0)

    def UNCOMMITTED(self):
        return self.getToken(SqlBaseParser.UNCOMMITTED, 0)

    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterReadUncommitted" ):
            listener.enterReadUncommitted(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitReadUncommitted" ):
            listener.exitReadUncommitted(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitReadUncommitted" ):
            return visitor.visitReadUncommitted(self)
        else:
            return visitor.visitChildren(self)


class SerializableContext(LevelOfIsolationContext):
    """SERIALIZABLE isolation level."""

    def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.LevelOfIsolationContext
        super().__init__(parser)
        self.copyFrom(ctx)

    def SERIALIZABLE(self):
        return self.getToken(SqlBaseParser.SERIALIZABLE, 0)

    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterSerializable" ):
            listener.enterSerializable(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitSerializable" ):
            listener.exitSerializable(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitSerializable" ):
            return visitor.visitSerializable(self)
        else:
            return visitor.visitChildren(self)


class ReadCommittedContext(LevelOfIsolationContext):
    """READ COMMITTED isolation level."""

    def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.LevelOfIsolationContext
        super().__init__(parser)
        self.copyFrom(ctx)

    def READ(self):
        return self.getToken(SqlBaseParser.READ, 0)

    def COMMITTED(self):
        return self.getToken(SqlBaseParser.COMMITTED, 0)

    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterReadCommitted" ):
            listener.enterReadCommitted(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitReadCommitted" ):
            listener.exitReadCommitted(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitReadCommitted" ):
            return visitor.visitReadCommitted(self)
        else:
            return visitor.visitChildren(self)


class RepeatableReadContext(LevelOfIsolationContext):
    """REPEATABLE READ isolation level."""

    def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.LevelOfIsolationContext
        super().__init__(parser)
        self.copyFrom(ctx)

    def REPEATABLE(self):
        return self.getToken(SqlBaseParser.REPEATABLE, 0)

    def READ(self):
        return self.getToken(SqlBaseParser.READ, 0)

    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterRepeatableRead" ):
            listener.enterRepeatableRead(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitRepeatableRead" ):
            listener.exitRepeatableRead(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitRepeatableRead" ):
            return visitor.visitRepeatableRead(self)
        else:
            return visitor.visitChildren(self)
def levelOfIsolation(self):
    """Parse the levelOfIsolation rule (rule index 154).

    Four alternatives via ATN decision 313: READ UNCOMMITTED,
    READ COMMITTED, REPEATABLE READ, SERIALIZABLE.
    """
    localctx = SqlBaseParser.LevelOfIsolationContext(self, self._ctx, self.state)
    self.enterRule(localctx, 154, self.RULE_levelOfIsolation)
    try:
        self.state = 2405
        self._errHandler.sync(self)
        la_ = self._interp.adaptivePredict(self._input,313,self._ctx)
        if la_ == 1:
            localctx = SqlBaseParser.ReadUncommittedContext(self, localctx)
            self.enterOuterAlt(localctx, 1)
            self.state = 2398
            self.match(SqlBaseParser.READ)
            self.state = 2399
            self.match(SqlBaseParser.UNCOMMITTED)

        elif la_ == 2:
            localctx = SqlBaseParser.ReadCommittedContext(self, localctx)
            self.enterOuterAlt(localctx, 2)
            self.state = 2400
            self.match(SqlBaseParser.READ)
            self.state = 2401
            self.match(SqlBaseParser.COMMITTED)

        elif la_ == 3:
            localctx = SqlBaseParser.RepeatableReadContext(self, localctx)
            self.enterOuterAlt(localctx, 3)
            self.state = 2402
            self.match(SqlBaseParser.REPEATABLE)
            self.state = 2403
            self.match(SqlBaseParser.READ)

        elif la_ == 4:
            localctx = SqlBaseParser.SerializableContext(self, localctx)
            self.enterOuterAlt(localctx, 4)
            self.state = 2404
            self.match(SqlBaseParser.SERIALIZABLE)

    except RecognitionException as re:
        # Standard ANTLR recovery: record, report, resynchronize.
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class CallArgumentContext(ParserRuleContext):
    """Base context for the callArgument rule; parses use the labeled subclasses below."""
    __slots__ = 'parser'

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def getRuleIndex(self):
        return SqlBaseParser.RULE_callArgument

    def copyFrom(self, ctx:ParserRuleContext):
        super().copyFrom(ctx)


class PositionalArgumentContext(CallArgumentContext):
    """Positional CALL argument: a bare expression."""

    def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.CallArgumentContext
        super().__init__(parser)
        self.copyFrom(ctx)

    def expression(self):
        return self.getTypedRuleContext(SqlBaseParser.ExpressionContext,0)

    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterPositionalArgument" ):
            listener.enterPositionalArgument(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitPositionalArgument" ):
            listener.exitPositionalArgument(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitPositionalArgument" ):
            return visitor.visitPositionalArgument(self)
        else:
            return visitor.visitChildren(self)


class NamedArgumentContext(CallArgumentContext):
    """Named CALL argument: identifier followed by an expression."""

    def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.CallArgumentContext
        super().__init__(parser)
        self.copyFrom(ctx)

    def identifier(self):
        return self.getTypedRuleContext(SqlBaseParser.IdentifierContext,0)

    def expression(self):
        return self.getTypedRuleContext(SqlBaseParser.ExpressionContext,0)

    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterNamedArgument" ):
            listener.enterNamedArgument(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitNamedArgument" ):
            listener.exitNamedArgument(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitNamedArgument" ):
            return visitor.visitNamedArgument(self)
        else:
            return visitor.visitChildren(self)
def callArgument(self):
    """Parse the callArgument rule (rule index 156).

    Two alternatives via ATN decision 314: a positional argument
    (expression) or a named argument (identifier, the T__15 literal
    separator — presumably '=>', confirm against the .g4 — and an
    expression).
    """
    localctx = SqlBaseParser.CallArgumentContext(self, self._ctx, self.state)
    self.enterRule(localctx, 156, self.RULE_callArgument)
    try:
        self.state = 2412
        self._errHandler.sync(self)
        la_ = self._interp.adaptivePredict(self._input,314,self._ctx)
        if la_ == 1:
            localctx = SqlBaseParser.PositionalArgumentContext(self, localctx)
            self.enterOuterAlt(localctx, 1)
            self.state = 2407
            self.expression()

        elif la_ == 2:
            localctx = SqlBaseParser.NamedArgumentContext(self, localctx)
            self.enterOuterAlt(localctx, 2)
            self.state = 2408
            self.identifier()
            self.state = 2409
            self.match(SqlBaseParser.T__15)
            self.state = 2410
            self.expression()

    except RecognitionException as re:
        # Standard ANTLR recovery: record, report, resynchronize.
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class PathElementContext(ParserRuleContext):
    """Base context for the pathElement rule; parses use the labeled subclasses below."""
    __slots__ = 'parser'

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def getRuleIndex(self):
        return SqlBaseParser.RULE_pathElement

    def copyFrom(self, ctx:ParserRuleContext):
        super().copyFrom(ctx)


class QualifiedArgumentContext(PathElementContext):
    """Qualified path element: two identifiers (e.g. catalog.schema)."""

    def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.PathElementContext
        super().__init__(parser)
        self.copyFrom(ctx)

    def identifier(self, i:int=None):
        # With no index, return all identifier subtrees; otherwise the i-th.
        if i is None:
            return self.getTypedRuleContexts(SqlBaseParser.IdentifierContext)
        else:
            return self.getTypedRuleContext(SqlBaseParser.IdentifierContext,i)

    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterQualifiedArgument" ):
            listener.enterQualifiedArgument(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitQualifiedArgument" ):
            listener.exitQualifiedArgument(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitQualifiedArgument" ):
            return visitor.visitQualifiedArgument(self)
        else:
            return visitor.visitChildren(self)


class UnqualifiedArgumentContext(PathElementContext):
    """Unqualified path element: a single identifier."""

    def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.PathElementContext
        super().__init__(parser)
        self.copyFrom(ctx)

    def identifier(self):
        return self.getTypedRuleContext(SqlBaseParser.IdentifierContext,0)

    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterUnqualifiedArgument" ):
            listener.enterUnqualifiedArgument(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitUnqualifiedArgument" ):
            listener.exitUnqualifiedArgument(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitUnqualifiedArgument" ):
            return visitor.visitUnqualifiedArgument(self)
        else:
            return visitor.visitChildren(self)
def pathElement(self):
    """Parse the pathElement rule (rule index 158).

    Two alternatives via ATN decision 315: a qualified element
    (identifier, the T__0 literal separator — presumably '.' — and
    another identifier), or a single unqualified identifier.
    """
    localctx = SqlBaseParser.PathElementContext(self, self._ctx, self.state)
    self.enterRule(localctx, 158, self.RULE_pathElement)
    try:
        self.state = 2419
        self._errHandler.sync(self)
        la_ = self._interp.adaptivePredict(self._input,315,self._ctx)
        if la_ == 1:
            localctx = SqlBaseParser.QualifiedArgumentContext(self, localctx)
            self.enterOuterAlt(localctx, 1)
            self.state = 2414
            self.identifier()
            self.state = 2415
            self.match(SqlBaseParser.T__0)
            self.state = 2416
            self.identifier()

        elif la_ == 2:
            localctx = SqlBaseParser.UnqualifiedArgumentContext(self, localctx)
            self.enterOuterAlt(localctx, 2)
            self.state = 2418
            self.identifier()

    except RecognitionException as re:
        # Standard ANTLR recovery: record, report, resynchronize.
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class PathSpecificationContext(ParserRuleContext):
    """Parse-tree context for pathSpecification: one or more pathElement children."""
    __slots__ = 'parser'

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def pathElement(self, i:int=None):
        # With no index, return all pathElement subtrees; otherwise the i-th.
        if i is None:
            return self.getTypedRuleContexts(SqlBaseParser.PathElementContext)
        else:
            return self.getTypedRuleContext(SqlBaseParser.PathElementContext,i)

    def getRuleIndex(self):
        return SqlBaseParser.RULE_pathSpecification

    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterPathSpecification" ):
            listener.enterPathSpecification(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitPathSpecification" ):
            listener.exitPathSpecification(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitPathSpecification" ):
            return visitor.visitPathSpecification(self)
        else:
            return visitor.visitChildren(self)
def pathSpecification(self):
    """Parse the pathSpecification rule (rule index 160).

    A non-empty list: pathElement followed by zero or more
    (T__3 pathElement) pairs; T__3 is the anonymous separator literal,
    presumably ','.
    """
    localctx = SqlBaseParser.PathSpecificationContext(self, self._ctx, self.state)
    self.enterRule(localctx, 160, self.RULE_pathSpecification)
    self._la = 0 # Token type
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 2421
        self.pathElement()
        self.state = 2426
        self._errHandler.sync(self)
        _la = self._input.LA(1)
        while _la==SqlBaseParser.T__3:
            self.state = 2422
            self.match(SqlBaseParser.T__3)
            self.state = 2423
            self.pathElement()
            self.state = 2428
            self._errHandler.sync(self)
            _la = self._input.LA(1)

    except RecognitionException as re:
        # Standard ANTLR recovery: record, report, resynchronize.
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class PrivilegeContext(ParserRuleContext):
    """Parse-tree context for privilege: one of CREATE, SELECT, DELETE, INSERT, UPDATE."""
    __slots__ = 'parser'

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def CREATE(self):
        return self.getToken(SqlBaseParser.CREATE, 0)

    def SELECT(self):
        return self.getToken(SqlBaseParser.SELECT, 0)

    def DELETE(self):
        return self.getToken(SqlBaseParser.DELETE, 0)

    def INSERT(self):
        return self.getToken(SqlBaseParser.INSERT, 0)

    def UPDATE(self):
        return self.getToken(SqlBaseParser.UPDATE, 0)

    def getRuleIndex(self):
        return SqlBaseParser.RULE_privilege

    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterPrivilege" ):
            listener.enterPrivilege(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitPrivilege" ):
            listener.exitPrivilege(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitPrivilege" ):
            return visitor.visitPrivilege(self)
        else:
            return visitor.visitChildren(self)
def privilege(self):
    """Parse the privilege rule (rule index 162): a single keyword token
    out of CREATE | DELETE | INSERT | SELECT | UPDATE."""
    localctx = SqlBaseParser.PrivilegeContext(self, self._ctx, self.state)
    self.enterRule(localctx, 162, self.RULE_privilege)
    self._la = 0 # Token type
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 2429
        _la = self._input.LA(1)
        # Inline token-set match against the five privilege keywords.
        if not(_la==SqlBaseParser.CREATE or _la==SqlBaseParser.DELETE or _la==SqlBaseParser.INSERT or _la==SqlBaseParser.SELECT or _la==SqlBaseParser.UPDATE):
            self._errHandler.recoverInline(self)
        else:
            self._errHandler.reportMatch(self)
            self.consume()
    except RecognitionException as re:
        # Standard ANTLR recovery: record, report, resynchronize.
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class QualifiedNameContext(ParserRuleContext):
    """Parse-tree context for qualifiedName: a dotted chain of identifiers."""
    __slots__ = 'parser'

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def identifier(self, i:int=None):
        # With no index, return all identifier subtrees; otherwise the i-th.
        if i is None:
            return self.getTypedRuleContexts(SqlBaseParser.IdentifierContext)
        else:
            return self.getTypedRuleContext(SqlBaseParser.IdentifierContext,i)

    def getRuleIndex(self):
        return SqlBaseParser.RULE_qualifiedName

    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterQualifiedName" ):
            listener.enterQualifiedName(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitQualifiedName" ):
            listener.exitQualifiedName(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitQualifiedName" ):
            return visitor.visitQualifiedName(self)
        else:
            return visitor.visitChildren(self)
    def qualifiedName(self):
        """Parse ``qualifiedName``: identifier (T__0 identifier)*.

        T__0 is an implicit literal token (the name-part separator). Generated
        by ANTLR; ATN state numbers must not be edited.
        """
        localctx = SqlBaseParser.QualifiedNameContext(self, self._ctx, self.state)
        self.enterRule(localctx, 164, self.RULE_qualifiedName)
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 2431
            self.identifier()
            self.state = 2436
            self._errHandler.sync(self)
            # Adaptive prediction (decision 317) decides whether another
            # separator + identifier pair follows.
            _alt = self._interp.adaptivePredict(self._input,317,self._ctx)
            while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
                if _alt==1:
                    self.state = 2432
                    self.match(SqlBaseParser.T__0)
                    self.state = 2433
                    self.identifier()
                self.state = 2438
                self._errHandler.sync(self)
                _alt = self._interp.adaptivePredict(self._input,317,self._ctx)
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class QueryPeriodContext(ParserRuleContext):
        """Parse-tree node for ``queryPeriod``: FOR rangeType AS OF valueExpression."""
        __slots__ = 'parser'
        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser
            self.end = None # ValueExpressionContext
        def FOR(self):
            return self.getToken(SqlBaseParser.FOR, 0)
        def rangeType(self):
            return self.getTypedRuleContext(SqlBaseParser.RangeTypeContext,0)
        def AS(self):
            return self.getToken(SqlBaseParser.AS, 0)
        def OF(self):
            return self.getToken(SqlBaseParser.OF, 0)
        def valueExpression(self):
            # The expression captured in the labelled field ``end``.
            return self.getTypedRuleContext(SqlBaseParser.ValueExpressionContext,0)
        def getRuleIndex(self):
            return SqlBaseParser.RULE_queryPeriod
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterQueryPeriod" ):
                listener.enterQueryPeriod(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitQueryPeriod" ):
                listener.exitQueryPeriod(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitQueryPeriod" ):
                return visitor.visitQueryPeriod(self)
            else:
                return visitor.visitChildren(self)
    def queryPeriod(self):
        """Parse ``queryPeriod``: FOR rangeType AS OF valueExpression.

        The parsed expression is stored in ``localctx.end``. Generated by
        ANTLR; ATN state numbers must not be edited.
        """
        localctx = SqlBaseParser.QueryPeriodContext(self, self._ctx, self.state)
        self.enterRule(localctx, 166, self.RULE_queryPeriod)
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 2439
            self.match(SqlBaseParser.FOR)
            self.state = 2440
            self.rangeType()
            self.state = 2441
            self.match(SqlBaseParser.AS)
            self.state = 2442
            self.match(SqlBaseParser.OF)
            self.state = 2443
            localctx.end = self.valueExpression(0)
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
class RangeTypeContext(ParserRuleContext):
__slots__ = 'parser'
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def TIMESTAMP(self):
return self.getToken(SqlBaseParser.TIMESTAMP, 0)
def VERSION(self):
return self.getToken(SqlBaseParser.VERSION, 0)
def getRuleIndex(self):
return SqlBaseParser.RULE_rangeType
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterRangeType" ):
listener.enterRangeType(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitRangeType" ):
listener.exitRangeType(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitRangeType" ):
return visitor.visitRangeType(self)
else:
return visitor.visitChildren(self)
    def rangeType(self):
        """Parse ``rangeType``: TIMESTAMP | VERSION.

        Generated by ANTLR; ATN state numbers must not be edited.
        """
        localctx = SqlBaseParser.RangeTypeContext(self, self._ctx, self.state)
        self.enterRule(localctx, 168, self.RULE_rangeType)
        self._la = 0 # Token type
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 2445
            _la = self._input.LA(1)
            # Accept exactly one of the two keywords; otherwise recover inline.
            if not(_la==SqlBaseParser.TIMESTAMP or _la==SqlBaseParser.VERSION):
                self._errHandler.recoverInline(self)
            else:
                self._errHandler.reportMatch(self)
                self.consume()
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class GrantorContext(ParserRuleContext):
        """Base parse-tree node for the labelled alternatives of ``grantor``.

        Concrete alternatives (CurrentUserGrantor, CurrentRoleGrantor,
        SpecifiedPrincipal) re-parent themselves via ``copyFrom``.
        """
        __slots__ = 'parser'
        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser
        def getRuleIndex(self):
            return SqlBaseParser.RULE_grantor
        def copyFrom(self, ctx:ParserRuleContext):
            super().copyFrom(ctx)
    class CurrentUserGrantorContext(GrantorContext):
        """Labelled alternative of ``grantor``: the CURRENT_USER keyword."""
        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.GrantorContext
            super().__init__(parser)
            self.copyFrom(ctx)
        def CURRENT_USER(self):
            return self.getToken(SqlBaseParser.CURRENT_USER, 0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterCurrentUserGrantor" ):
                listener.enterCurrentUserGrantor(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitCurrentUserGrantor" ):
                listener.exitCurrentUserGrantor(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitCurrentUserGrantor" ):
                return visitor.visitCurrentUserGrantor(self)
            else:
                return visitor.visitChildren(self)
    class SpecifiedPrincipalContext(GrantorContext):
        """Labelled alternative of ``grantor``: an explicit principal."""
        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.GrantorContext
            super().__init__(parser)
            self.copyFrom(ctx)
        def principal(self):
            return self.getTypedRuleContext(SqlBaseParser.PrincipalContext,0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterSpecifiedPrincipal" ):
                listener.enterSpecifiedPrincipal(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitSpecifiedPrincipal" ):
                listener.exitSpecifiedPrincipal(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitSpecifiedPrincipal" ):
                return visitor.visitSpecifiedPrincipal(self)
            else:
                return visitor.visitChildren(self)
    class CurrentRoleGrantorContext(GrantorContext):
        """Labelled alternative of ``grantor``: the CURRENT_ROLE keyword."""
        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.GrantorContext
            super().__init__(parser)
            self.copyFrom(ctx)
        def CURRENT_ROLE(self):
            return self.getToken(SqlBaseParser.CURRENT_ROLE, 0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterCurrentRoleGrantor" ):
                listener.enterCurrentRoleGrantor(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitCurrentRoleGrantor" ):
                listener.exitCurrentRoleGrantor(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitCurrentRoleGrantor" ):
                return visitor.visitCurrentRoleGrantor(self)
            else:
                return visitor.visitChildren(self)
    def grantor(self):
        """Parse ``grantor``: principal | CURRENT_USER | CURRENT_ROLE.

        The alternative is chosen purely on the next token: any token that can
        start an identifier/non-reserved word selects SpecifiedPrincipal.
        Generated by ANTLR; ATN state numbers and the token-set list must not
        be edited by hand.
        """
        localctx = SqlBaseParser.GrantorContext(self, self._ctx, self.state)
        self.enterRule(localctx, 170, self.RULE_grantor)
        try:
            self.state = 2450
            self._errHandler.sync(self)
            token = self._input.LA(1)
            # Generated membership test over every token that may begin a principal.
            if token in [SqlBaseParser.ADD, SqlBaseParser.ADMIN, SqlBaseParser.AFTER, SqlBaseParser.ALL, SqlBaseParser.ANALYZE, SqlBaseParser.ANY, SqlBaseParser.ARRAY, SqlBaseParser.ASC, SqlBaseParser.AT, SqlBaseParser.AUTHORIZATION, SqlBaseParser.BERNOULLI, SqlBaseParser.CALL, SqlBaseParser.CASCADE, SqlBaseParser.CATALOGS, SqlBaseParser.COLUMN, SqlBaseParser.COLUMNS, SqlBaseParser.COMMENT, SqlBaseParser.COMMIT, SqlBaseParser.COMMITTED, SqlBaseParser.COUNT, SqlBaseParser.CURRENT, SqlBaseParser.DATA, SqlBaseParser.DATE, SqlBaseParser.DAY, SqlBaseParser.DEFINER, SqlBaseParser.DENY, SqlBaseParser.DESC, SqlBaseParser.DEFINE, SqlBaseParser.DISTRIBUTED, SqlBaseParser.DOUBLE, SqlBaseParser.EMPTY, SqlBaseParser.ERROR, SqlBaseParser.EXCLUDING, SqlBaseParser.EXPLAIN, SqlBaseParser.FETCH, SqlBaseParser.FILTER, SqlBaseParser.FINAL, SqlBaseParser.FIRST, SqlBaseParser.FOLLOWING, SqlBaseParser.FORMAT, SqlBaseParser.FUNCTIONS, SqlBaseParser.GRANT, SqlBaseParser.GRANTED, SqlBaseParser.GRANTS, SqlBaseParser.GRAPHVIZ, SqlBaseParser.GROUPS, SqlBaseParser.HOUR, SqlBaseParser.IF, SqlBaseParser.IGNORE, SqlBaseParser.INCLUDING, SqlBaseParser.INITIAL, SqlBaseParser.INPUT, SqlBaseParser.INTERVAL, SqlBaseParser.INVOKER, SqlBaseParser.IO, SqlBaseParser.ISOLATION, SqlBaseParser.JSON, SqlBaseParser.LAST, SqlBaseParser.LATERAL, SqlBaseParser.LEVEL, SqlBaseParser.LIMIT, SqlBaseParser.LOCAL, SqlBaseParser.LOGICAL, SqlBaseParser.MAP, SqlBaseParser.MATCH, SqlBaseParser.MATCHED, SqlBaseParser.MATCHES, SqlBaseParser.MATCH_RECOGNIZE, SqlBaseParser.MATERIALIZED, SqlBaseParser.MEASURES, SqlBaseParser.MERGE, SqlBaseParser.MINUTE, SqlBaseParser.MONTH, SqlBaseParser.NEXT, SqlBaseParser.NFC, SqlBaseParser.NFD, SqlBaseParser.NFKC, SqlBaseParser.NFKD, SqlBaseParser.NO, SqlBaseParser.NONE, SqlBaseParser.NULLIF, SqlBaseParser.NULLS, SqlBaseParser.OFFSET, SqlBaseParser.OMIT, SqlBaseParser.OF, SqlBaseParser.ONE, SqlBaseParser.ONLY, SqlBaseParser.OPTION, SqlBaseParser.ORDINALITY, SqlBaseParser.OUTPUT, SqlBaseParser.OVER, 
            SqlBaseParser.OVERFLOW, SqlBaseParser.PARTITION, SqlBaseParser.PARTITIONS, SqlBaseParser.PAST, SqlBaseParser.PATH, SqlBaseParser.PATTERN, SqlBaseParser.PER, SqlBaseParser.PERMUTE, SqlBaseParser.POSITION, SqlBaseParser.PRECEDING, SqlBaseParser.PRECISION, SqlBaseParser.PRIVILEGES, SqlBaseParser.PROPERTIES, SqlBaseParser.RANGE, SqlBaseParser.READ, SqlBaseParser.REFRESH, SqlBaseParser.RENAME, SqlBaseParser.REPEATABLE, SqlBaseParser.REPLACE, SqlBaseParser.RESET, SqlBaseParser.RESPECT, SqlBaseParser.RESTRICT, SqlBaseParser.REVOKE, SqlBaseParser.ROLE, SqlBaseParser.ROLES, SqlBaseParser.ROLLBACK, SqlBaseParser.ROW, SqlBaseParser.ROWS, SqlBaseParser.RUNNING, SqlBaseParser.SCHEMA, SqlBaseParser.SCHEMAS, SqlBaseParser.SECOND, SqlBaseParser.SECURITY, SqlBaseParser.SEEK, SqlBaseParser.SERIALIZABLE, SqlBaseParser.SESSION, SqlBaseParser.SET, SqlBaseParser.SETS, SqlBaseParser.SHOW, SqlBaseParser.SOME, SqlBaseParser.START, SqlBaseParser.STATS, SqlBaseParser.SUBSET, SqlBaseParser.SUBSTRING, SqlBaseParser.SYSTEM, SqlBaseParser.TABLES, SqlBaseParser.TABLESAMPLE, SqlBaseParser.TEXT, SqlBaseParser.TIES, SqlBaseParser.TIME, SqlBaseParser.TIMESTAMP, SqlBaseParser.TO, SqlBaseParser.TRANSACTION, SqlBaseParser.TRUNCATE, SqlBaseParser.TRY_CAST, SqlBaseParser.TYPE, SqlBaseParser.UNBOUNDED, SqlBaseParser.UNCOMMITTED, SqlBaseParser.UNMATCHED, SqlBaseParser.UPDATE, SqlBaseParser.USE, SqlBaseParser.USER, SqlBaseParser.VALIDATE, SqlBaseParser.VERBOSE, SqlBaseParser.VERSION, SqlBaseParser.VIEW, SqlBaseParser.WINDOW, SqlBaseParser.WITHIN, SqlBaseParser.WITHOUT, SqlBaseParser.WORK, SqlBaseParser.WRITE, SqlBaseParser.YEAR, SqlBaseParser.ZONE, SqlBaseParser.IDENTIFIER, SqlBaseParser.DIGIT_IDENTIFIER, SqlBaseParser.QUOTED_IDENTIFIER, SqlBaseParser.BACKQUOTED_IDENTIFIER]:
                localctx = SqlBaseParser.SpecifiedPrincipalContext(self, localctx)
                self.enterOuterAlt(localctx, 1)
                self.state = 2447
                self.principal()
            elif token in [SqlBaseParser.CURRENT_USER]:
                localctx = SqlBaseParser.CurrentUserGrantorContext(self, localctx)
                self.enterOuterAlt(localctx, 2)
                self.state = 2448
                self.match(SqlBaseParser.CURRENT_USER)
            elif token in [SqlBaseParser.CURRENT_ROLE]:
                localctx = SqlBaseParser.CurrentRoleGrantorContext(self, localctx)
                self.enterOuterAlt(localctx, 3)
                self.state = 2449
                self.match(SqlBaseParser.CURRENT_ROLE)
            else:
                raise NoViableAltException(self)
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class PrincipalContext(ParserRuleContext):
        """Base parse-tree node for the labelled alternatives of ``principal``.

        Concrete alternatives (UnspecifiedPrincipal, UserPrincipal,
        RolePrincipal) re-parent themselves via ``copyFrom``.
        """
        __slots__ = 'parser'
        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser
        def getRuleIndex(self):
            return SqlBaseParser.RULE_principal
        def copyFrom(self, ctx:ParserRuleContext):
            super().copyFrom(ctx)
    class UnspecifiedPrincipalContext(PrincipalContext):
        """Labelled alternative of ``principal``: a bare identifier."""
        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.PrincipalContext
            super().__init__(parser)
            self.copyFrom(ctx)
        def identifier(self):
            return self.getTypedRuleContext(SqlBaseParser.IdentifierContext,0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterUnspecifiedPrincipal" ):
                listener.enterUnspecifiedPrincipal(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitUnspecifiedPrincipal" ):
                listener.exitUnspecifiedPrincipal(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitUnspecifiedPrincipal" ):
                return visitor.visitUnspecifiedPrincipal(self)
            else:
                return visitor.visitChildren(self)
    class UserPrincipalContext(PrincipalContext):
        """Labelled alternative of ``principal``: USER identifier."""
        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.PrincipalContext
            super().__init__(parser)
            self.copyFrom(ctx)
        def USER(self):
            return self.getToken(SqlBaseParser.USER, 0)
        def identifier(self):
            return self.getTypedRuleContext(SqlBaseParser.IdentifierContext,0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterUserPrincipal" ):
                listener.enterUserPrincipal(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitUserPrincipal" ):
                listener.exitUserPrincipal(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitUserPrincipal" ):
                return visitor.visitUserPrincipal(self)
            else:
                return visitor.visitChildren(self)
    class RolePrincipalContext(PrincipalContext):
        """Labelled alternative of ``principal``: ROLE identifier."""
        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.PrincipalContext
            super().__init__(parser)
            self.copyFrom(ctx)
        def ROLE(self):
            return self.getToken(SqlBaseParser.ROLE, 0)
        def identifier(self):
            return self.getTypedRuleContext(SqlBaseParser.IdentifierContext,0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterRolePrincipal" ):
                listener.enterRolePrincipal(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitRolePrincipal" ):
                listener.exitRolePrincipal(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitRolePrincipal" ):
                return visitor.visitRolePrincipal(self)
            else:
                return visitor.visitChildren(self)
    def principal(self):
        """Parse ``principal``: identifier | USER identifier | ROLE identifier.

        Generated by ANTLR; decision 319 of the ATN selects the alternative.
        """
        localctx = SqlBaseParser.PrincipalContext(self, self._ctx, self.state)
        self.enterRule(localctx, 172, self.RULE_principal)
        try:
            self.state = 2457
            self._errHandler.sync(self)
            # Adaptive prediction distinguishes a bare identifier from the
            # USER/ROLE prefixed forms (USER and ROLE are also identifiers).
            la_ = self._interp.adaptivePredict(self._input,319,self._ctx)
            if la_ == 1:
                localctx = SqlBaseParser.UnspecifiedPrincipalContext(self, localctx)
                self.enterOuterAlt(localctx, 1)
                self.state = 2452
                self.identifier()
            elif la_ == 2:
                localctx = SqlBaseParser.UserPrincipalContext(self, localctx)
                self.enterOuterAlt(localctx, 2)
                self.state = 2453
                self.match(SqlBaseParser.USER)
                self.state = 2454
                self.identifier()
            elif la_ == 3:
                localctx = SqlBaseParser.RolePrincipalContext(self, localctx)
                self.enterOuterAlt(localctx, 3)
                self.state = 2455
                self.match(SqlBaseParser.ROLE)
                self.state = 2456
                self.identifier()
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
class RolesContext(ParserRuleContext):
__slots__ = 'parser'
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def identifier(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(SqlBaseParser.IdentifierContext)
else:
return self.getTypedRuleContext(SqlBaseParser.IdentifierContext,i)
def getRuleIndex(self):
return SqlBaseParser.RULE_roles
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterRoles" ):
listener.enterRoles(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitRoles" ):
listener.exitRoles(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitRoles" ):
return visitor.visitRoles(self)
else:
return visitor.visitChildren(self)
    def roles(self):
        """Parse ``roles``: identifier (T__3 identifier)*.

        T__3 is an implicit literal token (the list separator). Generated by
        ANTLR; ATN state numbers must not be edited.
        """
        localctx = SqlBaseParser.RolesContext(self, self._ctx, self.state)
        self.enterRule(localctx, 174, self.RULE_roles)
        self._la = 0 # Token type
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 2459
            self.identifier()
            self.state = 2464
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            # Keep consuming separator + identifier pairs while present.
            while _la==SqlBaseParser.T__3:
                self.state = 2460
                self.match(SqlBaseParser.T__3)
                self.state = 2461
                self.identifier()
                self.state = 2466
                self._errHandler.sync(self)
                _la = self._input.LA(1)
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class IdentifierContext(ParserRuleContext):
        """Base parse-tree node for the labelled alternatives of ``identifier``.

        Concrete alternatives (Unquoted, Quoted, BackQuoted, Digit)
        re-parent themselves via ``copyFrom``.
        """
        __slots__ = 'parser'
        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser
        def getRuleIndex(self):
            return SqlBaseParser.RULE_identifier
        def copyFrom(self, ctx:ParserRuleContext):
            super().copyFrom(ctx)
    class BackQuotedIdentifierContext(IdentifierContext):
        """Labelled alternative of ``identifier``: a backquoted identifier token."""
        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.IdentifierContext
            super().__init__(parser)
            self.copyFrom(ctx)
        def BACKQUOTED_IDENTIFIER(self):
            return self.getToken(SqlBaseParser.BACKQUOTED_IDENTIFIER, 0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterBackQuotedIdentifier" ):
                listener.enterBackQuotedIdentifier(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitBackQuotedIdentifier" ):
                listener.exitBackQuotedIdentifier(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitBackQuotedIdentifier" ):
                return visitor.visitBackQuotedIdentifier(self)
            else:
                return visitor.visitChildren(self)
    class QuotedIdentifierContext(IdentifierContext):
        """Labelled alternative of ``identifier``: a quoted identifier token."""
        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.IdentifierContext
            super().__init__(parser)
            self.copyFrom(ctx)
        def QUOTED_IDENTIFIER(self):
            return self.getToken(SqlBaseParser.QUOTED_IDENTIFIER, 0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterQuotedIdentifier" ):
                listener.enterQuotedIdentifier(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitQuotedIdentifier" ):
                listener.exitQuotedIdentifier(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitQuotedIdentifier" ):
                return visitor.visitQuotedIdentifier(self)
            else:
                return visitor.visitChildren(self)
    class DigitIdentifierContext(IdentifierContext):
        """Labelled alternative of ``identifier``: a digit-prefixed identifier token."""
        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.IdentifierContext
            super().__init__(parser)
            self.copyFrom(ctx)
        def DIGIT_IDENTIFIER(self):
            return self.getToken(SqlBaseParser.DIGIT_IDENTIFIER, 0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterDigitIdentifier" ):
                listener.enterDigitIdentifier(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitDigitIdentifier" ):
                listener.exitDigitIdentifier(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitDigitIdentifier" ):
                return visitor.visitDigitIdentifier(self)
            else:
                return visitor.visitChildren(self)
    class UnquotedIdentifierContext(IdentifierContext):
        """Labelled alternative of ``identifier``: a plain IDENTIFIER token or
        a non-reserved keyword used as an identifier.
        """
        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.IdentifierContext
            super().__init__(parser)
            self.copyFrom(ctx)
        def IDENTIFIER(self):
            return self.getToken(SqlBaseParser.IDENTIFIER, 0)
        def nonReserved(self):
            return self.getTypedRuleContext(SqlBaseParser.NonReservedContext,0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterUnquotedIdentifier" ):
                listener.enterUnquotedIdentifier(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitUnquotedIdentifier" ):
                listener.exitUnquotedIdentifier(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitUnquotedIdentifier" ):
                return visitor.visitUnquotedIdentifier(self)
            else:
                return visitor.visitChildren(self)
    def identifier(self):
        """Parse ``identifier``: IDENTIFIER | QUOTED_IDENTIFIER | nonReserved
        keyword | BACKQUOTED_IDENTIFIER | DIGIT_IDENTIFIER.

        The alternative is chosen on the next token alone; both IDENTIFIER and
        any non-reserved keyword map to UnquotedIdentifier. Generated by ANTLR;
        ATN state numbers and the token-set list must not be edited by hand.
        """
        localctx = SqlBaseParser.IdentifierContext(self, self._ctx, self.state)
        self.enterRule(localctx, 176, self.RULE_identifier)
        try:
            self.state = 2472
            self._errHandler.sync(self)
            token = self._input.LA(1)
            if token in [SqlBaseParser.IDENTIFIER]:
                localctx = SqlBaseParser.UnquotedIdentifierContext(self, localctx)
                self.enterOuterAlt(localctx, 1)
                self.state = 2467
                self.match(SqlBaseParser.IDENTIFIER)
            elif token in [SqlBaseParser.QUOTED_IDENTIFIER]:
                localctx = SqlBaseParser.QuotedIdentifierContext(self, localctx)
                self.enterOuterAlt(localctx, 2)
                self.state = 2468
                self.match(SqlBaseParser.QUOTED_IDENTIFIER)
            # Generated membership test over every non-reserved keyword token.
            elif token in [SqlBaseParser.ADD, SqlBaseParser.ADMIN, SqlBaseParser.AFTER, SqlBaseParser.ALL, SqlBaseParser.ANALYZE, SqlBaseParser.ANY, SqlBaseParser.ARRAY, SqlBaseParser.ASC, SqlBaseParser.AT, SqlBaseParser.AUTHORIZATION, SqlBaseParser.BERNOULLI, SqlBaseParser.CALL, SqlBaseParser.CASCADE, SqlBaseParser.CATALOGS, SqlBaseParser.COLUMN, SqlBaseParser.COLUMNS, SqlBaseParser.COMMENT, SqlBaseParser.COMMIT, SqlBaseParser.COMMITTED, SqlBaseParser.COUNT, SqlBaseParser.CURRENT, SqlBaseParser.DATA, SqlBaseParser.DATE, SqlBaseParser.DAY, SqlBaseParser.DEFINER, SqlBaseParser.DENY, SqlBaseParser.DESC, SqlBaseParser.DEFINE, SqlBaseParser.DISTRIBUTED, SqlBaseParser.DOUBLE, SqlBaseParser.EMPTY, SqlBaseParser.ERROR, SqlBaseParser.EXCLUDING, SqlBaseParser.EXPLAIN, SqlBaseParser.FETCH, SqlBaseParser.FILTER, SqlBaseParser.FINAL, SqlBaseParser.FIRST, SqlBaseParser.FOLLOWING, SqlBaseParser.FORMAT, SqlBaseParser.FUNCTIONS, SqlBaseParser.GRANT, SqlBaseParser.GRANTED, SqlBaseParser.GRANTS, SqlBaseParser.GRAPHVIZ, SqlBaseParser.GROUPS, SqlBaseParser.HOUR, SqlBaseParser.IF, SqlBaseParser.IGNORE, SqlBaseParser.INCLUDING, SqlBaseParser.INITIAL, SqlBaseParser.INPUT, SqlBaseParser.INTERVAL, SqlBaseParser.INVOKER, SqlBaseParser.IO, SqlBaseParser.ISOLATION, SqlBaseParser.JSON, SqlBaseParser.LAST, SqlBaseParser.LATERAL, SqlBaseParser.LEVEL, SqlBaseParser.LIMIT, SqlBaseParser.LOCAL, SqlBaseParser.LOGICAL, SqlBaseParser.MAP, SqlBaseParser.MATCH, SqlBaseParser.MATCHED, SqlBaseParser.MATCHES, SqlBaseParser.MATCH_RECOGNIZE, SqlBaseParser.MATERIALIZED, SqlBaseParser.MEASURES, SqlBaseParser.MERGE, SqlBaseParser.MINUTE, SqlBaseParser.MONTH, SqlBaseParser.NEXT, SqlBaseParser.NFC, SqlBaseParser.NFD, SqlBaseParser.NFKC, SqlBaseParser.NFKD, SqlBaseParser.NO, SqlBaseParser.NONE, SqlBaseParser.NULLIF, SqlBaseParser.NULLS, SqlBaseParser.OFFSET, SqlBaseParser.OMIT, SqlBaseParser.OF, SqlBaseParser.ONE, SqlBaseParser.ONLY, SqlBaseParser.OPTION, SqlBaseParser.ORDINALITY, SqlBaseParser.OUTPUT, SqlBaseParser.OVER, 
            SqlBaseParser.OVERFLOW, SqlBaseParser.PARTITION, SqlBaseParser.PARTITIONS, SqlBaseParser.PAST, SqlBaseParser.PATH, SqlBaseParser.PATTERN, SqlBaseParser.PER, SqlBaseParser.PERMUTE, SqlBaseParser.POSITION, SqlBaseParser.PRECEDING, SqlBaseParser.PRECISION, SqlBaseParser.PRIVILEGES, SqlBaseParser.PROPERTIES, SqlBaseParser.RANGE, SqlBaseParser.READ, SqlBaseParser.REFRESH, SqlBaseParser.RENAME, SqlBaseParser.REPEATABLE, SqlBaseParser.REPLACE, SqlBaseParser.RESET, SqlBaseParser.RESPECT, SqlBaseParser.RESTRICT, SqlBaseParser.REVOKE, SqlBaseParser.ROLE, SqlBaseParser.ROLES, SqlBaseParser.ROLLBACK, SqlBaseParser.ROW, SqlBaseParser.ROWS, SqlBaseParser.RUNNING, SqlBaseParser.SCHEMA, SqlBaseParser.SCHEMAS, SqlBaseParser.SECOND, SqlBaseParser.SECURITY, SqlBaseParser.SEEK, SqlBaseParser.SERIALIZABLE, SqlBaseParser.SESSION, SqlBaseParser.SET, SqlBaseParser.SETS, SqlBaseParser.SHOW, SqlBaseParser.SOME, SqlBaseParser.START, SqlBaseParser.STATS, SqlBaseParser.SUBSET, SqlBaseParser.SUBSTRING, SqlBaseParser.SYSTEM, SqlBaseParser.TABLES, SqlBaseParser.TABLESAMPLE, SqlBaseParser.TEXT, SqlBaseParser.TIES, SqlBaseParser.TIME, SqlBaseParser.TIMESTAMP, SqlBaseParser.TO, SqlBaseParser.TRANSACTION, SqlBaseParser.TRUNCATE, SqlBaseParser.TRY_CAST, SqlBaseParser.TYPE, SqlBaseParser.UNBOUNDED, SqlBaseParser.UNCOMMITTED, SqlBaseParser.UNMATCHED, SqlBaseParser.UPDATE, SqlBaseParser.USE, SqlBaseParser.USER, SqlBaseParser.VALIDATE, SqlBaseParser.VERBOSE, SqlBaseParser.VERSION, SqlBaseParser.VIEW, SqlBaseParser.WINDOW, SqlBaseParser.WITHIN, SqlBaseParser.WITHOUT, SqlBaseParser.WORK, SqlBaseParser.WRITE, SqlBaseParser.YEAR, SqlBaseParser.ZONE]:
                localctx = SqlBaseParser.UnquotedIdentifierContext(self, localctx)
                self.enterOuterAlt(localctx, 3)
                self.state = 2469
                self.nonReserved()
            elif token in [SqlBaseParser.BACKQUOTED_IDENTIFIER]:
                localctx = SqlBaseParser.BackQuotedIdentifierContext(self, localctx)
                self.enterOuterAlt(localctx, 4)
                self.state = 2470
                self.match(SqlBaseParser.BACKQUOTED_IDENTIFIER)
            elif token in [SqlBaseParser.DIGIT_IDENTIFIER]:
                localctx = SqlBaseParser.DigitIdentifierContext(self, localctx)
                self.enterOuterAlt(localctx, 5)
                self.state = 2471
                self.match(SqlBaseParser.DIGIT_IDENTIFIER)
            else:
                raise NoViableAltException(self)
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class NumberContext(ParserRuleContext):
        """Base parse-tree node for the labelled alternatives of ``number``.

        Concrete alternatives (DecimalLiteral, DoubleLiteral, IntegerLiteral)
        re-parent themselves via ``copyFrom``.
        """
        __slots__ = 'parser'
        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser
        def getRuleIndex(self):
            return SqlBaseParser.RULE_number
        def copyFrom(self, ctx:ParserRuleContext):
            super().copyFrom(ctx)
    class DecimalLiteralContext(NumberContext):
        """Labelled alternative of ``number``: optional MINUS then DECIMAL_VALUE."""
        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.NumberContext
            super().__init__(parser)
            self.copyFrom(ctx)
        def DECIMAL_VALUE(self):
            return self.getToken(SqlBaseParser.DECIMAL_VALUE, 0)
        def MINUS(self):
            # Optional sign token; None when the literal is unsigned.
            return self.getToken(SqlBaseParser.MINUS, 0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterDecimalLiteral" ):
                listener.enterDecimalLiteral(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitDecimalLiteral" ):
                listener.exitDecimalLiteral(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitDecimalLiteral" ):
                return visitor.visitDecimalLiteral(self)
            else:
                return visitor.visitChildren(self)
    class DoubleLiteralContext(NumberContext):
        """Labelled alternative of ``number``: optional MINUS then DOUBLE_VALUE."""
        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.NumberContext
            super().__init__(parser)
            self.copyFrom(ctx)
        def DOUBLE_VALUE(self):
            return self.getToken(SqlBaseParser.DOUBLE_VALUE, 0)
        def MINUS(self):
            # Optional sign token; None when the literal is unsigned.
            return self.getToken(SqlBaseParser.MINUS, 0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterDoubleLiteral" ):
                listener.enterDoubleLiteral(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitDoubleLiteral" ):
                listener.exitDoubleLiteral(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitDoubleLiteral" ):
                return visitor.visitDoubleLiteral(self)
            else:
                return visitor.visitChildren(self)
    class IntegerLiteralContext(NumberContext):
        """Labelled alternative of ``number``: optional MINUS then INTEGER_VALUE."""
        def __init__(self, parser, ctx:ParserRuleContext): # actually a SqlBaseParser.NumberContext
            super().__init__(parser)
            self.copyFrom(ctx)
        def INTEGER_VALUE(self):
            return self.getToken(SqlBaseParser.INTEGER_VALUE, 0)
        def MINUS(self):
            # Optional sign token; None when the literal is unsigned.
            return self.getToken(SqlBaseParser.MINUS, 0)
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterIntegerLiteral" ):
                listener.enterIntegerLiteral(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitIntegerLiteral" ):
                listener.exitIntegerLiteral(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitIntegerLiteral" ):
                return visitor.visitIntegerLiteral(self)
            else:
                return visitor.visitChildren(self)
    def number(self):
        """Parse ``number``: (MINUS)? (DECIMAL_VALUE | DOUBLE_VALUE | INTEGER_VALUE).

        Generated by ANTLR; decision 325 of the ATN selects the literal kind,
        and each alternative independently consumes an optional leading minus.
        """
        localctx = SqlBaseParser.NumberContext(self, self._ctx, self.state)
        self.enterRule(localctx, 178, self.RULE_number)
        self._la = 0 # Token type
        try:
            self.state = 2486
            self._errHandler.sync(self)
            la_ = self._interp.adaptivePredict(self._input,325,self._ctx)
            if la_ == 1:
                localctx = SqlBaseParser.DecimalLiteralContext(self, localctx)
                self.enterOuterAlt(localctx, 1)
                self.state = 2475
                self._errHandler.sync(self)
                _la = self._input.LA(1)
                # Optional leading minus sign.
                if _la==SqlBaseParser.MINUS:
                    self.state = 2474
                    self.match(SqlBaseParser.MINUS)
                self.state = 2477
                self.match(SqlBaseParser.DECIMAL_VALUE)
            elif la_ == 2:
                localctx = SqlBaseParser.DoubleLiteralContext(self, localctx)
                self.enterOuterAlt(localctx, 2)
                self.state = 2479
                self._errHandler.sync(self)
                _la = self._input.LA(1)
                # Optional leading minus sign.
                if _la==SqlBaseParser.MINUS:
                    self.state = 2478
                    self.match(SqlBaseParser.MINUS)
                self.state = 2481
                self.match(SqlBaseParser.DOUBLE_VALUE)
            elif la_ == 3:
                localctx = SqlBaseParser.IntegerLiteralContext(self, localctx)
                self.enterOuterAlt(localctx, 3)
                self.state = 2483
                self._errHandler.sync(self)
                _la = self._input.LA(1)
                # Optional leading minus sign.
                if _la==SqlBaseParser.MINUS:
                    self.state = 2482
                    self.match(SqlBaseParser.MINUS)
                self.state = 2485
                self.match(SqlBaseParser.INTEGER_VALUE)
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
class NonReservedContext(ParserRuleContext):
__slots__ = 'parser'
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def ADD(self):
return self.getToken(SqlBaseParser.ADD, 0)
def ADMIN(self):
return self.getToken(SqlBaseParser.ADMIN, 0)
def AFTER(self):
return self.getToken(SqlBaseParser.AFTER, 0)
def ALL(self):
return self.getToken(SqlBaseParser.ALL, 0)
def ANALYZE(self):
return self.getToken(SqlBaseParser.ANALYZE, 0)
def ANY(self):
return self.getToken(SqlBaseParser.ANY, 0)
def ARRAY(self):
return self.getToken(SqlBaseParser.ARRAY, 0)
def ASC(self):
return self.getToken(SqlBaseParser.ASC, 0)
def AT(self):
return self.getToken(SqlBaseParser.AT, 0)
def AUTHORIZATION(self):
return self.getToken(SqlBaseParser.AUTHORIZATION, 0)
def BERNOULLI(self):
return self.getToken(SqlBaseParser.BERNOULLI, 0)
def CALL(self):
return self.getToken(SqlBaseParser.CALL, 0)
def CASCADE(self):
return self.getToken(SqlBaseParser.CASCADE, 0)
def CATALOGS(self):
return self.getToken(SqlBaseParser.CATALOGS, 0)
def COLUMN(self):
return self.getToken(SqlBaseParser.COLUMN, 0)
def COLUMNS(self):
return self.getToken(SqlBaseParser.COLUMNS, 0)
def COMMENT(self):
return self.getToken(SqlBaseParser.COMMENT, 0)
def COMMIT(self):
return self.getToken(SqlBaseParser.COMMIT, 0)
def COMMITTED(self):
return self.getToken(SqlBaseParser.COMMITTED, 0)
def COUNT(self):
return self.getToken(SqlBaseParser.COUNT, 0)
def CURRENT(self):
return self.getToken(SqlBaseParser.CURRENT, 0)
def DATA(self):
return self.getToken(SqlBaseParser.DATA, 0)
def DATE(self):
return self.getToken(SqlBaseParser.DATE, 0)
def DAY(self):
return self.getToken(SqlBaseParser.DAY, 0)
def DEFINE(self):
return self.getToken(SqlBaseParser.DEFINE, 0)
def DEFINER(self):
return self.getToken(SqlBaseParser.DEFINER, 0)
def DESC(self):
return self.getToken(SqlBaseParser.DESC, 0)
def DISTRIBUTED(self):
return self.getToken(SqlBaseParser.DISTRIBUTED, 0)
def DOUBLE(self):
return self.getToken(SqlBaseParser.DOUBLE, 0)
def EMPTY(self):
return self.getToken(SqlBaseParser.EMPTY, 0)
def ERROR(self):
return self.getToken(SqlBaseParser.ERROR, 0)
def EXCLUDING(self):
return self.getToken(SqlBaseParser.EXCLUDING, 0)
def EXPLAIN(self):
return self.getToken(SqlBaseParser.EXPLAIN, 0)
def FETCH(self):
return self.getToken(SqlBaseParser.FETCH, 0)
def FILTER(self):
return self.getToken(SqlBaseParser.FILTER, 0)
def FINAL(self):
return self.getToken(SqlBaseParser.FINAL, 0)
def FIRST(self):
return self.getToken(SqlBaseParser.FIRST, 0)
def FOLLOWING(self):
return self.getToken(SqlBaseParser.FOLLOWING, 0)
def FORMAT(self):
return self.getToken(SqlBaseParser.FORMAT, 0)
def FUNCTIONS(self):
return self.getToken(SqlBaseParser.FUNCTIONS, 0)
def GRANT(self):
return self.getToken(SqlBaseParser.GRANT, 0)
def DENY(self):
return self.getToken(SqlBaseParser.DENY, 0)
def GRANTED(self):
return self.getToken(SqlBaseParser.GRANTED, 0)
def GRANTS(self):
return self.getToken(SqlBaseParser.GRANTS, 0)
def GRAPHVIZ(self):
return self.getToken(SqlBaseParser.GRAPHVIZ, 0)
def GROUPS(self):
return self.getToken(SqlBaseParser.GROUPS, 0)
def HOUR(self):
return self.getToken(SqlBaseParser.HOUR, 0)
def IF(self):
return self.getToken(SqlBaseParser.IF, 0)
def IGNORE(self):
return self.getToken(SqlBaseParser.IGNORE, 0)
def INCLUDING(self):
return self.getToken(SqlBaseParser.INCLUDING, 0)
def INITIAL(self):
return self.getToken(SqlBaseParser.INITIAL, 0)
def INPUT(self):
return self.getToken(SqlBaseParser.INPUT, 0)
def INTERVAL(self):
return self.getToken(SqlBaseParser.INTERVAL, 0)
def INVOKER(self):
return self.getToken(SqlBaseParser.INVOKER, 0)
def IO(self):
return self.getToken(SqlBaseParser.IO, 0)
def ISOLATION(self):
return self.getToken(SqlBaseParser.ISOLATION, 0)
def JSON(self):
return self.getToken(SqlBaseParser.JSON, 0)
def LAST(self):
return self.getToken(SqlBaseParser.LAST, 0)
def LATERAL(self):
return self.getToken(SqlBaseParser.LATERAL, 0)
def LEVEL(self):
return self.getToken(SqlBaseParser.LEVEL, 0)
def LIMIT(self):
return self.getToken(SqlBaseParser.LIMIT, 0)
def LOCAL(self):
return self.getToken(SqlBaseParser.LOCAL, 0)
def LOGICAL(self):
return self.getToken(SqlBaseParser.LOGICAL, 0)
def MAP(self):
return self.getToken(SqlBaseParser.MAP, 0)
def MATCH(self):
return self.getToken(SqlBaseParser.MATCH, 0)
def MATCHED(self):
return self.getToken(SqlBaseParser.MATCHED, 0)
def MATCHES(self):
return self.getToken(SqlBaseParser.MATCHES, 0)
def MATCH_RECOGNIZE(self):
return self.getToken(SqlBaseParser.MATCH_RECOGNIZE, 0)
def MATERIALIZED(self):
return self.getToken(SqlBaseParser.MATERIALIZED, 0)
def MEASURES(self):
return self.getToken(SqlBaseParser.MEASURES, 0)
def MERGE(self):
return self.getToken(SqlBaseParser.MERGE, 0)
def MINUTE(self):
return self.getToken(SqlBaseParser.MINUTE, 0)
def MONTH(self):
return self.getToken(SqlBaseParser.MONTH, 0)
def NEXT(self):
return self.getToken(SqlBaseParser.NEXT, 0)
def NFC(self):
return self.getToken(SqlBaseParser.NFC, 0)
def NFD(self):
return self.getToken(SqlBaseParser.NFD, 0)
def NFKC(self):
return self.getToken(SqlBaseParser.NFKC, 0)
def NFKD(self):
return self.getToken(SqlBaseParser.NFKD, 0)
def NO(self):
return self.getToken(SqlBaseParser.NO, 0)
def NONE(self):
return self.getToken(SqlBaseParser.NONE, 0)
def NULLIF(self):
return self.getToken(SqlBaseParser.NULLIF, 0)
def NULLS(self):
return self.getToken(SqlBaseParser.NULLS, 0)
def OF(self):
return self.getToken(SqlBaseParser.OF, 0)
def OFFSET(self):
return self.getToken(SqlBaseParser.OFFSET, 0)
def OMIT(self):
return self.getToken(SqlBaseParser.OMIT, 0)
def ONE(self):
return self.getToken(SqlBaseParser.ONE, 0)
def ONLY(self):
return self.getToken(SqlBaseParser.ONLY, 0)
def OPTION(self):
return self.getToken(SqlBaseParser.OPTION, 0)
def ORDINALITY(self):
return self.getToken(SqlBaseParser.ORDINALITY, 0)
def OUTPUT(self):
return self.getToken(SqlBaseParser.OUTPUT, 0)
def OVER(self):
return self.getToken(SqlBaseParser.OVER, 0)
def OVERFLOW(self):
return self.getToken(SqlBaseParser.OVERFLOW, 0)
def PARTITION(self):
return self.getToken(SqlBaseParser.PARTITION, 0)
def PARTITIONS(self):
return self.getToken(SqlBaseParser.PARTITIONS, 0)
def PAST(self):
return self.getToken(SqlBaseParser.PAST, 0)
def PATH(self):
return self.getToken(SqlBaseParser.PATH, 0)
def PATTERN(self):
return self.getToken(SqlBaseParser.PATTERN, 0)
def PER(self):
return self.getToken(SqlBaseParser.PER, 0)
def PERMUTE(self):
return self.getToken(SqlBaseParser.PERMUTE, 0)
def POSITION(self):
return self.getToken(SqlBaseParser.POSITION, 0)
def PRECEDING(self):
return self.getToken(SqlBaseParser.PRECEDING, 0)
def PRECISION(self):
return self.getToken(SqlBaseParser.PRECISION, 0)
def PRIVILEGES(self):
return self.getToken(SqlBaseParser.PRIVILEGES, 0)
def PROPERTIES(self):
return self.getToken(SqlBaseParser.PROPERTIES, 0)
def RANGE(self):
return self.getToken(SqlBaseParser.RANGE, 0)
def READ(self):
return self.getToken(SqlBaseParser.READ, 0)
def REFRESH(self):
return self.getToken(SqlBaseParser.REFRESH, 0)
def RENAME(self):
return self.getToken(SqlBaseParser.RENAME, 0)
def REPEATABLE(self):
return self.getToken(SqlBaseParser.REPEATABLE, 0)
def REPLACE(self):
return self.getToken(SqlBaseParser.REPLACE, 0)
def RESET(self):
return self.getToken(SqlBaseParser.RESET, 0)
def RESPECT(self):
return self.getToken(SqlBaseParser.RESPECT, 0)
def RESTRICT(self):
return self.getToken(SqlBaseParser.RESTRICT, 0)
def REVOKE(self):
return self.getToken(SqlBaseParser.REVOKE, 0)
def ROLE(self):
return self.getToken(SqlBaseParser.ROLE, 0)
def ROLES(self):
return self.getToken(SqlBaseParser.ROLES, 0)
def ROLLBACK(self):
return self.getToken(SqlBaseParser.ROLLBACK, 0)
def ROW(self):
return self.getToken(SqlBaseParser.ROW, 0)
def ROWS(self):
return self.getToken(SqlBaseParser.ROWS, 0)
def RUNNING(self):
return self.getToken(SqlBaseParser.RUNNING, 0)
def SCHEMA(self):
return self.getToken(SqlBaseParser.SCHEMA, 0)
def SCHEMAS(self):
return self.getToken(SqlBaseParser.SCHEMAS, 0)
def SECOND(self):
return self.getToken(SqlBaseParser.SECOND, 0)
def SECURITY(self):
return self.getToken(SqlBaseParser.SECURITY, 0)
def SEEK(self):
return self.getToken(SqlBaseParser.SEEK, 0)
def SERIALIZABLE(self):
return self.getToken(SqlBaseParser.SERIALIZABLE, 0)
def SESSION(self):
return self.getToken(SqlBaseParser.SESSION, 0)
def SET(self):
return self.getToken(SqlBaseParser.SET, 0)
def SETS(self):
return self.getToken(SqlBaseParser.SETS, 0)
def SHOW(self):
return self.getToken(SqlBaseParser.SHOW, 0)
def SOME(self):
return self.getToken(SqlBaseParser.SOME, 0)
def START(self):
return self.getToken(SqlBaseParser.START, 0)
def STATS(self):
return self.getToken(SqlBaseParser.STATS, 0)
def SUBSET(self):
return self.getToken(SqlBaseParser.SUBSET, 0)
def SUBSTRING(self):
return self.getToken(SqlBaseParser.SUBSTRING, 0)
def SYSTEM(self):
return self.getToken(SqlBaseParser.SYSTEM, 0)
def TABLES(self):
return self.getToken(SqlBaseParser.TABLES, 0)
def TABLESAMPLE(self):
return self.getToken(SqlBaseParser.TABLESAMPLE, 0)
def TEXT(self):
return self.getToken(SqlBaseParser.TEXT, 0)
def TIES(self):
return self.getToken(SqlBaseParser.TIES, 0)
def TIME(self):
return self.getToken(SqlBaseParser.TIME, 0)
def TIMESTAMP(self):
return self.getToken(SqlBaseParser.TIMESTAMP, 0)
def TO(self):
return self.getToken(SqlBaseParser.TO, 0)
def TRANSACTION(self):
return self.getToken(SqlBaseParser.TRANSACTION, 0)
def TRUNCATE(self):
return self.getToken(SqlBaseParser.TRUNCATE, 0)
def TRY_CAST(self):
return self.getToken(SqlBaseParser.TRY_CAST, 0)
def TYPE(self):
return self.getToken(SqlBaseParser.TYPE, 0)
def UNBOUNDED(self):
return self.getToken(SqlBaseParser.UNBOUNDED, 0)
def UNCOMMITTED(self):
return self.getToken(SqlBaseParser.UNCOMMITTED, 0)
def UNMATCHED(self):
return self.getToken(SqlBaseParser.UNMATCHED, 0)
def UPDATE(self):
return self.getToken(SqlBaseParser.UPDATE, 0)
def USE(self):
return self.getToken(SqlBaseParser.USE, 0)
def USER(self):
return self.getToken(SqlBaseParser.USER, 0)
def VALIDATE(self):
return self.getToken(SqlBaseParser.VALIDATE, 0)
def VERBOSE(self):
return self.getToken(SqlBaseParser.VERBOSE, 0)
def VERSION(self):
return self.getToken(SqlBaseParser.VERSION, 0)
def VIEW(self):
return self.getToken(SqlBaseParser.VIEW, 0)
def WINDOW(self):
return self.getToken(SqlBaseParser.WINDOW, 0)
def WITHIN(self):
return self.getToken(SqlBaseParser.WITHIN, 0)
def WITHOUT(self):
return self.getToken(SqlBaseParser.WITHOUT, 0)
def WORK(self):
return self.getToken(SqlBaseParser.WORK, 0)
def WRITE(self):
return self.getToken(SqlBaseParser.WRITE, 0)
def YEAR(self):
return self.getToken(SqlBaseParser.YEAR, 0)
def ZONE(self):
return self.getToken(SqlBaseParser.ZONE, 0)
def getRuleIndex(self):
    # Grammar rule index of this context (auto-generated by ANTLR).
    return SqlBaseParser.RULE_nonReserved

def enterRule(self, listener:ParseTreeListener):
    # Notify the listener only if it implements the optional hook.
    if hasattr( listener, "enterNonReserved" ):
        listener.enterNonReserved(self)

def exitRule(self, listener:ParseTreeListener):
    # Mirror of enterRule for the rule-exit event.
    if hasattr( listener, "exitNonReserved" ):
        listener.exitNonReserved(self)

def accept(self, visitor:ParseTreeVisitor):
    # Visitor dispatch: fall back to generic child traversal when the
    # visitor does not implement a visitNonReserved method.
    if hasattr( visitor, "visitNonReserved" ):
        return visitor.visitNonReserved(self)
    else:
        return visitor.visitChildren(self)
def nonReserved(self):
localctx = SqlBaseParser.NonReservedContext(self, self._ctx, self.state)
self.enterRule(localctx, 180, self.RULE_nonReserved)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 2488
_la = self._input.LA(1)
if not((((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << SqlBaseParser.ADD) | (1 << SqlBaseParser.ADMIN) | (1 << SqlBaseParser.AFTER) | (1 << SqlBaseParser.ALL) | (1 << SqlBaseParser.ANALYZE) | (1 << SqlBaseParser.ANY) | (1 << SqlBaseParser.ARRAY) | (1 << SqlBaseParser.ASC) | (1 << SqlBaseParser.AT) | (1 << SqlBaseParser.AUTHORIZATION) | (1 << SqlBaseParser.BERNOULLI) | (1 << SqlBaseParser.CALL) | (1 << SqlBaseParser.CASCADE) | (1 << SqlBaseParser.CATALOGS) | (1 << SqlBaseParser.COLUMN) | (1 << SqlBaseParser.COLUMNS) | (1 << SqlBaseParser.COMMENT) | (1 << SqlBaseParser.COMMIT) | (1 << SqlBaseParser.COMMITTED) | (1 << SqlBaseParser.COUNT) | (1 << SqlBaseParser.CURRENT) | (1 << SqlBaseParser.DATA) | (1 << SqlBaseParser.DATE) | (1 << SqlBaseParser.DAY) | (1 << SqlBaseParser.DEFINER) | (1 << SqlBaseParser.DENY))) != 0) or ((((_la - 64)) & ~0x3f) == 0 and ((1 << (_la - 64)) & ((1 << (SqlBaseParser.DESC - 64)) | (1 << (SqlBaseParser.DEFINE - 64)) | (1 << (SqlBaseParser.DISTRIBUTED - 64)) | (1 << (SqlBaseParser.DOUBLE - 64)) | (1 << (SqlBaseParser.EMPTY - 64)) | (1 << (SqlBaseParser.ERROR - 64)) | (1 << (SqlBaseParser.EXCLUDING - 64)) | (1 << (SqlBaseParser.EXPLAIN - 64)) | (1 << (SqlBaseParser.FETCH - 64)) | (1 << (SqlBaseParser.FILTER - 64)) | (1 << (SqlBaseParser.FINAL - 64)) | (1 << (SqlBaseParser.FIRST - 64)) | (1 << (SqlBaseParser.FOLLOWING - 64)) | (1 << (SqlBaseParser.FORMAT - 64)) | (1 << (SqlBaseParser.FUNCTIONS - 64)) | (1 << (SqlBaseParser.GRANT - 64)) | (1 << (SqlBaseParser.GRANTED - 64)) | (1 << (SqlBaseParser.GRANTS - 64)) | (1 << (SqlBaseParser.GRAPHVIZ - 64)) | (1 << (SqlBaseParser.GROUPS - 64)) | (1 << (SqlBaseParser.HOUR - 64)) | (1 << (SqlBaseParser.IF - 64)) | (1 << (SqlBaseParser.IGNORE - 64)) | (1 << (SqlBaseParser.INCLUDING - 64)) | (1 << (SqlBaseParser.INITIAL - 64)) | (1 << (SqlBaseParser.INPUT - 64)) | (1 << (SqlBaseParser.INTERVAL - 64)) | (1 << (SqlBaseParser.INVOKER - 64)) | (1 << (SqlBaseParser.IO - 64)) | (1 << (SqlBaseParser.ISOLATION - 64)) 
| (1 << (SqlBaseParser.JSON - 64)) | (1 << (SqlBaseParser.LAST - 64)) | (1 << (SqlBaseParser.LATERAL - 64)) | (1 << (SqlBaseParser.LEVEL - 64)) | (1 << (SqlBaseParser.LIMIT - 64)) | (1 << (SqlBaseParser.LOCAL - 64)))) != 0) or ((((_la - 129)) & ~0x3f) == 0 and ((1 << (_la - 129)) & ((1 << (SqlBaseParser.LOGICAL - 129)) | (1 << (SqlBaseParser.MAP - 129)) | (1 << (SqlBaseParser.MATCH - 129)) | (1 << (SqlBaseParser.MATCHED - 129)) | (1 << (SqlBaseParser.MATCHES - 129)) | (1 << (SqlBaseParser.MATCH_RECOGNIZE - 129)) | (1 << (SqlBaseParser.MATERIALIZED - 129)) | (1 << (SqlBaseParser.MEASURES - 129)) | (1 << (SqlBaseParser.MERGE - 129)) | (1 << (SqlBaseParser.MINUTE - 129)) | (1 << (SqlBaseParser.MONTH - 129)) | (1 << (SqlBaseParser.NEXT - 129)) | (1 << (SqlBaseParser.NFC - 129)) | (1 << (SqlBaseParser.NFD - 129)) | (1 << (SqlBaseParser.NFKC - 129)) | (1 << (SqlBaseParser.NFKD - 129)) | (1 << (SqlBaseParser.NO - 129)) | (1 << (SqlBaseParser.NONE - 129)) | (1 << (SqlBaseParser.NULLIF - 129)) | (1 << (SqlBaseParser.NULLS - 129)) | (1 << (SqlBaseParser.OFFSET - 129)) | (1 << (SqlBaseParser.OMIT - 129)) | (1 << (SqlBaseParser.OF - 129)) | (1 << (SqlBaseParser.ONE - 129)) | (1 << (SqlBaseParser.ONLY - 129)) | (1 << (SqlBaseParser.OPTION - 129)) | (1 << (SqlBaseParser.ORDINALITY - 129)) | (1 << (SqlBaseParser.OUTPUT - 129)) | (1 << (SqlBaseParser.OVER - 129)) | (1 << (SqlBaseParser.OVERFLOW - 129)) | (1 << (SqlBaseParser.PARTITION - 129)) | (1 << (SqlBaseParser.PARTITIONS - 129)) | (1 << (SqlBaseParser.PAST - 129)) | (1 << (SqlBaseParser.PATH - 129)) | (1 << (SqlBaseParser.PATTERN - 129)) | (1 << (SqlBaseParser.PER - 129)) | (1 << (SqlBaseParser.PERMUTE - 129)) | (1 << (SqlBaseParser.POSITION - 129)) | (1 << (SqlBaseParser.PRECEDING - 129)) | (1 << (SqlBaseParser.PRECISION - 129)) | (1 << (SqlBaseParser.PRIVILEGES - 129)) | (1 << (SqlBaseParser.PROPERTIES - 129)) | (1 << (SqlBaseParser.RANGE - 129)) | (1 << (SqlBaseParser.READ - 129)) | (1 << (SqlBaseParser.REFRESH - 129)) | 
(1 << (SqlBaseParser.RENAME - 129)) | (1 << (SqlBaseParser.REPEATABLE - 129)) | (1 << (SqlBaseParser.REPLACE - 129)) | (1 << (SqlBaseParser.RESET - 129)) | (1 << (SqlBaseParser.RESPECT - 129)) | (1 << (SqlBaseParser.RESTRICT - 129)) | (1 << (SqlBaseParser.REVOKE - 129)) | (1 << (SqlBaseParser.ROLE - 129)))) != 0) or ((((_la - 193)) & ~0x3f) == 0 and ((1 << (_la - 193)) & ((1 << (SqlBaseParser.ROLES - 193)) | (1 << (SqlBaseParser.ROLLBACK - 193)) | (1 << (SqlBaseParser.ROW - 193)) | (1 << (SqlBaseParser.ROWS - 193)) | (1 << (SqlBaseParser.RUNNING - 193)) | (1 << (SqlBaseParser.SCHEMA - 193)) | (1 << (SqlBaseParser.SCHEMAS - 193)) | (1 << (SqlBaseParser.SECOND - 193)) | (1 << (SqlBaseParser.SECURITY - 193)) | (1 << (SqlBaseParser.SEEK - 193)) | (1 << (SqlBaseParser.SERIALIZABLE - 193)) | (1 << (SqlBaseParser.SESSION - 193)) | (1 << (SqlBaseParser.SET - 193)) | (1 << (SqlBaseParser.SETS - 193)) | (1 << (SqlBaseParser.SHOW - 193)) | (1 << (SqlBaseParser.SOME - 193)) | (1 << (SqlBaseParser.START - 193)) | (1 << (SqlBaseParser.STATS - 193)) | (1 << (SqlBaseParser.SUBSET - 193)) | (1 << (SqlBaseParser.SUBSTRING - 193)) | (1 << (SqlBaseParser.SYSTEM - 193)) | (1 << (SqlBaseParser.TABLES - 193)) | (1 << (SqlBaseParser.TABLESAMPLE - 193)) | (1 << (SqlBaseParser.TEXT - 193)) | (1 << (SqlBaseParser.TIES - 193)) | (1 << (SqlBaseParser.TIME - 193)) | (1 << (SqlBaseParser.TIMESTAMP - 193)) | (1 << (SqlBaseParser.TO - 193)) | (1 << (SqlBaseParser.TRANSACTION - 193)) | (1 << (SqlBaseParser.TRUNCATE - 193)) | (1 << (SqlBaseParser.TRY_CAST - 193)) | (1 << (SqlBaseParser.TYPE - 193)) | (1 << (SqlBaseParser.UNBOUNDED - 193)) | (1 << (SqlBaseParser.UNCOMMITTED - 193)) | (1 << (SqlBaseParser.UNMATCHED - 193)) | (1 << (SqlBaseParser.UPDATE - 193)) | (1 << (SqlBaseParser.USE - 193)) | (1 << (SqlBaseParser.USER - 193)) | (1 << (SqlBaseParser.VALIDATE - 193)) | (1 << (SqlBaseParser.VERBOSE - 193)) | (1 << (SqlBaseParser.VERSION - 193)) | (1 << (SqlBaseParser.VIEW - 193)) | (1 << 
(SqlBaseParser.WINDOW - 193)) | (1 << (SqlBaseParser.WITHIN - 193)) | (1 << (SqlBaseParser.WITHOUT - 193)) | (1 << (SqlBaseParser.WORK - 193)) | (1 << (SqlBaseParser.WRITE - 193)) | (1 << (SqlBaseParser.YEAR - 193)) | (1 << (SqlBaseParser.ZONE - 193)))) != 0)):
self._errHandler.recoverInline(self)
else:
self._errHandler.reportMatch(self)
self.consume()
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
def sempred(self, localctx:RuleContext, ruleIndex:int, predIndex:int):
    """Dispatch a semantic predicate to the per-rule ``*_sempred`` handler.

    The predicate table is built lazily on first use and maps grammar
    rule indices to the bound handler methods of this parser.

    Raises
    ------
    Exception
        If no predicate is registered for ``ruleIndex``.
    """
    # Fixed idiom: identity comparison with None (was `== None`).
    if self._predicates is None:
        self._predicates = dict()
        self._predicates[17] = self.queryTerm_sempred
        self._predicates[29] = self.relation_sempred
        self._predicates[47] = self.booleanExpression_sempred
        self._predicates[49] = self.valueExpression_sempred
        self._predicates[50] = self.primaryExpression_sempred
        self._predicates[61] = self.type__sempred
        self._predicates[71] = self.rowPattern_sempred
    pred = self._predicates.get(ruleIndex, None)
    if pred is None:
        raise Exception("No predicate with index:" + str(ruleIndex))
    else:
        return pred(localctx, predIndex)
def queryTerm_sempred(self, localctx:QueryTermContext, predIndex:int):
    # Precedence predicates for the left-recursive queryTerm rule
    # (auto-generated by ANTLR). Implicitly returns None for other indices.
    if predIndex == 0:
        return self.precpred(self._ctx, 2)
    if predIndex == 1:
        return self.precpred(self._ctx, 1)

def relation_sempred(self, localctx:RelationContext, predIndex:int):
    # Precedence predicate for the left-recursive relation rule.
    if predIndex == 2:
        return self.precpred(self._ctx, 2)

def booleanExpression_sempred(self, localctx:BooleanExpressionContext, predIndex:int):
    # Precedence predicates for boolean AND/OR left recursion.
    if predIndex == 3:
        return self.precpred(self._ctx, 2)
    if predIndex == 4:
        return self.precpred(self._ctx, 1)

def valueExpression_sempred(self, localctx:ValueExpressionContext, predIndex:int):
    # Precedence predicates for arithmetic/concatenation left recursion.
    if predIndex == 5:
        return self.precpred(self._ctx, 3)
    if predIndex == 6:
        return self.precpred(self._ctx, 2)
    if predIndex == 7:
        return self.precpred(self._ctx, 1)
    if predIndex == 8:
        return self.precpred(self._ctx, 5)

def primaryExpression_sempred(self, localctx:PrimaryExpressionContext, predIndex:int):
    # Precedence predicates for subscript/dereference left recursion.
    if predIndex == 9:
        return self.precpred(self._ctx, 17)
    if predIndex == 10:
        return self.precpred(self._ctx, 15)

def type__sempred(self, localctx:Type_Context, predIndex:int):
    # Precedence predicate for the left-recursive type rule.
    if predIndex == 11:
        return self.precpred(self._ctx, 2)

def rowPattern_sempred(self, localctx:RowPatternContext, predIndex:int):
    # Precedence predicates for the row-pattern rule (MATCH_RECOGNIZE).
    if predIndex == 12:
        return self.precpred(self._ctx, 2)
    if predIndex == 13:
        return self.precpred(self._ctx, 1)
| true | true |
f7f599c755ab1a2b2a72a7f20c40f3bdc7832bc9 | 400 | py | Python | tf_covid19_care/trainers/_init_pathes.py | wangjuncongyu/COVID-19-Deep-Learning | 7a2613a978c7537a541ae1ac1ea89643d2fa9081 | [
"Apache-2.0"
] | null | null | null | tf_covid19_care/trainers/_init_pathes.py | wangjuncongyu/COVID-19-Deep-Learning | 7a2613a978c7537a541ae1ac1ea89643d2fa9081 | [
"Apache-2.0"
] | null | null | null | tf_covid19_care/trainers/_init_pathes.py | wangjuncongyu/COVID-19-Deep-Learning | 7a2613a978c7537a541ae1ac1ea89643d2fa9081 | [
"Apache-2.0"
] | 1 | 2021-12-03T04:17:51.000Z | 2021-12-03T04:17:51.000Z | import os.path as osp
import sys
def add_path(path):
if path not in sys.path:
sys.path.insert(0, path)
this_dir = osp.dirname(__file__)
add_path(osp.join(this_dir, '..'))
# Add data_loader to PYTHONPATH
data_loader_path = osp.join(this_dir, '..', 'data_loader')
add_path(data_loader_path)
# Add util to PYTHONPATH
#utils_path = osp.join(this_dir, '..', 'utils')
#add_path(utils_path)
| 22.222222 | 58 | 0.7125 | import os.path as osp
import sys
def add_path(path):
if path not in sys.path:
sys.path.insert(0, path)
this_dir = osp.dirname(__file__)
add_path(osp.join(this_dir, '..'))
data_loader_path = osp.join(this_dir, '..', 'data_loader')
add_path(data_loader_path)
| true | true |
f7f59b62bb3925d5a26dd4052ddb689617179e89 | 104 | py | Python | tests/exog/random/random_exog_300_1280.py | shaido987/pyaf | b9afd089557bed6b90b246d3712c481ae26a1957 | [
"BSD-3-Clause"
] | 377 | 2016-10-13T20:52:44.000Z | 2022-03-29T18:04:14.000Z | tests/exog/random/random_exog_300_1280.py | ysdede/pyaf | b5541b8249d5a1cfdc01f27fdfd99b6580ed680b | [
"BSD-3-Clause"
] | 160 | 2016-10-13T16:11:53.000Z | 2022-03-28T04:21:34.000Z | tests/exog/random/random_exog_300_1280.py | ysdede/pyaf | b5541b8249d5a1cfdc01f27fdfd99b6580ed680b | [
"BSD-3-Clause"
] | 63 | 2017-03-09T14:51:18.000Z | 2022-03-27T20:52:57.000Z | import tests.exog.test_random_exogenous as testrandexog
testrandexog.test_random_exogenous( 300,1280); | 26 | 55 | 0.865385 | import tests.exog.test_random_exogenous as testrandexog
testrandexog.test_random_exogenous( 300,1280); | true | true |
f7f59ca20dbf6defb9e9a297c84353a29f08b414 | 4,209 | py | Python | topi/python/topi/nn/pooling.py | baowenlei/tvm | 6b2e18ed96fad26b4a5e5f8a6dcbedf9206c9a65 | [
"Apache-2.0"
] | 1 | 2019-04-26T18:35:40.000Z | 2019-04-26T18:35:40.000Z | topi/python/topi/nn/pooling.py | baowenlei/tvm | 6b2e18ed96fad26b4a5e5f8a6dcbedf9206c9a65 | [
"Apache-2.0"
] | null | null | null | topi/python/topi/nn/pooling.py | baowenlei/tvm | 6b2e18ed96fad26b4a5e5f8a6dcbedf9206c9a65 | [
"Apache-2.0"
] | 2 | 2019-05-24T17:22:38.000Z | 2019-06-14T23:30:24.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""TVM operator pooling compute."""
from __future__ import absolute_import
from .. import cpp
# Integer codes for the pooling kind, passed to the C++ backend via
# cpp.nn.pool / cpp.nn.global_pool below.
POOL_TYPE_CODE = {
    "avg": 0,
    "max": 1
}
def global_pool(data, pool_type, layout="NCHW"):
    """Global pooling over the height and width dimensions of *data*.

    The 'H' and 'W' characters of *layout* identify the pooled
    dimensions; these dimensions must not be split (NCHW and NCHW16c are
    valid, NCHW16w and NCHW16h are not).

    Parameters
    ----------
    data : tvm.Tensor
        n-D tensor laid out as described by *layout*.

    pool_type : str
        Either 'max' or 'avg'.

    layout : str
        Layout string: upper-case letters denote dimensions, and a
        lower-case letter with a factor denotes a split dimension.
        For example, NCHW16c describes a 5-D tensor of
        [batch_size, channel, height, width, channel_block] where
        channel_block=16 is a split of the channel dimension.

    Returns
    -------
    output : tvm.Tensor
        n-D tensor in the same layout with the height and width
        dimensions reduced to size 1 (e.g. [batch, channel, 1, 1] for
        NCHW).
    """
    pool_code = POOL_TYPE_CODE[pool_type]
    return cpp.nn.global_pool(data, pool_code, layout)
def pool(data,
         kernel,
         stride,
         padding,
         pool_type,
         ceil_mode=False,
         layout="NCHW",
         count_include_pad=True):
    """Pooling over the height and width dimensions of *data*.

    The 'H' and 'W' characters of *layout* identify the pooled
    dimensions; these dimensions must not be split (NCHW and NCHW16c are
    valid, NCHW16w and NCHW16h are not).

    Parameters
    ----------
    data : tvm.Tensor
        n-D tensor laid out as described by *layout*.

    kernel : list/tuple of two ints
        Kernel size, [kernel_height, kernel_width].

    stride : list/tuple of two ints
        Stride size, [stride_height, stride_width].

    padding : list/tuple of four ints
        Pad size, [pad_top, pad_left, pad_bottom, pad_right].

    pool_type : str
        Either 'max' or 'avg'.

    ceil_mode : bool
        Whether to use ceiling when computing the output size.

    layout : str
        Layout string: upper-case letters denote dimensions, and a
        lower-case letter with a factor denotes a split dimension.
        For example, NCHW16c describes a 5-D tensor of
        [batch_size, channel, height, width, channel_block] where
        channel_block=16 is a split of the channel dimension.

    count_include_pad : bool
        Whether padded elements are counted when pool_type is 'avg'.

    Returns
    -------
    output : tvm.Tensor
        n-D tensor in the same layout as the input.
    """
    pool_code = POOL_TYPE_CODE[pool_type]
    return cpp.nn.pool(
        data, kernel, stride, padding,
        pool_code, ceil_mode, layout, count_include_pad)
| 36.284483 | 87 | 0.677833 |
from __future__ import absolute_import
from .. import cpp
POOL_TYPE_CODE = {
"avg": 0,
"max": 1
}
def global_pool(data, pool_type, layout="NCHW"):
return cpp.nn.global_pool(data, POOL_TYPE_CODE[pool_type], layout)
def pool(data,
kernel,
stride,
padding,
pool_type,
ceil_mode=False,
layout="NCHW",
count_include_pad=True):
return cpp.nn.pool(data, kernel, stride, padding,
POOL_TYPE_CODE[pool_type], ceil_mode, layout, count_include_pad)
| true | true |
f7f59cfc76764e2d9333ff393aef46fdfa2c2ac8 | 8,832 | py | Python | networks/modular_downscaling_model/core_modules/ResUNetSuper.py | khoehlein/CNNs-for-Wind-Field-Downscaling | eb8418d4d893fcb2beb929abb241281b7a9b6a95 | [
"MIT"
] | 5 | 2021-05-05T06:08:52.000Z | 2022-03-24T04:57:52.000Z | networks/modular_downscaling_model/core_modules/ResUNetSuper.py | khoehlein/CNNs-for-Wind-Field-Downscaling | eb8418d4d893fcb2beb929abb241281b7a9b6a95 | [
"MIT"
] | null | null | null | networks/modular_downscaling_model/core_modules/ResUNetSuper.py | khoehlein/CNNs-for-Wind-Field-Downscaling | eb8418d4d893fcb2beb929abb241281b7a9b6a95 | [
"MIT"
] | 2 | 2021-08-07T05:18:05.000Z | 2022-03-31T03:48:37.000Z | import torch.nn as nn
from networks.modular_downscaling_model.base_modules import ConvBlock, ResNetMultiBlock, ConvMultiBlock
from networks.modular_downscaling_model.core_modules.unet_template import BaseUNet
from networks.modular_downscaling_model.core_modules.unet_template.SkipConnectionModule import SkipConnectionModule
from networks.modular_downscaling_model.core_modules.unet_template.DecodingModule import DecodingModule
class ResUNetSuper(BaseUNet):
    """U-Net with residual blocks that upsamples low-res input up front.

    Unlike the legacy residual U-Net, the low-resolution input is
    upsampled to the final target resolution immediately
    (``init_upsampling``) and the whole encoder/decoder then runs at that
    resolution; empirically this outperforms inserting the U-Net before
    the supersampling blocks. Dropout can be enabled in both the encoding
    and decoding paths (``use_dropout_decode``) to mitigate overfitting,
    or restricted to the encoding layers. Operating on high-res input
    (``init_upsampling=False``) is possible but increases model size and
    training time without better results. An optional convolution block
    (``init_convolution`` with ``feature_channels``) can preprocess the
    (upsampled) input, which also allows consuming raw data.
    """

    __options__ = {
        'input_channels': 4,
        'kernel_size': 3,
        'padding_mode': 'replication',
        'leaky_slope': 0.2,
        'dropout_rate': 0.1,
        'batch_norm': True,
        'interpolation_mode': 'bilinear',
        'residual_blocks_per_module': 1,
        'layers_per_residual_block': 2,
        'use_dropout_decode': True,
        'init_upsampling': True,
        'init_convolution': False,
        'feature_channels': 64,
    }

    def __init__(self, **kwargs):
        # NOTE(review): BaseUNet presumably turns __options__/kwargs into
        # instance attributes (kernel_size, dropout_rate, ...) -- confirm.
        super(ResUNetSuper, self).__init__(**kwargs)
        self._build_unet()
        self.output_channels = self.model.outer_channels

    def _processing_module(
            self,
            input_channels, **kwargs
    ):
        """Residual processing stage that preserves the channel count."""
        module = ResNetMultiBlock(
            input_channels=input_channels, feature_channels=input_channels, kernel_size=self.kernel_size,
            padding_mode=self.padding_mode,
            leaky_slope=self.leaky_slope, dropout_rate=self.dropout_rate,
            use_batch_norm=self.batch_norm,
            num_resnet_blocks=self.residual_blocks_per_module,
            num_hidden_layers=self.layers_per_residual_block,
        )
        return module

    def _downsampling_module(
            self,
            input_channels, output_channels, scale_factor, **kwargs
    ):
        """Strided convolution that reduces spatial size by ``scale_factor``."""
        module = ConvBlock(
            input_channels=input_channels, output_channels=output_channels,
            stride=scale_factor, kernel_size=self.kernel_size,
            padding_mode=self.padding_mode,
            leaky_slope=self.leaky_slope, dropout_rate=self.dropout_rate,
            use_batch_norm=self.batch_norm,
        )
        return module

    def _upsampling_resolution_module(
            self,
            input_channels, output_channels, scale_factor, **kwargs
    ):
        """Plain interpolation upsampling (no learned parameters).

        The channel arguments are accepted for interface symmetry with the
        other builders but are intentionally unused here.
        """
        module = nn.Upsample(scale_factor=scale_factor, mode=self.interpolation_mode)
        return module

    def _upsampling_feature_module(
            self,
            input_channels, output_channels, scale_factor, **kwargs
    ):
        """Convolution that adapts channel counts after resolution upsampling."""
        module = ConvBlock(
            input_channels=input_channels, output_channels=output_channels,
            kernel_size=self.kernel_size,
            padding_mode=self.padding_mode,
            leaky_slope=self.leaky_slope, dropout_rate=self.dropout_rate,
            use_batch_norm=self.batch_norm
        )
        return module

    def _processing_module_decode(
            self,
            input_channels, **kwargs
    ):
        """Decoder-side residual stage with dropout disabled (rate 0.0)."""
        module = ResNetMultiBlock(
            input_channels=input_channels, feature_channels=input_channels, kernel_size=self.kernel_size,
            padding_mode=self.padding_mode,
            leaky_slope=self.leaky_slope, dropout_rate=0.0,
            use_batch_norm=self.batch_norm,
            num_resnet_blocks=self.residual_blocks_per_module,
            num_hidden_layers=self.layers_per_residual_block,
        )
        return module

    def _decoding_module(
            self,
            input_channels, output_channels, scale_factor, **kwargs
    ):
        """Decoder stage: upsampling followed by residual processing.

        With ``use_dropout_decode`` the decoder reuses the (dropout-enabled)
        encoder processing module; otherwise a dropout-free variant is used.
        NOTE(review): ``self._upsampling_module`` is not defined in this
        class -- presumably provided by BaseUNet; confirm.
        """
        if self.use_dropout_decode:
            module = DecodingModule(
                self._upsampling_module(input_channels, output_channels, scale_factor, **kwargs),
                self._processing_module(output_channels, **kwargs),
                inner_channels=input_channels,
                outer_channels=output_channels
            )
        else:
            module = DecodingModule(
                self._upsampling_module(input_channels, output_channels, scale_factor, **kwargs),
                self._processing_module_decode(output_channels, **kwargs),
                inner_channels=input_channels,
                outer_channels=output_channels
            )
        return module

    def _build_init_module(self, input_channels, output_channels):
        """Build the input stage (upsampling and/or initial convolution).

        By default the input is assumed to be pre-processed features. With
        ``init_convolution`` enabled, raw data can be consumed as well; then
        ``input_channels`` must match the number of raw input features.
        """
        block = []
        if self.init_upsampling:
            # Upsample the image directly to the final target resolution.
            # NOTE(review): the (4, 3) factor is hard-coded for this
            # dataset's low-res -> high-res grid ratio -- confirm.
            block.append(nn.Upsample(scale_factor=(4, 3), mode=self.interpolation_mode))
        if self.init_convolution:
            # BUG FIX: the original did ``block += ConvBlock(...)``, which
            # tries to iterate the module (list.__iadd__ extends) and
            # raises TypeError; the module must be appended instead.
            block.append(ConvBlock(
                input_channels=input_channels, output_channels=output_channels,
                kernel_size=self.kernel_size,
                padding_mode=self.padding_mode,
                leaky_slope=self.leaky_slope, dropout_rate=self.dropout_rate,
                use_batch_norm=self.batch_norm
            ))
        self.upsample_input = nn.Sequential(*block)

    def _build_unet(self):
        """Assemble the U-Net.

        Overrides the BaseUNet builder: besides the standard skip-connected
        stages, three additional encoding/decoding levels reduce the
        high-res target resolution to a small bottleneck and back.
        """
        # Handle either processed input or raw input data.
        init_output_channels = self.feature_channels if self.init_convolution else self.input_channels
        self._build_init_module(self.input_channels, init_output_channels)
        # Innermost (bottleneck) stage first, then wrap outward.
        model = self._skip_connection_module(
            6 * init_output_channels, 7 * init_output_channels,
            (2, 2),
            inner_module=None
        )
        model = self._skip_connection_module(
            5 * init_output_channels, 6 * init_output_channels,
            (2, 2),
            inner_module=model
        )
        model = self._skip_connection_module(
            4 * init_output_channels, 5 * init_output_channels,
            (3, 3),
            inner_module=model
        )
        model = self._skip_connection_module(
            3 * init_output_channels, 4 * init_output_channels,
            (2, 1),
            inner_module=model
        )
        model = self._skip_connection_module(
            2 * init_output_channels, 3 * init_output_channels,
            (1, 3),
            inner_module=model
        )
        model = self._skip_connection_module(
            init_output_channels, 2 * init_output_channels,
            (2, 1),
            inner_module=model
        )
        self.model = model

    def forward(self, features):
        """Upsample the input to the target resolution, then run the U-Net."""
        output = self.upsample_input(features)
        output = self.model(output)
        return output
if __name__ == '__main__':
model = ResUNetSuper()
print(model)
| 40.328767 | 115 | 0.658741 | import torch.nn as nn
from networks.modular_downscaling_model.base_modules import ConvBlock, ResNetMultiBlock, ConvMultiBlock
from networks.modular_downscaling_model.core_modules.unet_template import BaseUNet
from networks.modular_downscaling_model.core_modules.unet_template.SkipConnectionModule import SkipConnectionModule
from networks.modular_downscaling_model.core_modules.unet_template.DecodingModule import DecodingModule
class ResUNetSuper(BaseUNet):
__options__ = {
'input_channels': 4,
'kernel_size': 3,
'padding_mode': 'replication',
'leaky_slope': 0.2,
'dropout_rate': 0.1,
'batch_norm': True,
'interpolation_mode': 'bilinear',
'residual_blocks_per_module': 1,
'layers_per_residual_block': 2,
'use_dropout_decode': True,
'init_upsampling': True,
'init_convolution': False,
'feature_channels': 64,
}
def __init__(self, **kwargs):
super(ResUNetSuper, self).__init__(**kwargs)
self._build_unet()
self.output_channels = self.model.outer_channels
def _processing_module(
self,
input_channels, **kwargs
):
module = ResNetMultiBlock(
input_channels=input_channels, feature_channels=input_channels, kernel_size=self.kernel_size,
padding_mode=self.padding_mode,
leaky_slope=self.leaky_slope, dropout_rate=self.dropout_rate,
use_batch_norm=self.batch_norm,
num_resnet_blocks=self.residual_blocks_per_module,
num_hidden_layers=self.layers_per_residual_block,
)
return module
def _downsampling_module(
self,
input_channels, output_channels, scale_factor, **kwargs
):
module = ConvBlock(
input_channels=input_channels, output_channels=output_channels,
stride=scale_factor, kernel_size=self.kernel_size,
padding_mode=self.padding_mode,
leaky_slope=self.leaky_slope, dropout_rate=self.dropout_rate,
use_batch_norm=self.batch_norm,
)
return module
def _upsampling_resolution_module(
self,
input_channels, output_channels, scale_factor, **kwargs
):
module = nn.Upsample(scale_factor=scale_factor, mode=self.interpolation_mode)
return module
def _upsampling_feature_module(
self,
input_channels, output_channels, scale_factor, **kwargs
):
module = ConvBlock(
input_channels=input_channels, output_channels=output_channels,
kernel_size=self.kernel_size,
padding_mode=self.padding_mode,
leaky_slope=self.leaky_slope, dropout_rate=self.dropout_rate,
use_batch_norm=self.batch_norm
)
return module
def _processing_module_decode(
self,
input_channels, **kwargs
):
module = ResNetMultiBlock(
input_channels=input_channels, feature_channels=input_channels, kernel_size=self.kernel_size,
padding_mode=self.padding_mode,
leaky_slope=self.leaky_slope, dropout_rate=0.0,
use_batch_norm=self.batch_norm,
num_resnet_blocks=self.residual_blocks_per_module,
num_hidden_layers=self.layers_per_residual_block,
)
return module
def _decoding_module(
self,
input_channels, output_channels, scale_factor, **kwargs
):
if self.use_dropout_decode:
module = DecodingModule(
self._upsampling_module(input_channels, output_channels, scale_factor, **kwargs),
self._processing_module(output_channels, **kwargs),
inner_channels=input_channels,
outer_channels=output_channels
)
else:
module = DecodingModule(
self._upsampling_module(input_channels, output_channels, scale_factor, **kwargs),
self._processing_module_decode(output_channels, **kwargs),
inner_channels=input_channels,
outer_channels=output_channels
)
return module
def _build_init_module(self, input_channels, output_channels):
block = []
if self.init_upsampling:
block += [nn.Upsample(scale_factor=(4, 3), mode=self.interpolation_mode)]
if self.init_convolution:
block += ConvBlock(
input_channels=input_channels, output_channels=output_channels,
kernel_size=self.kernel_size,
padding_mode=self.padding_mode,
leaky_slope=self.leaky_slope, dropout_rate=self.dropout_rate,
use_batch_norm=self.batch_norm
)
self.upsample_input = nn.Sequential(*block)
def _build_unet(self):
init_output_channels = self.feature_channels if self.init_convolution else self.input_channels
self._build_init_module(self.input_channels, init_output_channels)
model = self._skip_connection_module(
6 * init_output_channels, 7 * init_output_channels,
(2, 2),
inner_module=None
)
model = self._skip_connection_module(
5 * init_output_channels, 6 * init_output_channels,
(2, 2),
inner_module=model
)
model = self._skip_connection_module(
4 * init_output_channels, 5 * init_output_channels,
(3, 3),
inner_module=model
)
model = self._skip_connection_module(
3 * init_output_channels, 4 * init_output_channels,
(2, 1),
inner_module=model
)
model = self._skip_connection_module(
2 * init_output_channels, 3 * init_output_channels,
(1, 3),
inner_module=model
)
model = self._skip_connection_module(
init_output_channels, 2 * init_output_channels,
(2, 1),
inner_module=model
)
self.model = model
def forward(self, features):
output = self.upsample_input(features)
output = self.model(output)
return output
if __name__ == '__main__':
model = ResUNetSuper()
print(model)
| true | true |
f7f59d1f6caa35429df9a2262629db0c7253b18c | 4,078 | py | Python | io_equalizerviz.py | PROPHESSOR/Blender-Equalizer-Audio-Visualizer | 0405f2c47a76b47f22948a65561db5efe66f5bd8 | [
"MIT"
] | null | null | null | io_equalizerviz.py | PROPHESSOR/Blender-Equalizer-Audio-Visualizer | 0405f2c47a76b47f22948a65561db5efe66f5bd8 | [
"MIT"
] | 2 | 2020-01-03T23:53:04.000Z | 2020-01-03T23:54:49.000Z | io_equalizerviz.py | PROPHESSOR/Blender-Equalizer-Audio-Visualizer | 0405f2c47a76b47f22948a65561db5efe66f5bd8 | [
"MIT"
] | null | null | null | # EqualizerViz - Audio visualization plugin
# Created by PROPHESSOR for Blender 2.80 (04.01.2020)
#
# Based on sirrandalot's "Audio visualisation script" for Blender 2.71
import bpy
from bpy_extras.io_utils import ImportHelper
from bpy.props import IntProperty, StringProperty
from bpy.types import Operator
from bpy_extras.wm_utils.progress_report import ProgressReport
bl_info = {
"name": "Import Equalizer Audio",
"author": "PROPHESSOR",
"description": "Imports the audio file to create equalizer visualization. Wav import is more faster.",
"version": (1, 0, 1),
"blender": (2, 80, 0),
"location": "File > Import > Equalizer",
"url": "https://github.com/PROPHESSOR/Blender-Equalizer-Audio-Visualizer",
"tracker_url": "https://github.com/Blender/Blender-Equalizer-Audio-Visualizer/issues",
"category": "Import-Export"
}
def menu_func_import(self, context):
    """Menu draw callback: adds the EqualizerViz entry to File > Import."""
    self.layout.operator(ImportEqualizerAudioFile.bl_idname, text="Audio for EqualizerViz")
def register():
    """Register the import operator and hook it into the File > Import menu."""
    bpy.utils.register_class(ImportEqualizerAudioFile)
    # Blender 2.80 renamed INFO_MT_file_import to TOPBAR_MT_file_import;
    # fall back to the old menu on pre-2.8 builds.
    target_menu = getattr(bpy.types, 'TOPBAR_MT_file_import', None)
    if target_menu is None:
        target_menu = bpy.types.INFO_MT_file_import
    target_menu.append(menu_func_import)
class ImportEqualizerAudioFile(Operator, ImportHelper):
    """Imports an audio file and builds an equalizer-style visualization.

    Creates one plane per frequency band and bakes the corresponding sound
    energy into each plane's Y-scale F-curve, so the bars animate with the
    audio when the scene is played back.
    """
    bl_idname = "equalizerviz_blender.import_audio"
    bl_label = "Import audio file to visualize. Wav is more faster."
    filename_ext = ".wav" # Wav import is more faster
    #filter_glob = StringProperty(
    #    default = "*.wav",
    #    options = { 'HIDDEN' },
    #    maxlen= 255
    #)
    numbars = IntProperty(
        name="Number of equalizer bars",
        description=(
            "Number of bars and frequency ranges."
        ),
        default=64
    )
    def execute(self, context):
        # self.filepath is provided by ImportHelper after the file dialog.
        with ProgressReport(context.window_manager) as progress:
            progress.enter_substeps(self.numbars, "Importing frequency %d ranges as bars %r..." % (self.numbars, self.filepath))
            for i in range(0, self.numbars):
                # Add a plane and set it's origin to one of its edges
                bpy.ops.mesh.primitive_plane_add(location=((i + (i * 0.5)), 0, 0))
                bpy.context.scene.cursor.location = bpy.context.active_object.location
                bpy.context.scene.cursor.location.y -= 1
                bpy.ops.object.origin_set(type='ORIGIN_CURSOR')
                # Scale the plane on the x and y axis, then apply the transformation
                bpy.context.active_object.scale.x = 0.5
                bpy.context.active_object.scale.y = 20
                bpy.ops.object.transform_apply(location=False, rotation=False, scale=True)
                # Insert a scaling keyframe and lock the x and z axis
                # (only the Y scale should be driven by the baked sound).
                bpy.ops.anim.keyframe_insert_menu(type='Scaling')
                bpy.context.active_object.animation_data.action.fcurves[0].lock = True
                bpy.context.active_object.animation_data.action.fcurves[2].lock = True
                # Set the window context to the graph editor
                # (graph.sound_bake requires a Graph Editor context).
                bpy.context.area.type = 'GRAPH_EDITOR'
                # Expression to determine the frequency range of the bars
                # (quadratic spacing: low bars get narrow bands, high bars wide ones).
                low = i**2 + 20
                high = (i + 1)**2 + 20
                progress.step("Bar %d of %d: %d Hz - %d Hz. Baking..." % (i, self.numbars, low, high))
                # Bake that range of frequencies to the current plane (along the y axis)
                bpy.ops.graph.sound_bake(filepath=self.filepath, low=(low), high=(high))
                # Lock the y axis
                bpy.context.active_object.animation_data.action.fcurves[1].lock = True
            progress.leave_substeps("Done.")
        return { "FINISHED" }
if __name__ == "__main__":
    # Allow running the script directly from Blender's text editor.
    register()
| 39.592233 | 128 | 0.619667 |
import bpy
from bpy_extras.io_utils import ImportHelper
from bpy.props import IntProperty, StringProperty
from bpy.types import Operator
from bpy_extras.wm_utils.progress_report import ProgressReport
bl_info = {
"name": "Import Equalizer Audio",
"author": "PROPHESSOR",
"description": "Imports the audio file to create equalizer visualization. Wav import is more faster.",
"version": (1, 0, 1),
"blender": (2, 80, 0),
"location": "File > Import > Equalizer",
"url": "https://github.com/PROPHESSOR/Blender-Equalizer-Audio-Visualizer",
"tracker_url": "https://github.com/Blender/Blender-Equalizer-Audio-Visualizer/issues",
"category": "Import-Export"
}
def menu_func_import(self, context):
self.layout.operator(ImportEqualizerAudioFile.bl_idname, text="Audio for EqualizerViz")
def register():
bpy.utils.register_class(ImportEqualizerAudioFile)
# Add import menu item
if hasattr(bpy.types, 'TOPBAR_MT_file_import'):
#2.8+
bpy.types.TOPBAR_MT_file_import.append(menu_func_import)
else:
bpy.types.INFO_MT_file_import.append(menu_func_import)
class ImportEqualizerAudioFile(Operator, ImportHelper):
bl_idname = "equalizerviz_blender.import_audio"
bl_label = "Import audio file to visualize. Wav is more faster."
filename_ext = ".wav" # Wav import is more faster
#filter_glob = StringProperty(
# default = "*.wav",
# options = { 'HIDDEN' },
# maxlen= 255
#)
numbars = IntProperty(
name="Number of equalizer bars",
description=(
"Number of bars and frequency ranges."
),
default=64
)
def execute(self, context):
with ProgressReport(context.window_manager) as progress:
progress.enter_substeps(self.numbars, "Importing frequency %d ranges as bars %r..." % (self.numbars, self.filepath))
for i in range(0, self.numbars):
# Add a plane and set it's origin to one of its edges
bpy.ops.mesh.primitive_plane_add(location=((i + (i * 0.5)), 0, 0))
bpy.context.scene.cursor.location = bpy.context.active_object.location
bpy.context.scene.cursor.location.y -= 1
bpy.ops.object.origin_set(type='ORIGIN_CURSOR')
bpy.context.active_object.scale.x = 0.5
bpy.context.active_object.scale.y = 20
bpy.ops.object.transform_apply(location=False, rotation=False, scale=True)
bpy.ops.anim.keyframe_insert_menu(type='Scaling')
bpy.context.active_object.animation_data.action.fcurves[0].lock = True
bpy.context.active_object.animation_data.action.fcurves[2].lock = True
bpy.context.area.type = 'GRAPH_EDITOR'
low = i**2 + 20
high = (i + 1)**2 + 20
progress.step("Bar %d of %d: %d Hz - %d Hz. Baking..." % (i, self.numbars, low, high))
bpy.ops.graph.sound_bake(filepath=self.filepath, low=(low), high=(high))
bpy.context.active_object.animation_data.action.fcurves[1].lock = True
progress.leave_substeps("Done.")
return { "FINISHED" }
if __name__ == "__main__":
register()
| true | true |
f7f59ebdf95a70dd2250fac6bef87b26ce986d59 | 245 | py | Python | examples/add_profile.py | vshn/hatchbuck | 0af768195dd3bc8b5e3899dccba121f42d307603 | [
"BSD-3-Clause"
] | null | null | null | examples/add_profile.py | vshn/hatchbuck | 0af768195dd3bc8b5e3899dccba121f42d307603 | [
"BSD-3-Clause"
] | 13 | 2018-03-27T10:08:52.000Z | 2021-01-05T17:53:21.000Z | examples/add_profile.py | vshn/hatchbuck | 0af768195dd3bc8b5e3899dccba121f42d307603 | [
"BSD-3-Clause"
] | 1 | 2018-05-15T13:10:34.000Z | 2018-05-15T13:10:34.000Z | from hatchbuck import Hatchbuck
import pprint
import sys
# Example script: add an e-mail address entry to a Hatchbuck contact profile.
# Usage: python add_profile.py <HATCHBUCK_API_KEY>
pp = pprint.PrettyPrinter()
# sys.argv[1] is the Hatchbuck API key passed on the command line.
hatchbuck = Hatchbuck(sys.argv[1])
# Append a "Home" e-mail address to the matching contact and echo the result.
profile = hatchbuck.profile_add(
    "emails", "address", "baschar.said@hotmail.com", {"type": "Home"}
)
pp.pprint(profile)
| 22.272727 | 69 | 0.734694 | from hatchbuck import Hatchbuck
import pprint
import sys
pp = pprint.PrettyPrinter()
hatchbuck = Hatchbuck(sys.argv[1])
profile = hatchbuck.profile_add(
"emails", "address", "baschar.said@hotmail.com", {"type": "Home"}
)
pp.pprint(profile)
| true | true |
f7f59f58422e9f6da30460938aab47c459498a56 | 18,253 | py | Python | ifis_tools/from_taudem.py | nicolas998/ifis_tools | f7b06473a916324fc37937bc5e9034cc57bc1623 | [
"MIT"
] | 3 | 2019-09-05T14:47:02.000Z | 2021-11-12T15:31:56.000Z | ifis_tools/from_taudem.py | nicolas998/ifis_tools | f7b06473a916324fc37937bc5e9034cc57bc1623 | [
"MIT"
] | 2 | 2019-11-13T21:36:22.000Z | 2019-12-16T21:16:43.000Z | ifis_tools/from_taudem.py | nicolas998/ifis_tools | f7b06473a916324fc37937bc5e9034cc57bc1623 | [
"MIT"
] | null | null | null | import pandas as pd
import io
from string import Template
from struct import pack, unpack

import gdal
import geopandas as gp
import numpy as np
import osgeo
import pylab as pl
from osgeo import ogr

#from wmf import wmf
pd.options.mode.chained_assignment = None
def read_raster(path_map,isDEMorDIR=False,dxp=None, noDataP = None,isDIR = False,DIRformat = 'r.watershed'):
    """Read a GDAL-supported raster map.

    Parameters
    ----------
    path_map : str
        Path to the raster file.
    isDEMorDIR, dxp, noDataP, isDIR, DIRformat
        Legacy options kept for backward compatibility; they are not used by
        the current implementation (historically they passed DEM/DIR map
        properties to the Fortran `wmf` watershed module, whose import is
        commented out at the top of this file).

    Returns
    -------
    tuple
        (array, props, EPSG_code) where `array` is the transposed raster as
        float, `props` is [ncols, nrows, xll, yll, dx, dy, noData] and
        `EPSG_code` is the projection authority code (string or None).
    """
    # Open the dataset.
    direction=gdal.Open(path_map)
    # Projection -> EPSG authority code.
    proj = osgeo.osr.SpatialReference(wkt=direction.GetProjection())
    EPSG_code = proj.GetAttrValue('AUTHORITY',1)
    # Read raster metadata: size, nodata and geotransform.
    ncols=direction.RasterXSize
    nrows=direction.RasterYSize
    banda=direction.GetRasterBand(1)
    noData=banda.GetNoDataValue()
    geoT=direction.GetGeoTransform()
    dx=geoT[1]
    dy = np.abs(geoT[-1])
    xll=geoT[0]; yll=geoT[3]-nrows*dy
    # Read the raster values and release the dataset.
    Mapa=direction.ReadAsArray()
    direction.FlushCache()
    del direction
    return Mapa.T.astype(float),[ncols,nrows,xll,yll,dx,dy,noData],EPSG_code
def save_array2raster(Array, ArrayProp, path, EPSG = 4326, Format = 'GTiff'):
    """Write a 2-D numpy array to a georeferenced raster file.

    Parameters
    ----------
    Array : np.ndarray
        Data to write, in the same (transposed) orientation returned by
        `read_raster`; it is transposed back before writing.
    ArrayProp : list
        [ncols, nrows, xll, yll, dx, dy, noData] as produced by `read_raster`.
    path : str
        Destination file path.
    EPSG : int
        EPSG code of the output projection (default 4326).
    Format : str
        GDAL driver name (default 'GTiff').
    """
    dst_filename = path
    # Raster dimensions and cell sizes taken from ArrayProp.
    x_pixels = Array.shape[0]  # number of pixels in x
    y_pixels = Array.shape[1]  # number of pixels in y
    PIXEL_SIZE_x = ArrayProp[4]
    PIXEL_SIZE_y = ArrayProp[5]
    x_min = ArrayProp[2]
    y_max = ArrayProp[3] + ArrayProp[5] * ArrayProp[1]  # x_min & y_max are the "top left" corner.
    driver = gdal.GetDriverByName(Format)
    # numpy dtype name -> GDAL data-type code.
    NP2GDAL_CONVERSION = {
        "uint8": 1,
        "int8": 1,
        "uint16": 2,
        "int16": 3,
        "uint32": 4,
        "int32": 5,
        "float32": 6,
        "float64": 7,
        "complex64": 10,
        "complex128": 11,
    }
    gdaltype = NP2GDAL_CONVERSION[Array.dtype.name]
    # Create the output dataset.
    dataset = driver.Create(
        dst_filename,
        x_pixels,
        y_pixels,
        1,
        gdaltype,)
    # Georeference: (origin x, dx, 0, origin y, 0, -dy).
    dataset.SetGeoTransform((
        x_min,          # 0
        PIXEL_SIZE_x,   # 1
        0,              # 2
        y_max,          # 3
        0,              # 4
        -PIXEL_SIZE_y))
    # Projection from the EPSG code.
    proj = osgeo.osr.SpatialReference()
    texto = 'EPSG:' + str(EPSG)
    proj.SetWellKnownGeogCS( texto )
    dataset.SetProjection(proj.ExportToWkt())
    # Nodata value for the band.
    band = dataset.GetRasterBand(1)
    if ArrayProp[-1] is None:
        # BUGFIX: this branch used `wmf.cu.nodata`, but the `wmf` import is
        # commented out at the top of the file, so a None nodata raised
        # NameError. Fall back to the conventional -9999 sentinel.
        band.SetNoDataValue(-9999)
    else:
        band.SetNoDataValue(int(ArrayProp[-1]))
    # Write the data and flush to disk.
    dataset.GetRasterBand(1).WriteArray(Array.T)
    dataset.FlushCache()
def rainfall_raster_ranks(path_rain_frame, path_ranks):
    """Build a lookup raster and polygon layer of cell "ranks" for a rainfall grid.

    Reads one frame of the rainfall product, writes a raster
    (``path_ranks``.tif) whose cells hold their 1-based flat index (rank),
    and polygonizes it into a shapefile (``path_ranks``.shp) so network links
    can later be spatially joined to rainfall cells.
    """
    # Reads a raster of the rainfall fields and creates a raster with the ranks
    m, p, epsg = read_raster(path_rain_frame)
    rank = np.arange(1,m.size+1)
    rank = rank.reshape(m.shape)
    save_array2raster(rank , p, path_ranks+'.tif', EPSG=int(epsg))
    # Creates a ranks polygon based on the raster ranks.
    src_ds = gdal.Open(path_ranks+'.tif')
    srcband = src_ds.GetRasterBand(1)
    #Create output datasource with the same projection as the raster.
    spatialReference = osgeo.osr.SpatialReference()
    spatialReference.ImportFromEPSG(int(epsg))
    dst_layername = path_ranks
    drv = ogr.GetDriverByName("ESRI Shapefile")
    dst_ds = drv.CreateDataSource( dst_layername + ".shp" )
    dst_layer = dst_ds.CreateLayer(dst_layername, spatialReference )
    # One polygon per rank value (field index -1: keep the pixel value).
    gdal.Polygonize( srcband, None, dst_layer, -1, [], callback=None )
    dst_ds.Destroy()
def saveBin(lid, lid_vals, count, fn):
    """Write link ids and values to a little-endian HLM binary forcing file.

    The file starts with an unsigned-int record `count`, followed by one
    (uint32 link id, float32 value) pair for every entry whose value
    exceeds 1. Nothing is written when `count` is not positive.
    """
    buffer_size = 4 + 4 * 100000
    if count <= 0:
        return
    # Keep only the links whose value is above the threshold.
    mask = lid_vals > 1
    links = lid[mask]
    values = lid_vals[mask]
    with io.open(fn, 'wb', buffer_size) as fh:
        fh.write(pack('<I', count))
        for link_id, value in zip(links, values):
            fh.write(pack('<If', link_id, value))
class network:
    """TauDEM stream-network wrapper with helpers to build the input files of
    an HLM/asynch project: .rvr topology, .prm parameters, .gbl global files
    and Argon (SGE) run scripts."""

    def __init__(self, net_path, hills_path = None, hills_epsg = 2163):
        '''Defines the network class that contains all the requirements to
        set up a project for HLM.

        Parameters:
            - net_path: path to the stream-network shapefile, or an already
              loaded (Geo)DataFrame (used internally by get_subnet).
            - hills_path: optional hillslopes shapefile; when given, the area
              [km2] of each hillslope is attached to the network.
            - hills_epsg: projected EPSG code used to measure hill areas.'''
        # BUGFIX: get_subnet builds a sub-network from an in-memory DataFrame,
        # which gp.read_file cannot open; accept both kinds of input here.
        if isinstance(net_path, (gp.GeoDataFrame, pd.DataFrame)):
            self.network = net_path.copy()
        else:
            self.network = gp.read_file(net_path)
        # Index by link number (sub-networks arrive already indexed).
        if 'LINKNO' in self.network.columns:
            self.network['link'] = self.network['LINKNO']
            self.network.set_index('LINKNO', inplace=True)
        self.network_centroids = None
        self.network_ranks = None
        # Computes the area for each hillslope (km2).
        if hills_path is not None:
            self.hills = gp.read_file(hills_path)
            self.hills.rename(columns={'DN': 'link'}, inplace=True)
            self.hills.set_index('link', inplace=True)
            self.hills.to_crs(epsg=hills_epsg, inplace=True)
            idx = self.hills.index.intersection(self.network.index)
            self.network['area'] = self.hills.loc[idx].geometry.area / 1e6
            print('Area of each hillslope computed from the hills shapefile')

    def network2points(self):
        '''Converts the network elements to centroids, ideal to get the
        rainfall ranks references. Result stored in self.network_centroids.'''
        x = []
        y = []
        for link in self.network.index:
            geo = self.network.loc[link, 'geometry']
            x.append(geo.centroid.x)
            y.append(geo.centroid.y)
        net_centroids = gp.GeoDataFrame(self.network[['link', 'strmOrder']],
            geometry=gp.points_from_xy(x, y),
            crs=self.network.crs)
        self.network_centroids = net_centroids
        print('Centroids had been saved under self.network_centroids')

    def get_rainfall_lookup(self, path_rain_ranks):
        '''Generates the lookup table between the links and a rainfall field.
        The rain ranks must be the shapefile obtained with
        *rainfall_raster_ranks*. Results are stored in self.rain_ranks.'''
        # Reads the rainfall ranks and projects them to the network CRS.
        rain_ranks = gp.read_file(path_rain_ranks)
        rain_ranks = rain_ranks.to_crs(self.network.crs)
        print('1. rain ranks readed and projected to the current crs')
        # Makes sure the link centroids are available.
        if self.network_centroids is None:
            print('2. Network points not defined, defining them...')
            self.network2points()
            print('3. Network points defined')
        # Spatial join: which rank polygon contains each link centroid.
        points_ranked = gp.sjoin(self.network_centroids, rain_ranks, how='left', op='within')
        self.rain_ranks = points_ranked
        print('4. ranks obtained results stored in self.rain_ranks')

    def rain2links(self, rain, path_rain = None):
        '''Converts a rainfall grid (flat array or tif path) to per-link
        values using the lookup table from *get_rainfall_lookup*.
        Returns a Series of rainfall values indexed like self.rain_ranks.'''
        if rain is None:
            if path_rain is not None:
                # Read and flatten the rainfall grid to match the rank order.
                rain, p, ep = read_raster(path_rain)
                rain = rain.T
                rain = rain.reshape(rain.size)
            else:
                # Robustness: avoid the TypeError the old code hit when
                # indexing into None below.
                print('Error: No rain variable, no path to rain variable')
                return None
        else:
            rain = rain.reshape(rain.size)
        # Map every link to its rainfall cell through the FID rank.
        self.rain_ranks['rain'] = 0
        self.rain_ranks['rain'] = rain[self.rain_ranks['FID']]
        return self.rain_ranks['rain']

    def write_rvr(self, path, sub_net = None):
        '''Writes an .rvr (topology) file for HLM: for each link, its id
        followed by the number of parents and the parent link ids.'''
        # Use the sub-network when given, otherwise the full network.
        if sub_net is not None:
            net_elem = sub_net
        else:
            net_elem = self.network
        with open(path, 'w', newline='\n') as f:
            f.write('%d\n' % net_elem.shape[0])
            f.write('\n')
            for link in net_elem.index:
                f.write('%d\n' % link)
                if net_elem.loc[link, 'USLINKNO1'] == -1:
                    # Headwater link: no parents.
                    f.write('0\n')
                else:
                    f.write('2 %d %d\n' % (net_elem.loc[link, 'USLINKNO1'], net_elem.loc[link, 'USLINKNO2']))
                f.write('\n')

    def get_subnet(self, link):
        '''Returns a new *network* instance containing the given link and
        every link upstream of it.'''
        lista = [link]
        count = 0
        # Breadth-first walk upstream. BUGFIX: the old condition
        # (`or count > shape[0]`) could index past the end of `lista`
        # on malformed topologies; iterating while unvisited links remain
        # is the intended behavior.
        while count < len(lista):
            link = lista[count]
            if self.network.loc[link, 'USLINKNO1'] != -1:
                lista.append(self.network.loc[link, 'USLINKNO1'])
                lista.append(self.network.loc[link, 'USLINKNO2'])
            count += 1
        return network(self.network.loc[lista])

    def get_prm(self):
        '''Builds the base .prm table: upstream area [km2], channel length
        [km] and hillslope area [km2]. Stored in self.prm.'''
        # Work on a copy so repeated calls never re-scale self.network.
        for_prm = self.network[['DSContArea', 'Length', 'area']].copy()
        for_prm['DSContArea'] = for_prm['DSContArea'] / 1e6
        # Replace degenerate values so HLM never receives zeros or NaNs.
        for_prm.loc[for_prm['Length'] == 0, 'Length'] = 1
        for_prm.loc[for_prm['area'] == 0, 'area'] = 1 / 1e4
        # BUGFIX: the old code read `for_prm.prm['area']`, which raised
        # AttributeError (DataFrames have no .prm accessor).
        for_prm.loc[np.isnan(for_prm['area']), 'area'] = 1 / 1e4
        for_prm['Length'] = for_prm['Length'] / 1000
        self.prm = for_prm

    def set_prm_for_model(self, model = 608):
        '''Attaches per-model parameter columns and the print formats used by
        write_prm. Supported: 608/609 (full parameter set) and 253/254/256
        (topology-only parameters).'''
        if model == 608 or model == 609:
            attr = {'vh':0.02,'a_r':1.67,'a':3.2e-6,'b':17,'c':5.4e-7,'d':32,
                'k3':2.045e-6,'ki_fac':0.07,'TopDepth':0.1,'NoFlow':1.48,'Td':999,
                'Beta':1.67,'lambda1':0.4,'lambda2':-0.1,'vo':0.435}
            self.prm_format = {'DSContArea':'%.3f','Length':'%.3f','area':'%.5f',
                'vh':'%.4f','a_r':'%.4f','a':'%.2e','b':'%.1f','c':'%.2e','d':'%.1f',
                'k3':'%.2e','ki_fac':'%.3f','TopDepth':'%.3f','NoFlow':'%.3f','Td':'%.2f',
                'Beta':'%.3f','lambda1':'%.3f','lambda2':'%.2f','vo':'%.3f'}
            self.prm = self.prm.assign(**attr)
        elif model == 254 or model == 253 or model == 256:
            self.prm_format = {'DSContArea':'%.3f','Length':'%.3f','area':'%.5f'}

    def write_prm(self, path):
        '''Writes the .prm file: per-link id followed by the formatted
        parameter values defined by set_prm_for_model.'''
        with open(path, 'w', newline='\n') as f:
            f.write('%d\n\n' % self.prm.shape[0])
            for link in self.prm.index:
                f.write('%d\n' % link)
                for c, k in zip(self.prm.loc[link], self.prm_format.keys()):
                    fm = self.prm_format[k] + ' '
                    f.write(fm % c)
                f.write('\n\n')

    def write_Global(self, path2global, model_uid = 604,
        date1 = None, date2 = None, rvrFile = None, rvrType = 0, rvrLink = 0, prmFile = None, prmType = 0, initialFile = None,
        initialType = 1,rainType = 5, rainPath = None, evpFile = 'evap.mon', datResults = None,
        nComponents = 1, Components = [0], controlFile = None, baseGlobal = None, noWarning = False, snapType = 0,
        snapPath = '', snapTime = '', evpFromSysPath = False):
        '''Creates a global file for the current project.
        - model_uid: the number of the model, goes from 601 to 604.
        - date1 and date2: initial date and end date.
        - rvrFile: path to rvr file.
        - rvrType: 0: .rvr file, 1: database .dbc file.
        - rvrLink: 0: all the domain, N: number of the linkid.
        - prmFile: path to prm file.
        - prmType: 0: .prm file, 1: database .dbc file.
        - initialFile: path to file with initial conditions.
        - initialType: 0: ini, 1: uini, 2: rec, 3: .dbc
        - rainType: 1: plain text per link, 3: database, 4: uniform storm
          file (.ustr), 5: binary data with the unix time.
        - rainPath: path to the folder containing the binary files of the
          rain, or to the database file.
        - evpFile: path to the file with the values of the evp.
        - datResults: file where .dat files will be written.
        - nComponents: number of results to put in the .dat file.
        - Components: number of each component to write: [0,1,2,...,N].
        - controlFile: file with the numbers of the links to write.
        - baseGlobal: optional base global template (otherwise the default).
        - noWarning: silence the "parameter left undefined" warnings.
        - snapType: 0: no snapshot, 1: .rec, 2: database, 3: hdf5,
          4: recurrent hdf5.
        - snapPath: path to the snapshot.
        - snapTime: time interval between snapshots (min).
        - evpFromSysPath: prepend the system path to the evp file.'''
        # NOTE(review): `Globals`, `aux` and `Path` are expected to be
        # provided by the surrounding package; they are not defined in this
        # module — confirm before relying on the default-template branches.
        # Build the template from a user base global or the default one.
        if baseGlobal is not None:
            with open(baseGlobal, 'r') as f:
                L = f.readlines()
        else:
            L = Globals['60X']
        # Equivalent to the old character-by-character accumulation.
        Base = Template(''.join(L))
        # Default database paths for rainfall and topology.
        if rainType == 3 and rainPath is None:
            rainPath = '/Dedicated/IFC/model_eval/forcing_rain51_5435_s4.dbc'
        if rvrType == 1 and rvrFile is None:
            rvrFile = '/Dedicated/IFC/model_eval/topo51.dbc'
        # Change the evp path.
        if evpFromSysPath:
            evpFile = Path + evpFile
        # Creates the default dictionary of template substitutions.
        Default = {
            'model_uid' : model_uid,
            'date1': date1,
            'date2': date2,
            'rvrFile': rvrFile,
            'rvrType': str(rvrType),
            'rvrLink': str(rvrLink),
            'prmFile': prmFile,
            'prmType': str(prmType),
            'initialFile': initialFile,
            'initialType': initialType,
            'rainType': str(rainType),
            'rainPath': rainPath,
            'evpFile': evpFile,
            'datResults': datResults,
            'controlFile': controlFile,
            'snapType': str(snapType),
            'snapPath': snapPath,
            'snapTime': str(snapTime),
            'nComp': str(nComponents)
        }
        # Unix times for the simulation window (left as placeholders when
        # the dates are not given).
        if date1 is not None:
            Default.update({'unix1': aux.__datetime2unix__(Default['date1'])})
        else:
            Default.update({'unix1': '$' + 'unix1'})
        if date2 is not None:
            Default.update({'unix2': aux.__datetime2unix__(Default['date2'])})
        else:
            Default.update({'unix2': '$' + 'unix2'})
        # Update the list of components to write; unused slots get the
        # XXXXX marker and are stripped from the file afterwards.
        for n, c in enumerate(Components):
            Default.update({'Comp' + str(n): 'State' + str(c)})
        if nComponents <= 9:
            for c in range(9 - nComponents):
                Default.update({'Comp' + str(8 - c): 'XXXXX'})
        # Check for parameters left undefined.
        D = {}
        for k in Default.keys():
            if Default[k] is not None:
                D.update({k: Default[k]})
            else:
                # BUGFIX: the warning used to print only when noWarning was
                # True; the flag is meant to silence it.
                if not noWarning:
                    print('Warning: parameter ' + k + ' left undefined model wont run')
                D.update({k: '$' + k})
        # Substitute the parameters into the base template and write it.
        with open(path2global, 'w', newline='\n') as f:
            f.writelines(Base.substitute(D))
        # Erase the unused print components.
        with open(path2global, 'r') as f:
            L = f.readlines()
        L = [line for line in L if line != 'XXXXX\n']
        with open(path2global, 'w', newline='\n') as f:
            f.writelines(L)

    def write_runfile(self, path, process, jobName = 'job',nCores = 56, nSplit = 1, queue = 'IFC'):
        '''Writes the .sh file that runs the model on an SGE queue.
        Parameters:
            - path: path where the run file is stored.
            - process: dictionary with the parameters of each process:
                eg: proc = {'Global1.gbl':{'nproc': 12, 'secondplane': True}}
            - jobName: name of the SGE job.
            - nCores: number of cores requested from the queue.
            - nSplit: kept for backward compatibility; the per-group core
              count is no longer used by this writer.
            - queue: name of the Argon queue to run the processes on.'''
        # SGE header plus some echo diagnostics.
        header = ('#!/bin/sh\n#$ -N ' + jobName + '\n#$ -j y\n#$ -cwd\n#$ -pe smp '
            + str(nCores) + '\n####$ -l mf=16G\n#$ -q ' + str(queue)
            + '\n\n/bin/echo Running on host: `hostname`.\n/bin/echo In directory: `pwd`\n/bin/echo Starting on: `date`\n')
        with open(path, 'w', newline='\n') as f:
            f.write(header)
            f.write('\n')
            for k in process.keys():
                # '&' sends the process to the background.
                secondplane = ' \n'
                if process[k]['secondplane']:
                    secondplane = ' &\n'
                # Cap each process at the number of requested cores.
                if process[k]['nproc'] > nCores:
                    process[k]['nproc'] = nCores
                # NOTE(review): the asynch binary path is hard-coded; confirm
                # it matches the deployment host.
                f.write('mpirun -np ' + str(process[k]['nproc'])
                    + ' /Users/nicolas/Tiles/dist/bin/asynch ' + k + secondplane)
| 42.847418 | 235 | 0.571468 | import pandas as pd
import geopandas as gp
import numpy as np
import pylab as pl
from struct import pack, unpack
import io
import gdal
from osgeo import ogr
import osgeo
pd.options.mode.chained_assignment = None
def read_raster(path_map,isDEMorDIR=False,dxp=None, noDataP = None,isDIR = False,DIRformat = 'r.watershed'):
direction=gdal.Open(path_map)
proj = osgeo.osr.SpatialReference(wkt=direction.GetProjection())
EPSG_code = proj.GetAttrValue('AUTHORITY',1)
ncols=direction.RasterXSize
nrows=direction.RasterYSize
banda=direction.GetRasterBand(1)
noData=banda.GetNoDataValue()
geoT=direction.GetGeoTransform()
dx=geoT[1]
dy = np.abs(geoT[-1])
xll=geoT[0]; yll=geoT[3]-nrows*dy
Mapa=direction.ReadAsArray()
direction.FlushCache()
del direction
return Mapa.T.astype(float),[ncols,nrows,xll,yll,dx,dy,noData],EPSG_code
def save_array2raster(Array, ArrayProp, path, EPSG = 4326, Format = 'GTiff'):
dst_filename = path
x_pixels = Array.shape[0]
y_pixels = Array.shape[1]
PIXEL_SIZE_x = ArrayProp[4]
PIXEL_SIZE_y = ArrayProp[5]
x_min = ArrayProp[2]
y_max = ArrayProp[3] + ArrayProp[5] * ArrayProp[1]
driver = gdal.GetDriverByName(Format)
NP2GDAL_CONVERSION = {
"uint8": 1,
"int8": 1,
"uint16": 2,
"int16": 3,
"uint32": 4,
"int32": 5,
"float32": 6,
"float64": 7,
"complex64": 10,
"complex128": 11,
}
gdaltype = NP2GDAL_CONVERSION[Array.dtype.name]
dataset = driver.Create(
dst_filename,
x_pixels,
y_pixels,
1,
gdaltype,)
dataset.SetGeoTransform((
x_min,
PIXEL_SIZE_x,
0,
y_max,
0,
-PIXEL_SIZE_y))
proj = osgeo.osr.SpatialReference()
texto = 'EPSG:' + str(EPSG)
proj.SetWellKnownGeogCS( texto )
dataset.SetProjection(proj.ExportToWkt())
band = dataset.GetRasterBand(1)
if ArrayProp[-1] is None:
band.SetNoDataValue(wmf.cu.nodata.astype(int).max())
else:
band.SetNoDataValue(int(ArrayProp[-1]))
dataset.GetRasterBand(1).WriteArray(Array.T)
dataset.FlushCache()
def rainfall_raster_ranks(path_rain_frame, path_ranks):
m, p, epsg = read_raster(path_rain_frame)
rank = np.arange(1,m.size+1)
rank = rank.reshape(m.shape)
save_array2raster(rank , p, path_ranks+'.tif', EPSG=int(epsg))
src_ds = gdal.Open(path_ranks+'.tif')
srcband = src_ds.GetRasterBand(1)
spatialReference = osgeo.osr.SpatialReference()
spatialReference.ImportFromEPSG(int(epsg))
dst_layername = path_ranks
drv = ogr.GetDriverByName("ESRI Shapefile")
dst_ds = drv.CreateDataSource( dst_layername + ".shp" )
dst_layer = dst_ds.CreateLayer(dst_layername, spatialReference )
gdal.Polygonize( srcband, None, dst_layer, -1, [], callback=None )
dst_ds.Destroy()
def saveBin(lid, lid_vals, count, fn):
io_buffer_size = 4+4*100000
if count > 0:
lid = (lid[lid_vals > 1])
lid_vals = (lid_vals[lid_vals > 1])
fh = io.open(fn, 'wb', io_buffer_size)
fh.write(pack('<I', count))
for vals in zip(lid, lid_vals):
fh.write(pack('<If', *vals))
fh.close()
class network:
def __init__(self, net_path, hills_path = None, hills_epsg = 2163):
self.network = gp.read_file(net_path)
self.network['link'] = self.network['LINKNO']
self.network.set_index('LINKNO', inplace=True)
self.network_centroids = None
self.network_ranks = None
if hills_path is not None:
self.hills = gp.read_file(hills_path)
self.hills.rename(columns={'DN':'link'}, inplace = True)
self.hills.set_index('link', inplace = True)
self.hills.to_crs(epsg = hills_epsg, inplace = True)
idx = self.hills.index.intersection(self.network.index)
self.network['area'] = self.hills.loc[idx].geometry.area/1e6
print('Area of each hillslope computed from the hills shapefile')
def network2points(self):
x =[]
y = []
for link in self.network.index:
geo = self.network.loc[link, 'geometry']
x.append(geo.centroid.x)
y.append(geo.centroid.y)
net_centroids = gp.GeoDataFrame(self.network[['link','strmOrder']], geometry = gp.points_from_xy(x, y),
crs = self.network.crs)
self.network_centroids = net_centroids
print('Centroids had been saved under self.network_centroids')
def get_rainfall_lookup(self, path_rain_ranks):
rain_ranks = gp.read_file(path_rain_ranks)
rain_ranks = rain_ranks.to_crs(self.network.crs)
print('1. rain ranks readed and projected to the current crs')
if self.network_centroids is None:
print('2. Network points not defined, defining them...')
self.network2points()
print('3. Network points defined')
points_ranked = gp.sjoin(self.network_centroids, rain_ranks, how = 'left', op = 'within')
self.rain_ranks = points_ranked
print('4. ranks obtained results stored in self.rain_ranks')
def rain2links(self, rain, path_rain = None):
if rain is None:
if path_rain is not None:
rain, p, ep = read_raster(path_rain)
rain = rain.T
rain = rain.reshape(rain.size)
else:
print('Error: No rain variable, no path to rain variable')
else:
rain = rain.reshape(rain.size)
self.rain_ranks['rain'] = 0
self.rain_ranks['rain'] = rain[self.rain_ranks['FID']]
return self.rain_ranks['rain']
def write_rvr(self, path, sub_net = None):
if sub_net is not None:
net_elem = sub_net
else:
net_elem = self.network
with open(path,'w',newline='\n') as f:
f.write('%d\n' % net_elem.shape[0])
f.write('\n')
for link in net_elem.index:
f.write('%d\n' % link)
if net_elem.loc[link,'USLINKNO1'] == -1:
f.write('0\n')
else:
f.write('2 %d %d\n' % (net_elem.loc[link,'USLINKNO1'], net_elem.loc[link,'USLINKNO2']))
f.write('\n')
f.close()
def get_subnet(self, link):
lista = [link]
count = 0
while count < len(lista) or count > self.network.shape[0]:
link = lista[count]
if self.network.loc[link, 'USLINKNO1'] != -1:
lista.append(self.network.loc[link, 'USLINKNO1'])
lista.append(self.network.loc[link, 'USLINKNO2'])
count += 1
return network(self.network.loc[lista])
def get_prm(self):
    """Assemble the per-link parameter table used by the .prm writer.

    Rescales DSContArea (presumably m2 -> km2) and Length (presumably
    m -> km), and replaces zero / missing geometry values with small
    positive defaults so the model never sees degenerate inputs.  The
    result is stored in ``self.prm``.
    """
    # Copy so we do not mutate (or trigger SettingWithCopy warnings on)
    # the underlying network frame.
    for_prm = self.network[['DSContArea', 'Length', 'area']].copy()
    for_prm['DSContArea'] = for_prm['DSContArea'] / 1e6
    # Replace degenerate values before the unit conversion of Length.
    for_prm.loc[for_prm['Length'] == 0, 'Length'] = 1
    for_prm.loc[for_prm['area'] == 0, 'area'] = 1/1e4
    # The original indexed the non-existent attribute `for_prm.prm`,
    # which raised AttributeError; index the column directly.
    for_prm.loc[np.isnan(for_prm['area']), 'area'] = 1/1e4
    for_prm['Length'] = for_prm['Length'] / 1000
    self.prm = for_prm
def set_prm_for_model(self, model = 608):
    """Attach model-specific parameter columns and print formats.

    For the 60X family (608/609) a full set of hillslope parameters is
    appended to ``self.prm``; for the 25X family (253/254/256) only the
    base-column print formats are set.  Any other model id is a no-op.
    """
    if model in (608, 609):
        extra_cols = {
            'vh': 0.02, 'a_r': 1.67, 'a': 3.2e-6, 'b': 17, 'c': 5.4e-7, 'd': 32,
            'k3': 2.045e-6, 'ki_fac': 0.07, 'TopDepth': 0.1, 'NoFlow': 1.48, 'Td': 999,
            'Beta': 1.67, 'lambda1': 0.4, 'lambda2': -0.1, 'vo': 0.435,
        }
        self.prm_format = {
            'DSContArea': '%.3f', 'Length': '%.3f', 'area': '%.5f',
            'vh': '%.4f', 'a_r': '%.4f', 'a': '%.2e', 'b': '%.1f', 'c': '%.2e', 'd': '%.1f',
            'k3': '%.2e', 'ki_fac': '%.3f', 'TopDepth': '%.3f', 'NoFlow': '%.3f', 'Td': '%.2f',
            'Beta': '%.3f', 'lambda1': '%.3f', 'lambda2': '%.2f', 'vo': '%.3f',
        }
        self.prm = self.prm.assign(**extra_cols)
    elif model in (253, 254, 256):
        self.prm_format = {'DSContArea': '%.3f', 'Length': '%.3f', 'area': '%.5f'}
def write_prm(self, path):
    """Write ``self.prm`` in asynch .prm format.

    Each link id is followed by its parameter values, rendered with the
    printf patterns in ``self.prm_format`` (whose key order must match the
    column order of ``self.prm``).
    """
    with open(path, 'w', newline='\n') as out:
        # Header: number of links, then a blank line.
        out.write('%d\n\n' % self.prm.shape[0])
        for link in self.prm.index:
            out.write('%d\n' % link)
            row = self.prm.loc[link]
            # Pair each value with its format, in column/key order.
            for value, fmt_key in zip(row, self.prm_format.keys()):
                out.write(self.prm_format[fmt_key] % value)
                out.write(' ')
            out.write('\n\n')
def write_Global(self, path2global, model_uid = 604,
    date1 = None, date2 = None, rvrFile = None, rvrType = 0, rvrLink = 0, prmFile = None, prmType = 0, initialFile = None,
    initialType = 1,rainType = 5, rainPath = None, evpFile = 'evap.mon', datResults = None,
    nComponents = 1, Components = [0], controlFile = None, baseGlobal = None, noWarning = False, snapType = 0,
    snapPath = '', snapTime = '', evpFromSysPath = False):
    """Render an asynch global (.gbl) parameter file from a template.

    A base template is read from ``baseGlobal`` (or the module-level
    ``Globals['60X']`` default) and every ``$placeholder`` in it is
    substituted with the corresponding argument.  Arguments left as None
    are written back as ``$name`` placeholders so the rendered file can be
    templated again later, and the unused output-component lines (marked
    ``XXXXX``) are stripped from the final file.

    Note: ``Components=[0]`` is a mutable default argument; it is only
    read here, never mutated, so the usual aliasing hazard does not bite.
    """
    if baseGlobal is not None:
        f = open(baseGlobal, 'r')
        L = f.readlines()
        f.close()
    else:
        # Fall back to the built-in 60X template (module-level constant).
        L = Globals['60X']
    # Flatten the lines into one string for string.Template; `t += i`
    # extends the list with the individual characters of each line.
    t = []
    for i in L:
        t += i
    Base = Template(''.join(t))
    # Cluster defaults for database-backed rain / topology sources.
    if rainType == 3 and rainPath is None:
        rainPath = '/Dedicated/IFC/model_eval/forcing_rain51_5435_s4.dbc'
    if rvrType == 1 and rvrFile is None:
        rvrFile = '/Dedicated/IFC/model_eval/topo51.dbc'
    if evpFromSysPath:
        # NOTE(review): `Path` here must be a module-level base-path
        # string; it is not defined in this method -- confirm it exists.
        evpFile = Path + evpFile
    # Template substitutions, keyed by the placeholder names in the .gbl.
    Default = {
        'model_uid' : model_uid,
        'date1': date1,
        'date2': date2,
        'rvrFile': rvrFile,
        'rvrType': str(rvrType),
        'rvrLink': str(rvrLink),
        'prmFile': prmFile,
        'prmType': str(prmType),
        'initialFile': initialFile,
        'initialType': initialType,
        'rainType': str(rainType),
        'rainPath': rainPath,
        'evpFile': evpFile,
        'datResults': datResults,
        'controlFile': controlFile,
        'snapType': str(snapType),
        'snapPath': snapPath,
        'snapTime': str(snapTime),
        'nComp': str(nComponents)
    }
    # Dates are duplicated as unix timestamps; when a date is missing the
    # `$unix1` / `$unix2` placeholder is kept in the file.
    if date1 is not None:
        Default.update({'unix1': aux.__datetime2unix__(Default['date1'])})
    else:
        Default.update({'unix1': '$'+'unix1'})
    if date2 is not None:
        Default.update({'unix2': aux.__datetime2unix__(Default['date2'])})
    else:
        Default.update({'unix2': '$'+'unix2'})
    # Map each requested output component to a StateN slot; the remaining
    # slots (up to 9) are marked XXXXX and removed after rendering.
    for n, c in enumerate(Components):
        Default.update({'Comp'+str(n): 'State'+str(c)})
    if nComponents <= 9:
        for c in range(9-nComponents):
            Default.update({'Comp'+str(8-c): 'XXXXX'})
    # Keep only defined values; undefined ones stay as `$name` so the
    # output remains a valid template.
    D = {}
    for k in Default.keys():
        if Default[k] is not None:
            D.update({k: Default[k]})
        else:
            # NOTE(review): the warning is printed when noWarning is True,
            # which looks inverted (expected `if not noWarning`) --
            # confirm the intended meaning of the flag before relying on it.
            if noWarning:
                print('Warning: parameter ' + k +' left undefined model wont run')
            D.update({k: '$'+k})
    f = open(path2global,'w', newline='\n')
    f.writelines(Base.substitute(D))
    f.close()
    # Re-read the rendered file and drop the XXXXX placeholder lines one
    # at a time until none remain (list.remove raises when absent).
    f = open(path2global,'r')
    L = f.readlines()
    f.close()
    flag = True
    while flag:
        try:
            L.remove('XXXXX\n')
        except:
            flag = False
    f = open(path2global,'w', newline='\n')
    f.writelines(L)
    f.close()
def write_runfile(self, path, process, jobName = 'job',nCores = 56, nSplit = 1, queue = 'IFC'):
    """Write an SGE job script that launches one asynch run per entry of
    ``process``.

    Parameters:
        path: destination path of the shell script.
        process: dict mapping a .gbl file name to a dict with keys
            'nproc' (MPI ranks, capped in place at ``nCores``) and
            'secondplane' (True to background the run with '&').
        jobName, nCores, queue: SGE submission settings.
        nSplit: kept for backward compatibility; it has no effect (the
            value derived from it was never used).
    """
    header = ('#!/bin/sh\n#$ -N ' + jobName + '\n#$ -j y\n#$ -cwd\n#$ -pe smp '
              + str(nCores) + '\n####$ -l mf=16G\n#$ -q ' + str(queue)
              + '\n\n/bin/echo Running on host: `hostname`.\n/bin/echo In directory: `pwd`\n/bin/echo Starting on: `date`\n')
    with open(path, 'w', newline='\n') as f:
        f.write(header)
        f.write('\n')
        for gbl in process.keys():
            # Background the process with '&' when requested.
            suffix = ' &\n' if process[gbl]['secondplane'] else ' \n'
            # Never request more MPI ranks than the job reserves cores.
            if process[gbl]['nproc'] > nCores:
                process[gbl]['nproc'] = nCores
            f.write('mpirun -np ' + str(process[gbl]['nproc'])
                    + ' /Users/nicolas/Tiles/dist/bin/asynch ' + gbl + suffix)
| true | true |
f7f59fcc1e9fbb91dc59e012b1a47669aaef4598 | 16,229 | py | Python | spam/02_spam_data_augmentation_tutorial.py | jsnlp/snorkel-tutorials | b4cda9f918daf77f4011ec1598c08d9bd7e51c39 | [
"Apache-2.0"
] | 315 | 2019-07-27T22:49:20.000Z | 2022-03-30T10:02:02.000Z | spam/02_spam_data_augmentation_tutorial.py | jsnlp/snorkel-tutorials | b4cda9f918daf77f4011ec1598c08d9bd7e51c39 | [
"Apache-2.0"
] | 133 | 2019-07-25T02:07:37.000Z | 2022-03-29T12:08:32.000Z | spam/02_spam_data_augmentation_tutorial.py | jsnlp/snorkel-tutorials | b4cda9f918daf77f4011ec1598c08d9bd7e51c39 | [
"Apache-2.0"
] | 173 | 2019-08-13T02:27:11.000Z | 2022-03-30T05:26:40.000Z | # -*- coding: utf-8 -*-
# %% [markdown]
# # 📈 Snorkel Intro Tutorial: Data Augmentation
# %% [markdown]
# In this tutorial, we will walk through the process of using *transformation functions* (TFs) to perform data augmentation.
# Like the labeling tutorial, our goal is to train a classifier to label YouTube comments as `SPAM` or `HAM` (not spam).
# In the [previous tutorial](https://github.com/snorkel-team/snorkel-tutorials/blob/master/spam/01_spam_tutorial.ipynb),
# we demonstrated how to label training sets programmatically with Snorkel.
# In this tutorial, we'll assume that step has already been done, and start with labeled training data,
# which we'll aim to augment using transformation functions.
#
# %% [markdown] {"tags": ["md-exclude"]}
# * For more details on the task, check out the [labeling tutorial](https://github.com/snorkel-team/snorkel-tutorials/blob/master/spam/01_spam_tutorial.ipynb)
# * For an overview of Snorkel, visit [snorkel.org](https://snorkel.org)
# * You can also check out the [Snorkel API documentation](https://snorkel.readthedocs.io/)
#
# %% [markdown]
# Data augmentation is a popular technique for increasing the size of labeled training sets by applying class-preserving transformations to create copies of labeled data points.
# In the image domain, it is a crucial factor in almost every state-of-the-art result today and is quickly gaining
# popularity in text-based applications.
# Snorkel models the data augmentation process by applying user-defined *transformation functions* (TFs) in sequence.
# You can learn more about data augmentation in
# [this blog post about our NeurIPS 2017 work on automatically learned data augmentation](https://snorkel.org/blog/tanda/).
#
# The tutorial is divided into four parts:
# 1. **Loading Data**: We load a [YouTube comments dataset](http://www.dt.fee.unicamp.br/~tiago//youtubespamcollection/).
# 2. **Writing Transformation Functions**: We write Transformation Functions (TFs) that can be applied to training data points to generate new training data points.
# 3. **Applying Transformation Functions to Augment Our Dataset**: We apply a sequence of TFs to each training data point, using a random policy, to generate an augmented training set.
# 4. **Training a Model**: We use the augmented training set to train an LSTM model for classifying new comments as `SPAM` or `HAM`.
# %% [markdown] {"tags": ["md-exclude"]}
# This next cell takes care of some notebook-specific housekeeping.
# You can ignore it.
# %% {"tags": ["md-exclude"]}
import os
import random
import numpy as np
# Make sure we're running from the spam/ directory
if os.path.basename(os.getcwd()) == "snorkel-tutorials":
    os.chdir("spam")
# Turn off TensorFlow logging messages
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
# For reproducibility
# PYTHONHASHSEED pins str/bytes hashing, but only takes effect in
# subprocesses -- the current interpreter's hash seed is already fixed.
seed = 0
os.environ["PYTHONHASHSEED"] = str(seed)
# NOTE(review): the literal 0 is used below instead of `seed`; harmless
# while seed == 0, but the two can drift apart.
np.random.seed(0)
random.seed(0)
# %% [markdown] {"tags": ["md-exclude"]}
# If you want to display all comment text untruncated, change `DISPLAY_ALL_TEXT` to `True` below.
# %% {"tags": ["md-exclude"]}
import pandas as pd
DISPLAY_ALL_TEXT = False
pd.set_option("display.max_colwidth", 0 if DISPLAY_ALL_TEXT else 50)
# %% [markdown] {"tags": ["md-exclude"]}
# This next cell makes sure a spaCy English model is downloaded.
# If this is your first time downloading this model, restart the kernel after executing the next cell.
# %% {"tags": ["md-exclude"]}
# Download the spaCy english model
# ! python -m spacy download en_core_web_sm
# %% [markdown]
# ## 1. Loading Data
# %% [markdown]
# We load the Kaggle dataset and create Pandas DataFrame objects for the `train` and `test` sets.
# The two main columns in the DataFrames are:
# * **`text`**: Raw text content of the comment
# * **`label`**: Whether the comment is `SPAM` (1) or `HAM` (0).
#
# For more details, check out the [labeling tutorial](https://github.com/snorkel-team/snorkel-tutorials/blob/master/spam/01_spam_tutorial.ipynb).
# %%
from utils import load_spam_dataset
df_train, df_test = load_spam_dataset(load_train_labels=True)
# We pull out the label vectors for ease of use later
Y_train = df_train["label"].values
Y_test = df_test["label"].values
# %%
df_train.head()
# %% [markdown]
# ## 2. Writing Transformation Functions (TFs)
#
# Transformation functions are functions that can be applied to a training data point to create another valid training data point of the same class.
# For example, for image classification problems, it is common to rotate or crop images in the training data to create new training inputs.
# Transformation functions should be atomic e.g. a small rotation of an image, or changing a single word in a sentence.
# We then compose multiple transformation functions when applying them to training data points.
#
# Common ways to augment text include replacing words with their synonyms, or replacing named entities with other entities.
# More info can be found
# [here](https://towardsdatascience.com/data-augmentation-in-nlp-2801a34dfc28) or
# [here](https://towardsdatascience.com/these-are-the-easiest-data-augmentation-techniques-in-natural-language-processing-you-can-think-of-88e393fd610).
# Our basic modeling assumption is that applying these operations to a comment generally shouldn't change whether it is `SPAM` or not.
#
# Transformation functions in Snorkel are created with the
# [`transformation_function` decorator](https://snorkel.readthedocs.io/en/master/packages/_autosummary/augmentation/snorkel.augmentation.transformation_function.html#snorkel.augmentation.transformation_function),
# which wraps a function that takes in a single data point and returns a transformed version of the data point.
# If no transformation is possible, a TF can return `None` or the original data point.
# If all the TFs applied to a data point return `None`, the data point won't be included in
# the augmented dataset when we apply our TFs below.
#
# Just like the `labeling_function` decorator, the `transformation_function` decorator
# accepts `pre` argument for `Preprocessor` objects.
# Here, we'll use a
# [`SpacyPreprocessor`](https://snorkel.readthedocs.io/en/master/packages/_autosummary/preprocess/snorkel.preprocess.nlp.SpacyPreprocessor.html#snorkel.preprocess.nlp.SpacyPreprocessor).
# %%
from snorkel.preprocess.nlp import SpacyPreprocessor
spacy = SpacyPreprocessor(text_field="text", doc_field="doc", memoize=True)
# %%
import names
from snorkel.augmentation import transformation_function
# Pregenerate some random person names to replace existing ones with
# for the transformation strategies below
replacement_names = [names.get_full_name() for _ in range(50)]
# Replace a random named entity with a different entity of the same type.
@transformation_function(pre=[spacy])
def change_person(x):
    """Replace one randomly chosen PERSON entity with a random fake name.

    Returns None (skip this data point) when the comment mentions nobody.
    """
    people = [ent.text for ent in x.doc.ents if ent.label_ == "PERSON"]
    if not people:
        return None
    # Same RNG call order as before: pick the target, then its replacement.
    target = np.random.choice(people)
    substitute = np.random.choice(replacement_names)
    x.text = x.text.replace(target, substitute)
    return x
# Swap two adjectives at random.
@transformation_function(pre=[spacy])
def swap_adjectives(x):
    """Exchange the positions of two randomly chosen adjectives.

    Returns None (skip this data point) when the comment has fewer than
    two adjectives.
    """
    adj_positions = [i for i, tok in enumerate(x.doc) if tok.pos_ == "ADJ"]
    if len(adj_positions) < 2:
        return None
    lo, hi = sorted(np.random.choice(adj_positions, 2, replace=False))
    # Rebuild the sentence with the tokens at lo and hi exchanged; pieces
    # are re-joined with single spaces.
    pieces = [
        x.doc[:lo].text,
        x.doc[hi].text,
        x.doc[1 + lo : hi].text,
        x.doc[lo].text,
        x.doc[1 + hi :].text,
    ]
    x.text = " ".join(pieces)
    return x
# %% [markdown]
# We add some transformation functions that use `wordnet` from [NLTK](https://www.nltk.org/) to replace different parts of speech with their synonyms.
# %% {"tags": ["md-exclude-output"]}
import nltk
from nltk.corpus import wordnet as wn
nltk.download("wordnet")
def get_synonym(word, pos=None):
    """Return a WordNet synonym of *word* for part-of-speech *pos*, or None.

    Only the first synset is consulted.  Multi-word lemmas come back with
    spaces instead of WordNet's underscores.  Returns None when WordNet
    has no synsets for the word or when the best lemma is the word itself.
    """
    synsets = wn.synsets(word, pos=pos)
    if not synsets:
        return None
    lemmas = [lemma.name() for lemma in synsets[0].lemmas()]
    if lemmas[0].lower() == word.lower():
        return None
    return lemmas[0].replace("_", " ")
def replace_token(spacy_doc, idx, replacement):
    """Return the doc's text with the token at *idx* swapped for *replacement*.

    The three pieces are re-joined with single spaces, so the original
    inter-token spacing is not preserved exactly.
    """
    before = spacy_doc[:idx].text
    after = spacy_doc[1 + idx :].text
    return " ".join([before, replacement, after])
@transformation_function(pre=[spacy])
def replace_verb_with_synonym(x):
    """Substitute one randomly chosen verb with its WordNet synonym.

    Returns None (skip this data point) when the comment has no verb or
    the chosen verb has no usable synonym.
    """
    verb_positions = [i for i, tok in enumerate(x.doc) if tok.pos_ == "VERB"]
    if not verb_positions:
        return None
    chosen = np.random.choice(verb_positions)
    synonym = get_synonym(x.doc[chosen].text, pos="v")
    if not synonym:
        return None
    x.text = replace_token(x.doc, chosen, synonym)
    return x
@transformation_function(pre=[spacy])
def replace_noun_with_synonym(x):
    """Substitute one randomly chosen noun with its WordNet synonym.

    Returns None (skip this data point) when the comment has no noun or
    the chosen noun has no usable synonym.
    """
    noun_positions = [i for i, tok in enumerate(x.doc) if tok.pos_ == "NOUN"]
    if not noun_positions:
        return None
    chosen = np.random.choice(noun_positions)
    synonym = get_synonym(x.doc[chosen].text, pos="n")
    if not synonym:
        return None
    x.text = replace_token(x.doc, chosen, synonym)
    return x
@transformation_function(pre=[spacy])
def replace_adjective_with_synonym(x):
    """Substitute one randomly chosen adjective with its WordNet synonym.

    Returns None (skip this data point) when the comment has no adjective
    or the chosen adjective has no usable synonym.
    """
    adj_positions = [i for i, tok in enumerate(x.doc) if tok.pos_ == "ADJ"]
    if not adj_positions:
        return None
    chosen = np.random.choice(adj_positions)
    synonym = get_synonym(x.doc[chosen].text, pos="a")
    if not synonym:
        return None
    x.text = replace_token(x.doc, chosen, synonym)
    return x
# %%
tfs = [
change_person,
swap_adjectives,
replace_verb_with_synonym,
replace_noun_with_synonym,
replace_adjective_with_synonym,
]
# %% [markdown]
# Let's check out a few examples of transformed data points to see what our TFs are doing.
# %%
from utils import preview_tfs
preview_tfs(df_train, tfs)
# %% [markdown]
# We notice a couple of things about the TFs.
#
# * Sometimes they make trivial changes (`"website"` to `"web site"` for replace_noun_with_synonym).
# This can still be helpful for training our model, because it teaches the model to be invariant to such small changes.
# * Sometimes they introduce incorrect grammar to the sentence (e.g. `swap_adjectives` swapping `"young"` and `"more"` above).
#
# The TFs are expected to be heuristic strategies that indeed preserve the class most of the time, but
# [don't need to be perfect](https://arxiv.org/pdf/1901.11196.pdf).
# This is especially true when using automated
# [data augmentation techniques](https://snorkel.org/blog/tanda/)
# which can learn to avoid particularly corrupted data points.
# As we'll see below, Snorkel is compatible with such learned augmentation policies.
# %% [markdown]
# ## 3. Applying Transformation Functions
# %% [markdown]
# We'll first define a `Policy` to determine what sequence of TFs to apply to each data point.
# We'll start with a [`RandomPolicy`](https://snorkel.readthedocs.io/en/master/packages/_autosummary/augmentation/snorkel.augmentation.RandomPolicy.html)
# that samples `sequence_length=2` TFs to apply uniformly at random per data point.
# The `n_per_original` argument determines how many augmented data points to generate per original data point.
# %%
from snorkel.augmentation import RandomPolicy
random_policy = RandomPolicy(
len(tfs), sequence_length=2, n_per_original=2, keep_original=True
)
# %% [markdown]
# In some cases, we can do better than uniform random sampling.
# We might have domain knowledge that some TFs should be applied more frequently than others,
# or have trained an [automated data augmentation model](https://snorkel.org/blog/tanda/)
# that learned a sampling distribution for the TFs.
# Snorkel supports this use case with a
# [`MeanFieldPolicy`](https://snorkel.readthedocs.io/en/master/packages/_autosummary/augmentation/snorkel.augmentation.MeanFieldPolicy.html),
# which allows you to specify a sampling distribution for the TFs.
# We give higher probabilities to the `replace_[X]_with_synonym` TFs, since those provide more information to the model.
# %%
from snorkel.augmentation import MeanFieldPolicy
mean_field_policy = MeanFieldPolicy(
len(tfs),
sequence_length=2,
n_per_original=2,
keep_original=True,
p=[0.05, 0.05, 0.3, 0.3, 0.3],
)
# %% [markdown]
# To apply one or more TFs that we've written to a collection of data points according to our policy, we use a
# [`PandasTFApplier`](https://snorkel.readthedocs.io/en/master/packages/_autosummary/augmentation/snorkel.augmentation.PandasTFApplier.html)
# because our data points are represented with a Pandas DataFrame.
# %% {"tags": ["md-exclude-output"]}
from snorkel.augmentation import PandasTFApplier
tf_applier = PandasTFApplier(tfs, mean_field_policy)
df_train_augmented = tf_applier.apply(df_train)
Y_train_augmented = df_train_augmented["label"].values
# %%
print(f"Original training set size: {len(df_train)}")
print(f"Augmented training set size: {len(df_train_augmented)}")
# %% [markdown]
# We have almost doubled our dataset using TFs!
# Note that despite `n_per_original` being set to 2, our dataset may not exactly triple in size,
# because sometimes TFs return `None` instead of a new data point
# (e.g. `change_person` when applied to a sentence with no persons).
# If you prefer to have exact proportions for your dataset, you can have TFs that can't perform a
# valid transformation return the original data point rather than `None` (as they do here).
# %% [markdown]
# ## 4. Training A Model
#
# Our final step is to use the augmented data to train a model. We train an LSTM (Long Short Term Memory) model, which is a very standard architecture for text processing tasks.
# %% [markdown] {"tags": ["md-exclude"]}
# The next cell makes Keras results reproducible. You can ignore it.
# %% {"tags": ["md-exclude"]}
import tensorflow as tf
session_conf = tf.compat.v1.ConfigProto(
intra_op_parallelism_threads=1, inter_op_parallelism_threads=1
)
tf.compat.v1.set_random_seed(0)
sess = tf.compat.v1.Session(graph=tf.compat.v1.get_default_graph(), config=session_conf)
tf.compat.v1.keras.backend.set_session(sess)
# %% [markdown]
# Now we'll train our LSTM on both the original and augmented datasets to compare performance.
# %% {"tags": ["md-exclude-output"]}
from utils import featurize_df_tokens, get_keras_lstm
X_train = featurize_df_tokens(df_train)
X_train_augmented = featurize_df_tokens(df_train_augmented)
X_test = featurize_df_tokens(df_test)
def train_and_test(X_train, Y_train, X_test=X_test, Y_test=Y_test, num_buckets=30000):
    """Train a fresh LSTM on (X_train, Y_train) and return test accuracy.

    The test set defaults are bound at definition time from the
    module-level featurized arrays.
    """
    model = get_keras_lstm(num_buckets)
    model.fit(X_train, Y_train, epochs=5, verbose=0)
    # Threshold the sigmoid output at 0.5 to get hard predictions.
    predictions = model.predict(X_test)[:, 0] > 0.5
    return (predictions == Y_test).mean()
acc_augmented = train_and_test(X_train_augmented, Y_train_augmented)
acc_original = train_and_test(X_train, Y_train)
# %%
print(f"Test Accuracy (original training data): {100 * acc_original:.1f}%")
print(f"Test Accuracy (augmented training data): {100 * acc_augmented:.1f}%")
# %% [markdown]
# So using the augmented dataset indeed improved our model!
# There is a lot more you can do with data augmentation, so try a few ideas
# out on your own!
| 42.595801 | 212 | 0.732146 |
ansformation functions.
import os
import random
import numpy as np
if os.path.basename(os.getcwd()) == "snorkel-tutorials":
os.chdir("spam")
# Turn off TensorFlow logging messages
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
# For reproducibility
seed = 0
os.environ["PYTHONHASHSEED"] = str(seed)
np.random.seed(0)
random.seed(0)
# %% [markdown] {"tags": ["md-exclude"]}
# If you want to display all comment text untruncated, change `DISPLAY_ALL_TEXT` to `True` below.
# %% {"tags": ["md-exclude"]}
import pandas as pd
DISPLAY_ALL_TEXT = False
pd.set_option("display.max_colwidth", 0 if DISPLAY_ALL_TEXT else 50)
# %% [markdown] {"tags": ["md-exclude"]}
# This next cell makes sure a spaCy English model is downloaded.
# If this is your first time downloading this model, restart the kernel after executing the next cell.
# %% {"tags": ["md-exclude"]}
# Download the spaCy english model
# ! python -m spacy download en_core_web_sm
# %% [markdown]
# ## 1. Loading Data
# %% [markdown]
# We load the Kaggle dataset and create Pandas DataFrame objects for the `train` and `test` sets.
# The two main columns in the DataFrames are:
# * **`text`**: Raw text content of the comment
# * **`label`**: Whether the comment is `SPAM` (1) or `HAM` (0).
#
# For more details, check out the [labeling tutorial](https://github.com/snorkel-team/snorkel-tutorials/blob/master/spam/01_spam_tutorial.ipynb).
# %%
from utils import load_spam_dataset
df_train, df_test = load_spam_dataset(load_train_labels=True)
# We pull out the label vectors for ease of use later
Y_train = df_train["label"].values
Y_test = df_test["label"].values
# %%
df_train.head()
# %% [markdown]
# ## 2. Writing Transformation Functions (TFs)
#
# Transformation functions are functions that can be applied to a training data point to create another valid training data point of the same class.
# For example, for image classification problems, it is common to rotate or crop images in the training data to create new training inputs.
# Transformation functions should be atomic e.g. a small rotation of an image, or changing a single word in a sentence.
# We then compose multiple transformation functions when applying them to training data points.
#
# Common ways to augment text includes replacing words with their synonyms, or replacing names entities with other entities.
# More info can be found
# [here](https://towardsdatascience.com/data-augmentation-in-nlp-2801a34dfc28) or
# [here](https://towardsdatascience.com/these-are-the-easiest-data-augmentation-techniques-in-natural-language-processing-you-can-think-of-88e393fd610).
# Our basic modeling assumption is that applying these operations to a comment generally shouldn't change whether it is `SPAM` or not.
Fs below.
#
# Just like the `labeling_function` decorator, the `transformation_function` decorator
# accepts `pre` argument for `Preprocessor` objects.
# Here, we'll use a
Preprocessor
spacy = SpacyPreprocessor(text_field="text", doc_field="doc", memoize=True)
import names
from snorkel.augmentation import transformation_function
replacement_names = [names.get_full_name() for _ in range(50)]
@transformation_function(pre=[spacy])
def change_person(x):
person_names = [ent.text for ent in x.doc.ents if ent.label_ == "PERSON"]
if person_names:
name_to_replace = np.random.choice(person_names)
replacement_name = np.random.choice(replacement_names)
x.text = x.text.replace(name_to_replace, replacement_name)
return x
@transformation_function(pre=[spacy])
def swap_adjectives(x):
adjective_idxs = [i for i, token in enumerate(x.doc) if token.pos_ == "ADJ"]
if len(adjective_idxs) >= 2:
idx1, idx2 = sorted(np.random.choice(adjective_idxs, 2, replace=False))
x.text = " ".join(
[
x.doc[:idx1].text,
x.doc[idx2].text,
x.doc[1 + idx1 : idx2].text,
x.doc[idx1].text,
x.doc[1 + idx2 :].text,
]
)
return x
import nltk
from nltk.corpus import wordnet as wn
nltk.download("wordnet")
def get_synonym(word, pos=None):
synsets = wn.synsets(word, pos=pos)
if synsets:
words = [lemma.name() for lemma in synsets[0].lemmas()]
if words[0].lower() != word.lower():
return words[0].replace("_", " ")
def replace_token(spacy_doc, idx, replacement):
return " ".join([spacy_doc[:idx].text, replacement, spacy_doc[1 + idx :].text])
@transformation_function(pre=[spacy])
def replace_verb_with_synonym(x):
verb_idxs = [i for i, token in enumerate(x.doc) if token.pos_ == "VERB"]
if verb_idxs:
idx = np.random.choice(verb_idxs)
synonym = get_synonym(x.doc[idx].text, pos="v")
if synonym:
x.text = replace_token(x.doc, idx, synonym)
return x
@transformation_function(pre=[spacy])
def replace_noun_with_synonym(x):
# Get indices of noun tokens in sentence.
noun_idxs = [i for i, token in enumerate(x.doc) if token.pos_ == "NOUN"]
if noun_idxs:
# Pick random noun idx to replace.
idx = np.random.choice(noun_idxs)
synonym = get_synonym(x.doc[idx].text, pos="n")
# If there's a valid noun synonym, replace it. Otherwise, return None.
if synonym:
x.text = replace_token(x.doc, idx, synonym)
return x
@transformation_function(pre=[spacy])
def replace_adjective_with_synonym(x):
adjective_idxs = [i for i, token in enumerate(x.doc) if token.pos_ == "ADJ"]
if adjective_idxs:
idx = np.random.choice(adjective_idxs)
synonym = get_synonym(x.doc[idx].text, pos="a")
if synonym:
x.text = replace_token(x.doc, idx, synonym)
return x
# %%
tfs = [
change_person,
swap_adjectives,
replace_verb_with_synonym,
replace_noun_with_synonym,
replace_adjective_with_synonym,
]
# %% [markdown]
# Let's check out a few examples of transformed data points to see what our TFs are doing.
from utils import preview_tfs
preview_tfs(df_train, tfs)
# This is especially true when using automated
# [data augmentation techniques](https://snorkel.org/blog/tanda/)
# which can learn to avoid particularly corrupted data points.
# As we'll see below, Snorkel is compatible with such learned augmentation policies.
aster/packages/_autosummary/augmentation/snorkel.augmentation.RandomPolicy.html)
from snorkel.augmentation import RandomPolicy
random_policy = RandomPolicy(
len(tfs), sequence_length=2, n_per_original=2, keep_original=True
)
from snorkel.augmentation import MeanFieldPolicy
mean_field_policy = MeanFieldPolicy(
len(tfs),
sequence_length=2,
n_per_original=2,
keep_original=True,
p=[0.05, 0.05, 0.3, 0.3, 0.3],
)
# [`PandasTFApplier`](https://snorkel.readthedocs.io/en/master/packages/_autosummary/augmentation/snorkel.augmentation.PandasTFApplier.html)
# because our data points are represented with a Pandas DataFrame.
# %% {"tags": ["md-exclude-output"]}
from snorkel.augmentation import PandasTFApplier
tf_applier = PandasTFApplier(tfs, mean_field_policy)
df_train_augmented = tf_applier.apply(df_train)
Y_train_augmented = df_train_augmented["label"].values
# %%
print(f"Original training set size: {len(df_train)}")
print(f"Augmented training set size: {len(df_train_augmented)}")
# %% [markdown]
# We have almost doubled our dataset using TFs!
# Note that despite `n_per_original` being set to 2, our dataset may not exactly triple in size,
# because sometimes TFs return `None` instead of a new data point
# (e.g. `change_person` when applied to a sentence with no persons).
# If you prefer to have exact proportions for your dataset, you can have TFs that can't perform a
nf = tf.compat.v1.ConfigProto(
intra_op_parallelism_threads=1, inter_op_parallelism_threads=1
)
tf.compat.v1.set_random_seed(0)
sess = tf.compat.v1.Session(graph=tf.compat.v1.get_default_graph(), config=session_conf)
tf.compat.v1.keras.backend.set_session(sess)
# %% {"tags": ["md-exclude-output"]}
from utils import featurize_df_tokens, get_keras_lstm
X_train = featurize_df_tokens(df_train)
X_train_augmented = featurize_df_tokens(df_train_augmented)
X_test = featurize_df_tokens(df_test)
def train_and_test(X_train, Y_train, X_test=X_test, Y_test=Y_test, num_buckets=30000):
# Define a vanilla LSTM model with Keras
lstm_model = get_keras_lstm(num_buckets)
lstm_model.fit(X_train, Y_train, epochs=5, verbose=0)
preds_test = lstm_model.predict(X_test)[:, 0] > 0.5
return (preds_test == Y_test).mean()
acc_augmented = train_and_test(X_train_augmented, Y_train_augmented)
acc_original = train_and_test(X_train, Y_train)
# %%
print(f"Test Accuracy (original training data): {100 * acc_original:.1f}%")
print(f"Test Accuracy (augmented training data): {100 * acc_augmented:.1f}%")
# %% [markdown]
# So using the augmented dataset indeed improved our model!
# There is a lot more you can do with data augmentation, so try a few ideas
# out on your own!
| true | true |
f7f5a0158e7bfdf099d6d0b6a95fc1b96aec2327 | 2,823 | py | Python | heuristic.py | gaurav952/puzzle-game-tetris-using-genetic-algorithm- | 266a71003119d7b2d2f631ad51cabd8d86b76cb4 | [
"Apache-2.0"
] | null | null | null | heuristic.py | gaurav952/puzzle-game-tetris-using-genetic-algorithm- | 266a71003119d7b2d2f631ad51cabd8d86b76cb4 | [
"Apache-2.0"
] | null | null | null | heuristic.py | gaurav952/puzzle-game-tetris-using-genetic-algorithm- | 266a71003119d7b2d2f631ad51cabd8d86b76cb4 | [
"Apache-2.0"
] | 1 | 2019-12-03T08:21:30.000Z | 2019-12-03T08:21:30.000Z | """These heuristics attempt to assess how favourable a given board is.
The board should already have the dummy 23rd row stripped out.
When the heuristic says "block" it means one individual cell of a stone.
How you weight the different heuristics is up to you.
Most are designed to indicate bad things, and so should have negative weights.
"""
def _is_block(cell):
    """True when the cell holds part of a stone (any non-zero value)."""
    return cell != 0
def _is_empty(cell):
    """True when the cell is unoccupied (zero)."""
    return cell == 0
def _holes_in_board(board):
"""A hole is defined as an empty space below a block.
The block doesn't have to be directly above the hole for it to count.
This function identifies any holes and returns them as a [(x,y)]
"""
holes = []
block_in_col = False
for x in range(len(board[0])):
for y in range(len(board)):
if block_in_col and _is_empty(board[y][x]):
holes.append((x,y))
elif _is_block(board[y][x]):
block_in_col = True
block_in_col = False
return holes
def num_holes(board):
    """Number of holes (empty cells below a block) on the board."""
    return len(_holes_in_board(board))
def num_blocks_above_holes(board):
    """Count blocks stacked above holes.

    For each hole, walks upward from the cell directly above it and counts
    the contiguous run of blocks; a stack of three blocks over one hole
    contributes 3.  (Blocks separated from the hole by another empty cell
    are NOT counted -- the walk stops at the first gap.)

    NOTE(review): the upward walk uses range(hole_y-1, 0, -1) and so never
    inspects row 0 -- presumably the top row is special; confirm before
    changing.
    """
    total = 0
    for col, hole_row in _holes_in_board(board):
        for row in range(hole_row - 1, 0, -1):
            if board[row][col] == 0:
                break  # the run of blocks above this hole has ended
            total += 1
    return total
def num_gaps(board):
    """Count single-cell horizontal gaps: block-gap-block sequences.

    Like holes, but horizontal; discourages waiting for the magic I-beam
    piece.  Each row is padded with walls, which count as blocks, so gaps
    against the board edge are detected too.  Only one-cell-wide gaps
    count: a wider opening resets the scan.
    """
    hits = 0
    state = 0  # 0: scanning, 1: saw a block, 2: saw block then one gap
    # Pad every row with walls (value 1) on both sides.
    padded = [[1] + row + [1] for row in board]
    for row in padded:
        for cell in row:
            if state == 0 and cell != 0:
                state = 1
            elif state == 1 and cell == 0:
                state = 2
            elif state == 2:
                if cell != 0:
                    hits += 1  # completed a block-gap-block pattern
                    state = 1
                else:
                    state = 0
    return hits
def max_height(board):
    """Height of the highest block on the board (0 when there are none).

    Rows are ordered top to bottom, so the first row containing a block
    determines the height.
    """
    for idx, row in enumerate(board):
        if any(cell != 0 for cell in row):
            return len(board) - idx - 1
    # The original fell through and returned None here, which breaks any
    # arithmetic a caller does when weighting the heuristics; an empty
    # board sensibly has height 0.
    return 0
def avg_height(board):
"""Average height of blocks on the board"""
total_height = 0
for height, row in enumerate(reversed(board[1:])):
for cell in row:
if _is_block(cell):
total_height += height
return total_height / num_blocks(board)
def num_blocks(board):
"""Number of blocks that exist on the board."""
c = 0
for row in board:
for cell in row:
if _is_block(cell):
c += 1
return c | 28.806122 | 107 | 0.690755 |
def _is_block(cell):
return cell != 0
def _is_empty(cell):
return cell == 0
def _holes_in_board(board):
holes = []
block_in_col = False
for x in range(len(board[0])):
for y in range(len(board)):
if block_in_col and _is_empty(board[y][x]):
holes.append((x,y))
elif _is_block(board[y][x]):
block_in_col = True
block_in_col = False
return holes
def num_holes(board):
return len(_holes_in_board(board))
def num_blocks_above_holes(board):
c = 0
for hole_x, hole_y in _holes_in_board(board):
for y in range(hole_y-1, 0, -1):
if _is_block(board[y][hole_x]):
c += 1
else:
break
return c
def num_gaps(board):
gaps = []
sequence = 0
board_copy = []
for y in range(len(board)):
board_copy.append([1] + board[y] + [1])
for y in range(len(board_copy)):
for x in range(len(board_copy[0])):
if sequence == 0 and _is_block(board_copy[y][x]):
sequence = 1
elif sequence == 1 and _is_empty(board_copy[y][x]):
sequence = 2
elif sequence == 2:
if _is_block(board_copy[y][x]):
gaps.append(board_copy[y][x-1])
sequence = 1
else:
sequence = 0
return len(gaps)
def max_height(board):
for idx, row in enumerate(board):
for cell in row:
if _is_block(cell):
return len(board) - idx-1
def avg_height(board):
total_height = 0
for height, row in enumerate(reversed(board[1:])):
for cell in row:
if _is_block(cell):
total_height += height
return total_height / num_blocks(board)
def num_blocks(board):
c = 0
for row in board:
for cell in row:
if _is_block(cell):
c += 1
return c | true | true |
f7f5a2ea065f28f9a2ca4b205085be35bc1e9cc2 | 3,999 | py | Python | custom_components/tesla_custom/climate.py | carleeno/tesla | 81c342d5c2564eed3659cc5a0711b4c209dd6773 | [
"Apache-2.0"
] | 108 | 2021-04-29T11:38:05.000Z | 2022-03-25T10:35:28.000Z | custom_components/tesla_custom/climate.py | carleeno/tesla | 81c342d5c2564eed3659cc5a0711b4c209dd6773 | [
"Apache-2.0"
] | 120 | 2021-04-29T07:49:59.000Z | 2022-03-31T04:45:15.000Z | custom_components/tesla_custom/climate.py | carleeno/tesla | 81c342d5c2564eed3659cc5a0711b4c209dd6773 | [
"Apache-2.0"
] | 33 | 2021-05-01T16:03:07.000Z | 2022-03-12T21:54:40.000Z | """Support for Tesla HVAC system."""
from __future__ import annotations
import logging
from homeassistant.components.climate import ClimateEntity
from homeassistant.components.climate.const import (
HVAC_MODE_HEAT_COOL,
HVAC_MODE_OFF,
SUPPORT_PRESET_MODE,
SUPPORT_TARGET_TEMPERATURE,
)
from homeassistant.const import ATTR_TEMPERATURE, TEMP_CELSIUS, TEMP_FAHRENHEIT
from teslajsonpy.exceptions import UnknownPresetMode
from . import DOMAIN as TESLA_DOMAIN
from .tesla_device import TeslaDevice
_LOGGER = logging.getLogger(__name__)
SUPPORT_HVAC = [HVAC_MODE_HEAT_COOL, HVAC_MODE_OFF]
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up the Tesla binary_sensors by config_entry."""
async_add_entities(
[
TeslaThermostat(
device,
hass.data[TESLA_DOMAIN][config_entry.entry_id]["coordinator"],
)
for device in hass.data[TESLA_DOMAIN][config_entry.entry_id]["devices"][
"climate"
]
],
True,
)
class TeslaThermostat(TeslaDevice, ClimateEntity):
"""Representation of a Tesla climate."""
@property
def supported_features(self):
"""Return the list of supported features."""
return SUPPORT_TARGET_TEMPERATURE | SUPPORT_PRESET_MODE
@property
def hvac_mode(self):
"""Return hvac operation ie. heat, cool mode.
Need to be one of HVAC_MODE_*.
"""
if self.tesla_device.is_hvac_enabled():
return HVAC_MODE_HEAT_COOL
return HVAC_MODE_OFF
@property
def hvac_modes(self):
"""Return the list of available hvac operation modes.
Need to be a subset of HVAC_MODES.
"""
return SUPPORT_HVAC
@property
def temperature_unit(self):
"""Return the unit of measurement."""
if self.tesla_device.measurement == "F":
return TEMP_FAHRENHEIT
return TEMP_CELSIUS
@property
def current_temperature(self):
"""Return the current temperature."""
return self.tesla_device.get_current_temp()
@property
def target_temperature(self):
"""Return the temperature we try to reach."""
return self.tesla_device.get_goal_temp()
@TeslaDevice.Decorators.check_for_reauth
async def async_set_temperature(self, **kwargs):
"""Set new target temperatures."""
temperature = kwargs.get(ATTR_TEMPERATURE)
if temperature:
_LOGGER.debug("%s: Setting temperature to %s", self.name, temperature)
await self.tesla_device.set_temperature(temperature)
self.async_write_ha_state()
@TeslaDevice.Decorators.check_for_reauth
async def async_set_hvac_mode(self, hvac_mode):
"""Set new target hvac mode."""
_LOGGER.debug("%s: Setting hvac mode to %s", self.name, hvac_mode)
if hvac_mode == HVAC_MODE_OFF:
await self.tesla_device.set_status(False)
elif hvac_mode == HVAC_MODE_HEAT_COOL:
await self.tesla_device.set_status(True)
self.async_write_ha_state()
@TeslaDevice.Decorators.check_for_reauth
async def async_set_preset_mode(self, preset_mode: str) -> None:
"""Set new preset mode."""
_LOGGER.debug("%s: Setting preset_mode to: %s", self.name, preset_mode)
try:
await self.tesla_device.set_preset_mode(preset_mode)
self.async_write_ha_state()
except UnknownPresetMode as ex:
_LOGGER.error("%s", ex.message)
@property
def preset_mode(self) -> str | None:
"""Return the current preset mode, e.g., home, away, temp.
Requires SUPPORT_PRESET_MODE.
"""
return self.tesla_device.preset_mode
@property
def preset_modes(self) -> list[str] | None:
"""Return a list of available preset modes.
Requires SUPPORT_PRESET_MODE.
"""
return self.tesla_device.preset_modes
| 31.488189 | 84 | 0.664416 | from __future__ import annotations
import logging
from homeassistant.components.climate import ClimateEntity
from homeassistant.components.climate.const import (
HVAC_MODE_HEAT_COOL,
HVAC_MODE_OFF,
SUPPORT_PRESET_MODE,
SUPPORT_TARGET_TEMPERATURE,
)
from homeassistant.const import ATTR_TEMPERATURE, TEMP_CELSIUS, TEMP_FAHRENHEIT
from teslajsonpy.exceptions import UnknownPresetMode
from . import DOMAIN as TESLA_DOMAIN
from .tesla_device import TeslaDevice
_LOGGER = logging.getLogger(__name__)
SUPPORT_HVAC = [HVAC_MODE_HEAT_COOL, HVAC_MODE_OFF]
async def async_setup_entry(hass, config_entry, async_add_entities):
async_add_entities(
[
TeslaThermostat(
device,
hass.data[TESLA_DOMAIN][config_entry.entry_id]["coordinator"],
)
for device in hass.data[TESLA_DOMAIN][config_entry.entry_id]["devices"][
"climate"
]
],
True,
)
class TeslaThermostat(TeslaDevice, ClimateEntity):
@property
def supported_features(self):
return SUPPORT_TARGET_TEMPERATURE | SUPPORT_PRESET_MODE
@property
def hvac_mode(self):
if self.tesla_device.is_hvac_enabled():
return HVAC_MODE_HEAT_COOL
return HVAC_MODE_OFF
@property
def hvac_modes(self):
return SUPPORT_HVAC
@property
def temperature_unit(self):
if self.tesla_device.measurement == "F":
return TEMP_FAHRENHEIT
return TEMP_CELSIUS
@property
def current_temperature(self):
return self.tesla_device.get_current_temp()
@property
def target_temperature(self):
return self.tesla_device.get_goal_temp()
@TeslaDevice.Decorators.check_for_reauth
async def async_set_temperature(self, **kwargs):
temperature = kwargs.get(ATTR_TEMPERATURE)
if temperature:
_LOGGER.debug("%s: Setting temperature to %s", self.name, temperature)
await self.tesla_device.set_temperature(temperature)
self.async_write_ha_state()
@TeslaDevice.Decorators.check_for_reauth
async def async_set_hvac_mode(self, hvac_mode):
_LOGGER.debug("%s: Setting hvac mode to %s", self.name, hvac_mode)
if hvac_mode == HVAC_MODE_OFF:
await self.tesla_device.set_status(False)
elif hvac_mode == HVAC_MODE_HEAT_COOL:
await self.tesla_device.set_status(True)
self.async_write_ha_state()
@TeslaDevice.Decorators.check_for_reauth
async def async_set_preset_mode(self, preset_mode: str) -> None:
_LOGGER.debug("%s: Setting preset_mode to: %s", self.name, preset_mode)
try:
await self.tesla_device.set_preset_mode(preset_mode)
self.async_write_ha_state()
except UnknownPresetMode as ex:
_LOGGER.error("%s", ex.message)
@property
def preset_mode(self) -> str | None:
return self.tesla_device.preset_mode
@property
def preset_modes(self) -> list[str] | None:
return self.tesla_device.preset_modes
| true | true |
f7f5a314dab51c1252d7afe85641cb59e070a7e8 | 8,962 | py | Python | uncore_csv_json.py | neolinsu/event-converter-for-linux-perf | 5bc53af2e79d81345cef6672f130167d00c0e441 | [
"BSD-3-Clause"
] | 6 | 2021-04-13T08:02:07.000Z | 2022-02-09T20:08:48.000Z | uncore_csv_json.py | neolinsu/event-converter-for-linux-perf | 5bc53af2e79d81345cef6672f130167d00c0e441 | [
"BSD-3-Clause"
] | 3 | 2020-11-17T18:40:17.000Z | 2022-03-17T23:57:53.000Z | uncore_csv_json.py | neolinsu/event-converter-for-linux-perf | 5bc53af2e79d81345cef6672f130167d00c0e441 | [
"BSD-3-Clause"
] | 4 | 2020-11-17T07:38:36.000Z | 2022-02-17T04:10:56.000Z | #!/usr/bin/python
# Copyright (c) 2020, Intel Corporation
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Intel Corporation nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# generate split uncore json from csv spreadsheet input
# uncore_csv_json.py csv orig-pme-json targetdir
from __future__ import print_function
import json
import sys
import csv
import copy
import argparse
import itertools
import re
repl_events = {
"UNC_M_CLOCKTICKS": "UNC_M_DCLOCKTICKS"
}
ap = argparse.ArgumentParser()
ap.add_argument('csvfile')
ap.add_argument('jsonfile')
ap.add_argument('targetdir')
ap.add_argument('extrajsonfile', nargs='?', help='Extra json file to look up events (e.g. experimential)')
ap.add_argument('--all', action='store_true', help='Include all events from jsonfile, not just CSV events')
args = ap.parse_args()
def read_events(fn):
events = {}
j = json.load(open(fn, "r"))
for l in j:
events[l["EventName"]] = l
return events
events = read_events(args.jsonfile)
events2 = read_events(args.extrajsonfile) if args.extrajsonfile else None
def gen_topic(u):
if u == "iMC":
return "Uncore-Memory"
if u == "CBO" or u == "HA":
return "Uncore-Cache"
if u.startswith("QPI"):
return "Uncore-Interconnect"
if u == "PCU":
return "Uncore-Power"
return "Uncore-Other"
def update(j):
if j["Unit"] == "PCU" and "UMask" in j:
# XXX should convert to right filter for occupancy
del j["UMask"]
unit_remap = {
"IMC": "iMC",
"KTI LL": "UPI",
}
if j["Unit"] in unit_remap:
j["Unit"] = unit_remap[j["Unit"]]
j["Topic"] = gen_topic(j["Unit"])
j["PerPkg"] = "1"
if "Counter" in j and j["Counter"] == "FIXED":
j["EventCode"] = "0xff"
j["UMask"] = "0x00"
for k in list(j.keys()):
if j[k] in ("0x0", "0x00", "0X00", "null", "", "0", None, "tbd", "TBD", "na"):
del j[k]
return j
jl = []
added = set()
c = csv.reader(open(args.csvfile, "r"))
for l in c:
# UNC_C_LLC_LOOKUP.ANY,new name,All LLC Misses (code+ data rd + data wr - including demand and prefetch),"State=0x1,",scale,formula (with x),comment (optional)
if len(l) == 6:
l.append("")
name, newname, desc, filter, scale, formula, comment = l
umask = None
if ":" in name:
name, umask = name.split(":")
umask = umask[1:]
if filter:
filter = filter.replace("State=", ",filter_state=")
filter = filter.replace("Match=", ",filter_opc=")
filter = filter.replace(":opc=", ",filter_opc=")
filter = filter.replace(":nc=", ",filter_nc=")
filter = filter.replace(":tid=", ",filter_tid=")
filter = filter.replace(":state=", ",filter_state=")
filter = filter.replace(":filter1=", ",config1=")
filter = filter.replace("fc, chnl", "")
m = re.match(r':u[0-9xa-f]+', filter)
if m:
umask = "%#x" % int(m.group(0)[2:], 16)
filter = filter.replace(m.group(0), '')
if filter and filter[0] == ",":
filter = filter[1:]
if filter.endswith(","):
filter = filter[:-1]
def find_event(events, name):
if name in events:
return events[name]
if name in repl_events:
name = repl_events[name]
if name in events:
return events[name]
nname = name[:name.rfind(".")]
if nname in events:
j = events[nname]
return None
def find_event_all(name):
j = find_event(events, name)
if j:
return j
if events2:
return find_event(events2, name)
return None
j = find_event_all(name)
def is_deprecated(j):
return "Deprecated" in j and j["Deprecated"] == "1"
if j is None or is_deprecated(j):
for i, r in (("_H_", "_CHA_"), ("_C_", "_CHA_")):
nname = name.replace(i, r)
j = find_event_all(nname)
if j:
name = nname
break
if j is None:
print("event", name, "not found", file=sys.stderr)
continue
if is_deprecated(j):
print("Could not find non deprecated version of", name, file=sys.stderr)
j = update(j)
j["EventName"] = newname if newname else name
if desc == "" and "BriefDescription" in j:
desc = j["BriefDescription"]
if "Description" in j:
del j["Description"]
if desc.endswith("."):
desc = desc[:-1]
j["BriefDescription"] = desc
if newname and newname.lower() != name.lower():
j["BriefDescription"] += ". Derived from " + name.lower()
if "PublicDescription" in j:
del j["PublicDescription"]
j["Filter"] = filter
if formula:
# XXX hack for now
nn = newname if newname else name
formula = re.sub(r"X/", nn+ "/", formula)
for o in repl_events.keys():
if o in formula and o not in events:
formula = formula.replace(o, repl_events[o])
# Don't apply % for Latency Metrics
if "/" in formula and "LATENCY" not in nn:
j["MetricExpr"] = "(%s) * 100." % (formula.replace("/", " / "))
j["MetricName"] = re.sub(r'UNC_[A-Z]_', '', nn).lower() + " %"
else:
j["MetricExpr"] = formula.replace("\n", "")
j["MetricName"] = nn
if umask:
j["UMask"] = "%#02x" % int(umask, 16)
if scale:
# If scale has a unit, use it
if "(" in scale:
scale = scale.replace("(", "")
j["ScaleUnit"] = scale.replace(")", "")
else:
j["ScaleUnit"] = scale + "Bytes"
if j["EventName"] in added:
print(j["EventName"], "duplicated", file=sys.stderr)
continue
j = update(j)
added.add(j["EventName"])
if newname and newname.lower() != name.lower():
added.add(name)
jl.append(copy.deepcopy(j))
if args.all:
def skip(j):
return "Deprecated" in j and j["Deprecated"] == "1"
jl += [update(events[x]) for x in sorted(events.keys()) if not skip(events[x]) and not x in added]
for j in jl:
if "UMask" in j.keys() and "UMaskExt" in j.keys():
str = j["UMask"][2:]
j["UMask"] = j["UMaskExt"] + str
if "FILTER_VALUE" in j.keys() and j["Filter"] == "Filter1":
j["Filter"] = "config1=" + j["FILTER_VALUE"]
del j["FILTER_VALUE"]
desc = None
if "BriefDescription" in j:
desc = j["BriefDescription"]
if "PublicDescription" in j:
desc = j["PublicDescription"]
if not desc:
print(j["EventName"], "has no description", file=sys.stderr)
if desc and len(desc) > 900:
print(j["EventName"], "has too long description for git (%d)" % len(desc), file=sys.stderr)
def get_topic(j):
return j["Topic"]
#print(jl)
remove_l = []
for j in jl:
if "Filter" in j.keys():
if j["Filter"].startswith('CHAFilter'):
remove_l.append(j)
if j["Filter"] == "fc, chnl" or j["Filter"].startswith('chnl'):
del j["Filter"]
if "BriefDescription" not in j.keys() and "PublicDescription" not in j.keys():
remove_l.append(j)
for j in remove_l:
jl.remove(j)
for topic, iter in itertools.groupby(sorted(jl, key=get_topic), key=get_topic):
events = list(iter)
for j in events:
del j["Topic"]
print("generating", topic)
of = open(args.targetdir + "/" + topic.lower() + ".json", "w")
js = json.dumps(events, sort_keys=True, indent=4, separators=(',', ': '))
print(js, file=of)
of.close()
| 34.337165 | 163 | 0.600647 |
from __future__ import print_function
import json
import sys
import csv
import copy
import argparse
import itertools
import re
repl_events = {
"UNC_M_CLOCKTICKS": "UNC_M_DCLOCKTICKS"
}
ap = argparse.ArgumentParser()
ap.add_argument('csvfile')
ap.add_argument('jsonfile')
ap.add_argument('targetdir')
ap.add_argument('extrajsonfile', nargs='?', help='Extra json file to look up events (e.g. experimential)')
ap.add_argument('--all', action='store_true', help='Include all events from jsonfile, not just CSV events')
args = ap.parse_args()
def read_events(fn):
events = {}
j = json.load(open(fn, "r"))
for l in j:
events[l["EventName"]] = l
return events
events = read_events(args.jsonfile)
events2 = read_events(args.extrajsonfile) if args.extrajsonfile else None
def gen_topic(u):
if u == "iMC":
return "Uncore-Memory"
if u == "CBO" or u == "HA":
return "Uncore-Cache"
if u.startswith("QPI"):
return "Uncore-Interconnect"
if u == "PCU":
return "Uncore-Power"
return "Uncore-Other"
def update(j):
if j["Unit"] == "PCU" and "UMask" in j:
del j["UMask"]
unit_remap = {
"IMC": "iMC",
"KTI LL": "UPI",
}
if j["Unit"] in unit_remap:
j["Unit"] = unit_remap[j["Unit"]]
j["Topic"] = gen_topic(j["Unit"])
j["PerPkg"] = "1"
if "Counter" in j and j["Counter"] == "FIXED":
j["EventCode"] = "0xff"
j["UMask"] = "0x00"
for k in list(j.keys()):
if j[k] in ("0x0", "0x00", "0X00", "null", "", "0", None, "tbd", "TBD", "na"):
del j[k]
return j
jl = []
added = set()
c = csv.reader(open(args.csvfile, "r"))
for l in c:
if len(l) == 6:
l.append("")
name, newname, desc, filter, scale, formula, comment = l
umask = None
if ":" in name:
name, umask = name.split(":")
umask = umask[1:]
if filter:
filter = filter.replace("State=", ",filter_state=")
filter = filter.replace("Match=", ",filter_opc=")
filter = filter.replace(":opc=", ",filter_opc=")
filter = filter.replace(":nc=", ",filter_nc=")
filter = filter.replace(":tid=", ",filter_tid=")
filter = filter.replace(":state=", ",filter_state=")
filter = filter.replace(":filter1=", ",config1=")
filter = filter.replace("fc, chnl", "")
m = re.match(r':u[0-9xa-f]+', filter)
if m:
umask = "%#x" % int(m.group(0)[2:], 16)
filter = filter.replace(m.group(0), '')
if filter and filter[0] == ",":
filter = filter[1:]
if filter.endswith(","):
filter = filter[:-1]
def find_event(events, name):
if name in events:
return events[name]
if name in repl_events:
name = repl_events[name]
if name in events:
return events[name]
nname = name[:name.rfind(".")]
if nname in events:
j = events[nname]
return None
def find_event_all(name):
j = find_event(events, name)
if j:
return j
if events2:
return find_event(events2, name)
return None
j = find_event_all(name)
def is_deprecated(j):
return "Deprecated" in j and j["Deprecated"] == "1"
if j is None or is_deprecated(j):
for i, r in (("_H_", "_CHA_"), ("_C_", "_CHA_")):
nname = name.replace(i, r)
j = find_event_all(nname)
if j:
name = nname
break
if j is None:
print("event", name, "not found", file=sys.stderr)
continue
if is_deprecated(j):
print("Could not find non deprecated version of", name, file=sys.stderr)
j = update(j)
j["EventName"] = newname if newname else name
if desc == "" and "BriefDescription" in j:
desc = j["BriefDescription"]
if "Description" in j:
del j["Description"]
if desc.endswith("."):
desc = desc[:-1]
j["BriefDescription"] = desc
if newname and newname.lower() != name.lower():
j["BriefDescription"] += ". Derived from " + name.lower()
if "PublicDescription" in j:
del j["PublicDescription"]
j["Filter"] = filter
if formula:
nn = newname if newname else name
formula = re.sub(r"X/", nn+ "/", formula)
for o in repl_events.keys():
if o in formula and o not in events:
formula = formula.replace(o, repl_events[o])
if "/" in formula and "LATENCY" not in nn:
j["MetricExpr"] = "(%s) * 100." % (formula.replace("/", " / "))
j["MetricName"] = re.sub(r'UNC_[A-Z]_', '', nn).lower() + " %"
else:
j["MetricExpr"] = formula.replace("\n", "")
j["MetricName"] = nn
if umask:
j["UMask"] = "%#02x" % int(umask, 16)
if scale:
# If scale has a unit, use it
if "(" in scale:
scale = scale.replace("(", "")
j["ScaleUnit"] = scale.replace(")", "")
else:
j["ScaleUnit"] = scale + "Bytes"
if j["EventName"] in added:
print(j["EventName"], "duplicated", file=sys.stderr)
continue
j = update(j)
added.add(j["EventName"])
if newname and newname.lower() != name.lower():
added.add(name)
jl.append(copy.deepcopy(j))
if args.all:
def skip(j):
return "Deprecated" in j and j["Deprecated"] == "1"
jl += [update(events[x]) for x in sorted(events.keys()) if not skip(events[x]) and not x in added]
for j in jl:
if "UMask" in j.keys() and "UMaskExt" in j.keys():
str = j["UMask"][2:]
j["UMask"] = j["UMaskExt"] + str
if "FILTER_VALUE" in j.keys() and j["Filter"] == "Filter1":
j["Filter"] = "config1=" + j["FILTER_VALUE"]
del j["FILTER_VALUE"]
desc = None
if "BriefDescription" in j:
desc = j["BriefDescription"]
if "PublicDescription" in j:
desc = j["PublicDescription"]
if not desc:
print(j["EventName"], "has no description", file=sys.stderr)
if desc and len(desc) > 900:
print(j["EventName"], "has too long description for git (%d)" % len(desc), file=sys.stderr)
def get_topic(j):
return j["Topic"]
#print(jl)
remove_l = []
for j in jl:
if "Filter" in j.keys():
if j["Filter"].startswith('CHAFilter'):
remove_l.append(j)
if j["Filter"] == "fc, chnl" or j["Filter"].startswith('chnl'):
del j["Filter"]
if "BriefDescription" not in j.keys() and "PublicDescription" not in j.keys():
remove_l.append(j)
for j in remove_l:
jl.remove(j)
for topic, iter in itertools.groupby(sorted(jl, key=get_topic), key=get_topic):
events = list(iter)
for j in events:
del j["Topic"]
print("generating", topic)
of = open(args.targetdir + "/" + topic.lower() + ".json", "w")
js = json.dumps(events, sort_keys=True, indent=4, separators=(',', ': '))
print(js, file=of)
of.close()
| true | true |
f7f5a326326a4966f92a93d23568b99d4d8f1093 | 1,003 | py | Python | src/decisionengine_modules/util/testutils.py | hyunwoo18/decisionengine_modules | a67462628c2074e768d0825edee4ee5d570030e0 | [
"BSD-3-Clause"
] | null | null | null | src/decisionengine_modules/util/testutils.py | hyunwoo18/decisionengine_modules | a67462628c2074e768d0825edee4ee5d570030e0 | [
"BSD-3-Clause"
] | null | null | null | src/decisionengine_modules/util/testutils.py | hyunwoo18/decisionengine_modules | a67462628c2074e768d0825edee4ee5d570030e0 | [
"BSD-3-Clause"
] | null | null | null | '''
Utils to simplify testing
'''
# These imports needed for the `eval` blocks
from classad import classad # noqa
import datetime # noqa
import pandas as pd # noqa
def input_from_file(fname):
with open(fname) as fd:
return eval(fd.read())
def raw_input_from_file(fname):
with open(fname) as fd:
return fd.read()
def compare_dfs(df1, df2):
"""
for some reason df.equals does not work here
but if I compare cell by cell it works
:type df1: :class:`pd.DataFrame`
:arg df1: data frame instance
:type df2: :class:`pd.DataFrame`
:arg df2: data frame instance
:rtype: :obj:`bool` - True if equal
"""
if df1.shape[0] != df2.shape[0]:
return False
if df1.shape[1] != df2.shape[1]:
return False
rc = True
for i in range(df1.shape[0]):
for j in range(df1.shape[1]):
if (df1.iloc[i, j] != df2.iloc[i, j]):
rc = False
break
return rc
| 23.880952 | 50 | 0.581256 |
from classad import classad
import datetime
import pandas as pd
def input_from_file(fname):
with open(fname) as fd:
return eval(fd.read())
def raw_input_from_file(fname):
with open(fname) as fd:
return fd.read()
def compare_dfs(df1, df2):
if df1.shape[0] != df2.shape[0]:
return False
if df1.shape[1] != df2.shape[1]:
return False
rc = True
for i in range(df1.shape[0]):
for j in range(df1.shape[1]):
if (df1.iloc[i, j] != df2.iloc[i, j]):
rc = False
break
return rc
| true | true |
f7f5a3778c355a59e3379cd699be3b3027c9ff73 | 432 | py | Python | pycs/races/elf.py | dwagon/pycs | 4d02acbf380526d3bf0380f6bb8b757a827024b8 | [
"MIT"
] | null | null | null | pycs/races/elf.py | dwagon/pycs | 4d02acbf380526d3bf0380f6bb8b757a827024b8 | [
"MIT"
] | null | null | null | pycs/races/elf.py | dwagon/pycs | 4d02acbf380526d3bf0380f6bb8b757a827024b8 | [
"MIT"
] | null | null | null | """https://www.dndbeyond.com/races/elf"""
from pycs.race import Race
##############################################################################
##############################################################################
##############################################################################
class Elf(Race):
"""Pointy Ears"""
def __init__(self, **kwargs):
super().__init__("Elf", **kwargs)
# EOF
| 27 | 78 | 0.273148 | from pycs.race import Race
| true | true |
f7f5a4048eba5ac128f8d5bee493374049f2c750 | 310 | py | Python | vmapper/map_element/__init__.py | wcchin/mappy | 38015bfc58d39552939d36e0e7eaa696ddbb2c5c | [
"MIT"
] | 2 | 2017-06-10T13:19:58.000Z | 2017-06-25T18:09:04.000Z | vmapper/map_element/__init__.py | wcchin/vmapper | 38015bfc58d39552939d36e0e7eaa696ddbb2c5c | [
"MIT"
] | null | null | null | vmapper/map_element/__init__.py | wcchin/vmapper | 38015bfc58d39552939d36e0e7eaa696ddbb2c5c | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
### this folder is for the items that use the svg coordinate system, not the projected map coordinates
## not implement yet
from Legend import ColorLegend
from MapScale import MapScale
from TextBox import TextBox
from GridLines import GridLines
import common_function# import get_loc
| 28.181818 | 102 | 0.790323 | true | true | |
f7f5a4adfd883001034c4520dc6ea2b47d1fc20f | 28,993 | py | Python | hosts/utils/nmap_discovery_server.py | reven-tang/ITMP | 8d6686edb19fcc26c9cf1f7e14037f9d38a6e702 | [
"BSD-2-Clause"
] | null | null | null | hosts/utils/nmap_discovery_server.py | reven-tang/ITMP | 8d6686edb19fcc26c9cf1f7e14037f9d38a6e702 | [
"BSD-2-Clause"
] | 11 | 2020-06-05T19:40:52.000Z | 2022-03-11T23:38:17.000Z | hosts/utils/nmap_discovery_server.py | reven-tang/ITMP | 8d6686edb19fcc26c9cf1f7e14037f9d38a6e702 | [
"BSD-2-Clause"
] | null | null | null | # -*- coding:utf-8 -*-
import os
import re
import telnetlib
PROJECT_ROOT = os.path.realpath(os.path.dirname(__file__))
from pysnmp.entity.rfc3413.oneliner import cmdgen
import nmap
import time
# import sys
from hosts.models import HostsInfo
from hosts.lib.J_do import J_ssh_do
from hosts.lib.utils import mac_trans,sn_trans,machine_type_trans
from hosts.lib.SnmpVMS import SnmpESXI
from hosts.lib.utils import prpcrypt
from hosts.lib.utils import getsysversion
from hosts.lib.J_do import J_net_do
# from hosts.models import ConnectionInfo,VirtualServerInfo,PhysicalServerInfo,NetConnectionInfo,NetWorkInfo
from hosts.utils.tools import sendmail
import logging
logger = logging.getLogger('hosts.view')
def snmp_begin(nmap_type,ports,password_list,key_file,syscmd_list,black_list,s_emails):
    '''
    Scan one /24 network for SSH hosts, try password and key based logins,
    e-mail a result summary and persist every reachable host into HostsInfo.

    :param nmap_type: first three octets of the target network
                      (e.g. "192.168.1"); the scan covers "<nmap_type>.0/24"
    :param ports: comma separated candidate SSH ports, e.g. "22,2222"
    :param password_list: root passwords to try in order
    :param key_file: SSH private key file used for key based login attempts
    :param syscmd_list: remote commands executed on each host to collect
                        hostname / MAC / serial number / OS facts
    :param black_list: IPs that must be excluded from login attempts
    :param s_emails: recipient address(es) of the summary e-mail
    :return: (unkown_list, key_not_login_list) on success; False when
             nmap_type is None.  NOTE(review): callers that unpack the
             return value will break on the False path -- confirm call sites.
    '''
    if nmap_type is None: return False
    nmap_net = '%s.0/24'%nmap_type
    nm_item = NmapDev(black_list)
    # Discover SSH endpoints first, then attempt both login styles on them.
    sship_list,host_list,unkown_list = nm_item.nmap_sship(ports,nmap_net)
    canlogin_list,notlogin_list = nm_item.try_login(sship_list,password_list,syscmd_list)
    key_login_list,key_not_login_list = nm_item.try_key_login(sship_list,key_file,syscmd_list)
    print("Password Login ...",canlogin_list,notlogin_list)
    logger.info("Use password login:%s,%s"%(canlogin_list,notlogin_list))
    print("Key Login ...",key_login_list,key_not_login_list)
    logger.info("Use key login:%s,%s"%(key_login_list,key_not_login_list))
    print("start send mail to wujh8701@163.com!!!!!!")
    # Mail body lists (Chinese labels): password-login hosts, key-login
    # hosts, hosts that rejected every login, and unknown hosts.
    email_message = u"可以ssh 用户密码登录的服务器列表 \n %s \n 可以ssh 用户密钥登录的服务器列表 \n %s \n 无法ssh登录列表 \n %s \n 未知主机 \n %s"%(canlogin_list,\
        key_login_list,notlogin_list,unkown_list)
    email_sub = u"系统扫描结果"
    receive_addr = s_emails
    email_s = sendmail(receive_addr,email_sub,email_message)
    email_s.send()
    # HostsInfo.objects.filter(ip__icontains=nmap_type).delete()
    crpt_do = prpcrypt()
    if canlogin_list:
        # canlogin_list values are tuples:
        # (port, password, user, os, hostname, mac, sn, machine_type)
        # -- produced by NmapDev.try_login below.
        for item in canlogin_list:
            # Delete-then-create so re-scans replace any stale row for this IP.
            HostsInfo.objects.filter(ip__exact=item).delete()
            mathine_t = canlogin_list[item][7] if canlogin_list[item][7] else u"未知(需要安装dmidecode工具)"
            # Password is encrypted at rest; empty string when unknown.
            crpt_pass = crpt_do.encrypt(canlogin_list[item][1]) if canlogin_list[item][1] else crpt_do.encrypt('')
            HostsInfo.objects.update_or_create(ip=item,
                                               ssh_port=str(canlogin_list[item][0]),
                                               ssh_passwd=crpt_pass,
                                               ssh_user=canlogin_list[item][2],
                                               ssh_status=1,
                                               ssh_type=0,
                                               system_ver=canlogin_list[item][3],
                                               hostname=canlogin_list[item][4],
                                               mac_address=canlogin_list[item][5],
                                               sn=canlogin_list[item][6],
                                               mathine_type=mathine_t)
    if key_login_list:
        # key_login_list values appear to be tuples:
        # (port, key, user, key_pass, ssh_type, os, hostname, mac, sn,
        # machine_type) -- presumably produced by try_key_login, which is
        # outside this chunk; verify against its implementation.
        for item in key_login_list:
            HostsInfo.objects.filter(ip__exact=item).delete()
            mathine_t = key_login_list[item][9] if key_login_list[item][9] else u"未知(需要安装dmidecode工具)"
            HostsInfo.objects.update_or_create(ip=item,
                                               ssh_port=key_login_list[item][0],
                                               ssh_rsa=key_login_list[item][1],
                                               ssh_user=key_login_list[item][2],
                                               rsa_pass=key_login_list[item][3],
                                               ssh_status=1,
                                               ssh_type=key_login_list[item][4],
                                               system_ver=key_login_list[item][5],
                                               hostname=key_login_list[item][6],
                                               mac_address=key_login_list[item][7],
                                               sn=key_login_list[item][8],
                                               mathine_type=mathine_t)
    return unkown_list,key_not_login_list
class NmapDev(object):
'''
扫描类:扫描获取指定网段主机等对象信息
'''
def __init__(self,black_list=[]):
self.black_list = black_list
self.can_login_lst = {}
self.not_login_lst = {}
self.can_key_login_lst = {}
self.key_not_login_lst = {}
def nmap_allip(self,nmap_net):
'''
扫描网段中存活主机
'''
nm = nmap.PortScanner()
nm.scan(hosts=nmap_net,arguments = ' -n -sP -PE')
# nm.scan(hosts=nmap_net,arguments = ' -n -PA -PS')
hostlist = nm.all_hosts()
return hostlist
def nmap_sship(self,ports,nmap_net):
'''
扫描主机指定ssh端口是否开通ssh端口
:param ports:
:param port_list:
:param unkown_list:
:param nmap_net:
:return:
'''
ports = ports
port_list = ports.split(',')
nm = nmap.PortScanner() # 创建端口扫描对象
ssh_info = {}
unkown_list = []
# 调用扫描方法,参数指定扫描主机hosts,nmap扫描命令行参数arguments
nm.scan(hosts=nmap_net, arguments='-n -sP -PE')
tcp_all_ip = nm.all_hosts()
host_list = []
for ip in tcp_all_ip: # 遍历扫描主机
if nm[ip]['status']['state'] == "up":
host_list.append(ip)
for port in port_list:
try:
print("Scan ip %s ..... Port %s"%(ip,port))
logger.info("Scan ip %s ..... Port %s"%(ip,port))
tm = telnetlib.Telnet(host=ip,port=port,timeout=4)
tm_res = (tm.read_until("\n".encode(),timeout=4)).decode()
if tm_res:
if re.search("ssh",tm_res.lower()):
print(ip,port)
if ip not in self.black_list:
ssh_info[ip]=port
connet = "IP:%s Port:%s Server:%s"%(ip,port,tm_res.lower())
logger.info("IP:%s Port:%s Server:%s"%(ip,port,tm_res.lower()))
print("[?]IP:%s Port:%s Server:%s"%(ip,port,tm_res))
else:
if ip not in unkown_list and ip not in ssh_info.keys():
unkown_list.append(ip)
logger.info("Telnet not ssh server:%s,%s,%s"%(ip,port,tm_res))
print("Open Res.....",ip,port,tm_res)
else:
if ip not in unkown_list and ip not in ssh_info.keys():
unkown_list.append(ip)
logger.info("Telnet no data:%s,%s"%(ip,port))
print("Open....",ip,port)
except EOFError as e:
if ip not in unkown_list and ip not in ssh_info.keys():
unkown_list.append(ip)
unkown_list.append(ip)
logger.exception("Telnet port EOFError:%s,%s,%s"%(ip,port,e))
print("Open....",ip,port)
except Exception as e:
if ip not in unkown_list and ip not in ssh_info.keys():
unkown_list.append(ip)
logger.exception("Telnet port Exception:%s,%s,%s"%(ip,port,e))
print("error...",ip,port,e)
return ssh_info,host_list,list(set(unkown_list))
def try_login(self,sship_list,password_list,syscmd_list):
'''
尝试ssh用户密码登录,获取机器基本信息
:param sship_list:
:param password_list:
:param syscmd_list:
:return:
'''
password_list = password_list
syscmd_list = syscmd_list
if isinstance(sship_list, dict):
ssh_tuple_list = [(ip,port) for ip,port in sship_list.items()]
elif isinstance(sship_list,list):
ssh_tuple_list = sship_list
for ip,port in ssh_tuple_list:
system_info = ""
for password in password_list:
if ip not in self.can_login_lst.keys():
login_info = (ip,int(port),'root', password)
doobj = J_ssh_do(login_info)
res = doobj.pass_do(login_info,syscmd_list)
if res["status"] == "success":
if self.not_login_lst.get('ip'):
self.not_login_lst.pop(ip)
sys_hostname = res["hostname"].decode()
sys_mac = mac_trans(res["cat /sys/class/net/[^vtlsb]*/address||esxcfg-vmknic -l|awk '{print $8}'|grep ':'"].decode())
sys_sn = sn_trans(res["dmidecode -s system-serial-number |grep -v '^#'"].decode())
system_info = getsysversion([res["cat /etc/issue"].decode(),res["cat /etc/redhat-release"].decode()])
machine_type = machine_type_trans(res["dmidecode -s system-manufacturer |grep -v '^#'"].decode() + res["dmidecode -s system-product-name |grep -v '^#'"].decode())
print("ssh login and exec command:%s",res)
logger.info("ssh login and exec command:%s",res)
self.can_login_lst[ip] = (port,password,'root',system_info,sys_hostname,sys_mac,sys_sn,machine_type)
elif res["status"] == "failed" and re.search(r"reading SSH protocol banner",res["res"]):
# print "res res..........................",res['res']
print("IP:%s Connection closed by remote host,Sleep 60 (s).................. "%ip,res)
time.sleep(60)
else:
if ip not in self.not_login_lst.keys() and ip not in self.can_login_lst.keys():
self.not_login_lst[ip] = port
# print ip,port,password,traceback.print_exc()
return self.can_login_lst,self.not_login_lst
# def try_docker_login(self,sship_list,password_list,syscmd_list):
# '''
# 尝试Docker ssh用户密码登录,获取机器基本信息
# :param sship_list:
# :param password_list:
# :param syscmd_list:
# :return:
# '''
# password_list = password_list
# syscmd_list = syscmd_list
# if isinstance(sship_list, dict):
# ssh_tuple_list = [(ip,port) for ip,port in sship_list.items()]
# elif isinstance(sship_list,list):
# ssh_tuple_list = sship_list
# for ip,port in ssh_tuple_list:
# system_info = ""
# for password in password_list:
# if port not in self.can_login_lst.keys():
# login_info = (ip,int(port),'root', password)
# doobj = J_ssh_do(login_info)
# res = doobj.pass_do(login_info,syscmd_list)
# if res["status"] == "success":
# if self.not_login_lst.has_key(port):
# self.not_login_lst.pop(port)
# sys_hostname = res["hostname"]
# sys_mac = mac_trans(res["cat /sys/class/net/[^vtlsb]*/address||esxcfg-vmknic -l|awk '{print $8}'|grep ':'"])
# sys_sn = sn_trans(res["dmidecode -s system-serial-number"])
# system_info = getsysversion([res["cat /etc/issue"],res["cat /etc/redhat-release"]])
# machine_type = machine_type_trans(res["dmidecode -s system-manufacturer"] + res["dmidecode -s system-product-name"])
# print("ssh login and exec command:%s",res)
# logger.info("ssh login and exec command:%s",res)
# self.can_login_lst[port] = (ip,password,'root',system_info,sys_hostname,sys_mac,sys_sn,machine_type)
# elif res["status"] == "failed" and re.search(r"reading SSH protocol banner",res["res"]):
# # print "res res..........................",res['res']
# print("IP:%s Connection closed by remote host,Sleep 60 (s).................. "%port,res)
# time.sleep(60)
# else:
# if port not in self.not_login_lst.keys() and port not in self.can_login_lst.keys():
# self.not_login_lst[port] = ip
# # print ip,port,password,traceback.print_exc()
# return self.can_login_lst,self.not_login_lst
def try_key_login(self,sship_list,allkeyfile,syscmd_list):
'''
尝试ssh秘钥登录,获取机器基本信息
:param sship_list:
:param allkeyfile:
:param syscmd_list:
:return:
'''
# import traceback
for ip,port in sship_list.items():
print("try key login....",ip,port)
logger.info("Try ssh key login : %s,%s"%(ip,port))
keyfile = allkeyfile[0]
if ip not in self.can_key_login_lst.keys():
logger.info("Try ssh idrsa key : %s,%s,%s"%(ip,port,keyfile))
print('try idrsakey....',ip,port,keyfile)
login_info = (ip,int(port),'root',keyfile)
doobj = J_ssh_do(login_info)
res = doobj.rsa_do(login_info,syscmd_list)
if res["status"] == "success":
sys_hostname = res["hostname"].decode()
system_info = getsysversion([res["cat /etc/issue"].decode(),res["cat /etc/redhat-release"].decode()])
sys_mac = mac_trans(res["cat /sys/class/net/[^vtlsb]*/address||esxcfg-vmknic -l|awk '{print $8}'|grep ':'"].decode())
sys_sn = sn_trans(res["dmidecode -s system-serial-number |grep -v '^#'"].decode())
machine_type = machine_type_trans(res["dmidecode -s system-manufacturer |grep -v '^#'"].decode() + res["dmidecode -s system-product-name |grep -v '^#'"].decode())
self.can_key_login_lst[ip] = (port,keyfile,"root","",1,system_info,sys_hostname,sys_mac,sys_sn,machine_type)
if res["status"] == "failed":
keyfile = allkeyfile[1]
logger.info("try iddsa login...%s,%s,%s"%(ip,port,keyfile))
print("try iddsa login...",ip,port,keyfile)
login_info = (ip,port,'root', keyfile)
doobj = J_ssh_do(login_info)
res = doobj.dsa_do(login_info,syscmd_list)
if res["status"] == "success":
sys_hostname = res["hostname"].decode()
system_info = getsysversion([res["cat /etc/issue"].decode(),res["cat /etc/redhat-release"].decode()])
sys_mac = mac_trans(res["cat /sys/class/net/[^vtlsb]*/address||esxcfg-vmknic -l|awk '{print $8}'|grep ':'"].decode())
sys_sn = sn_trans(res["dmidecode -s system-serial-number |grep -v '^#'"].decode())
machine_type = machine_type_trans(res["dmidecode -s system-manufacturer |grep -v '^#'"].decode() + res["dmidecode -s system-product-name |grep -v '^#'"].decode())
if self.key_not_login_lst.get('ip'):self.key_not_login_lst.pop(ip)
self.can_key_login_lst[ip] = (port,keyfile,"root","",2,system_info,sys_hostname,sys_mac,sys_sn,machine_type)
else:
keyfile = allkeyfile[2]
logger.info("try Non-root idrsa login:%s,%s"%(ip,port))
print("try Non-root idrsa login...",ip,port)
password = '0koooAdmin'
login_info = (ip,port,'imoocc', keyfile,password)
doobj = J_ssh_do(login_info)
res = doobj.non_root_rsa_do(login_info,syscmd_list)
if res["status"] == "success":
sys_hostname = res["hostname"].decode()
sys_mac = mac_trans(res["cat /sys/class/net/[^vtlsb]*/address||esxcfg-vmknic -l|awk '{print $8}'|grep ':'"].decode())
system_info = getsysversion([res["cat /etc/issue"].decode(),res["cat /etc/redhat-release"].decode()])
sys_sn = sn_trans(res["dmidecode -s system-serial-number |grep -v '^#'"].decode())
machine_type = machine_type_trans(res["dmidecode -s system-manufacturer |grep -v '^#'"].decode() + res["dmidecode -s system-product-name |grep -v '^#'"].decode())
if self.key_not_login_lst.get('ip'):self.key_not_login_lst.pop(ip)
self.can_key_login_lst[ip] = (port,keyfile,"root","",3,system_info,sys_hostname,sys_mac,sys_sn,machine_type)
else:
if ip not in self.key_not_login_lst.keys() and ip not in self.can_key_login_lst.keys():
self.key_not_login_lst[ip] = (port,keyfile)
return self.can_key_login_lst,self.key_not_login_lst
# class NmapNet:
# def __init__(self,sysname_oid="",sn_oid="",community=""):
# self.community = community
# self.sysname_oid = sysname_oid
# self.sn_oids = sn_oid
#
# def sn_query(self,ip,sn_oid):
# try:
# cg = cmdgen.CommandGenerator()
# errorIndication,errorStatus,errorIndex,varBinds = cg.getCmd(
# cmdgen.CommunityData('server',self.community,1),
# cmdgen.UdpTransportTarget((ip,161)),
# '%s'%sn_oid
# )
# result = str(varBinds[0][1]) if varBinds[0][1] else ""
# logger.info("try nmap net device:%s"%result)
#
# except Exception as e:
# # import traceback
# # print traceback.print_exc()
# logger.exception("try nmap net device exception:%s"%e)
# result = None
# return result
#
# def sysname_query(self,ip):
# try:
# cg = cmdgen.CommandGenerator()
# errorIndication,errorStatus,errorIndex,varBinds = cg.getCmd(
# cmdgen.CommunityData('server',self.community,1),
# cmdgen.UdpTransportTarget((ip,161)),
# '%s'%self.sysname_oid
# )
# result = str(varBinds[0][1]) if varBinds[0][1] else ""
# logger.info("try nmap net device:%s"%result)
#
# except Exception as e:
# # import traceback
# # print traceback.print_exc()
# logger.exception("try nmap net device exception:%s"%e)
# result = None
# return result
#
#
# def query(self,ip):
# '''
# 查询交换机的snmp相关信息
# :return:'
# '''
# result = []
# result.append(self.sysname_query(ip))
# for sn_oid in self.sn_oids:
# res_sn = self.sn_query(ip,sn_oid)
# if res_sn:
# break
# result.append(res_sn)
# return result
# class NmapDocker(NmapDev):
#
# def __init__(self,d_cmds,pass_lst,ip_key_dic):
# NmapDev.__init__(self)
# # super(NmapDocker,self).__init__()
# self.docker_cmd_list = ["docker ps |awk -F '->' '{print $1}'|grep -v 'CONTAINER'|awk 'BEGIN{FS~/s+/;}{print $NF\" \"$1\" \"$2;}'|sed s/0.0.0.0://"]
# # self.docker_cmd_list = ["docker ps |awk -F '->' '{print $1}'|grep -v 'CONTAINER'|awk -F ':' '{print $NF','$1}'"]
# # self.docker_cmd_list = ["docker ps |awk -F '->' '{print $1}'|grep -v 'CONTAINER'|awk -F ':' '{print $NF}'|grep '^[0-9]'"]
# self.docker_cmd = d_cmds
# self.password_list = pass_lst
# self.p_docker_relate = {}
# self.ip_key_dic = ip_key_dic
#
#
# def do_nmap(self,host_list):
# ip_items = HostLoginifo.objects.filter(ip__in=host_list)
# ns = prpcrypt()
# for ip_item in ip_items:
# docker_dct = {}
# tmp_sship_list = []
# doobj = J_ssh_do([ip_item.ip,ip_item.ssh_port,ip_item.ssh_user])
# if ip_item.ssh_type==0:
# ssh_passwd = ns.decrypt(ip_item.ssh_passwd)
# login_info = (ip_item.ip,int(ip_item.ssh_port),ip_item.ssh_user,ssh_passwd)
# res = doobj.pass_do(login_info,self.docker_cmd_list)
# if ip_item.ssh_type==1:
# login_info = (ip_item.ip,int(ip_item.ssh_port),ip_item.ssh_user,ip_item.ssh_rsa)
# res = doobj.rsa_do(login_info,self.docker_cmd_list)
# if ip_item.ssh_type==2:
# login_info = (ip_item.ip,int(ip_item.ssh_port),ip_item.ssh_user,ip_item.ssh_rsa)
# res = doobj.dsa_do(login_info,self.docker_cmd_list)
# if ip_item.ssh_type==3:
# login_info = (ip_item.ip,int(ip_item.ssh_port),ip_item.ssh_user,ip_item.ssh_rsa,ip_item.rsa_pass)
# res = doobj.imoocc_rsa_do(login_info,self.docker_cmd_list)
#
# # port_list = res["docker ps |awk -F '->' '{print $1}'|grep -v 'CONTAINER'|awk -F ':' '{print $NF}'|grep '^[0-9]'"].split("\n")
# port_list = res["docker ps |awk -F '->' '{print $1}'|grep -v 'CONTAINER'|awk 'BEGIN{FS~/s+/;}{print $NF\" \"$1\" \"$2;}'|sed s/0.0.0.0://"].split("\n")
# for d_item in port_list:
# if d_item:
# print("...............d_item",d_item)
# d_port,d_id,d_dn = re.split('\s+',d_item)[:3]
# d_cid = d_id + d_dn
# docker_dct[d_port] = sn_trans(d_cid)
#
# sship_list = [(ip_item.ip,port) for port in docker_dct.keys() if port]
#
# nr = prpcrypt()
# canlogin_list,notlogin_list = self.try_docker_login(sship_list,self.password_list,self.docker_cmd)
# for ip,port in sship_list:
# # sship_obj = {}
# # sship_obj[ip] = port
# if canlogin_list.has_key(port):
# id = ConnectionInfo.objects.all().count() + 1
# item_val = canlogin_list[port]
# ssh_passwd = nr.encrypt(item_val[1])
# ConnectionInfo.objects.create(id = id,ssh_username=item_val[2],ssh_userpasswd=ssh_passwd,ssh_hostip=ip,ssh_host_port=port,
# ssh_status=1,ssh_type=4,sn_key=docker_dct[port])
# py_id = PhysicalServerInfo.objects.get(conn_phy__sn_key=self.ip_key_dic[ip]).id
# VirtualServerInfo.objects.create(server_ip=ip,server_type="Docker Contianer",system_ver=item_val[3],sys_hostname=item_val[4],mac=item_val[5],sn=docker_dct[port],vir_phy_id=py_id,conn_vir_id=id)
#
# if notlogin_list.has_key(port):
# item_val = notlogin_list[ip]
# ConnectionInfo.objects.create(id = id,ssh_username=item_val[2],ssh_hostip=ip,ssh_host_port=item_val[0],
# ssh_status=0,ssh_type=5,sn_key=docker_dct[port])
# py_id = PhysicalServerInfo.objects.get(conn_phy__sn_key=self.ip_key_dic[ip]).id
# VirtualServerInfo.objects.create(server_ip=ip,server_type="Docker Contianer",vir_phy_id=py_id,conn_vir_id=id)
# class NmapKVM():
# def __init__(self,ip_key_dic):
# self.p_kvm_cmds = ['cat /sys/class/net/vnet*/address']
# self.ip_key_dic = ip_key_dic
#
# def do_nmap(self,p_kvm_list):
# for item in p_kvm_list:
# ip_items = HostsInfo.objects.filter(ip=item)
# docker_dct = {}
# nr = prpcrypt()
# for ip_item in ip_items:
# doobj = J_ssh_do([ip_item.ip,ip_item.ssh_port,ip_item.ssh_user])
# if ip_item.ssh_type==0:
# ssh_passwd = nr.decrypt(ip_item.ssh_passwd)
# login_info = (ip_item.ip,int(ip_item.ssh_port),ip_item.ssh_user,ssh_passwd)
# res = doobj.pass_do(login_info,self.p_kvm_cmds)
# if ip_item.ssh_type==1:
# login_info = (ip_item.ip,int(ip_item.ssh_port),ip_item.ssh_user,ip_item.ssh_rsa)
# res = doobj.rsa_do(login_info,self.p_kvm_cmds)
# if ip_item.ssh_type==2:
# login_info = (ip_item.ip,int(ip_item.ssh_port),ip_item.ssh_user,ip_item.ssh_rsa)
# res = doobj.dsa_do(login_info,self.p_kvm_cmds)
# if ip_item.ssh_type==3:
# login_info = (ip_item.ip,int(ip_item.ssh_port),ip_item.ssh_user,ip_item.ssh_rsa,ip_item.rsa_pass)
# res = doobj.non_root_rsa_do(login_info,self.p_kvm_cmds)
# kvm_mac_list = res[self.p_kvm_cmds[0]].split('\n')
# for kvm_item in kvm_mac_list:
# if kvm_item:
# kvm_mac_str = mac_trans(kvm_item)[-10:]
# py_id = PhysicalServerInfo.objects.get(conn_phy__sn_key=self.ip_key_dic[item]).id
# v_obj = VirtualServerInfo.objects.filter(mac__contains=kvm_mac_str)
# if v_obj:
# v_obj.update(vir_phy=py_id,server_type="KVM")
# else:
# p_ob = PhysicalServerInfo.objects.filter(mac__contains=kvm_mac_str)
# if p_ob and len(p_ob) < 2:
# p_ob.update(vir_phy=py_id,server_type="KVM")
#
# class NmapVMX():
# def __init__(self,vmware_p_list,ip_key_dic):
# self.vmware_p_list = vmware_p_list
# self.ip_key_dic = ip_key_dic
#
# def dosnmp(self):
# nr = prpcrypt()
# for vm_item in self.vmware_p_list:
# vm_sn = self.ip_key_dic[vm_item]
# p_item = ConnectionInfo.objects.filter(sn_key=vm_sn)
# try:
# vmip = p_item[0].ssh_hostip.split(',')[0]
# ssh_passwd = nr.decrypt(p_item[0].ssh_userpasswd)
# ne = SnmpESXI(host=vmip,user=p_item[0].ssh_username,passwd=ssh_passwd)
# res = ne.dosnmp()
# py_id = PhysicalServerInfo.objects.get(conn_phy__sn_key=vm_sn).id
# for v_item in res:
# v_uuid = v_item['uuid']
# v_vmname = v_item['name']
# v_obj = VirtualServerInfo.objects.filter(sn__contains=v_uuid)
# if v_obj:
# v_obj.update(vir_phy=py_id,server_type=u"VMX:%s"%(v_vmname))
# else:
# logger.error("Error:no vmx matched! %s %s"%(v_uuid,v_vmname))
# print("Error:no vmx matched! %s %s"%(v_uuid,v_vmname))
# except Exception as e:
# print("Vmware host sdk connect failed!%s"%(p_item[0].ssh_hostip))
# logger.error("Error:Vmware host sdk connect failed!%s"%(p_item[0].ssh_hostip))
# def NetDevLogin(dev_ips={},backup_sw="False",back_server=""):
# crpt_do = prpcrypt()
# for ip,login_info in dev_ips.items():
# if backup_sw == "True":
# jn = J_net_do(ip,login_info)
# res = jn.cisco_backup(back_server=back_server,sw_backup=True)
# else:
# jn = J_net_do(ip,login_info)
# res = jn.cisco_login()
#
# if res["status"] == "success" and res["level"] == 2:
# dev_obj = NetWorkInfo.objects.filter(host_ip = ip)
# if dev_obj:
# dev_sn_key = dev_obj[0].sn
# dev_id = dev_obj[0].id
#
# con_obj = NetConnectionInfo.objects.filter(sn_key=dev_sn_key)
# con_user,con_passwd,con_en_passwd = res["login_info"]
# crpt_pass = crpt_do.encrypt(con_passwd) if con_passwd else crpt_do.encrypt('')
# en_crpt_pass = crpt_do.encrypt(con_en_passwd) if con_passwd else crpt_do.encrypt('')
#
# if not con_obj:
# NetConnectionInfo.objects.create(tel_username=con_user,tel_userpasswd=crpt_pass,tel_enpasswd=en_crpt_pass,
# tel_host_port='21',tel_hostip=ip,sn_key=dev_sn_key,dev_info_id=dev_id,tel_status=1,tel_type=int(res["level"])) | 53.295956 | 215 | 0.532715 |
import os
import re
import telnetlib
PROJECT_ROOT = os.path.realpath(os.path.dirname(__file__))
from pysnmp.entity.rfc3413.oneliner import cmdgen
import nmap
import time
from hosts.models import HostsInfo
from hosts.lib.J_do import J_ssh_do
from hosts.lib.utils import mac_trans,sn_trans,machine_type_trans
from hosts.lib.SnmpVMS import SnmpESXI
from hosts.lib.utils import prpcrypt
from hosts.lib.utils import getsysversion
from hosts.lib.J_do import J_net_do
from hosts.utils.tools import sendmail
import logging
logger = logging.getLogger('hosts.view')
def snmp_begin(nmap_type, ports, password_list, key_file, syscmd_list, black_list, s_emails):
    """Scan a /24 network for SSH hosts, attempt password and key logins,
    e-mail a summary report, and persist every reachable host to HostsInfo.

    :param nmap_type: first three octets of the subnet (e.g. '10.0.0'); None aborts
    :param ports: comma-separated candidate SSH ports
    :param password_list: root passwords to try
    :param key_file: sequence of key-file paths for key-based login
    :param syscmd_list: shell commands run on each host after login
    :param black_list: IPs excluded from the SSH result set
    :param s_emails: recipient address(es) for the report mail
    :return: (unkown_list, key_not_login_list), or False when nmap_type is None
    """
    if nmap_type is None:
        return False
    scanner = NmapDev(black_list)
    subnet = '%s.0/24' % nmap_type
    sship_list, host_list, unkown_list = scanner.nmap_sship(ports, subnet)
    canlogin_list, notlogin_list = scanner.try_login(sship_list, password_list, syscmd_list)
    key_login_list, key_not_login_list = scanner.try_key_login(sship_list, key_file, syscmd_list)
    print("Password Login ...", canlogin_list, notlogin_list)
    logger.info("Use password login:%s,%s" % (canlogin_list, notlogin_list))
    print("Key Login ...", key_login_list, key_not_login_list)
    logger.info("Use key login:%s,%s" % (key_login_list, key_not_login_list))
    print("start send mail to wujh8701@163.com!!!!!!")
    # mail the scan summary (message text is user-facing and kept verbatim)
    email_message = (u"可以ssh 用户密码登录的服务器列表 \n %s \n 可以ssh 用户密钥登录的服务器列表 \n %s \n 无法ssh登录列表 \n %s \n 未知主机 \n %s"
                     % (canlogin_list, key_login_list, notlogin_list, unkown_list))
    mailer = sendmail(s_emails, u"系统扫描结果", email_message)
    mailer.send()
    cipher = prpcrypt()
    if canlogin_list:
        for ip in canlogin_list:
            # replace any stale record for this host, then store the new one
            HostsInfo.objects.filter(ip__exact=ip).delete()
            port, password, user, system_ver, hostname, mac, sn, machine = canlogin_list[ip]
            machine_label = machine if machine else u"未知(需要安装dmidecode工具)"
            enc_pass = cipher.encrypt(password) if password else cipher.encrypt('')
            HostsInfo.objects.update_or_create(ip=ip,
                                               ssh_port=str(port),
                                               ssh_passwd=enc_pass,
                                               ssh_user=user,
                                               ssh_status=1,
                                               ssh_type=0,
                                               system_ver=system_ver,
                                               hostname=hostname,
                                               mac_address=mac,
                                               sn=sn,
                                               mathine_type=machine_label)
    if key_login_list:
        for ip in key_login_list:
            HostsInfo.objects.filter(ip__exact=ip).delete()
            port, keyfile, user, rsa_pass, ssh_type, system_ver, hostname, mac, sn, machine = key_login_list[ip]
            machine_label = machine if machine else u"未知(需要安装dmidecode工具)"
            HostsInfo.objects.update_or_create(ip=ip,
                                               ssh_port=port,
                                               ssh_rsa=keyfile,
                                               ssh_user=user,
                                               rsa_pass=rsa_pass,
                                               ssh_status=1,
                                               ssh_type=ssh_type,
                                               system_ver=system_ver,
                                               hostname=hostname,
                                               mac_address=mac,
                                               sn=sn,
                                               mathine_type=machine_label)
    return unkown_list, key_not_login_list
class NmapDev(object):
def __init__(self,black_list=[]):
self.black_list = black_list
self.can_login_lst = {}
self.not_login_lst = {}
self.can_key_login_lst = {}
self.key_not_login_lst = {}
def nmap_allip(self,nmap_net):
nm = nmap.PortScanner()
nm.scan(hosts=nmap_net,arguments = ' -n -sP -PE')
hostlist = nm.all_hosts()
return hostlist
def nmap_sship(self,ports,nmap_net):
ports = ports
port_list = ports.split(',')
nm = nmap.PortScanner()
ssh_info = {}
unkown_list = []
nm.scan(hosts=nmap_net, arguments='-n -sP -PE')
tcp_all_ip = nm.all_hosts()
host_list = []
for ip in tcp_all_ip:
if nm[ip]['status']['state'] == "up":
host_list.append(ip)
for port in port_list:
try:
print("Scan ip %s ..... Port %s"%(ip,port))
logger.info("Scan ip %s ..... Port %s"%(ip,port))
tm = telnetlib.Telnet(host=ip,port=port,timeout=4)
tm_res = (tm.read_until("\n".encode(),timeout=4)).decode()
if tm_res:
if re.search("ssh",tm_res.lower()):
print(ip,port)
if ip not in self.black_list:
ssh_info[ip]=port
connet = "IP:%s Port:%s Server:%s"%(ip,port,tm_res.lower())
logger.info("IP:%s Port:%s Server:%s"%(ip,port,tm_res.lower()))
print("[?]IP:%s Port:%s Server:%s"%(ip,port,tm_res))
else:
if ip not in unkown_list and ip not in ssh_info.keys():
unkown_list.append(ip)
logger.info("Telnet not ssh server:%s,%s,%s"%(ip,port,tm_res))
print("Open Res.....",ip,port,tm_res)
else:
if ip not in unkown_list and ip not in ssh_info.keys():
unkown_list.append(ip)
logger.info("Telnet no data:%s,%s"%(ip,port))
print("Open....",ip,port)
except EOFError as e:
if ip not in unkown_list and ip not in ssh_info.keys():
unkown_list.append(ip)
unkown_list.append(ip)
logger.exception("Telnet port EOFError:%s,%s,%s"%(ip,port,e))
print("Open....",ip,port)
except Exception as e:
if ip not in unkown_list and ip not in ssh_info.keys():
unkown_list.append(ip)
logger.exception("Telnet port Exception:%s,%s,%s"%(ip,port,e))
print("error...",ip,port,e)
return ssh_info,host_list,list(set(unkown_list))
def try_login(self,sship_list,password_list,syscmd_list):
password_list = password_list
syscmd_list = syscmd_list
if isinstance(sship_list, dict):
ssh_tuple_list = [(ip,port) for ip,port in sship_list.items()]
elif isinstance(sship_list,list):
ssh_tuple_list = sship_list
for ip,port in ssh_tuple_list:
system_info = ""
for password in password_list:
if ip not in self.can_login_lst.keys():
login_info = (ip,int(port),'root', password)
doobj = J_ssh_do(login_info)
res = doobj.pass_do(login_info,syscmd_list)
if res["status"] == "success":
if self.not_login_lst.get('ip'):
self.not_login_lst.pop(ip)
sys_hostname = res["hostname"].decode()
sys_mac = mac_trans(res["cat /sys/class/net/[^vtlsb]*/address||esxcfg-vmknic -l|awk '{print $8}'|grep ':'"].decode())
sys_sn = sn_trans(res["dmidecode -s system-serial-number |grep -v '^#'"].decode())
system_info = getsysversion([res["cat /etc/issue"].decode(),res["cat /etc/redhat-release"].decode()])
machine_type = machine_type_trans(res["dmidecode -s system-manufacturer |grep -v '^#'"].decode() + res["dmidecode -s system-product-name |grep -v '^#'"].decode())
print("ssh login and exec command:%s",res)
logger.info("ssh login and exec command:%s",res)
self.can_login_lst[ip] = (port,password,'root',system_info,sys_hostname,sys_mac,sys_sn,machine_type)
elif res["status"] == "failed" and re.search(r"reading SSH protocol banner",res["res"]):
print("IP:%s Connection closed by remote host,Sleep 60 (s).................. "%ip,res)
time.sleep(60)
else:
if ip not in self.not_login_lst.keys() and ip not in self.can_login_lst.keys():
self.not_login_lst[ip] = port
return self.can_login_lst,self.not_login_lst
# 尝试Docker ssh用户密码登录,获取机器基本信息
# :param sship_list:
# :param password_list:
# :param syscmd_list:
# :return:
# '''
for ip,port in sship_list.items():
print("try key login....",ip,port)
logger.info("Try ssh key login : %s,%s"%(ip,port))
keyfile = allkeyfile[0]
if ip not in self.can_key_login_lst.keys():
logger.info("Try ssh idrsa key : %s,%s,%s"%(ip,port,keyfile))
print('try idrsakey....',ip,port,keyfile)
login_info = (ip,int(port),'root',keyfile)
doobj = J_ssh_do(login_info)
res = doobj.rsa_do(login_info,syscmd_list)
if res["status"] == "success":
sys_hostname = res["hostname"].decode()
system_info = getsysversion([res["cat /etc/issue"].decode(),res["cat /etc/redhat-release"].decode()])
sys_mac = mac_trans(res["cat /sys/class/net/[^vtlsb]*/address||esxcfg-vmknic -l|awk '{print $8}'|grep ':'"].decode())
sys_sn = sn_trans(res["dmidecode -s system-serial-number |grep -v '^#'"].decode())
machine_type = machine_type_trans(res["dmidecode -s system-manufacturer |grep -v '^#'"].decode() + res["dmidecode -s system-product-name |grep -v '^#'"].decode())
self.can_key_login_lst[ip] = (port,keyfile,"root","",1,system_info,sys_hostname,sys_mac,sys_sn,machine_type)
if res["status"] == "failed":
keyfile = allkeyfile[1]
logger.info("try iddsa login...%s,%s,%s"%(ip,port,keyfile))
print("try iddsa login...",ip,port,keyfile)
login_info = (ip,port,'root', keyfile)
doobj = J_ssh_do(login_info)
res = doobj.dsa_do(login_info,syscmd_list)
if res["status"] == "success":
sys_hostname = res["hostname"].decode()
system_info = getsysversion([res["cat /etc/issue"].decode(),res["cat /etc/redhat-release"].decode()])
sys_mac = mac_trans(res["cat /sys/class/net/[^vtlsb]*/address||esxcfg-vmknic -l|awk '{print $8}'|grep ':'"].decode())
sys_sn = sn_trans(res["dmidecode -s system-serial-number |grep -v '^#'"].decode())
machine_type = machine_type_trans(res["dmidecode -s system-manufacturer |grep -v '^#'"].decode() + res["dmidecode -s system-product-name |grep -v '^#'"].decode())
if self.key_not_login_lst.get('ip'):self.key_not_login_lst.pop(ip)
self.can_key_login_lst[ip] = (port,keyfile,"root","",2,system_info,sys_hostname,sys_mac,sys_sn,machine_type)
else:
keyfile = allkeyfile[2]
logger.info("try Non-root idrsa login:%s,%s"%(ip,port))
print("try Non-root idrsa login...",ip,port)
password = '0koooAdmin'
login_info = (ip,port,'imoocc', keyfile,password)
doobj = J_ssh_do(login_info)
res = doobj.non_root_rsa_do(login_info,syscmd_list)
if res["status"] == "success":
sys_hostname = res["hostname"].decode()
sys_mac = mac_trans(res["cat /sys/class/net/[^vtlsb]*/address||esxcfg-vmknic -l|awk '{print $8}'|grep ':'"].decode())
system_info = getsysversion([res["cat /etc/issue"].decode(),res["cat /etc/redhat-release"].decode()])
sys_sn = sn_trans(res["dmidecode -s system-serial-number |grep -v '^#'"].decode())
machine_type = machine_type_trans(res["dmidecode -s system-manufacturer |grep -v '^#'"].decode() + res["dmidecode -s system-product-name |grep -v '^#'"].decode())
if self.key_not_login_lst.get('ip'):self.key_not_login_lst.pop(ip)
self.can_key_login_lst[ip] = (port,keyfile,"root","",3,system_info,sys_hostname,sys_mac,sys_sn,machine_type)
else:
if ip not in self.key_not_login_lst.keys() and ip not in self.can_key_login_lst.keys():
self.key_not_login_lst[ip] = (port,keyfile)
return self.can_key_login_lst,self.key_not_login_lst
result = []
# result.append(self.sysname_query(ip))
# for sn_oid in self.sn_oids:
# res_sn = self.sn_query(ip,sn_oid)
# if res_sn:
# break
# result.append(res_sn)
# return result
# class NmapDocker(NmapDev):
#
# def __init__(self,d_cmds,pass_lst,ip_key_dic):
# NmapDev.__init__(self)
# # super(NmapDocker,self).__init__()
# self.docker_cmd_list = ["docker ps |awk -F '->' '{print $1}'|grep -v 'CONTAINER'|awk 'BEGIN{FS~/s+/;}{print $NF\" \"$1\" \"$2;}'|sed s/0.0.0.0://"]
# # self.docker_cmd_list = ["docker ps |awk -F '->' '{print $1}'|grep -v 'CONTAINER'|awk -F ':' '{print $NF','$1}'"]
# # self.docker_cmd_list = ["docker ps |awk -F '->' '{print $1}'|grep -v 'CONTAINER'|awk -F ':' '{print $NF}'|grep '^[0-9]'"]
# self.docker_cmd = d_cmds
# self.password_list = pass_lst
# self.p_docker_relate = {}
# self.ip_key_dic = ip_key_dic
#
#
# def do_nmap(self,host_list):
# ip_items = HostLoginifo.objects.filter(ip__in=host_list)
# ns = prpcrypt()
# for ip_item in ip_items:
# docker_dct = {}
# tmp_sship_list = []
# doobj = J_ssh_do([ip_item.ip,ip_item.ssh_port,ip_item.ssh_user])
# if ip_item.ssh_type==0:
# ssh_passwd = ns.decrypt(ip_item.ssh_passwd)
# login_info = (ip_item.ip,int(ip_item.ssh_port),ip_item.ssh_user,ssh_passwd)
# res = doobj.pass_do(login_info,self.docker_cmd_list)
# if ip_item.ssh_type==1:
# login_info = (ip_item.ip,int(ip_item.ssh_port),ip_item.ssh_user,ip_item.ssh_rsa)
# res = doobj.rsa_do(login_info,self.docker_cmd_list)
# if ip_item.ssh_type==2:
# login_info = (ip_item.ip,int(ip_item.ssh_port),ip_item.ssh_user,ip_item.ssh_rsa)
# res = doobj.dsa_do(login_info,self.docker_cmd_list)
# if ip_item.ssh_type==3:
# login_info = (ip_item.ip,int(ip_item.ssh_port),ip_item.ssh_user,ip_item.ssh_rsa,ip_item.rsa_pass)
# res = doobj.imoocc_rsa_do(login_info,self.docker_cmd_list)
#
# # port_list = res["docker ps |awk -F '->' '{print $1}'|grep -v 'CONTAINER'|awk -F ':' '{print $NF}'|grep '^[0-9]'"].split("\n")
# port_list = res["docker ps |awk -F '->' '{print $1}'|grep -v 'CONTAINER'|awk 'BEGIN{FS~/s+/;}{print $NF\" \"$1\" \"$2;}'|sed s/0.0.0.0://"].split("\n")
# for d_item in port_list:
# if d_item:
# print("...............d_item",d_item)
# d_port,d_id,d_dn = re.split('\s+',d_item)[:3]
# d_cid = d_id + d_dn
# docker_dct[d_port] = sn_trans(d_cid)
#
# sship_list = [(ip_item.ip,port) for port in docker_dct.keys() if port]
#
# nr = prpcrypt()
# canlogin_list,notlogin_list = self.try_docker_login(sship_list,self.password_list,self.docker_cmd)
# for ip,port in sship_list:
# # sship_obj = {}
# # sship_obj[ip] = port
# if canlogin_list.has_key(port):
# id = ConnectionInfo.objects.all().count() + 1
# item_val = canlogin_list[port]
# ssh_passwd = nr.encrypt(item_val[1])
# ConnectionInfo.objects.create(id = id,ssh_username=item_val[2],ssh_userpasswd=ssh_passwd,ssh_hostip=ip,ssh_host_port=port,
# ssh_status=1,ssh_type=4,sn_key=docker_dct[port])
# py_id = PhysicalServerInfo.objects.get(conn_phy__sn_key=self.ip_key_dic[ip]).id
# VirtualServerInfo.objects.create(server_ip=ip,server_type="Docker Contianer",system_ver=item_val[3],sys_hostname=item_val[4],mac=item_val[5],sn=docker_dct[port],vir_phy_id=py_id,conn_vir_id=id)
#
# if notlogin_list.has_key(port):
# item_val = notlogin_list[ip]
# ConnectionInfo.objects.create(id = id,ssh_username=item_val[2],ssh_hostip=ip,ssh_host_port=item_val[0],
# ssh_status=0,ssh_type=5,sn_key=docker_dct[port])
# py_id = PhysicalServerInfo.objects.get(conn_phy__sn_key=self.ip_key_dic[ip]).id
# VirtualServerInfo.objects.create(server_ip=ip,server_type="Docker Contianer",vir_phy_id=py_id,conn_vir_id=id)
# class NmapKVM():
# def __init__(self,ip_key_dic):
# self.p_kvm_cmds = ['cat /sys/class/net/vnet*/address']
# self.ip_key_dic = ip_key_dic
#
# def do_nmap(self,p_kvm_list):
# for item in p_kvm_list:
# ip_items = HostsInfo.objects.filter(ip=item)
# docker_dct = {}
# nr = prpcrypt()
# for ip_item in ip_items:
# doobj = J_ssh_do([ip_item.ip,ip_item.ssh_port,ip_item.ssh_user])
# if ip_item.ssh_type==0:
# ssh_passwd = nr.decrypt(ip_item.ssh_passwd)
# login_info = (ip_item.ip,int(ip_item.ssh_port),ip_item.ssh_user,ssh_passwd)
# res = doobj.pass_do(login_info,self.p_kvm_cmds)
# if ip_item.ssh_type==1:
# login_info = (ip_item.ip,int(ip_item.ssh_port),ip_item.ssh_user,ip_item.ssh_rsa)
# res = doobj.rsa_do(login_info,self.p_kvm_cmds)
# if ip_item.ssh_type==2:
# login_info = (ip_item.ip,int(ip_item.ssh_port),ip_item.ssh_user,ip_item.ssh_rsa)
# res = doobj.dsa_do(login_info,self.p_kvm_cmds)
# if ip_item.ssh_type==3:
# login_info = (ip_item.ip,int(ip_item.ssh_port),ip_item.ssh_user,ip_item.ssh_rsa,ip_item.rsa_pass)
# res = doobj.non_root_rsa_do(login_info,self.p_kvm_cmds)
# kvm_mac_list = res[self.p_kvm_cmds[0]].split('\n')
# for kvm_item in kvm_mac_list:
# if kvm_item:
# kvm_mac_str = mac_trans(kvm_item)[-10:]
# py_id = PhysicalServerInfo.objects.get(conn_phy__sn_key=self.ip_key_dic[item]).id
# v_obj = VirtualServerInfo.objects.filter(mac__contains=kvm_mac_str)
# if v_obj:
# v_obj.update(vir_phy=py_id,server_type="KVM")
# else:
# p_ob = PhysicalServerInfo.objects.filter(mac__contains=kvm_mac_str)
# if p_ob and len(p_ob) < 2:
# p_ob.update(vir_phy=py_id,server_type="KVM")
#
# class NmapVMX():
# def __init__(self,vmware_p_list,ip_key_dic):
# self.vmware_p_list = vmware_p_list
# self.ip_key_dic = ip_key_dic
#
# def dosnmp(self):
# nr = prpcrypt()
# for vm_item in self.vmware_p_list:
# vm_sn = self.ip_key_dic[vm_item]
# p_item = ConnectionInfo.objects.filter(sn_key=vm_sn)
# try:
# vmip = p_item[0].ssh_hostip.split(',')[0]
# ssh_passwd = nr.decrypt(p_item[0].ssh_userpasswd)
# ne = SnmpESXI(host=vmip,user=p_item[0].ssh_username,passwd=ssh_passwd)
# res = ne.dosnmp()
# py_id = PhysicalServerInfo.objects.get(conn_phy__sn_key=vm_sn).id
# for v_item in res:
# v_uuid = v_item['uuid']
# v_vmname = v_item['name']
# v_obj = VirtualServerInfo.objects.filter(sn__contains=v_uuid)
# if v_obj:
# v_obj.update(vir_phy=py_id,server_type=u"VMX:%s"%(v_vmname))
# else:
# logger.error("Error:no vmx matched! %s %s"%(v_uuid,v_vmname))
# print("Error:no vmx matched! %s %s"%(v_uuid,v_vmname))
# except Exception as e:
# print("Vmware host sdk connect failed!%s"%(p_item[0].ssh_hostip))
# logger.error("Error:Vmware host sdk connect failed!%s"%(p_item[0].ssh_hostip))
# def NetDevLogin(dev_ips={},backup_sw="False",back_server=""):
# crpt_do = prpcrypt()
# for ip,login_info in dev_ips.items():
# if backup_sw == "True":
# jn = J_net_do(ip,login_info)
# res = jn.cisco_backup(back_server=back_server,sw_backup=True)
# else:
# jn = J_net_do(ip,login_info)
# res = jn.cisco_login()
#
# if res["status"] == "success" and res["level"] == 2:
# dev_obj = NetWorkInfo.objects.filter(host_ip = ip)
# if dev_obj:
# dev_sn_key = dev_obj[0].sn
# dev_id = dev_obj[0].id
#
# con_obj = NetConnectionInfo.objects.filter(sn_key=dev_sn_key)
# con_user,con_passwd,con_en_passwd = res["login_info"]
# crpt_pass = crpt_do.encrypt(con_passwd) if con_passwd else crpt_do.encrypt('')
# en_crpt_pass = crpt_do.encrypt(con_en_passwd) if con_passwd else crpt_do.encrypt('')
#
# if not con_obj:
# NetConnectionInfo.objects.create(tel_username=con_user,tel_userpasswd=crpt_pass,tel_enpasswd=en_crpt_pass,
# tel_host_port='21',tel_hostip=ip,sn_key=dev_sn_key,dev_info_id=dev_id,tel_status=1,tel_type=int(res["level"])) | true | true |
f7f5a516c0e2b72a9ae5b2b71a5aab1a65000d86 | 2,147 | py | Python | xfel/command_line/cxi_cspad_pinwheel.py | rimmartin/cctbx_project | 644090f9432d9afc22cfb542fc3ab78ca8e15e5d | [
"BSD-3-Clause-LBNL"
] | null | null | null | xfel/command_line/cxi_cspad_pinwheel.py | rimmartin/cctbx_project | 644090f9432d9afc22cfb542fc3ab78ca8e15e5d | [
"BSD-3-Clause-LBNL"
] | null | null | null | xfel/command_line/cxi_cspad_pinwheel.py | rimmartin/cctbx_project | 644090f9432d9afc22cfb542fc3ab78ca8e15e5d | [
"BSD-3-Clause-LBNL"
] | null | null | null | from __future__ import division
# LIBTBX_SET_DISPATCHER_NAME cxi.cspad_pinwheel
#
# Removes all but the central sensors from a CSPAD CBF
#
import dxtbx, sys, os
import libtbx.option_parser
from xfel.cftbx.detector.cspad_cbf_tbx import cbf_file_to_basis_dict, write_cspad_cbf
from libtbx.utils import Usage
def run(argv=None):
  """Strip a CSPAD CBF image down to the central sensors of each quadrant.

  For every cbf file given on the command line, remove every sensor except
  sensor 1 of each of the 4 quadrants from the metrology, renumber the kept
  sensor to 0, and write the reduced image next to the original with a
  _pinwheel.cbf suffix.

  @param argv List of command line arguments (cbf file paths). Defaults to
              sys.argv[1:].
  """
  if argv is None:
    argv = sys.argv[1:]

  command_line = libtbx.option_parser.option_parser(
    usage="%s files" % libtbx.env.dispatcher_name).process(args=argv)

  paths = command_line.args
  if not paths:
    raise Usage("No files specified")

  for path in paths:
    # Load the metrology dictionary, containing basis shifts for each item in the hierarchy
    metro = cbf_file_to_basis_dict(path)

    # Remove from the hierarchy all but the central sensors (sensor 1 of each quadrant).
    # Need to remove the sensor basis shifts and the corresponding asic shifts.
    for quad in range(4):
      for sensor in [0,2,3,4,5,6,7]:
        metro.pop((0,quad,sensor))
        for asic in range(2):
          metro.pop((0,quad,sensor,asic))

    # Renumber the remaining sensors from 1 to 0.  Iterate over a snapshot of
    # the keys: replacing keys while iterating the dict itself raises
    # RuntimeError ("dictionary keys changed during iteration") on modern
    # Python.
    for key in list(metro):
      if len(key) == 3:
        detector, quad, sensor = key
        metro[(detector,quad,0)] = metro.pop(key)
      elif len(key) == 4:
        detector, quad, sensor, asic = key
        metro[(detector,quad,0,asic)] = metro.pop(key)

    # Build the tiles dictionary for only sensor 1 of each quadrant. Rename that sensor to zero.
    img = dxtbx.load(path)
    tiles = {}
    for quad in range(4):
      src_sensor = 1
      dest_sensor = 0
      for asic in range(2):
        tiles[(0,quad,dest_sensor,asic)] = img.get_raw_data()[(quad*16)+(src_sensor*2)+asic] # FIXME get the panel ID from dxtbx

    destpath = os.path.splitext(path)[0] + "_pinwheel.cbf"
    hierarchy = img.get_detector().hierarchy()
    beam = img.get_beam()

    # Write the result. NOTE(review): the original comment said abs() had to be
    # called on the root distance because of a bug with the hierarchy matrix,
    # but abs is not applied here -- confirm which is intended.
    write_cspad_cbf(tiles, metro, 'cbf', None, destpath, beam.get_wavelength(), hierarchy.get_distance())

if (__name__ == "__main__") :
  run(sys.argv[1:])
| 34.079365 | 128 | 0.681416 | from __future__ import division
import dxtbx, sys, os
import libtbx.option_parser
from xfel.cftbx.detector.cspad_cbf_tbx import cbf_file_to_basis_dict, write_cspad_cbf
from libtbx.utils import Usage
def run(argv=None):
if argv is None:
argv = sys.argv[1:]
command_line = libtbx.option_parser.option_parser(
usage="%s files" % libtbx.env.dispatcher_name).process(args=argv)
paths = command_line.args
if len(paths) <= 0:
raise Usage("No files specified")
for path in paths:
metro = cbf_file_to_basis_dict(path)
for quad in range(4):
for sensor in [0,2,3,4,5,6,7]:
metro.pop((0,quad,sensor))
for asic in range(2):
metro.pop((0,quad,sensor,asic))
for key in metro:
if len(key) == 3:
detector, quad, sensor = key
metro[(detector,quad,0)] = metro.pop(key)
elif len(key) == 4:
detector, quad, sensor, asic = key
metro[(detector,quad,0,asic)] = metro.pop(key)
img = dxtbx.load(path)
tiles = {}
for quad in range(4):
src_sensor = 1
dest_sensor = 0
for asic in range(2):
tiles[(0,quad,dest_sensor,asic)] = img.get_raw_data()[(quad*16)+(src_sensor*2)+asic]
destpath = os.path.splitext(path)[0] + "_pinwheel.cbf"
hierarchy = img.get_detector().hierarchy()
beam = img.get_beam()
write_cspad_cbf(tiles, metro, 'cbf', None, destpath, beam.get_wavelength(), hierarchy.get_distance())
if (__name__ == "__main__") :
run(sys.argv[1:])
| true | true |
f7f5a6d24ae8d8102312982ab72dc82c86d62d15 | 788 | py | Python | src/antidote/lib/lazy/__init__.py | Finistere/dependency_manager | 5a183d46ac5d760944dc507d1281813d02d2c75e | [
"MIT"
] | null | null | null | src/antidote/lib/lazy/__init__.py | Finistere/dependency_manager | 5a183d46ac5d760944dc507d1281813d02d2c75e | [
"MIT"
] | null | null | null | src/antidote/lib/lazy/__init__.py | Finistere/dependency_manager | 5a183d46ac5d760944dc507d1281813d02d2c75e | [
"MIT"
] | null | null | null | from .constant import (
Const,
Constant,
ConstantFactory,
ConstantValueProviderFunction,
TypedConstantFactory,
ConstantValueProvider,
)
from .lazy import lazy, LazyWrappedFunction
from ..._internal import API
__all__ = [
"ConstantFactory",
"TypedConstantFactory",
"ConstantValueProviderFunction",
"ConstantValueProvider",
"const",
"lazy",
"LazyWrappedFunction",
"register_lazy_provider",
"Constant",
]
@API.private
def __const() -> Const:
    """Build the ConstImpl instance that backs the public ``const`` object."""
    from ._constant_factory import ConstImpl

    impl: Const = ConstImpl()
    return impl


# Singleton instance of Const.
const: Const = __const()
@API.experimental
def register_lazy_provider() -> None:
    """Register the LazyProvider with the global ``world`` container."""
    from ... import world
    from ._provider import LazyProvider

    provider_cls = LazyProvider
    world.provider(provider_cls)
| 18.325581 | 44 | 0.706853 | from .constant import (
Const,
Constant,
ConstantFactory,
ConstantValueProviderFunction,
TypedConstantFactory,
ConstantValueProvider,
)
from .lazy import lazy, LazyWrappedFunction
from ..._internal import API
__all__ = [
"ConstantFactory",
"TypedConstantFactory",
"ConstantValueProviderFunction",
"ConstantValueProvider",
"const",
"lazy",
"LazyWrappedFunction",
"register_lazy_provider",
"Constant",
]
@API.private
def __const() -> Const:
from ._constant_factory import ConstImpl
return ConstImpl()
const: Const = __const()
@API.experimental
def register_lazy_provider() -> None:
from ... import world
from ._provider import LazyProvider
world.provider(LazyProvider)
| true | true |
f7f5a6d6911455764ba26f751a9e0e2614ddceea | 722 | py | Python | tests/extmod/urandom_basic.py | rxchen/micropython | 037b2c72a1d5b54a5508a58ab2044628a7a39fa4 | [
"MIT"
] | 13,648 | 2015-01-01T01:34:51.000Z | 2022-03-31T16:19:53.000Z | tests/extmod/urandom_basic.py | rxchen/micropython | 037b2c72a1d5b54a5508a58ab2044628a7a39fa4 | [
"MIT"
] | 7,092 | 2015-01-01T07:59:11.000Z | 2022-03-31T23:52:18.000Z | tests/extmod/urandom_basic.py | rxchen/micropython | 037b2c72a1d5b54a5508a58ab2044628a7a39fa4 | [
"MIT"
] | 4,942 | 2015-01-02T11:48:50.000Z | 2022-03-31T19:57:10.000Z | try:
import urandom as random
except ImportError:
try:
import random
except ImportError:
print("SKIP")
raise SystemExit
# check getrandbits returns a value within the bit range
for b in (1, 2, 3, 4, 16, 32):
for i in range(50):
assert random.getrandbits(b) < (1 << b)
# check that seed(0) gives a non-zero value
random.seed(0)
print(random.getrandbits(16) != 0)
# check that PRNG is repeatable
random.seed(1)
r = random.getrandbits(16)
random.seed(1)
print(random.getrandbits(16) == r)
# check that zero bits works
print(random.getrandbits(0))
# check that it throws an error for negative bits
try:
random.getrandbits(-1)
except ValueError:
print("ValueError")
| 21.878788 | 56 | 0.68144 | try:
import urandom as random
except ImportError:
try:
import random
except ImportError:
print("SKIP")
raise SystemExit
for b in (1, 2, 3, 4, 16, 32):
for i in range(50):
assert random.getrandbits(b) < (1 << b)
random.seed(0)
print(random.getrandbits(16) != 0)
random.seed(1)
r = random.getrandbits(16)
random.seed(1)
print(random.getrandbits(16) == r)
print(random.getrandbits(0))
try:
random.getrandbits(-1)
except ValueError:
print("ValueError")
| true | true |
f7f5a6e86c4bd5c0356b3aa92255f319f2f13124 | 7,874 | py | Python | mixmo/core/loss.py | JiarunLiu/mixmo-pytorch | a9ad674122d9b6512094b8292280a4045bb5a400 | [
"Apache-2.0"
] | 72 | 2021-03-26T12:34:52.000Z | 2022-03-27T06:39:57.000Z | mixmo/core/loss.py | JiarunLiu/mixmo-pytorch | a9ad674122d9b6512094b8292280a4045bb5a400 | [
"Apache-2.0"
] | 7 | 2021-08-06T02:13:54.000Z | 2022-02-08T01:20:32.000Z | mixmo/core/loss.py | JiarunLiu/mixmo-pytorch | a9ad674122d9b6512094b8292280a4045bb5a400 | [
"Apache-2.0"
] | 15 | 2021-04-10T17:34:45.000Z | 2022-03-02T11:49:34.000Z | """
Base loss definitions
"""
from collections import OrderedDict
import copy
import torch
import torch.nn as nn
from mixmo.utils import misc, logger
LOGGER = logger.get_logger(__name__, level="DEBUG")
class AbstractLoss(nn.modules.loss._Loss):
    """Base loss class: tracks a running average and formats it for logging.

    Subclasses implement :meth:`_forward`; :meth:`forward` wraps it and
    accumulates the detached loss values between calls to
    :meth:`start_accumulator`.
    """

    def __init__(self, config_args, device, config_loss=None):
        self.device = device
        self.config_args = config_args if config_args else {}
        self.config_loss = config_loss if config_loss else {}
        # Display name used as the stats key in get_accumulator_stats
        self.name = self.config_loss["display_name"]
        nn.modules.loss._Loss.__init__(self)

    def print_details(self):
        LOGGER.info(f"Using loss: {self.config_loss} with name: {self.name}")

    def start_accumulator(self):
        # Reset the running sums consumed by get_accumulator_stats
        self._accumulator_loss = 0
        self._accumulator_len = 0

    def get_accumulator_stats(self, format="short", split=None):
        """Return tracked stats as a dict of formatted strings (empty if none)."""
        if not self._accumulator_len:
            return {}
        mean_loss = self._accumulator_loss / self._accumulator_len
        stats = OrderedDict({})
        if format != "long":
            # Compact form (e.g. "1.2-5") so it fits on one line of tqdm postfix
            stats[self.name] = f"{mean_loss:.3}".replace("e-0", "-").replace("e-", "-")
        else:
            assert split is not None
            stats[split + "/" + self.name] = {
                "value": mean_loss,
                "string": f"{mean_loss:.5}",
            }
        return stats

    def forward(self, input, target):
        loss_tensor = self._forward(input, target)
        # Accumulate a detached CPU scalar so tracking never holds the graph
        self._accumulator_loss += loss_tensor.detach().to("cpu").numpy()
        self._accumulator_len += 1
        return loss_tensor

    def _forward(self, input, target):
        raise NotImplementedError
class SoftCrossEntropyLoss(AbstractLoss):
    """Cross-entropy loss whose targets may be soft label distributions."""

    def _forward(self, input, target):
        """Mean cross entropy between predictions and (possibly soft) targets.

        Rank-1 integer class targets are first expanded to one-hot vectors so
        both the hard and soft cases share the same formulation.

        Args:
            input: raw logits, shape (batch, num_classes)
            target: class indices of shape (batch,) or soft distributions of
                shape (batch, num_classes)

        Returns:
            Scalar tensor: mean over the batch of -sum(target * log_softmax(input)).
        """
        if target.dim() == 1:
            # Hard labels: expand class indices to one-hot vectors
            target = torch.nn.functional.one_hot(target, num_classes=input.size(-1))
        target = target.to(torch.float).to(self.device)
        log_probs = torch.nn.functional.log_softmax(input, dim=1)
        return torch.mean(torch.sum(-target * log_probs, dim=1))


DICT_LOSS_STANDARD = {
    "soft_cross_entropy": SoftCrossEntropyLoss,
}
class WrapperLoss(AbstractLoss):
    """
    Wrapper around the multiple losses. Initialized from listloss.

    Builds one sub-loss per entry of config_loss["listloss"] (skipping those
    with a zero coefficient) and returns their coefficient-weighted sum in
    _forward, optionally adding an l2 weight-decay term over a registered
    network.
    """

    def __init__(self, config_loss, config_args, device):
        AbstractLoss.__init__(
            self,
            config_args=config_args,
            config_loss=config_loss,
            device=device,
        )
        self.losses = self._init_get_losses()
        # Network whose parameters are decayed by l2_reg; assigned later via
        # set_regularized_network (and only when config_loss["l2_reg"] is truthy).
        self.regularized_network = None

    def _init_get_losses(self):
        """
        Initialize and gather losses from listloss.

        Returns a list of config dicts (deep copies), each augmented with a
        "callable" entry holding the instantiated loss object. Entries whose
        coefficient is 0 are skipped. The sentinel coefficient "<num_members"
        is resolved to 1. for the first num_members entries and 0 for the rest.
        Note: this resolution mutates the original config entry in place.
        """
        losses = []
        for ic, config_loss in enumerate(self.config_loss["listloss"]):
            if config_loss["coeff"] == "<num_members":
                config_loss["coeff"] = (1. if ic < self.config_args["num_members"] else 0)
            if config_loss["coeff"] == 0:
                LOGGER.debug(f"Skip loss: {config_loss}")
                continue
            loss_callable = get_loss(config_loss, device=self.device, config_args=self.config_args)
            # Deep copy so later mutations of the stored dict do not leak back
            # into the shared configuration.
            loss = copy.deepcopy(config_loss)
            loss["callable"] = loss_callable
            losses.append(loss)
        return losses

    def print_details(self):
        # Sub-losses already print their own details when constructed in get_loss.
        return

    def start_accumulator(self):
        # Reset the wrapper accumulator and every sub-loss accumulator.
        AbstractLoss.start_accumulator(self)
        for loss in self.losses:
            loss["callable"].start_accumulator()

    def get_accumulator_stats(self, format="short", split=None):
        """
        Gather tracked stats into a dictionary as formatted strings.

        In "long" format, also includes the l2 regularization value (when
        enabled) and the stats of every sub-loss.
        """
        if not self._accumulator_len:
            return {}
        stats = AbstractLoss.get_accumulator_stats(self, format=format, split=split)
        if format == "long":
            # tensorboard logs
            if self.config_loss.get("l2_reg"):
                l2_reg = self.l2_reg().detach().to("cpu").numpy()
                stats["general/l2_reg"] = {
                    "value": l2_reg,
                    "string": f"{l2_reg:.4}",
                }
            for loss in self.losses:
                substats = loss["callable"].get_accumulator_stats(
                    format=format,
                    split=split,
                )
                misc.clean_update(stats, substats)
        return stats

    def _forward(self, input, target):
        """
        Perform loss forwards for each sublosses and l2 reg.

        Returns the sum of all weighted sub-losses, plus
        l2_reg() * config_loss["l2_reg"] when that option is set.
        """
        computed_losses = [self._forward_subloss(loss, input, target) for loss in self.losses]
        stacked_computed_losses = torch.stack(computed_losses)
        final_loss = stacked_computed_losses.sum()
        if self.config_loss.get("l2_reg"):
            final_loss = final_loss + self.l2_reg() * float(self.config_loss.get("l2_reg"))
        return final_loss

    def _forward_subloss(self, loss, input, target):
        """
        Standard loss forward for one of the sublosses.

        Resolves the sub-loss input/target tensors by key from the input and
        target dicts, then returns callable(input, target) * coeff.
        """
        coeff = float(loss["coeff"])
        subloss_input = self._match_item(loss["input"], dict_tensors=input)
        subloss_target = self._match_item(loss["target"], dict_tensors=target)
        loss = loss["callable"](input=subloss_input, target=subloss_target)
        return loss * coeff

    @staticmethod
    def _match_item(name, dict_tensors):
        # Resolve a config key to a tensor in dict_tensors; None-like keys map
        # to None and unknown keys are an error.
        if misc.is_none(name):
            return None
        if name in dict_tensors:
            return dict_tensors[str(name)]
        raise ValueError(name)

    def set_regularized_network(self, network):
        # Record the network to decay, but only when l2_reg is configured.
        if self.config_loss.get("l2_reg"):
            self.regularized_network = network
            LOGGER.warning(f"Set l2 regularization on {network.__class__.__name__}")

    def l2_reg(self,):
        """
        Compute l2 regularization/weight decay over the non-excluded parameters
        """
        assert self.regularized_network is not None
        # Retrieve non excluded parameters
        params = list(self.regularized_network.parameters())
        # Iterate over all parameters to decay
        l2_reg = None
        for W in params:
            if l2_reg is None:
                l2_reg = torch.sum(torch.pow(W, 2))
            else:
                l2_reg = l2_reg + torch.sum(torch.pow(W, 2))
        assert l2_reg is not None
        return l2_reg
def get_loss(config_loss, device=None, config_args=None):
    """Build the loss object described by ``config_loss``.

    "multitask" yields a WrapperLoss around the configured sub-losses; any
    other name is looked up in DICT_LOSS_STANDARD. Unknown names raise.
    """
    loss_name = config_loss["name"]
    if loss_name == "multitask":
        built = WrapperLoss(config_args=config_args, device=device, config_loss=config_loss)
    else:
        if loss_name not in DICT_LOSS_STANDARD:
            raise Exception(f"Loss {loss_name} not implemented")
        loss_cls = DICT_LOSS_STANDARD[loss_name]
        built = loss_cls(config_loss=config_loss, config_args=config_args, device=device)
    built.print_details()
    return built
| 32.945607 | 99 | 0.604648 | from collections import OrderedDict
import copy
import torch
import torch.nn as nn
from mixmo.utils import misc, logger
LOGGER = logger.get_logger(__name__, level="DEBUG")
class AbstractLoss(nn.modules.loss._Loss):
def __init__(self, config_args, device, config_loss=None):
self.device = device
self.config_args = config_args or {}
self.config_loss = config_loss or {}
self.name = self.config_loss["display_name"]
nn.modules.loss._Loss.__init__(self)
def print_details(self):
LOGGER.info(f"Using loss: {self.config_loss} with name: {self.name}")
def start_accumulator(self):
self._accumulator_loss = 0
self._accumulator_len = 0
def get_accumulator_stats(self, format="short", split=None):
if not self._accumulator_len:
return {}
stats = OrderedDict({})
loss_value = self._accumulator_loss / self._accumulator_len
if format == "long":
assert split is not None
key = split + "/" + self.name
stats[key] = {
"value": loss_value,
"string": f"{loss_value:.5}",
}
else:
loss_string = f"{loss_value:.3}".replace("e-0", "-").replace("e-", "-")
stats[self.name] = loss_string
return stats
def forward(self, input, target):
current_loss = self._forward(input, target)
self._accumulator_loss += current_loss.detach().to("cpu").numpy()
self._accumulator_len += 1
return current_loss
def _forward(self, input, target):
raise NotImplementedError
class SoftCrossEntropyLoss(AbstractLoss):
def _forward(self, input, target):
if len(target.size()) == 1:
target = torch.nn.functional.one_hot(target, num_classes=input.size(-1))
target = target.to(torch.float).to(self.device)
logsoftmax = torch.nn.LogSoftmax(dim=1)
return torch.mean(torch.sum(-target * logsoftmax(input), dim=1))
DICT_LOSS_STANDARD = {
"soft_cross_entropy": SoftCrossEntropyLoss,
}
class WrapperLoss(AbstractLoss):
def __init__(self, config_loss, config_args, device):
AbstractLoss.__init__(
self,
config_args=config_args,
config_loss=config_loss,
device=device,
)
self.losses = self._init_get_losses()
self.regularized_network = None
def _init_get_losses(self):
losses = []
for ic, config_loss in enumerate(self.config_loss["listloss"]):
if config_loss["coeff"] == "<num_members":
config_loss["coeff"] = (1. if ic < self.config_args["num_members"] else 0)
if config_loss["coeff"] == 0:
LOGGER.debug(f"Skip loss: {config_loss}")
continue
loss_callable = get_loss(config_loss, device=self.device, config_args=self.config_args)
loss = copy.deepcopy(config_loss)
loss["callable"] = loss_callable
losses.append(loss)
return losses
def print_details(self):
return
def start_accumulator(self):
AbstractLoss.start_accumulator(self)
for loss in self.losses:
loss["callable"].start_accumulator()
def get_accumulator_stats(self, format="short", split=None):
if not self._accumulator_len:
return {}
stats = AbstractLoss.get_accumulator_stats(self, format=format, split=split)
if format == "long":
if self.config_loss.get("l2_reg"):
l2_reg = self.l2_reg().detach().to("cpu").numpy()
stats["general/l2_reg"] = {
"value": l2_reg,
"string": f"{l2_reg:.4}",
}
for loss in self.losses:
substats = loss["callable"].get_accumulator_stats(
format=format,
split=split,
)
misc.clean_update(stats, substats)
return stats
def _forward(self, input, target):
computed_losses = [self._forward_subloss(loss, input, target) for loss in self.losses]
stacked_computed_losses = torch.stack(computed_losses)
final_loss = stacked_computed_losses.sum()
if self.config_loss.get("l2_reg"):
final_loss = final_loss + self.l2_reg() * float(self.config_loss.get("l2_reg"))
return final_loss
def _forward_subloss(self, loss, input, target):
coeff = float(loss["coeff"])
subloss_input = self._match_item(loss["input"], dict_tensors=input)
subloss_target = self._match_item(loss["target"], dict_tensors=target)
loss = loss["callable"](input=subloss_input, target=subloss_target)
return loss * coeff
@staticmethod
def _match_item(name, dict_tensors):
if misc.is_none(name):
return None
if name in dict_tensors:
return dict_tensors[str(name)]
raise ValueError(name)
def set_regularized_network(self, network):
if self.config_loss.get("l2_reg"):
self.regularized_network = network
LOGGER.warning(f"Set l2 regularization on {network.__class__.__name__}")
def l2_reg(self,):
assert self.regularized_network is not None
params = list(self.regularized_network.parameters())
l2_reg = None
for W in params:
if l2_reg is None:
l2_reg = torch.sum(torch.pow(W, 2))
else:
l2_reg = l2_reg + torch.sum(torch.pow(W, 2))
assert l2_reg is not None
return l2_reg
def get_loss(config_loss, device=None, config_args=None):
loss_name = config_loss["name"]
if loss_name == "multitask":
loss = WrapperLoss(config_args=config_args, device=device, config_loss=config_loss)
elif loss_name in DICT_LOSS_STANDARD:
loss = DICT_LOSS_STANDARD[loss_name](
config_loss=config_loss, config_args=config_args, device=device
)
else:
raise Exception(f"Loss {loss_name} not implemented")
loss.print_details()
return loss
| true | true |
f7f5a71c2e457365064ffae96243d732cf2df886 | 946 | py | Python | setup.py | rajibchakravorty/QDataSet | 8eb21b8c7dad5654358021dd73b93ab90443f6d0 | [
"MIT"
] | null | null | null | setup.py | rajibchakravorty/QDataSet | 8eb21b8c7dad5654358021dd73b93ab90443f6d0 | [
"MIT"
] | null | null | null | setup.py | rajibchakravorty/QDataSet | 8eb21b8c7dad5654358021dd73b93ab90443f6d0 | [
"MIT"
] | null | null | null | """Set up for qmldataset package
"""
import setuptools
# with open("README.md", "r", encoding="utf-8") as fh:
# long_description = fh.read()
setuptools.setup(
    name="qmldataset",
    version="0.1.0",
    author="Rajib Chakravorty",
    author_email="4748396+rajibchakravorty@users.noreply.github.com",
    description="Synthetic data generator for ML applications in Quantum Hardware system",
    # No long description yet; keep the field explicitly empty instead of
    # carrying the dead commented-out README wiring from the template.
    long_description="",
    long_description_content_type="text/markdown",
    # Ship only the qmldataset package and its subpackages.
    packages=setuptools.find_packages(include=['qmldataset', 'qmldataset.*']),
    python_requires=">=3.8, <4.0"
)
import setuptools
setuptools.setup(
name="qmldataset",
version="0.1.0",
author="Rajib Chakravorty",
author_email="4748396+rajibchakravorty@users.noreply.github.com",
description="Synthetic data generator for ML applications in Quantum Hardware system",
long_description="",
long_description_content_type="text/markdown",
packages=setuptools.find_packages(include=['qmldataset', 'qmldataset.*']),
python_requires=">=3.8, <4.0"
) | true | true |
f7f5a7aac926c7d5197ded9047c24bae9c5020b3 | 3,895 | py | Python | caffe2/python/operator_test/mkl_speed_test.py | KevinKecc/caffe2 | a2b6c6e2f0686358a84277df65e9489fb7d9ddb2 | [
"Apache-2.0"
] | 58 | 2019-01-03T02:20:41.000Z | 2022-02-25T14:24:13.000Z | caffe2/python/operator_test/mkl_speed_test.py | KevinKecc/caffe2 | a2b6c6e2f0686358a84277df65e9489fb7d9ddb2 | [
"Apache-2.0"
] | 27 | 2018-04-14T06:44:22.000Z | 2018-08-01T18:02:39.000Z | caffe2/python/operator_test/mkl_speed_test.py | KevinKecc/caffe2 | a2b6c6e2f0686358a84277df65e9489fb7d9ddb2 | [
"Apache-2.0"
] | 23 | 2018-04-13T10:47:31.000Z | 2021-05-06T08:38:06.000Z | # Copyright (c) 2016-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import unittest
import numpy as np
from caffe2.proto import caffe2_pb2
from caffe2.python import core, workspace, test_util
@unittest.skipIf(not workspace.C.has_mkldnn, "Skipping as we do not have mkldnn.")
class TestMKLBasic(test_util.TestCase):
    """Benchmarks comparing CPU and MKL-DNN implementations of basic ops.

    Each test feeds the same random input to a CPU op and to its MKLDNN
    counterpart, checks that both produce numerically close results, then
    prints the benchmarked runtimes of both.
    """

    def testReLUSpeed(self):
        """Compare CPU vs MKL Relu outputs and runtimes on a 128x4096 input."""
        X = np.random.randn(128, 4096).astype(np.float32)
        mkl_do = core.DeviceOption(caffe2_pb2.MKLDNN)
        # Makes sure that feed works.
        workspace.FeedBlob("X", X)
        workspace.FeedBlob("X_mkl", X, device_option=mkl_do)
        net = core.Net("test")
        # Makes sure that we can run relu.
        net.Relu("X", "Y")
        net.Relu("X_mkl", "Y_mkl", device_option=mkl_do)
        workspace.CreateNet(net)
        workspace.RunNet(net)
        # makes sure that the results are good.
        np.testing.assert_allclose(
            workspace.FetchBlob("Y"),
            workspace.FetchBlob("Y_mkl"),
            atol=1e-10,
            rtol=1e-10)
        runtime = workspace.BenchmarkNet(net.Proto().name, 1, 100, True)
        # The returned runtime is the time of
        # [whole_net, cpu_op, mkl_op]
        # so we will assume that the MKL one runs faster than the CPU one.
        # Note(Yangqing): in fact, it seems that in optimized mode, this is
        # not always guaranteed - MKL runs slower than the Eigen vectorized
        # version, so I am turning this assertion off.
        #self.assertTrue(runtime[1] >= runtime[2])
        print("Relu CPU runtime {}, MKL runtime {}.".format(runtime[1], runtime[2]))

    def testConvSpeed(self):
        """Compare CPU vs MKL Conv outputs and runtimes for a single image."""
        # We randomly select a shape to test the speed. Intentionally we
        # test a batch size of 1 since this may be the most frequent use
        # case for MKL during deployment time.
        X = np.random.rand(1, 256, 27, 27).astype(np.float32) - 0.5
        W = np.random.rand(192, 256, 3, 3).astype(np.float32) - 0.5
        b = np.random.rand(192).astype(np.float32) - 0.5
        mkl_do = core.DeviceOption(caffe2_pb2.MKLDNN)
        # Makes sure that feed works.
        workspace.FeedBlob("X", X)
        workspace.FeedBlob("W", W)
        workspace.FeedBlob("b", b)
        workspace.FeedBlob("X_mkl", X, device_option=mkl_do)
        workspace.FeedBlob("W_mkl", W, device_option=mkl_do)
        workspace.FeedBlob("b_mkl", b, device_option=mkl_do)
        net = core.Net("test")
        # Makes sure that we can run conv on both devices.
        net.Conv(["X", "W", "b"], "Y", pad=1, stride=1, kernel=3)
        net.Conv(["X_mkl", "W_mkl", "b_mkl"], "Y_mkl",
                 pad=1, stride=1, kernel=3, device_option=mkl_do)
        workspace.CreateNet(net)
        workspace.RunNet(net)
        # makes sure that the results are good.
        np.testing.assert_allclose(
            workspace.FetchBlob("Y"),
            workspace.FetchBlob("Y_mkl"),
            atol=1e-2,
            rtol=1e-2)
        runtime = workspace.BenchmarkNet(net.Proto().name, 1, 100, True)
        print("Conv CPU runtime {}, MKL runtime {}.".format(runtime[1], runtime[2]))


if __name__ == '__main__':
    unittest.main()
| 40.572917 | 84 | 0.63543 | true | true | |
f7f5a8054c4429dd898a8fa648632b6869d56a41 | 2,852 | py | Python | sendSMSSkillLambda/package/ask_sdk_model/interfaces/geolocation/geolocation_interface.py | shneydor/aws-alexa-lambda-workshop | 0fa6b7067b04fc85c46b9ce1c2cc04554ed5baf4 | [
"Apache-2.0"
] | null | null | null | sendSMSSkillLambda/package/ask_sdk_model/interfaces/geolocation/geolocation_interface.py | shneydor/aws-alexa-lambda-workshop | 0fa6b7067b04fc85c46b9ce1c2cc04554ed5baf4 | [
"Apache-2.0"
] | null | null | null | sendSMSSkillLambda/package/ask_sdk_model/interfaces/geolocation/geolocation_interface.py | shneydor/aws-alexa-lambda-workshop | 0fa6b7067b04fc85c46b9ce1c2cc04554ed5baf4 | [
"Apache-2.0"
] | 1 | 2019-10-11T17:15:20.000Z | 2019-10-11T17:15:20.000Z | # coding: utf-8
#
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file
# except in compliance with the License. A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for
# the specific language governing permissions and limitations under the License.
#
import pprint
import re # noqa: F401
import six
import typing
from enum import Enum
if typing.TYPE_CHECKING:
from typing import Dict, List, Optional, Union
from datetime import datetime
class GeolocationInterface(object):
    """Base model type for Geolocation interface payloads.

    Carries empty type/attribute maps; generated subclasses override them.
    """
    deserialized_types = {}  # type: Dict

    attribute_map = {}  # type: Dict

    def __init__(self):
        # type: () -> None
        """Initialize with no discriminator value."""
        self.__discriminator_value = None  # type: str

    def to_dict(self):
        # type: () -> Dict[str, object]
        """Return the model properties as a dict."""
        result = {}  # type: Dict
        for attr in self.deserialized_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                converted = []
                for element in value:
                    if hasattr(element, "to_dict"):
                        converted.append(element.to_dict())
                    elif isinstance(element, Enum):
                        converted.append(element.value)
                    else:
                        converted.append(element)
                result[attr] = converted
            elif isinstance(value, Enum):
                result[attr] = value.value
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                converted_map = {}
                for map_key, map_value in value.items():
                    if hasattr(map_value, "to_dict"):
                        converted_map[map_key] = map_value.to_dict()
                    elif isinstance(map_value, Enum):
                        converted_map[map_key] = map_value.value
                    else:
                        converted_map[map_key] = map_value
                result[attr] = converted_map
            else:
                result[attr] = value
        return result

    def to_str(self):
        # type: () -> str
        """Return the string representation of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        # type: () -> str
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        # type: (object) -> bool
        """Return True if both objects are equal."""
        if isinstance(other, GeolocationInterface):
            return self.__dict__ == other.__dict__
        return False

    def __ne__(self, other):
        # type: (object) -> bool
        """Return True if both objects are not equal."""
        return not self == other
| 29.102041 | 96 | 0.565217 |
import pprint
import re
import six
import typing
from enum import Enum
if typing.TYPE_CHECKING:
from typing import Dict, List, Optional, Union
from datetime import datetime
class GeolocationInterface(object):
deserialized_types = {
}
attribute_map = {
}
def __init__(self):
self.__discriminator_value = None
def to_dict(self):
result = {}
for attr, _ in six.iteritems(self.deserialized_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else
x.value if isinstance(x, Enum) else x,
value
))
elif isinstance(value, Enum):
result[attr] = value.value
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else
(item[0], item[1].value)
if isinstance(item[1], Enum) else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
return pprint.pformat(self.to_dict())
def __repr__(self):
return self.to_str()
def __eq__(self, other):
if not isinstance(other, GeolocationInterface):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| true | true |
f7f5a8705d3c517124eae802368d5bf49e085781 | 47,821 | py | Python | google/cloud/notebooks_v1/services/notebook_service/transports/grpc.py | LaudateCorpus1/python-notebooks | 72925babe34fb97639336e443b2ee588d6727680 | [
"Apache-2.0"
] | 17 | 2020-09-19T18:30:50.000Z | 2022-03-15T11:35:50.000Z | google/cloud/notebooks_v1/services/notebook_service/transports/grpc.py | LaudateCorpus1/python-notebooks | 72925babe34fb97639336e443b2ee588d6727680 | [
"Apache-2.0"
] | 42 | 2020-08-05T00:16:45.000Z | 2022-03-07T17:06:48.000Z | google/cloud/notebooks_v1/services/notebook_service/transports/grpc.py | LaudateCorpus1/python-notebooks | 72925babe34fb97639336e443b2ee588d6727680 | [
"Apache-2.0"
] | 5 | 2020-08-04T23:40:58.000Z | 2022-03-13T19:02:02.000Z | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import grpc_helpers
from google.api_core import operations_v1
from google.api_core import gapic_v1
import google.auth # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from google.cloud.notebooks_v1.types import environment
from google.cloud.notebooks_v1.types import execution
from google.cloud.notebooks_v1.types import instance
from google.cloud.notebooks_v1.types import schedule
from google.cloud.notebooks_v1.types import service
from google.longrunning import operations_pb2 # type: ignore
from .base import NotebookServiceTransport, DEFAULT_CLIENT_INFO
class NotebookServiceGrpcTransport(NotebookServiceTransport):
"""gRPC backend transport for NotebookService.
API v1 service for Cloud AI Platform Notebooks.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_stubs: Dict[str, Callable]
    def __init__(
        self,
        *,
        host: str = "notebooks.googleapis.com",
        credentials: ga_credentials.Credentials = None,
        credentials_file: str = None,
        scopes: Sequence[str] = None,
        channel: grpc.Channel = None,
        api_mtls_endpoint: str = None,
        client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
        ssl_channel_credentials: grpc.ChannelCredentials = None,
        client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
        quota_project_id: Optional[str] = None,
        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
        always_use_jwt_access: Optional[bool] = False,
    ) -> None:
        """Instantiate the transport.
        Args:
            host (Optional[str]):
                 The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
                This argument is ignored if ``channel`` is provided.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is ignored if ``channel`` is provided.
            scopes (Optional(Sequence[str])): A list of scopes. This argument is
                ignored if ``channel`` is provided.
            channel (Optional[grpc.Channel]): A ``Channel`` instance through
                which to make calls.
            api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
                If provided, it overrides the ``host`` argument and tries to create
                a mutual TLS channel with client SSL credentials from
                ``client_cert_source`` or application default SSL credentials.
            client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
                Deprecated. A callback to provide client SSL certificate bytes and
                private key bytes, both in PEM format. It is ignored if
                ``api_mtls_endpoint`` is None.
            ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
                for the grpc channel. It is ignored if ``channel`` is provided.
            client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
                A callback to provide client certificate bytes and private key bytes,
                both in PEM format. It is used to configure a mutual TLS channel. It is
                ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
                be used for service account credentials.
        Raises:
          google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
              creation failed for any reason.
          google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
              and ``credentials_file`` are passed.
        """
        self._grpc_channel = None
        self._ssl_channel_credentials = ssl_channel_credentials
        # Per-RPC stub cache, populated lazily by the stub properties below.
        self._stubs: Dict[str, Callable] = {}
        # Created on first access via the ``operations_client`` property.
        self._operations_client: Optional[operations_v1.OperationsClient] = None
        if api_mtls_endpoint:
            warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
        if client_cert_source:
            warnings.warn("client_cert_source is deprecated", DeprecationWarning)
        if channel:
            # Ignore credentials if a channel was passed.
            credentials = False
            # If a channel was explicitly provided, set it.
            self._grpc_channel = channel
            self._ssl_channel_credentials = None
        else:
            # No channel supplied: work out which SSL credentials to use for
            # the channel we will create below.
            if api_mtls_endpoint:
                # Deprecated mTLS path: the mTLS endpoint replaces ``host``.
                host = api_mtls_endpoint
                # Create SSL credentials with client_cert_source or application
                # default SSL credentials.
                if client_cert_source:
                    cert, key = client_cert_source()
                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
                        certificate_chain=cert, private_key=key
                    )
                else:
                    self._ssl_channel_credentials = SslCredentials().ssl_credentials
            else:
                # Current mTLS path: only used when no explicit SSL channel
                # credentials were passed in.
                if client_cert_source_for_mtls and not ssl_channel_credentials:
                    cert, key = client_cert_source_for_mtls()
                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
                        certificate_chain=cert, private_key=key
                    )
        # The base transport sets the host, credentials and scopes
        super().__init__(
            host=host,
            credentials=credentials,
            credentials_file=credentials_file,
            scopes=scopes,
            quota_project_id=quota_project_id,
            client_info=client_info,
            always_use_jwt_access=always_use_jwt_access,
        )
        if not self._grpc_channel:
            # Create the channel ourselves, using the credentials resolved by
            # the base transport and unlimited message sizes.
            self._grpc_channel = type(self).create_channel(
                self._host,
                credentials=self._credentials,
                credentials_file=credentials_file,
                scopes=self._scopes,
                ssl_credentials=self._ssl_channel_credentials,
                quota_project_id=quota_project_id,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )
        # Wrap messages. This must be done after self._grpc_channel exists
        self._prep_wrapped_messages(client_info)
@classmethod
def create_channel(
cls,
host: str = "notebooks.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs,
) -> grpc.Channel:
"""Create and return a gRPC channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
grpc.Channel: A gRPC channel object.
Raises:
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
return grpc_helpers.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
quota_project_id=quota_project_id,
default_scopes=cls.AUTH_SCOPES,
scopes=scopes,
default_host=cls.DEFAULT_HOST,
**kwargs,
)
@property
def grpc_channel(self) -> grpc.Channel:
"""Return the channel designed to connect to this service.
"""
return self._grpc_channel
@property
def operations_client(self) -> operations_v1.OperationsClient:
"""Create the client designed to process long-running operations.
This property caches on the instance; repeated calls return the same
client.
"""
# Sanity check: Only create a new client if we do not already have one.
if self._operations_client is None:
self._operations_client = operations_v1.OperationsClient(self.grpc_channel)
# Return the client from cache.
return self._operations_client
@property
def list_instances(
self,
) -> Callable[[service.ListInstancesRequest], service.ListInstancesResponse]:
r"""Return a callable for the list instances method over gRPC.
Lists instances in a given project and location.
Returns:
Callable[[~.ListInstancesRequest],
~.ListInstancesResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_instances" not in self._stubs:
self._stubs["list_instances"] = self.grpc_channel.unary_unary(
"/google.cloud.notebooks.v1.NotebookService/ListInstances",
request_serializer=service.ListInstancesRequest.serialize,
response_deserializer=service.ListInstancesResponse.deserialize,
)
return self._stubs["list_instances"]
@property
def get_instance(self) -> Callable[[service.GetInstanceRequest], instance.Instance]:
r"""Return a callable for the get instance method over gRPC.
Gets details of a single Instance.
Returns:
Callable[[~.GetInstanceRequest],
~.Instance]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_instance" not in self._stubs:
self._stubs["get_instance"] = self.grpc_channel.unary_unary(
"/google.cloud.notebooks.v1.NotebookService/GetInstance",
request_serializer=service.GetInstanceRequest.serialize,
response_deserializer=instance.Instance.deserialize,
)
return self._stubs["get_instance"]
@property
def create_instance(
self,
) -> Callable[[service.CreateInstanceRequest], operations_pb2.Operation]:
r"""Return a callable for the create instance method over gRPC.
Creates a new Instance in a given project and
location.
Returns:
Callable[[~.CreateInstanceRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "create_instance" not in self._stubs:
self._stubs["create_instance"] = self.grpc_channel.unary_unary(
"/google.cloud.notebooks.v1.NotebookService/CreateInstance",
request_serializer=service.CreateInstanceRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["create_instance"]
@property
def register_instance(
self,
) -> Callable[[service.RegisterInstanceRequest], operations_pb2.Operation]:
r"""Return a callable for the register instance method over gRPC.
Registers an existing legacy notebook instance to the
Notebooks API server. Legacy instances are instances
created with the legacy Compute Engine calls. They are
not manageable by the Notebooks API out of the box. This
call makes these instances manageable by the Notebooks
API.
Returns:
Callable[[~.RegisterInstanceRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "register_instance" not in self._stubs:
self._stubs["register_instance"] = self.grpc_channel.unary_unary(
"/google.cloud.notebooks.v1.NotebookService/RegisterInstance",
request_serializer=service.RegisterInstanceRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["register_instance"]
@property
def set_instance_accelerator(
self,
) -> Callable[[service.SetInstanceAcceleratorRequest], operations_pb2.Operation]:
r"""Return a callable for the set instance accelerator method over gRPC.
Updates the guest accelerators of a single Instance.
Returns:
Callable[[~.SetInstanceAcceleratorRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "set_instance_accelerator" not in self._stubs:
self._stubs["set_instance_accelerator"] = self.grpc_channel.unary_unary(
"/google.cloud.notebooks.v1.NotebookService/SetInstanceAccelerator",
request_serializer=service.SetInstanceAcceleratorRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["set_instance_accelerator"]
@property
def set_instance_machine_type(
self,
) -> Callable[[service.SetInstanceMachineTypeRequest], operations_pb2.Operation]:
r"""Return a callable for the set instance machine type method over gRPC.
Updates the machine type of a single Instance.
Returns:
Callable[[~.SetInstanceMachineTypeRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "set_instance_machine_type" not in self._stubs:
self._stubs["set_instance_machine_type"] = self.grpc_channel.unary_unary(
"/google.cloud.notebooks.v1.NotebookService/SetInstanceMachineType",
request_serializer=service.SetInstanceMachineTypeRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["set_instance_machine_type"]
@property
def update_instance_config(
self,
) -> Callable[[service.UpdateInstanceConfigRequest], operations_pb2.Operation]:
r"""Return a callable for the update instance config method over gRPC.
Update Notebook Instance configurations.
Returns:
Callable[[~.UpdateInstanceConfigRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "update_instance_config" not in self._stubs:
self._stubs["update_instance_config"] = self.grpc_channel.unary_unary(
"/google.cloud.notebooks.v1.NotebookService/UpdateInstanceConfig",
request_serializer=service.UpdateInstanceConfigRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["update_instance_config"]
@property
def update_shielded_instance_config(
self,
) -> Callable[
[service.UpdateShieldedInstanceConfigRequest], operations_pb2.Operation
]:
r"""Return a callable for the update shielded instance
config method over gRPC.
Updates the Shielded instance configuration of a
single Instance.
Returns:
Callable[[~.UpdateShieldedInstanceConfigRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "update_shielded_instance_config" not in self._stubs:
self._stubs[
"update_shielded_instance_config"
] = self.grpc_channel.unary_unary(
"/google.cloud.notebooks.v1.NotebookService/UpdateShieldedInstanceConfig",
request_serializer=service.UpdateShieldedInstanceConfigRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["update_shielded_instance_config"]
@property
def set_instance_labels(
self,
) -> Callable[[service.SetInstanceLabelsRequest], operations_pb2.Operation]:
r"""Return a callable for the set instance labels method over gRPC.
Replaces all the labels of an Instance.
Returns:
Callable[[~.SetInstanceLabelsRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "set_instance_labels" not in self._stubs:
self._stubs["set_instance_labels"] = self.grpc_channel.unary_unary(
"/google.cloud.notebooks.v1.NotebookService/SetInstanceLabels",
request_serializer=service.SetInstanceLabelsRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["set_instance_labels"]
@property
def delete_instance(
self,
) -> Callable[[service.DeleteInstanceRequest], operations_pb2.Operation]:
r"""Return a callable for the delete instance method over gRPC.
Deletes a single Instance.
Returns:
Callable[[~.DeleteInstanceRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "delete_instance" not in self._stubs:
self._stubs["delete_instance"] = self.grpc_channel.unary_unary(
"/google.cloud.notebooks.v1.NotebookService/DeleteInstance",
request_serializer=service.DeleteInstanceRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["delete_instance"]
@property
def start_instance(
self,
) -> Callable[[service.StartInstanceRequest], operations_pb2.Operation]:
r"""Return a callable for the start instance method over gRPC.
Starts a notebook instance.
Returns:
Callable[[~.StartInstanceRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "start_instance" not in self._stubs:
self._stubs["start_instance"] = self.grpc_channel.unary_unary(
"/google.cloud.notebooks.v1.NotebookService/StartInstance",
request_serializer=service.StartInstanceRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["start_instance"]
@property
def stop_instance(
self,
) -> Callable[[service.StopInstanceRequest], operations_pb2.Operation]:
r"""Return a callable for the stop instance method over gRPC.
Stops a notebook instance.
Returns:
Callable[[~.StopInstanceRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "stop_instance" not in self._stubs:
self._stubs["stop_instance"] = self.grpc_channel.unary_unary(
"/google.cloud.notebooks.v1.NotebookService/StopInstance",
request_serializer=service.StopInstanceRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["stop_instance"]
@property
def reset_instance(
self,
) -> Callable[[service.ResetInstanceRequest], operations_pb2.Operation]:
r"""Return a callable for the reset instance method over gRPC.
Resets a notebook instance.
Returns:
Callable[[~.ResetInstanceRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "reset_instance" not in self._stubs:
self._stubs["reset_instance"] = self.grpc_channel.unary_unary(
"/google.cloud.notebooks.v1.NotebookService/ResetInstance",
request_serializer=service.ResetInstanceRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["reset_instance"]
@property
def report_instance_info(
self,
) -> Callable[[service.ReportInstanceInfoRequest], operations_pb2.Operation]:
r"""Return a callable for the report instance info method over gRPC.
Allows notebook instances to
report their latest instance information to the
Notebooks API server. The server will merge the reported
information to the instance metadata store. Do not use
this method directly.
Returns:
Callable[[~.ReportInstanceInfoRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "report_instance_info" not in self._stubs:
self._stubs["report_instance_info"] = self.grpc_channel.unary_unary(
"/google.cloud.notebooks.v1.NotebookService/ReportInstanceInfo",
request_serializer=service.ReportInstanceInfoRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["report_instance_info"]
@property
def is_instance_upgradeable(
self,
) -> Callable[
[service.IsInstanceUpgradeableRequest], service.IsInstanceUpgradeableResponse
]:
r"""Return a callable for the is instance upgradeable method over gRPC.
Check if a notebook instance is upgradable.
Returns:
Callable[[~.IsInstanceUpgradeableRequest],
~.IsInstanceUpgradeableResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "is_instance_upgradeable" not in self._stubs:
self._stubs["is_instance_upgradeable"] = self.grpc_channel.unary_unary(
"/google.cloud.notebooks.v1.NotebookService/IsInstanceUpgradeable",
request_serializer=service.IsInstanceUpgradeableRequest.serialize,
response_deserializer=service.IsInstanceUpgradeableResponse.deserialize,
)
return self._stubs["is_instance_upgradeable"]
@property
def get_instance_health(
self,
) -> Callable[
[service.GetInstanceHealthRequest], service.GetInstanceHealthResponse
]:
r"""Return a callable for the get instance health method over gRPC.
Check if a notebook instance is healthy.
Returns:
Callable[[~.GetInstanceHealthRequest],
~.GetInstanceHealthResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_instance_health" not in self._stubs:
self._stubs["get_instance_health"] = self.grpc_channel.unary_unary(
"/google.cloud.notebooks.v1.NotebookService/GetInstanceHealth",
request_serializer=service.GetInstanceHealthRequest.serialize,
response_deserializer=service.GetInstanceHealthResponse.deserialize,
)
return self._stubs["get_instance_health"]
@property
def upgrade_instance(
self,
) -> Callable[[service.UpgradeInstanceRequest], operations_pb2.Operation]:
r"""Return a callable for the upgrade instance method over gRPC.
Upgrades a notebook instance to the latest version.
Returns:
Callable[[~.UpgradeInstanceRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "upgrade_instance" not in self._stubs:
self._stubs["upgrade_instance"] = self.grpc_channel.unary_unary(
"/google.cloud.notebooks.v1.NotebookService/UpgradeInstance",
request_serializer=service.UpgradeInstanceRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["upgrade_instance"]
@property
def rollback_instance(
self,
) -> Callable[[service.RollbackInstanceRequest], operations_pb2.Operation]:
r"""Return a callable for the rollback instance method over gRPC.
Rollbacks a notebook instance to the previous
version.
Returns:
Callable[[~.RollbackInstanceRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "rollback_instance" not in self._stubs:
self._stubs["rollback_instance"] = self.grpc_channel.unary_unary(
"/google.cloud.notebooks.v1.NotebookService/RollbackInstance",
request_serializer=service.RollbackInstanceRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["rollback_instance"]
@property
def upgrade_instance_internal(
self,
) -> Callable[[service.UpgradeInstanceInternalRequest], operations_pb2.Operation]:
r"""Return a callable for the upgrade instance internal method over gRPC.
Allows notebook instances to
call this endpoint to upgrade themselves. Do not use
this method directly.
Returns:
Callable[[~.UpgradeInstanceInternalRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "upgrade_instance_internal" not in self._stubs:
self._stubs["upgrade_instance_internal"] = self.grpc_channel.unary_unary(
"/google.cloud.notebooks.v1.NotebookService/UpgradeInstanceInternal",
request_serializer=service.UpgradeInstanceInternalRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["upgrade_instance_internal"]
@property
def list_environments(
self,
) -> Callable[[service.ListEnvironmentsRequest], service.ListEnvironmentsResponse]:
r"""Return a callable for the list environments method over gRPC.
Lists environments in a project.
Returns:
Callable[[~.ListEnvironmentsRequest],
~.ListEnvironmentsResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_environments" not in self._stubs:
self._stubs["list_environments"] = self.grpc_channel.unary_unary(
"/google.cloud.notebooks.v1.NotebookService/ListEnvironments",
request_serializer=service.ListEnvironmentsRequest.serialize,
response_deserializer=service.ListEnvironmentsResponse.deserialize,
)
return self._stubs["list_environments"]
@property
def get_environment(
self,
) -> Callable[[service.GetEnvironmentRequest], environment.Environment]:
r"""Return a callable for the get environment method over gRPC.
Gets details of a single Environment.
Returns:
Callable[[~.GetEnvironmentRequest],
~.Environment]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_environment" not in self._stubs:
self._stubs["get_environment"] = self.grpc_channel.unary_unary(
"/google.cloud.notebooks.v1.NotebookService/GetEnvironment",
request_serializer=service.GetEnvironmentRequest.serialize,
response_deserializer=environment.Environment.deserialize,
)
return self._stubs["get_environment"]
@property
def create_environment(
self,
) -> Callable[[service.CreateEnvironmentRequest], operations_pb2.Operation]:
r"""Return a callable for the create environment method over gRPC.
Creates a new Environment.
Returns:
Callable[[~.CreateEnvironmentRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "create_environment" not in self._stubs:
self._stubs["create_environment"] = self.grpc_channel.unary_unary(
"/google.cloud.notebooks.v1.NotebookService/CreateEnvironment",
request_serializer=service.CreateEnvironmentRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["create_environment"]
@property
def delete_environment(
self,
) -> Callable[[service.DeleteEnvironmentRequest], operations_pb2.Operation]:
r"""Return a callable for the delete environment method over gRPC.
Deletes a single Environment.
Returns:
Callable[[~.DeleteEnvironmentRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "delete_environment" not in self._stubs:
self._stubs["delete_environment"] = self.grpc_channel.unary_unary(
"/google.cloud.notebooks.v1.NotebookService/DeleteEnvironment",
request_serializer=service.DeleteEnvironmentRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["delete_environment"]
@property
def list_schedules(
self,
) -> Callable[[service.ListSchedulesRequest], service.ListSchedulesResponse]:
r"""Return a callable for the list schedules method over gRPC.
Lists schedules in a given project and location.
Returns:
Callable[[~.ListSchedulesRequest],
~.ListSchedulesResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_schedules" not in self._stubs:
self._stubs["list_schedules"] = self.grpc_channel.unary_unary(
"/google.cloud.notebooks.v1.NotebookService/ListSchedules",
request_serializer=service.ListSchedulesRequest.serialize,
response_deserializer=service.ListSchedulesResponse.deserialize,
)
return self._stubs["list_schedules"]
@property
def get_schedule(self) -> Callable[[service.GetScheduleRequest], schedule.Schedule]:
r"""Return a callable for the get schedule method over gRPC.
Gets details of schedule
Returns:
Callable[[~.GetScheduleRequest],
~.Schedule]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_schedule" not in self._stubs:
self._stubs["get_schedule"] = self.grpc_channel.unary_unary(
"/google.cloud.notebooks.v1.NotebookService/GetSchedule",
request_serializer=service.GetScheduleRequest.serialize,
response_deserializer=schedule.Schedule.deserialize,
)
return self._stubs["get_schedule"]
@property
def delete_schedule(
self,
) -> Callable[[service.DeleteScheduleRequest], operations_pb2.Operation]:
r"""Return a callable for the delete schedule method over gRPC.
Deletes schedule and all underlying jobs
Returns:
Callable[[~.DeleteScheduleRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "delete_schedule" not in self._stubs:
self._stubs["delete_schedule"] = self.grpc_channel.unary_unary(
"/google.cloud.notebooks.v1.NotebookService/DeleteSchedule",
request_serializer=service.DeleteScheduleRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["delete_schedule"]
@property
def create_schedule(
self,
) -> Callable[[service.CreateScheduleRequest], operations_pb2.Operation]:
r"""Return a callable for the create schedule method over gRPC.
Creates a new Scheduled Notebook in a given project
and location.
Returns:
Callable[[~.CreateScheduleRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "create_schedule" not in self._stubs:
self._stubs["create_schedule"] = self.grpc_channel.unary_unary(
"/google.cloud.notebooks.v1.NotebookService/CreateSchedule",
request_serializer=service.CreateScheduleRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["create_schedule"]
@property
def trigger_schedule(
self,
) -> Callable[[service.TriggerScheduleRequest], operations_pb2.Operation]:
r"""Return a callable for the trigger schedule method over gRPC.
Triggers execution of an existing schedule.
Returns:
Callable[[~.TriggerScheduleRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "trigger_schedule" not in self._stubs:
self._stubs["trigger_schedule"] = self.grpc_channel.unary_unary(
"/google.cloud.notebooks.v1.NotebookService/TriggerSchedule",
request_serializer=service.TriggerScheduleRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["trigger_schedule"]
@property
def list_executions(
self,
) -> Callable[[service.ListExecutionsRequest], service.ListExecutionsResponse]:
r"""Return a callable for the list executions method over gRPC.
Lists executions in a given project and location
Returns:
Callable[[~.ListExecutionsRequest],
~.ListExecutionsResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_executions" not in self._stubs:
self._stubs["list_executions"] = self.grpc_channel.unary_unary(
"/google.cloud.notebooks.v1.NotebookService/ListExecutions",
request_serializer=service.ListExecutionsRequest.serialize,
response_deserializer=service.ListExecutionsResponse.deserialize,
)
return self._stubs["list_executions"]
@property
def get_execution(
self,
) -> Callable[[service.GetExecutionRequest], execution.Execution]:
r"""Return a callable for the get execution method over gRPC.
Gets details of executions
Returns:
Callable[[~.GetExecutionRequest],
~.Execution]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_execution" not in self._stubs:
self._stubs["get_execution"] = self.grpc_channel.unary_unary(
"/google.cloud.notebooks.v1.NotebookService/GetExecution",
request_serializer=service.GetExecutionRequest.serialize,
response_deserializer=execution.Execution.deserialize,
)
return self._stubs["get_execution"]
@property
def delete_execution(
self,
) -> Callable[[service.DeleteExecutionRequest], operations_pb2.Operation]:
r"""Return a callable for the delete execution method over gRPC.
Deletes execution
Returns:
Callable[[~.DeleteExecutionRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "delete_execution" not in self._stubs:
self._stubs["delete_execution"] = self.grpc_channel.unary_unary(
"/google.cloud.notebooks.v1.NotebookService/DeleteExecution",
request_serializer=service.DeleteExecutionRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["delete_execution"]
@property
def create_execution(
self,
) -> Callable[[service.CreateExecutionRequest], operations_pb2.Operation]:
r"""Return a callable for the create execution method over gRPC.
Creates a new Scheduled Notebook in a given project
and location.
Returns:
Callable[[~.CreateExecutionRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "create_execution" not in self._stubs:
self._stubs["create_execution"] = self.grpc_channel.unary_unary(
"/google.cloud.notebooks.v1.NotebookService/CreateExecution",
request_serializer=service.CreateExecutionRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["create_execution"]
    def close(self):
        """Release transport resources by closing the underlying gRPC channel."""
        self.grpc_channel.close()
__all__ = ("NotebookServiceGrpcTransport",)
| 43.198735 | 90 | 0.638255 |
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import grpc_helpers
from google.api_core import operations_v1
from google.api_core import gapic_v1
import google.auth
from google.auth import credentials as ga_credentials
from google.auth.transport.grpc import SslCredentials
import grpc
from google.cloud.notebooks_v1.types import environment
from google.cloud.notebooks_v1.types import execution
from google.cloud.notebooks_v1.types import instance
from google.cloud.notebooks_v1.types import schedule
from google.cloud.notebooks_v1.types import service
from google.longrunning import operations_pb2
from .base import NotebookServiceTransport, DEFAULT_CLIENT_INFO
class NotebookServiceGrpcTransport(NotebookServiceTransport):
_stubs: Dict[str, Callable]
def __init__(
self,
*,
host: str = "notebooks.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Sequence[str] = None,
channel: grpc.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
) -> None:
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
self._operations_client: Optional[operations_v1.OperationsClient] = None
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
if client_cert_source:
warnings.warn("client_cert_source is deprecated", DeprecationWarning)
if channel:
credentials = False
self._grpc_channel = channel
self._ssl_channel_credentials = None
else:
if api_mtls_endpoint:
host = api_mtls_endpoint
if client_cert_source:
cert, key = client_cert_source()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
self._ssl_channel_credentials = SslCredentials().ssl_credentials
else:
if client_cert_source_for_mtls and not ssl_channel_credentials:
cert, key = client_cert_source_for_mtls()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
super().__init__(
host=host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
always_use_jwt_access=always_use_jwt_access,
)
if not self._grpc_channel:
self._grpc_channel = type(self).create_channel(
self._host,
credentials=self._credentials,
credentials_file=credentials_file,
scopes=self._scopes,
ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
self._prep_wrapped_messages(client_info)
@classmethod
def create_channel(
cls,
host: str = "notebooks.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs,
) -> grpc.Channel:
return grpc_helpers.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
quota_project_id=quota_project_id,
default_scopes=cls.AUTH_SCOPES,
scopes=scopes,
default_host=cls.DEFAULT_HOST,
**kwargs,
)
@property
def grpc_channel(self) -> grpc.Channel:
return self._grpc_channel
@property
def operations_client(self) -> operations_v1.OperationsClient:
if self._operations_client is None:
self._operations_client = operations_v1.OperationsClient(self.grpc_channel)
return self._operations_client
@property
def list_instances(
self,
) -> Callable[[service.ListInstancesRequest], service.ListInstancesResponse]:
if "list_instances" not in self._stubs:
self._stubs["list_instances"] = self.grpc_channel.unary_unary(
"/google.cloud.notebooks.v1.NotebookService/ListInstances",
request_serializer=service.ListInstancesRequest.serialize,
response_deserializer=service.ListInstancesResponse.deserialize,
)
return self._stubs["list_instances"]
@property
def get_instance(self) -> Callable[[service.GetInstanceRequest], instance.Instance]:
if "get_instance" not in self._stubs:
self._stubs["get_instance"] = self.grpc_channel.unary_unary(
"/google.cloud.notebooks.v1.NotebookService/GetInstance",
request_serializer=service.GetInstanceRequest.serialize,
response_deserializer=instance.Instance.deserialize,
)
return self._stubs["get_instance"]
@property
def create_instance(
self,
) -> Callable[[service.CreateInstanceRequest], operations_pb2.Operation]:
if "create_instance" not in self._stubs:
self._stubs["create_instance"] = self.grpc_channel.unary_unary(
"/google.cloud.notebooks.v1.NotebookService/CreateInstance",
request_serializer=service.CreateInstanceRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["create_instance"]
@property
def register_instance(
self,
) -> Callable[[service.RegisterInstanceRequest], operations_pb2.Operation]:
if "register_instance" not in self._stubs:
self._stubs["register_instance"] = self.grpc_channel.unary_unary(
"/google.cloud.notebooks.v1.NotebookService/RegisterInstance",
request_serializer=service.RegisterInstanceRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["register_instance"]
@property
def set_instance_accelerator(
self,
) -> Callable[[service.SetInstanceAcceleratorRequest], operations_pb2.Operation]:
if "set_instance_accelerator" not in self._stubs:
self._stubs["set_instance_accelerator"] = self.grpc_channel.unary_unary(
"/google.cloud.notebooks.v1.NotebookService/SetInstanceAccelerator",
request_serializer=service.SetInstanceAcceleratorRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["set_instance_accelerator"]
@property
def set_instance_machine_type(
self,
) -> Callable[[service.SetInstanceMachineTypeRequest], operations_pb2.Operation]:
if "set_instance_machine_type" not in self._stubs:
self._stubs["set_instance_machine_type"] = self.grpc_channel.unary_unary(
"/google.cloud.notebooks.v1.NotebookService/SetInstanceMachineType",
request_serializer=service.SetInstanceMachineTypeRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["set_instance_machine_type"]
@property
def update_instance_config(
self,
) -> Callable[[service.UpdateInstanceConfigRequest], operations_pb2.Operation]:
if "update_instance_config" not in self._stubs:
self._stubs["update_instance_config"] = self.grpc_channel.unary_unary(
"/google.cloud.notebooks.v1.NotebookService/UpdateInstanceConfig",
request_serializer=service.UpdateInstanceConfigRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["update_instance_config"]
@property
def update_shielded_instance_config(
self,
) -> Callable[
[service.UpdateShieldedInstanceConfigRequest], operations_pb2.Operation
]:
if "update_shielded_instance_config" not in self._stubs:
self._stubs[
"update_shielded_instance_config"
] = self.grpc_channel.unary_unary(
"/google.cloud.notebooks.v1.NotebookService/UpdateShieldedInstanceConfig",
request_serializer=service.UpdateShieldedInstanceConfigRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["update_shielded_instance_config"]
@property
def set_instance_labels(
self,
) -> Callable[[service.SetInstanceLabelsRequest], operations_pb2.Operation]:
if "set_instance_labels" not in self._stubs:
self._stubs["set_instance_labels"] = self.grpc_channel.unary_unary(
"/google.cloud.notebooks.v1.NotebookService/SetInstanceLabels",
request_serializer=service.SetInstanceLabelsRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["set_instance_labels"]
@property
def delete_instance(
self,
) -> Callable[[service.DeleteInstanceRequest], operations_pb2.Operation]:
if "delete_instance" not in self._stubs:
self._stubs["delete_instance"] = self.grpc_channel.unary_unary(
"/google.cloud.notebooks.v1.NotebookService/DeleteInstance",
request_serializer=service.DeleteInstanceRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["delete_instance"]
@property
def start_instance(
self,
) -> Callable[[service.StartInstanceRequest], operations_pb2.Operation]:
if "start_instance" not in self._stubs:
self._stubs["start_instance"] = self.grpc_channel.unary_unary(
"/google.cloud.notebooks.v1.NotebookService/StartInstance",
request_serializer=service.StartInstanceRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["start_instance"]
@property
def stop_instance(
self,
) -> Callable[[service.StopInstanceRequest], operations_pb2.Operation]:
if "stop_instance" not in self._stubs:
self._stubs["stop_instance"] = self.grpc_channel.unary_unary(
"/google.cloud.notebooks.v1.NotebookService/StopInstance",
request_serializer=service.StopInstanceRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["stop_instance"]
@property
def reset_instance(
self,
) -> Callable[[service.ResetInstanceRequest], operations_pb2.Operation]:
if "reset_instance" not in self._stubs:
self._stubs["reset_instance"] = self.grpc_channel.unary_unary(
"/google.cloud.notebooks.v1.NotebookService/ResetInstance",
request_serializer=service.ResetInstanceRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["reset_instance"]
@property
def report_instance_info(
self,
) -> Callable[[service.ReportInstanceInfoRequest], operations_pb2.Operation]:
if "report_instance_info" not in self._stubs:
self._stubs["report_instance_info"] = self.grpc_channel.unary_unary(
"/google.cloud.notebooks.v1.NotebookService/ReportInstanceInfo",
request_serializer=service.ReportInstanceInfoRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["report_instance_info"]
@property
def is_instance_upgradeable(
self,
) -> Callable[
[service.IsInstanceUpgradeableRequest], service.IsInstanceUpgradeableResponse
]:
if "is_instance_upgradeable" not in self._stubs:
self._stubs["is_instance_upgradeable"] = self.grpc_channel.unary_unary(
"/google.cloud.notebooks.v1.NotebookService/IsInstanceUpgradeable",
request_serializer=service.IsInstanceUpgradeableRequest.serialize,
response_deserializer=service.IsInstanceUpgradeableResponse.deserialize,
)
return self._stubs["is_instance_upgradeable"]
@property
def get_instance_health(
self,
) -> Callable[
[service.GetInstanceHealthRequest], service.GetInstanceHealthResponse
]:
if "get_instance_health" not in self._stubs:
self._stubs["get_instance_health"] = self.grpc_channel.unary_unary(
"/google.cloud.notebooks.v1.NotebookService/GetInstanceHealth",
request_serializer=service.GetInstanceHealthRequest.serialize,
response_deserializer=service.GetInstanceHealthResponse.deserialize,
)
return self._stubs["get_instance_health"]
@property
def upgrade_instance(
self,
) -> Callable[[service.UpgradeInstanceRequest], operations_pb2.Operation]:
if "upgrade_instance" not in self._stubs:
self._stubs["upgrade_instance"] = self.grpc_channel.unary_unary(
"/google.cloud.notebooks.v1.NotebookService/UpgradeInstance",
request_serializer=service.UpgradeInstanceRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["upgrade_instance"]
@property
def rollback_instance(
self,
) -> Callable[[service.RollbackInstanceRequest], operations_pb2.Operation]:
if "rollback_instance" not in self._stubs:
self._stubs["rollback_instance"] = self.grpc_channel.unary_unary(
"/google.cloud.notebooks.v1.NotebookService/RollbackInstance",
request_serializer=service.RollbackInstanceRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["rollback_instance"]
@property
def upgrade_instance_internal(
self,
) -> Callable[[service.UpgradeInstanceInternalRequest], operations_pb2.Operation]:
if "upgrade_instance_internal" not in self._stubs:
self._stubs["upgrade_instance_internal"] = self.grpc_channel.unary_unary(
"/google.cloud.notebooks.v1.NotebookService/UpgradeInstanceInternal",
request_serializer=service.UpgradeInstanceInternalRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["upgrade_instance_internal"]
@property
def list_environments(
self,
) -> Callable[[service.ListEnvironmentsRequest], service.ListEnvironmentsResponse]:
if "list_environments" not in self._stubs:
self._stubs["list_environments"] = self.grpc_channel.unary_unary(
"/google.cloud.notebooks.v1.NotebookService/ListEnvironments",
request_serializer=service.ListEnvironmentsRequest.serialize,
response_deserializer=service.ListEnvironmentsResponse.deserialize,
)
return self._stubs["list_environments"]
@property
def get_environment(
self,
) -> Callable[[service.GetEnvironmentRequest], environment.Environment]:
if "get_environment" not in self._stubs:
self._stubs["get_environment"] = self.grpc_channel.unary_unary(
"/google.cloud.notebooks.v1.NotebookService/GetEnvironment",
request_serializer=service.GetEnvironmentRequest.serialize,
response_deserializer=environment.Environment.deserialize,
)
return self._stubs["get_environment"]
@property
def create_environment(
self,
) -> Callable[[service.CreateEnvironmentRequest], operations_pb2.Operation]:
if "create_environment" not in self._stubs:
self._stubs["create_environment"] = self.grpc_channel.unary_unary(
"/google.cloud.notebooks.v1.NotebookService/CreateEnvironment",
request_serializer=service.CreateEnvironmentRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["create_environment"]
@property
def delete_environment(
self,
) -> Callable[[service.DeleteEnvironmentRequest], operations_pb2.Operation]:
if "delete_environment" not in self._stubs:
self._stubs["delete_environment"] = self.grpc_channel.unary_unary(
"/google.cloud.notebooks.v1.NotebookService/DeleteEnvironment",
request_serializer=service.DeleteEnvironmentRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["delete_environment"]
@property
def list_schedules(
self,
) -> Callable[[service.ListSchedulesRequest], service.ListSchedulesResponse]:
if "list_schedules" not in self._stubs:
self._stubs["list_schedules"] = self.grpc_channel.unary_unary(
"/google.cloud.notebooks.v1.NotebookService/ListSchedules",
request_serializer=service.ListSchedulesRequest.serialize,
response_deserializer=service.ListSchedulesResponse.deserialize,
)
return self._stubs["list_schedules"]
@property
def get_schedule(self) -> Callable[[service.GetScheduleRequest], schedule.Schedule]:
if "get_schedule" not in self._stubs:
self._stubs["get_schedule"] = self.grpc_channel.unary_unary(
"/google.cloud.notebooks.v1.NotebookService/GetSchedule",
request_serializer=service.GetScheduleRequest.serialize,
response_deserializer=schedule.Schedule.deserialize,
)
return self._stubs["get_schedule"]
@property
def delete_schedule(
self,
) -> Callable[[service.DeleteScheduleRequest], operations_pb2.Operation]:
if "delete_schedule" not in self._stubs:
self._stubs["delete_schedule"] = self.grpc_channel.unary_unary(
"/google.cloud.notebooks.v1.NotebookService/DeleteSchedule",
request_serializer=service.DeleteScheduleRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["delete_schedule"]
@property
def create_schedule(
self,
) -> Callable[[service.CreateScheduleRequest], operations_pb2.Operation]:
if "create_schedule" not in self._stubs:
self._stubs["create_schedule"] = self.grpc_channel.unary_unary(
"/google.cloud.notebooks.v1.NotebookService/CreateSchedule",
request_serializer=service.CreateScheduleRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["create_schedule"]
@property
def trigger_schedule(
self,
) -> Callable[[service.TriggerScheduleRequest], operations_pb2.Operation]:
if "trigger_schedule" not in self._stubs:
self._stubs["trigger_schedule"] = self.grpc_channel.unary_unary(
"/google.cloud.notebooks.v1.NotebookService/TriggerSchedule",
request_serializer=service.TriggerScheduleRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["trigger_schedule"]
@property
def list_executions(
self,
) -> Callable[[service.ListExecutionsRequest], service.ListExecutionsResponse]:
if "list_executions" not in self._stubs:
self._stubs["list_executions"] = self.grpc_channel.unary_unary(
"/google.cloud.notebooks.v1.NotebookService/ListExecutions",
request_serializer=service.ListExecutionsRequest.serialize,
response_deserializer=service.ListExecutionsResponse.deserialize,
)
return self._stubs["list_executions"]
@property
def get_execution(
self,
) -> Callable[[service.GetExecutionRequest], execution.Execution]:
if "get_execution" not in self._stubs:
self._stubs["get_execution"] = self.grpc_channel.unary_unary(
"/google.cloud.notebooks.v1.NotebookService/GetExecution",
request_serializer=service.GetExecutionRequest.serialize,
response_deserializer=execution.Execution.deserialize,
)
return self._stubs["get_execution"]
@property
def delete_execution(
self,
) -> Callable[[service.DeleteExecutionRequest], operations_pb2.Operation]:
if "delete_execution" not in self._stubs:
self._stubs["delete_execution"] = self.grpc_channel.unary_unary(
"/google.cloud.notebooks.v1.NotebookService/DeleteExecution",
request_serializer=service.DeleteExecutionRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["delete_execution"]
@property
def create_execution(
self,
) -> Callable[[service.CreateExecutionRequest], operations_pb2.Operation]:
if "create_execution" not in self._stubs:
self._stubs["create_execution"] = self.grpc_channel.unary_unary(
"/google.cloud.notebooks.v1.NotebookService/CreateExecution",
request_serializer=service.CreateExecutionRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["create_execution"]
def close(self):
self.grpc_channel.close()
__all__ = ("NotebookServiceGrpcTransport",)
| true | true |
f7f5a8b1312d0b9e0bf9392e892850a42ae60dc6 | 4,146 | py | Python | pgoapi/protos/pogoprotos/networking/requests/messages/add_fort_modifier_message_pb2.py | linherest/pgoapi | e3bdce71b06c099663e9796c8df166883059edd9 | [
"MIT"
] | 14 | 2017-03-28T16:32:24.000Z | 2021-03-13T23:03:57.000Z | pgoapi/protos/pogoprotos/networking/requests/messages/add_fort_modifier_message_pb2.py | linherest/pgoapi | e3bdce71b06c099663e9796c8df166883059edd9 | [
"MIT"
] | 8 | 2017-03-01T07:56:09.000Z | 2017-08-15T07:37:12.000Z | pgoapi/protos/pogoprotos/networking/requests/messages/add_fort_modifier_message_pb2.py | linherest/pgoapi | e3bdce71b06c099663e9796c8df166883059edd9 | [
"MIT"
] | 15 | 2017-02-24T01:30:23.000Z | 2021-06-27T08:46:43.000Z | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: pogoprotos/networking/requests/messages/add_fort_modifier_message.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from pogoprotos.inventory.item import item_id_pb2 as pogoprotos_dot_inventory_dot_item_dot_item__id__pb2
# File-level descriptor for add_fort_modifier_message.proto. The serialized_pb
# blob is the wire-format FileDescriptorProto emitted by protoc and must not
# be edited by hand.
DESCRIPTOR = _descriptor.FileDescriptor(
  name='pogoprotos/networking/requests/messages/add_fort_modifier_message.proto',
  package='pogoprotos.networking.requests.messages',
  syntax='proto3',
  serialized_pb=_b('\nGpogoprotos/networking/requests/messages/add_fort_modifier_message.proto\x12\'pogoprotos.networking.requests.messages\x1a\'pogoprotos/inventory/item/item_id.proto\"\x96\x01\n\x16\x41\x64\x64\x46ortModifierMessage\x12\x38\n\rmodifier_type\x18\x01 \x01(\x0e\x32!.pogoprotos.inventory.item.ItemId\x12\x0f\n\x07\x66ort_id\x18\x02 \x01(\t\x12\x17\n\x0fplayer_latitude\x18\x03 \x01(\x01\x12\x18\n\x10player_longitude\x18\x04 \x01(\x01\x62\x06proto3')
  ,
  # Depends on item_id.proto for the ItemId enum referenced by modifier_type.
  dependencies=[pogoprotos_dot_inventory_dot_item_dot_item__id__pb2.DESCRIPTOR,])
# Message descriptor for AddFortModifierMessage: one ItemId enum field, one
# string field and two doubles. Generated by protoc -- do not edit by hand.
_ADDFORTMODIFIERMESSAGE = _descriptor.Descriptor(
  name='AddFortModifierMessage',
  full_name='pogoprotos.networking.requests.messages.AddFortModifierMessage',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    # Field 1: modifier_type (enum); its enum_type is patched in after this
    # descriptor is constructed, see below.
    _descriptor.FieldDescriptor(
      name='modifier_type', full_name='pogoprotos.networking.requests.messages.AddFortModifierMessage.modifier_type', index=0,
      number=1, type=14, cpp_type=8, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    # Field 2: fort_id (string).
    _descriptor.FieldDescriptor(
      name='fort_id', full_name='pogoprotos.networking.requests.messages.AddFortModifierMessage.fort_id', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    # Field 3: player_latitude (double).
    _descriptor.FieldDescriptor(
      name='player_latitude', full_name='pogoprotos.networking.requests.messages.AddFortModifierMessage.player_latitude', index=2,
      number=3, type=1, cpp_type=5, label=1,
      has_default_value=False, default_value=float(0),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    # Field 4: player_longitude (double).
    _descriptor.FieldDescriptor(
      name='player_longitude', full_name='pogoprotos.networking.requests.messages.AddFortModifierMessage.player_longitude', index=3,
      number=4, type=1, cpp_type=5, label=1,
      has_default_value=False, default_value=float(0),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  # Byte offsets of this message definition within serialized_pb above.
  serialized_start=158,
  serialized_end=308,
)
# Wire up the cross-file enum reference and register the file descriptor with
# the default symbol database.
_ADDFORTMODIFIERMESSAGE.fields_by_name['modifier_type'].enum_type = pogoprotos_dot_inventory_dot_item_dot_item__id__pb2._ITEMID
DESCRIPTOR.message_types_by_name['AddFortModifierMessage'] = _ADDFORTMODIFIERMESSAGE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
# Concrete message class, synthesized at import time from the descriptor by
# the protobuf reflection metaclass.
AddFortModifierMessage = _reflection.GeneratedProtocolMessageType('AddFortModifierMessage', (_message.Message,), dict(
  DESCRIPTOR = _ADDFORTMODIFIERMESSAGE,
  __module__ = 'pogoprotos.networking.requests.messages.add_fort_modifier_message_pb2'
  # @@protoc_insertion_point(class_scope:pogoprotos.networking.requests.messages.AddFortModifierMessage)
  ))
_sym_db.RegisterMessage(AddFortModifierMessage)
# @@protoc_insertion_point(module_scope)
| 44.106383 | 466 | 0.790159 |
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
_sym_db = _symbol_database.Default()
from pogoprotos.inventory.item import item_id_pb2 as pogoprotos_dot_inventory_dot_item_dot_item__id__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='pogoprotos/networking/requests/messages/add_fort_modifier_message.proto',
package='pogoprotos.networking.requests.messages',
syntax='proto3',
serialized_pb=_b('\nGpogoprotos/networking/requests/messages/add_fort_modifier_message.proto\x12\'pogoprotos.networking.requests.messages\x1a\'pogoprotos/inventory/item/item_id.proto\"\x96\x01\n\x16\x41\x64\x64\x46ortModifierMessage\x12\x38\n\rmodifier_type\x18\x01 \x01(\x0e\x32!.pogoprotos.inventory.item.ItemId\x12\x0f\n\x07\x66ort_id\x18\x02 \x01(\t\x12\x17\n\x0fplayer_latitude\x18\x03 \x01(\x01\x12\x18\n\x10player_longitude\x18\x04 \x01(\x01\x62\x06proto3')
,
dependencies=[pogoprotos_dot_inventory_dot_item_dot_item__id__pb2.DESCRIPTOR,])
_ADDFORTMODIFIERMESSAGE = _descriptor.Descriptor(
name='AddFortModifierMessage',
full_name='pogoprotos.networking.requests.messages.AddFortModifierMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='modifier_type', full_name='pogoprotos.networking.requests.messages.AddFortModifierMessage.modifier_type', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='fort_id', full_name='pogoprotos.networking.requests.messages.AddFortModifierMessage.fort_id', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='player_latitude', full_name='pogoprotos.networking.requests.messages.AddFortModifierMessage.player_latitude', index=2,
number=3, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='player_longitude', full_name='pogoprotos.networking.requests.messages.AddFortModifierMessage.player_longitude', index=3,
number=4, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=158,
serialized_end=308,
)
_ADDFORTMODIFIERMESSAGE.fields_by_name['modifier_type'].enum_type = pogoprotos_dot_inventory_dot_item_dot_item__id__pb2._ITEMID
DESCRIPTOR.message_types_by_name['AddFortModifierMessage'] = _ADDFORTMODIFIERMESSAGE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
AddFortModifierMessage = _reflection.GeneratedProtocolMessageType('AddFortModifierMessage', (_message.Message,), dict(
DESCRIPTOR = _ADDFORTMODIFIERMESSAGE,
__module__ = 'pogoprotos.networking.requests.messages.add_fort_modifier_message_pb2'
# @@protoc_insertion_point(class_scope:pogoprotos.networking.requests.messages.AddFortModifierMessage)
))
_sym_db.RegisterMessage(AddFortModifierMessage)
# @@protoc_insertion_point(module_scope)
| true | true |
f7f5a913b1a8d7482826a294101b9d878c06db31 | 162 | py | Python | CodingBat/Warmup-1/diff21.py | N-l1/dmoj | bbd55ab45731774385805eb31ea790454a3a6819 | [
"MIT"
] | null | null | null | CodingBat/Warmup-1/diff21.py | N-l1/dmoj | bbd55ab45731774385805eb31ea790454a3a6819 | [
"MIT"
] | null | null | null | CodingBat/Warmup-1/diff21.py | N-l1/dmoj | bbd55ab45731774385805eb31ea790454a3a6819 | [
"MIT"
] | null | null | null | """
Warmup-1 > diff21
Find this problem at:
https://codingbat.com/prob/p197466
"""
def diff21(n):
    """Return the absolute difference between n and 21, doubled when n > 21."""
    delta = 21 - n
    return delta if delta >= 0 else -2 * delta
| 13.5 | 34 | 0.58642 |
def diff21(n):
    """Compute |n - 21|, counting the distance double when n exceeds 21."""
    if n <= 21:
        return 21 - n
    return 2 * (n - 21)
| true | true |
f7f5ab09d521d1856fbecab111d78475713ab1df | 114,757 | py | Python | jenkins_jobs/modules/triggers.py | temeo/jenkins-job-builder | 9337c8d61497316ed832c427ee5cc8ebadf27a03 | [
"Apache-2.0"
] | null | null | null | jenkins_jobs/modules/triggers.py | temeo/jenkins-job-builder | 9337c8d61497316ed832c427ee5cc8ebadf27a03 | [
"Apache-2.0"
] | null | null | null | jenkins_jobs/modules/triggers.py | temeo/jenkins-job-builder | 9337c8d61497316ed832c427ee5cc8ebadf27a03 | [
"Apache-2.0"
] | 1 | 2022-03-31T07:54:13.000Z | 2022-03-31T07:54:13.000Z | # Copyright 2012 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Triggers define what causes a Jenkins job to start building.
**Component**: triggers
:Macro: trigger
:Entry Point: jenkins_jobs.triggers
Example::
job:
name: test_job
triggers:
- timed: '@daily'
"""
import logging
import pkg_resources
import re
import sys
import xml.etree.ElementTree as XML
import six
from jenkins_jobs.errors import InvalidAttributeError
from jenkins_jobs.errors import JenkinsJobsException
from jenkins_jobs.errors import MissingAttributeError
import jenkins_jobs.modules.base
from jenkins_jobs.modules import hudson_model
import jenkins_jobs.modules.helpers as helpers
# Module-level logger; __name__ is already a str, so the str() wrapper the
# original carried was redundant.
logger = logging.getLogger(__name__)
def gerrit_handle_legacy_configuration(data):
    """Normalize deprecated Gerrit trigger settings in place.

    Converts legacy camelCase keys (``triggerOnChangeMergedEvent`` etc.) to
    their hyphenated equivalents and folds the old per-event boolean flags
    (``trigger-on-*``) into the modern ``trigger-on`` event list, logging a
    deprecation warning for every legacy form found.

    :param data: trigger configuration mapping; mutated in place.
    :raises JenkinsJobsException: if both the old and the new event formats
        are present at once, or if a legacy comment-added trigger lacks its
        approval category/value settings.
    """
    camel_case = re.compile("[A-Z]")

    def to_hyphenated(attr):
        """Convert strings like triggerOn to trigger-on."""
        return camel_case.sub(lambda match: "-%s" % match.group(0).lower(), attr)

    def migrate_keys(mapping, legacy_keys):
        # Move each legacy camelCase key that is present to its hyphenated
        # name, warning the user, then drop the old key.
        for legacy in legacy_keys:
            if legacy not in mapping:
                continue
            replacement = to_hyphenated(legacy)
            logger.warning(
                "'%s' is deprecated and will be removed after "
                "1.0.0, please use '%s' instead",
                legacy,
                replacement,
            )
            mapping[replacement] = mapping[legacy]
            del mapping[legacy]

    migrate_keys(
        data,
        [
            "triggerOnPatchsetUploadedEvent",
            "triggerOnChangeAbandonedEvent",
            "triggerOnChangeMergedEvent",
            "triggerOnChangeRestoredEvent",
            "triggerOnCommentAddedEvent",
            "triggerOnDraftPublishedEvent",
            "triggerOnRefUpdatedEvent",
            "triggerApprovalCategory",
            "triggerApprovalValue",
            "overrideVotes",
            "gerritBuildSuccessfulVerifiedValue",
            "gerritBuildFailedVerifiedValue",
            "failureMessage",
            "skipVote",
        ],
    )
    for project in data.get("projects", []):
        migrate_keys(
            project,
            [
                "projectCompareType",
                "projectPattern",
                "branchCompareType",
                "branchPattern",
            ],
        )

    # Preserve whatever mapping class the YAML loader produced (it may be an
    # ordered, project-specific dict type) when building new sub-mappings.
    mapping_obj_type = type(data)

    old_format_events = mapping_obj_type(
        (key, value)
        for key, value in six.iteritems(data)
        if key.startswith("trigger-on-")
    )
    trigger_on = data.setdefault("trigger-on", [])

    if old_format_events:
        logger.warning(
            "The events: %s; which you used is/are deprecated. "
            "Please use 'trigger-on' instead.",
            ", ".join(old_format_events),
        )
        if trigger_on:
            raise JenkinsJobsException(
                "Both, the new format (trigger-on) and old format (trigger-on-*) "
                "gerrit events format found. Please use either the new or the old "
                "format of trigger events definition."
            )

    trigger_on.extend(
        name[len("trigger-on-") :]
        for name, enabled in six.iteritems(old_format_events)
        if enabled
    )

    for index, event in enumerate(trigger_on):
        if event != "comment-added-event":
            continue
        # Replace the bare event name with a mapping carrying the approval
        # settings; the legacy format kept them in separate top-level keys.
        trigger_on[index] = replacement = mapping_obj_type()
        try:
            replacement["comment-added-event"] = mapping_obj_type(
                (
                    ("approval-category", data["trigger-approval-category"]),
                    ("approval-value", data["trigger-approval-value"]),
                )
            )
        except KeyError:
            raise JenkinsJobsException(
                "The comment-added-event trigger requires which approval "
                "category and value you want to trigger the job. "
                "It should be specified by the approval-category "
                "and approval-value properties."
            )
def build_gerrit_triggers(xml_parent, data, plugin_ver):
    """Emit the <triggerOnEvents> section of a Gerrit trigger.

    Simple (parameterless) events are emitted straight from a name->class
    lookup table; the parameterized events (patchset-created, comment-added,
    comment-added-contains) are handled individually.

    :param xml_parent: the GerritTrigger XML element to append to.
    :param data: trigger configuration mapping (reads ``trigger-on``).
    :param plugin_ver: parsed Gerrit Trigger plugin version, used to gate
        options introduced in newer plugin releases.
    :raises JenkinsJobsException: for an unknown simple event name.
    """
    available_simple_triggers = {
        "change-abandoned-event": "PluginChangeAbandonedEvent",
        "change-merged-event": "PluginChangeMergedEvent",
        "change-restored-event": "PluginChangeRestoredEvent",
        "draft-published-event": "PluginDraftPublishedEvent",
        "patchset-uploaded-event": "PluginPatchsetCreatedEvent",
        "patchset-created-event": "PluginPatchsetCreatedEvent",
        "private-state-changed-event": "PluginPrivateStateChangedEvent",
        "ref-updated-event": "PluginRefUpdatedEvent",
        "topic-changed-event": "PluginTopicChangedEvent",
        "wip-state-changed-event": "PluginWipStateChangedEvent",
    }
    tag_namespace = (
        "com.sonyericsson.hudson.plugins.gerrit.trigger." "hudsontrigger.events"
    )

    trigger_on_events = XML.SubElement(xml_parent, "triggerOnEvents")

    for event in data.get("trigger-on", []):
        if isinstance(event, six.string_types):
            tag_name = available_simple_triggers.get(event)
            if event == "patchset-uploaded-event":
                logger.warning(
                    "'%s' is deprecated. Use 'patchset-created-event' "
                    "format instead.",
                    event,
                )

            if not tag_name:
                # dict.keys() returns a view on Python 3, which cannot be
                # concatenated with a list; convert it explicitly so the
                # intended error message is built instead of a TypeError.
                known = ", ".join(
                    list(available_simple_triggers)
                    + ["comment-added-event", "comment-added-contains-event"]
                )
                msg = (
                    "The event '%s' under 'trigger-on' is not one of the " "known: %s."
                ) % (event, known)
                raise JenkinsJobsException(msg)
            XML.SubElement(trigger_on_events, "%s.%s" % (tag_namespace, tag_name))
        else:
            if "patchset-created-event" in event.keys():
                pce = event["patchset-created-event"]
                pc = XML.SubElement(
                    trigger_on_events,
                    "%s.%s" % (tag_namespace, "PluginPatchsetCreatedEvent"),
                )
                mapping = [
                    ("exclude-drafts", "excludeDrafts", False),
                    ("exclude-trivial-rebase", "excludeTrivialRebase", False),
                    ("exclude-no-code-change", "excludeNoCodeChange", False),
                    ("exclude-private", "excludePrivateState", False),
                    ("exclude-wip", "excludeWipState", False),
                ]
                # commitMessageContainsRegEx only exists from plugin 2.32.0 on.
                if plugin_ver >= pkg_resources.parse_version("2.32.0"):
                    mapping.append(
                        (
                            "commit-message-contains-regex",
                            "commitMessageContainsRegEx",
                            "",
                        )
                    )
                helpers.convert_mapping_to_xml(pc, pce, mapping, fail_required=True)

            if "comment-added-event" in event.keys():
                comment_added_event = event["comment-added-event"]
                cadded = XML.SubElement(
                    trigger_on_events,
                    "%s.%s" % (tag_namespace, "PluginCommentAddedEvent"),
                )
                mapping = [
                    ("approval-category", "verdictCategory", None),
                    ("approval-value", "commentAddedTriggerApprovalValue", None),
                ]
                helpers.convert_mapping_to_xml(
                    cadded, comment_added_event, mapping, fail_required=True
                )

            if "comment-added-contains-event" in event.keys():
                comment_added_event = event["comment-added-contains-event"]
                caddedc = XML.SubElement(
                    trigger_on_events,
                    "%s.%s" % (tag_namespace, "PluginCommentAddedContainsEvent"),
                )
                XML.SubElement(
                    caddedc, "commentAddedCommentContains"
                ).text = comment_added_event["comment-contains-value"]
def build_gerrit_skip_votes(xml_parent, data, plugin_ver):
    """Emit the <skipVote> section of a Gerrit trigger.

    Each build outcome gets a boolean child element telling the plugin
    whether to skip voting for that outcome.  The ``aborted`` outcome is
    only emitted for plugin versions that know about it (>= 2.32.0).
    """
    vote_map = [
        ("successful", "onSuccessful"),
        ("failed", "onFailed"),
        ("unstable", "onUnstable"),
        ("notbuilt", "onNotBuilt"),
    ]
    if plugin_ver >= pkg_resources.parse_version("2.32.0"):
        vote_map.append(("aborted", "onAborted"))

    skip_vote_node = XML.SubElement(xml_parent, "skipVote")
    requested = data.get("skip-vote", {})
    for outcome, element_name in vote_map:
        flag = requested.get(outcome, False)
        XML.SubElement(skip_vote_node, element_name).text = str(flag).lower()
def build_cancellation_policy(xml_parent, data, plugin_ver):
    """Emit the <buildCancellationPolicy> section of a Gerrit trigger.

    Per-job build cancellation rules were introduced in plugin 2.32.0; on
    older versions nothing is emitted and the server-wide configuration
    applies.  The node is always written with enabled=true; the individual
    abort options default to false when not configured.
    """
    if not (plugin_ver >= pkg_resources.parse_version("2.32.0")):
        return

    policy = data.get("build-cancellation-policy", {})
    policy_node = XML.SubElement(xml_parent, "buildCancellationPolicy")
    XML.SubElement(policy_node, "enabled").text = "true"
    for option_key, element_name in (
        ("abort-new-patchsets", "abortNewPatchsets"),
        ("abort-manual-patchsets", "abortManualPatchsets"),
        ("abort-same-topic", "abortSameTopic"),
    ):
        flag = policy.get(option_key, False)
        XML.SubElement(policy_node, element_name).text = str(flag).lower()
def build_gerrit_parameter_modes(xml_parent, data, plugin_ver):
    """Emit the parameter-encoding settings of a Gerrit trigger.

    Plugin versions before 2.18.0 only understand the boolean
    ``readableMessage`` / ``noNameAndEmailParameters`` flags; 2.18.0
    replaced them with four per-parameter "mode" elements that accept
    NONE / PLAIN / BASE64.  On new plugin versions the deprecated boolean
    options are still honoured by shifting the defaults of the
    corresponding mode options.
    """
    if plugin_ver < pkg_resources.parse_version("2.18.0"):
        # The *-parameter-mode options do not exist on old plugin versions;
        # warn about any that the user supplied so they are not silently
        # ignored.
        for parameter_name in (
            "commit-message",
            "name-and-email",
            "change-subject",
            "comment-text",
        ):
            parameter_mode = "{}-parameter-mode".format(parameter_name)
            if parameter_mode in data:
                logger.warning(
                    "Gerrit Trigger property '{}' is not supported in this "
                    "plugin version".format(parameter_mode)
                )
        # Old-style boolean flags, both defaulting to false.
        deprecated_mappings = (
            ("no-name-and-email", "noNameAndEmailParameters", False),
            ("readable-message", "readableMessage", False),
        )
        helpers.convert_mapping_to_xml(
            xml_parent, data, deprecated_mappings, fail_required=True
        )
    else:  # version >= 2.18.0
        # The deprecated booleans are still read (and warned about) so that
        # existing job definitions keep their behavior: they only shift the
        # default of the corresponding new mode option below.
        readable_message = data.get("readable-message")
        if readable_message is not None:
            logger.warning("Gerrit Trigger property 'readable-message' is deprecated")
        no_name_and_email = data.get("no-name-and-email")
        if no_name_and_email is not None:
            logger.warning("Gerrit Trigger property 'no-name-and-email' is deprecated")

        allowed_parameter_modes = ["NONE", "PLAIN", "BASE64"]
        # 4-tuples: (yaml key, xml tag, default, allowed values).
        new_mappings = (
            (
                "commit-message-parameter-mode",
                "commitMessageParameterMode",
                "BASE64" if readable_message is not True else "PLAIN",
                allowed_parameter_modes,
            ),
            (
                "name-and-email-parameter-mode",
                "nameAndEmailParameterMode",
                "PLAIN" if no_name_and_email is not True else "NONE",
                allowed_parameter_modes,
            ),
            (
                "change-subject-parameter-mode",
                "changeSubjectParameterMode",
                "PLAIN",
                allowed_parameter_modes,
            ),
            (
                "comment-text-parameter-mode",
                "commentTextParameterMode",
                "BASE64",
                allowed_parameter_modes,
            ),
        )
        helpers.convert_mapping_to_xml(
            xml_parent, data, new_mappings, fail_required=True
        )
def gerrit(registry, xml_parent, data):
    """yaml: gerrit
    Trigger on a Gerrit event.

    Requires the Jenkins :jenkins-plugins:`Gerrit Trigger Plugin
    <gerrit-trigger>` version >= 2.6.0.

    :arg list trigger-on: Events to react on. Please use either the new
        **trigger-on**, or the old **trigger-on-*** events definitions. You
        cannot use both at once.

        .. _trigger_on:

        :Trigger on:

            * **patchset-created-event** (`dict`) -- Trigger upon patchset
              creation.

              :Patchset created:
                  * **exclude-drafts** (`bool`) -- exclude drafts (default false)
                  * **exclude-trivial-rebase** (`bool`) -- exclude trivial rebase
                    (default false)
                  * **exclude-no-code-change** (`bool`) -- exclude no code change
                    (default false)
                  * **exclude-private** (`bool`) -- exclude private change
                    (default false)
                  * **exclude-wip** (`bool`) -- exclude wip change
                    (default false)
                  * **commit-message-contains-regex** (`str`) -- Commit message
                    contains regular expression. (default '')
                    Requires Gerrit Trigger Plugin >= 2.32.0

              exclude-private|exclude-wip needs
              Gerrit Trigger v2.29.0

              Exclude drafts|trivial-rebase|no-code-change needs
              Gerrit Trigger v2.12.0

            * **patchset-uploaded-event** -- Trigger upon patchset creation
              (this is an alias for `patchset-created-event`).

              .. deprecated:: 1.1.0  Please use :ref:`trigger-on <trigger_on>`.

            * **change-abandoned-event** -- Trigger on patchset abandoned.
              Requires Gerrit Trigger Plugin version >= 2.8.0.
            * **change-merged-event** -- Trigger on change merged
            * **change-restored-event** -- Trigger on change restored. Requires
              Gerrit Trigger Plugin version >= 2.8.0
            * **draft-published-event** -- Trigger on draft published event.
            * **ref-updated-event** -- Trigger on ref-updated.
              Gerrit Trigger Plugin version >= 2.29.0
            * **topic-changed-event** -- Trigger on topic-changed.
              Gerrit Trigger Plugin version >= 2.26.0
            * **private-state-changed-event** -- Trigger on private state changed event.
            * **wip-state-changed-event** -- Trigger on wip state changed event.
              Gerrit Trigger Plugin version >= 2.8.0
            * **comment-added-event** (`dict`) -- Trigger on comment added.

              :Comment added:
                  * **approval-category** (`str`) -- Approval (verdict) category
                    (for example 'APRV', 'CRVW', 'VRIF' -- see `Gerrit access
                    control
                    <https://gerrit-review.googlesource.com/Documentation/
                    access-control.html#access_categories>`_

                  * **approval-value** -- Approval value for the comment added.

            * **comment-added-contains-event** (`dict`) -- Trigger on comment
              added contains Regular Expression.

              :Comment added contains:
                  * **comment-contains-value** (`str`) -- Comment contains
                    Regular Expression value.

    :arg bool trigger-on-patchset-uploaded-event: Trigger on patchset upload.

        .. deprecated:: 1.1.0. Please use :ref:`trigger-on <trigger_on>`.

    :arg bool trigger-on-change-abandoned-event: Trigger on change abandoned.
        Requires Gerrit Trigger Plugin version >= 2.8.0

        .. deprecated:: 1.1.0. Please use :ref:`trigger-on <trigger_on>`.

    :arg bool trigger-on-change-merged-event: Trigger on change merged

        .. deprecated:: 1.1.0. Please use :ref:`trigger-on <trigger_on>`.

    :arg bool trigger-on-change-restored-event: Trigger on change restored.
        Requires Gerrit Trigger Plugin version >= 2.8.0

        .. deprecated:: 1.1.0. Please use :ref:`trigger-on <trigger_on>`.

    :arg bool trigger-on-comment-added-event: Trigger on comment added

        .. deprecated:: 1.1.0. Please use :ref:`trigger-on <trigger_on>`.

    :arg bool trigger-on-draft-published-event: Trigger on draft published
        event

        .. deprecated:: 1.1.0  Please use :ref:`trigger-on <trigger_on>`.

    :arg bool trigger-on-ref-updated-event: Trigger on ref-updated

        .. deprecated:: 1.1.0. Please use :ref:`trigger-on <trigger_on>`.

    :arg str trigger-approval-category: Approval category for comment added

        .. deprecated:: 1.1.0. Please use :ref:`trigger-on <trigger_on>`.

    :arg int trigger-approval-value: Approval value for comment added

        .. deprecated:: 1.1.0. Please use :ref:`trigger-on <trigger_on>`.

    :arg bool override-votes: Override default vote values
    :arg int gerrit-build-started-verified-value: Started ''Verified'' value
    :arg int gerrit-build-successful-verified-value: Successful ''Verified''
        value
    :arg int gerrit-build-failed-verified-value: Failed ''Verified'' value
    :arg int gerrit-build-unstable-verified-value: Unstable ''Verified'' value
    :arg int gerrit-build-notbuilt-verified-value: Not built ''Verified''
        value
    :arg int gerrit-build-aborted-verified-value: Aborted ''Verified'' value
        Requires Gerrit Trigger Plugin version >= 2.31.0
    :arg int gerrit-build-started-codereview-value: Started ''CodeReview''
        value
    :arg int gerrit-build-successful-codereview-value: Successful
        ''CodeReview'' value
    :arg int gerrit-build-failed-codereview-value: Failed ''CodeReview'' value
    :arg int gerrit-build-unstable-codereview-value: Unstable ''CodeReview''
        value
    :arg int gerrit-build-notbuilt-codereview-value: Not built ''CodeReview''
        value
    :arg int gerrit-build-aborted-codereview-value: Aborted ''CodeReview''
        value
        Requires Gerrit Trigger Plugin version >= 2.31.0
    :arg str failure-message: Message to leave on failure (default '')
    :arg str successful-message: Message to leave on success (default '')
    :arg str unstable-message: Message to leave when unstable (default '')
    :arg str notbuilt-message: Message to leave when not built (default '')
    :arg str aborted-message: Message to leave when aborted (default '')
    :arg str failure-message-file: Sets the filename within the workspace from
        which to retrieve the unsuccessful review message. (optional)
    :arg list projects: list of projects to match

        :Project: * **project-compare-type** (`str`) --  ''PLAIN'', ''ANT'' or
                    ''REG_EXP''
                  * **project-pattern** (`str`) -- Project name pattern to match
                  * **branch-compare-type** (`str`) -- ''PLAIN'', ''ANT'' or
                    ''REG_EXP'' (not used if `branches` list is specified)

                    .. deprecated:: 1.1.0  Please use :ref:`branches <branches>`.

                  * **branch-pattern** (`str`) -- Branch name pattern to match
                    (not used if `branches` list is specified)

                    .. deprecated:: 1.1.0  Please use :ref:`branches <branches>`.

                  .. _branches:

                  * **branches** (`list`) -- List of branches to match
                    (optional)

                    :Branch: * **branch-compare-type** (`str`) -- ''PLAIN'',
                               ''ANT'' or ''REG_EXP'' (optional) (default
                               ''PLAIN'')
                             * **branch-pattern** (`str`) -- Branch name pattern
                               to match

                  * **file-paths** (`list`) -- List of file paths to match
                    (optional)

                    :File Path: * **compare-type** (`str`) -- ''PLAIN'', ''ANT''
                                  or ''REG_EXP'' (optional) (default ''PLAIN'')
                                * **pattern** (`str`) -- File path pattern to
                                  match

                  * **forbidden-file-paths** (`list`) -- List of file paths to
                    skip triggering (optional)

                    :Forbidden File Path: * **compare-type** (`str`) --
                                            ''PLAIN'', ''ANT'' or ''REG_EXP'' (optional)
                                            (default ''PLAIN'')
                                          * **pattern** (`str`) -- File path pattern to
                                            match

                  * **topics** (`list`) -- List of topics to match
                    (optional)

                    :Topic: * **compare-type** (`str`) -- ''PLAIN'', ''ANT'' or
                              ''REG_EXP'' (optional) (default ''PLAIN'')
                            * **pattern** (`str`) -- Topic name pattern to
                              match

                  * **disable-strict-forbidden-file-verification** (`bool`) --
                    Enabling this option will allow an event to trigger a
                    build if the event contains BOTH one or more wanted file
                    paths AND one or more forbidden file paths. In other
                    words, with this option, the build will not get
                    triggered if the change contains only forbidden files,
                    otherwise it will get triggered. Requires plugin
                    version >= 2.16.0 (default false)

    :arg dict skip-vote: map of build outcomes for which Jenkins must skip
        vote. Requires Gerrit Trigger Plugin version >= 2.7.0

        :Outcome: * **successful** (`bool`)
                  * **failed** (`bool`)
                  * **unstable** (`bool`)
                  * **notbuilt** (`bool`)
                  * **aborted** (`bool`) -- Requires Gerrit Trigger Plugin version >= 2.31.0

    :arg bool silent: When silent mode is on there will be no communication
        back to Gerrit, i.e. no build started/failed/successful approve
        messages etc. If other non-silent jobs are triggered by the same
        Gerrit event as this job, the result of this job's build will not be
        counted in the end result of the other jobs. (default false)
    :arg bool silent-start: Sets silent start mode to on or off. When silent
        start mode is on there will be no 'build started' messages sent back
        to Gerrit. (default false)
    :arg bool escape-quotes: escape quotes in the values of Gerrit change
        parameters (default true)
    :arg dict build-cancellation-policy: If used, rules regarding
        cancellation of builds can be set with this option when
        patchsets of the same change comes in. This setting overrides global
        server configuration. If build-cancellation-policy is not present in
        YAML the global server configuration is used.
        Requires Gerrit Trigger Plugin version >= 2.32.0

        :Options: * **abort-new-patchsets** (`bool`) -- Only running jobs
                    will be cancelled if a new patch version is pushed over
                    (default false).
                  * **abort-manual-patchsets** (`bool`) -- Builds triggered
                    manually will be aborted when a new patch set arrives
                    (default false).
                  * **abort-same-topic** (`bool`) -- Builds triggered with
                    topic will be aborted when a new patch set with the
                    same topic arrives (default false).

    :arg bool no-name-and-email: Do not pass compound 'name and email'
        parameters (default false)

        .. deprecated:: 3.5.0  Please use `name-and-email-parameter-mode`
           parameter.

    :arg bool readable-message: If parameters regarding multiline text,
        e.g. commit message, should be as human readable or not. If false,
        those parameters are Base64 encoded to keep environment variables
        clean. (default false)

        .. deprecated:: 3.5.0  Please use `commit-message-parameter-mode`
           parameter.

    :arg str name-and-email-parameter-mode: The parameter mode for the compound
        "name and email" parameters (like GERRIT_PATCHSET_UPLOADER or
        GERRIT_CHANGE_OWNER). This can either be 'NONE' to avoid passing the
        parameter all together, 'PLAIN' to pass the parameter in human readable
        form, or 'BASE64' to pass the parameter in base64 encoded form (default
        'PLAIN'). Requires Gerrit Trigger Plugin version >= 2.18.0.
    :arg str commit-message-parameter-mode: The parameter mode for the
        GERRIT_CHANGE_COMMIT_MESSAGE parameter. This can either be 'NONE' to
        avoid passing the parameter all together, 'PLAIN' to pass the parameter
        in human readable form, or 'BASE64' to pass the parameter in base64
        encoded form (default 'BASE64'). Requires Gerrit Trigger Plugin version
        >= 2.18.0.
    :arg str change-subject-parameter-mode: The parameter mode for the
        GERRIT_CHANGE_SUBJECT parameter. This can either be 'NONE' to avoid
        passing the parameter all together, 'PLAIN' to pass the parameter in
        human readable form, or 'BASE64' to pass the parameter in base64
        encoded form (default 'PLAIN'). Requires Gerrit Trigger Plugin version
        >= 2.18.0.
    :arg str comment-text-parameter-mode: The parameter mode for the
        GERRIT_EVENT_COMMENT_TEXT parameter. This can either be 'NONE' to avoid
        passing the parameter all together, 'PLAIN' to pass the parameter in
        human readable form, or 'BASE64' to pass the parameter in base64
        encoded form (default 'BASE64'). Requires Gerrit Trigger Plugin version
        >= 2.18.0.
    :arg str dependency-jobs: All jobs on which this job depends. If a commit
        should trigger both a dependency and this job, the dependency will be
        built first. Use commas to separate job names. Beware of cyclic
        dependencies. (optional)
    :arg str notification-level: Defines to whom email notifications should be
        sent. This can either be nobody ('NONE'), the change owner ('OWNER'),
        reviewers and change owner ('OWNER_REVIEWERS'), all interested users
        i.e. owning, reviewing, watching, and starring ('ALL') or server
        default ('SERVER_DEFAULT'). (default 'SERVER_DEFAULT')
    :arg bool dynamic-trigger-enabled: Enable/disable the dynamic trigger
        (default false)
    :arg str dynamic-trigger-url: if you specify this option, the Gerrit
        trigger configuration will be fetched from there on a regular interval
    :arg bool trigger-for-unreviewed-patches: trigger patchset-created events
        for changes that were uploaded while connection to Gerrit was down
        (default false). Requires Gerrit Trigger Plugin version >= 2.11.0.

        .. deprecated:: 3.5.0  Supported for Gerrit Trigger Plugin versions
           < 2.14.0. See
           `Missed Events Playback Feature <https://plugins.jenkins.io/
           gerrit-trigger/#plugin-content-missed-events-playback-feature
           -available-from-v-2140>`_.

    :arg str custom-url: Custom URL for a message sent to Gerrit. Build
        details URL will be used if empty. (default '')
    :arg str server-name: Name of the server to trigger on, or ''__ANY__'' to
        trigger on any configured Gerrit server (default '__ANY__'). Requires
        Gerrit Trigger Plugin version >= 2.11.0

    You may select one or more Gerrit events upon which to trigger.
    You must also supply at least one project and branch, optionally
    more.  If you select the comment-added trigger, you should also
    indicate which approval category and value you want to trigger the
    job.

    Until version 0.4.0 of Jenkins Job Builder, camelCase keys were used to
    configure Gerrit Trigger Plugin, instead of hyphenated-keys.  While still
    supported, camelCase keys are deprecated and should not be used.  Support
    for this will be removed after 1.0.0 is released.

    Example:

    .. literalinclude:: /../../tests/triggers/fixtures/gerrit004.yaml
       :language: yaml
    """

    def get_compare_type(xml_tag, compare_type):
        # Validate a compare-type value shared by project/branch/file-path/
        # topic sections; raises InvalidAttributeError on unknown values.
        valid_compare_types = ["PLAIN", "ANT", "REG_EXP"]

        if compare_type not in valid_compare_types:
            raise InvalidAttributeError(xml_tag, compare_type, valid_compare_types)
        return compare_type

    # Rewrite deprecated camelCase / trigger-on-* keys into the modern form
    # before any of the XML is generated.
    gerrit_handle_legacy_configuration(data)

    plugin_info = registry.get_plugin_info("Gerrit Trigger")
    # When the plugin version cannot be determined, assume the newest
    # possible version so every optional feature is emitted.
    plugin_ver = pkg_resources.parse_version(
        plugin_info.get("version", str(sys.maxsize))
    )

    projects = data.get("projects", [])
    gtrig = XML.SubElement(
        xml_parent,
        "com.sonyericsson.hudson.plugins.gerrit.trigger." "hudsontrigger.GerritTrigger",
    )
    XML.SubElement(gtrig, "spec")
    gprojects = XML.SubElement(gtrig, "gerritProjects")
    for project in projects:
        gproj = XML.SubElement(
            gprojects,
            "com.sonyericsson.hudson.plugins.gerrit."
            "trigger.hudsontrigger.data.GerritProject",
        )
        XML.SubElement(gproj, "compareType").text = get_compare_type(
            "project-compare-type", project.get("project-compare-type", "PLAIN")
        )
        XML.SubElement(gproj, "pattern").text = project["project-pattern"]

        branches = XML.SubElement(gproj, "branches")
        project_branches = project.get("branches", [])

        # Project-level branch-compare-type/branch-pattern are the deprecated
        # single-branch form; a populated 'branches' list takes precedence.
        if "branch-compare-type" in project and "branch-pattern" in project:
            warning = (
                "branch-compare-type and branch-pattern at project "
                "level are deprecated and support will be removed "
                "in a later version of Jenkins Job Builder; "
            )
            if project_branches:
                warning += "discarding values and using values from " "branches section"
            else:
                warning += "please use branches section instead"
            logger.warning(warning)
        if not project_branches:
            # Fall back to the deprecated single-branch keys.
            project_branches = [
                {
                    "branch-compare-type": project.get("branch-compare-type", "PLAIN"),
                    "branch-pattern": project["branch-pattern"],
                }
            ]
        for branch in project_branches:
            gbranch = XML.SubElement(
                branches,
                "com.sonyericsson.hudson.plugins."
                "gerrit.trigger.hudsontrigger.data.Branch",
            )
            XML.SubElement(gbranch, "compareType").text = get_compare_type(
                "branch-compare-type", branch.get("branch-compare-type", "PLAIN")
            )
            XML.SubElement(gbranch, "pattern").text = branch["branch-pattern"]

        project_file_paths = project.get("file-paths", [])
        if project_file_paths:
            fps_tag = XML.SubElement(gproj, "filePaths")
            for file_path in project_file_paths:
                fp_tag = XML.SubElement(
                    fps_tag,
                    "com.sonyericsson.hudson.plugins."
                    "gerrit.trigger.hudsontrigger.data."
                    "FilePath",
                )
                XML.SubElement(fp_tag, "compareType").text = get_compare_type(
                    "compare-type", file_path.get("compare-type", "PLAIN")
                )
                XML.SubElement(fp_tag, "pattern").text = file_path["pattern"]

        project_forbidden_file_paths = project.get("forbidden-file-paths", [])
        if project_forbidden_file_paths:
            ffps_tag = XML.SubElement(gproj, "forbiddenFilePaths")
            for forbidden_file_path in project_forbidden_file_paths:
                ffp_tag = XML.SubElement(
                    ffps_tag,
                    "com.sonyericsson.hudson.plugins."
                    "gerrit.trigger.hudsontrigger.data."
                    "FilePath",
                )
                XML.SubElement(ffp_tag, "compareType").text = get_compare_type(
                    "compare-type", forbidden_file_path.get("compare-type", "PLAIN")
                )
                XML.SubElement(ffp_tag, "pattern").text = forbidden_file_path["pattern"]

        topics = project.get("topics", [])
        if topics:
            topics_tag = XML.SubElement(gproj, "topics")
            for topic in topics:
                topic_tag = XML.SubElement(
                    topics_tag,
                    "com.sonyericsson.hudson.plugins."
                    "gerrit.trigger.hudsontrigger.data."
                    "Topic",
                )
                XML.SubElement(topic_tag, "compareType").text = get_compare_type(
                    "compare-type", topic.get("compare-type", "PLAIN")
                )
                XML.SubElement(topic_tag, "pattern").text = topic["pattern"]

        XML.SubElement(gproj, "disableStrictForbiddenFileVerification").text = str(
            project.get("disable-strict-forbidden-file-verification", False)
        ).lower()

    build_gerrit_skip_votes(gtrig, data, plugin_ver)
    # Only emit a buildCancellationPolicy element when explicitly configured;
    # otherwise the global server configuration applies.
    if "build-cancellation-policy" in data:
        build_cancellation_policy(gtrig, data, plugin_ver)
    general_mappings = [
        ("silent", "silentMode", False),
        ("silent-start", "silentStartMode", False),
        ("escape-quotes", "escapeQuotes", True),
        ("dependency-jobs", "dependencyJobsNames", ""),
    ]
    helpers.convert_mapping_to_xml(gtrig, data, general_mappings, fail_required=True)
    build_gerrit_parameter_modes(gtrig, data, plugin_ver)
    notification_levels = ["NONE", "OWNER", "OWNER_REVIEWERS", "ALL", "SERVER_DEFAULT"]
    notification_level = data.get("notification-level", "SERVER_DEFAULT")
    if notification_level not in notification_levels:
        raise InvalidAttributeError(
            "notification-level", notification_level, notification_levels
        )
    # SERVER_DEFAULT is represented by an empty element in the plugin XML.
    if notification_level == "SERVER_DEFAULT":
        XML.SubElement(gtrig, "notificationLevel").text = ""
    else:
        XML.SubElement(gtrig, "notificationLevel").text = notification_level
    XML.SubElement(gtrig, "dynamicTriggerConfiguration").text = str(
        data.get("dynamic-trigger-enabled", False)
    ).lower()
    XML.SubElement(gtrig, "triggerConfigURL").text = str(
        data.get("dynamic-trigger-url", "")
    )
    if data.get("dynamic-trigger-enabled", False) is False:
        XML.SubElement(gtrig, "dynamicGerritProjects").set("class", "empty-list")

    XML.SubElement(gtrig, "triggerInformationAction").text = str(
        data.get("trigger-information-action", "")
    )

    # allowTriggeringUnreviewedPatches only existed between plugin versions
    # 2.11.0 (inclusive) and 2.14.0 (exclusive); warn otherwise so the option
    # is not silently dropped.
    if (plugin_ver >= pkg_resources.parse_version("2.11.0")) and (
        plugin_ver < pkg_resources.parse_version("2.14.0")
    ):
        XML.SubElement(gtrig, "allowTriggeringUnreviewedPatches").text = str(
            data.get("trigger-for-unreviewed-patches", False)
        ).lower()
    elif "trigger-for-unreviewed-patches" in data:
        logger.warning(
            "Gerrit Trigger property 'trigger-for-unreviewed-patches' is not "
            "supported in this plugin version"
        )

    build_gerrit_triggers(gtrig, data, plugin_ver)
    override = str(data.get("override-votes", False)).lower()
    # Custom vote values are only emitted when override-votes is enabled.
    if override == "true":
        votes = [
            ("gerrit-build-started-verified-value", "gerritBuildStartedVerifiedValue"),
            (
                "gerrit-build-successful-verified-value",
                "gerritBuildSuccessfulVerifiedValue",
            ),
            ("gerrit-build-failed-verified-value", "gerritBuildFailedVerifiedValue"),
            (
                "gerrit-build-unstable-verified-value",
                "gerritBuildUnstableVerifiedValue",
            ),
            (
                "gerrit-build-notbuilt-verified-value",
                "gerritBuildNotBuiltVerifiedValue",
            ),
            (
                "gerrit-build-started-codereview-value",
                "gerritBuildStartedCodeReviewValue",
            ),
            (
                "gerrit-build-successful-codereview-value",
                "gerritBuildSuccessfulCodeReviewValue",
            ),
            (
                "gerrit-build-failed-codereview-value",
                "gerritBuildFailedCodeReviewValue",
            ),
            (
                "gerrit-build-unstable-codereview-value",
                "gerritBuildUnstableCodeReviewValue",
            ),
            (
                "gerrit-build-notbuilt-codereview-value",
                "gerritBuildNotBuiltCodeReviewValue",
            ),
        ]
        # Aborted-vote values were introduced in plugin 2.31.0.
        if plugin_ver >= pkg_resources.parse_version("2.31.0"):
            votes.append(
                (
                    "gerrit-build-aborted-verified-value",
                    "gerritBuildAbortedVerifiedValue",
                )
            )
            votes.append(
                (
                    "gerrit-build-aborted-codereview-value",
                    "gerritBuildAbortedCodeReviewValue",
                )
            )

        for yamlkey, xmlkey in votes:
            if data.get(yamlkey) is not None:
                # str(int(x)) makes input values like '+1' work
                XML.SubElement(gtrig, xmlkey).text = str(int(data.get(yamlkey)))

    message_mappings = [
        ("start-message", "buildStartMessage", ""),
        ("failure-message", "buildFailureMessage", ""),
        ("successful-message", "buildSuccessfulMessage", ""),
        ("unstable-message", "buildUnstableMessage", ""),
        ("notbuilt-message", "buildNotBuiltMessage", ""),
        ("failure-message-file", "buildUnsuccessfulFilepath", ""),
        ("custom-url", "customUrl", ""),
        ("server-name", "serverName", "__ANY__"),
    ]
    # The aborted message element only exists from plugin 2.31.0 on.
    if plugin_ver >= pkg_resources.parse_version("2.31.0"):
        message_mappings.append(("aborted-message", "buildAbortedMessage", ""))

    helpers.convert_mapping_to_xml(gtrig, data, message_mappings, fail_required=True)
def dockerhub_notification(registry, xml_parent, data):
    """yaml: dockerhub-notification
    The job will get triggered when Docker Hub/Registry notifies
    that Docker image(s) used in this job has been rebuilt.

    Requires the Jenkins :jenkins-plugins:`CloudBees Docker Hub Notification
    <dockerhub-notification>`.

    :arg bool referenced-image: Trigger the job based on repositories
        used by any compatible docker plugin in this job. (default true)
    :arg list repositories: Specified repositories to trigger the job.
        (default [])

    Minimal Example:

    .. literalinclude::
        /../../tests/triggers/fixtures/dockerhub-notification-minimal.yaml
       :language: yaml

    Full Example:

    .. literalinclude::
        /../../tests/triggers/fixtures/dockerhub-notification-full.yaml
       :language: yaml
    """
    dockerhub = XML.SubElement(
        xml_parent, "org.jenkinsci.plugins." "registry.notification.DockerHubTrigger"
    )
    dockerhub.set("plugin", "dockerhub-notification")
    option = XML.SubElement(dockerhub, "options", {"class": "vector"})
    # The documented default for 'referenced-image' is true, so a missing key
    # must enable the trigger-for-all-used-images option rather than skip it.
    if data.get("referenced-image", True):
        XML.SubElement(
            option,
            "org.jenkinsci.plugins."
            "registry.notification."
            "opt.impl.TriggerForAllUsedInJob",
        )
    repos = data.get("repositories", [])
    if repos:
        specified_names = XML.SubElement(
            option,
            "org.jenkinsci.plugins.registry."
            "notification.opt.impl."
            "TriggerOnSpecifiedImageNames",
        )
        repo_tag = XML.SubElement(specified_names, "repoNames")
        for repo in repos:
            XML.SubElement(repo_tag, "string").text = repo
def pollscm(registry, xml_parent, data):
    """yaml: pollscm
    Poll the SCM to determine if there has been a change.

    :Parameter: the polling interval (cron syntax)

    .. deprecated:: 1.3.0. Please use :ref:`cron <cron>`.

    .. _cron:

    :arg str cron: the polling interval (cron syntax, required)
    :arg bool ignore-post-commit-hooks: Ignore changes notified by SCM
        post-commit hooks. The subversion-plugin supports this since
        version 1.44. (default false)

    Example:

    .. literalinclude:: /../../tests/triggers/fixtures/pollscm002.yaml
       :language: yaml
    """
    try:
        schedule = data["cron"]
        ignore_hooks = str(data.get("ignore-post-commit-hooks", False)).lower()
    except KeyError as exc:
        # New-format dict input without the mandatory 'cron' key: surface a
        # specific attribute error instead of a bare KeyError.
        raise MissingAttributeError(exc)
    except TypeError:
        # Old format: the cron string was passed directly instead of a dict.
        logger.warning(
            "Your pollscm usage is deprecated, please use"
            " the syntax described in the documentation"
            " instead"
        )
        schedule = data
        ignore_hooks = "false"

    # Any falsy cron value other than the empty string is invalid.
    if schedule != "" and not schedule:
        raise InvalidAttributeError("cron", schedule)

    trigger_node = XML.SubElement(xml_parent, "hudson.triggers.SCMTrigger")
    XML.SubElement(trigger_node, "spec").text = schedule
    XML.SubElement(trigger_node, "ignorePostCommitHooks").text = ignore_hooks
def build_content_type(
    xml_parent,
    entries,
    namespace,
    collection_suffix,
    entry_suffix,
    prefix,
    collection_name,
    element_name,
):
    """Emit a URLTrigger content-type element with its monitored entries.

    Creates a ``<namespace.prefix+collection_suffix>`` element under
    *xml_parent*.  When *entries* is non-empty, a *collection_name* child is
    added holding one ``<namespace.prefix+entry_suffix>`` element per entry,
    each wrapping an *element_name* tag whose text is the entry value.
    """
    outer_tag = "%s.%s%s" % (namespace, prefix, collection_suffix)
    content_type = XML.SubElement(xml_parent, outer_tag)
    if not entries:
        return
    collection = XML.SubElement(content_type, collection_name)
    inner_tag = "%s.%s%s" % (namespace, prefix, entry_suffix)
    for value in entries:
        wrapper = XML.SubElement(collection, inner_tag)
        XML.SubElement(wrapper, element_name).text = value
def pollurl(registry, xml_parent, data):
    """yaml: pollurl
    Trigger when the HTTP response from a URL changes.
    Requires the Jenkins :jenkins-plugins:`URLTrigger Plugin <urltrigger>`.
    :arg str cron: cron syntax of when to run (default '')
    :arg str polling-node: Restrict where the polling should run.
        (optional)
    :arg list urls: List of URLs to monitor
        :URL: * **url** (`str`) -- URL to monitor for changes (required)
              * **proxy** (`bool`) -- Activate the Jenkins proxy (default false)
              * **timeout** (`int`) -- Connect/read timeout in seconds
                (default 300)
              * **username** (`str`) -- User name for basic authentication
                (optional)
              * **password** (`str`) -- Password for basic authentication
                (optional)
              * **check-status** (`int`) -- Check for a specific HTTP status
                code (optional)
              * **check-etag** (`bool`) -- Check the HTTP ETag for changes
                (default false)
              * **check-date** (`bool`) -- Check the last modification date of
                the URL (default false)
              * **check-content** (`list`) -- List of content type changes to
                monitor
                :Content Type: * **simple** (`bool`) -- Trigger on any change to
                                 the content of the URL (default false)
                               * **json** (`list`) -- Trigger on any change to
                                 the listed JSON paths
                               * **text** (`list`) -- Trigger on any change to
                                 the listed regular expressions
                               * **xml** (`list`) -- Trigger on any change to
                                 the listed XPath expressions
    Example:
    .. literalinclude:: /../../tests/triggers/fixtures/pollurl001.yaml
    """
    namespace = "org.jenkinsci.plugins.urltrigger."
    # type name -> [plugin type prefix, collection tag, entry tag,
    #               fixed sub-entries ([] for 'simple', None = take the
    #               user-supplied list)]
    valid_content_types = {
        "simple": ["Simple", "", "", []],
        "json": ["JSON", "jsonPaths", "jsonPath", None],
        "text": ["TEXT", "regExElements", "regEx", None],
        "xml": ["XML", "xPaths", "xPath", None],
    }
    urltrig = XML.SubElement(xml_parent, namespace + "URLTrigger")
    node = data.get("polling-node")
    XML.SubElement(urltrig, "spec").text = data.get("cron", "")
    XML.SubElement(urltrig, "labelRestriction").text = str(bool(node)).lower()
    if node:
        XML.SubElement(urltrig, "triggerLabel").text = node
    entries = XML.SubElement(urltrig, "entries")
    urls = data.get("urls", [])
    if not urls:
        raise JenkinsJobsException("At least one url must be provided")
    # Base mapping shared by all URLs; per-URL additions are made on a
    # copy (see below).
    base_mapping = [
        ("proxy", "proxyActivated", False),
        ("timeout", "timeout", 300),
        ("check-etag", "checkETag", False),
        ("check-date", "checkLastModificationDate", False),
    ]
    for url in urls:
        entry = XML.SubElement(entries, namespace + "URLTriggerEntry")
        XML.SubElement(entry, "url").text = url["url"]
        if "username" in url:
            XML.SubElement(entry, "username").text = url["username"]
        if "password" in url:
            XML.SubElement(entry, "password").text = url["password"]
        # BUG FIX: copy the shared mapping before appending so a
        # 'check-status' entry on one URL no longer leaks a stray
        # statusCode mapping into every subsequent URL (the original
        # appended to the shared list inside this loop).
        mapping = list(base_mapping)
        if "check-status" in url:
            XML.SubElement(entry, "checkStatus").text = "true"
            mapping.append(("check-status", "statusCode", ""))
        else:
            XML.SubElement(entry, "checkStatus").text = "false"
            XML.SubElement(entry, "statusCode").text = "200"
        helpers.convert_mapping_to_xml(entry, url, mapping, fail_required=False)
        check_content = url.get("check-content", [])
        XML.SubElement(entry, "inspectingContent").text = str(
            bool(check_content)
        ).lower()
        content_types = XML.SubElement(entry, "contentTypes")
        # Use a distinct loop variable: the original reused ``entry``
        # here, shadowing the URLTriggerEntry element created above.
        for check in check_content:
            type_name = next(iter(check.keys()))
            if type_name not in valid_content_types:
                raise JenkinsJobsException(
                    "check-content must be one of : %s"
                    % ", ".join(valid_content_types.keys())
                )
            content_type = valid_content_types.get(type_name)
            if check[type_name]:
                sub_entries = content_type[3]
                if sub_entries is None:
                    sub_entries = check[type_name]
                build_content_type(
                    content_types,
                    sub_entries,
                    namespace + "content",
                    "ContentType",
                    "ContentEntry",
                    *content_type[0:3]
                )
def jms_messaging(registry, xml_parent, data):
    """yaml: jms-messaging
    The JMS Messaging Plugin provides the following functionality:
    - A build trigger to submit jenkins jobs upon receipt
      of a matching message.
    - A builder that may be used to submit a message to the topic
      upon the completion of a job
    - A post-build action that may be used to submit a message to the topic
      upon the completion of a job
    JMS Messaging provider types supported:
    - ActiveMQ
    - FedMsg
    Requires the Jenkins :jenkins-plugins:`JMS Messaging Plugin
    <jms-messaging>`.
    :arg bool no-squash: true = schedule a new job for every triggering message.
        (default false)
        Normally if a job is queued and another triggering message is received, a new job
        is not submitted and the job is "squashed" into the job already queued.
        Setting this option to 'True' forces a new job to be submitted for every triggering
        message that is received.
    :arg str override-topic: If you need to override the default topic.
        (default '')
    :arg str selector: The JSON or YAML formatted text that conforms to
        the schema for defining the various OpenShift resources. (default '')
        note: topic needs to be in double quotes
        ex. topic = "org.fedoraproject.prod.fedimg.image.upload"
    :arg str provider-name: Name of message provider setup in the
        global config. (default '')
    :arg list checks: List of checks to monitor. (default [])
    :arg str field: Check the body of messages for a field. (default '')
    :arg str expected-value: Expected value for the field. regex (default '')
    Full Example:
    .. literalinclude::
       ../../tests/triggers/fixtures/jms-messaging001.yaml
       :language: yaml
    Minimal Example:
    .. literalinclude::
       ../../tests/triggers/fixtures/jms-messaging002.yaml
       :language: yaml
    """
    namespace = "com.redhat.jenkins.plugins.ci."
    trigger = XML.SubElement(xml_parent, namespace + "CIBuildTrigger")
    # Only emit <overrides> when the user explicitly asked for a
    # different topic.
    if "override-topic" in data:
        overrides = XML.SubElement(trigger, "overrides")
        XML.SubElement(overrides, "topic").text = str(data["override-topic"])
    base_mapping = [
        # option, xml name, default value
        ("spec", "spec", ""),
        ("no-squash", "noSquash", False),
        ("selector", "selector", ""),
        ("provider-name", "providerName", ""),
    ]
    helpers.convert_mapping_to_xml(trigger, data, base_mapping, fail_required=True)
    checks = data.get("checks", [])
    if checks:
        checks_elem = XML.SubElement(trigger, "checks")
        check_mapping = [
            ("field", "field", ""),
            ("expected-value", "expectedValue", ""),
        ]
        for check in checks:
            check_elem = XML.SubElement(
                checks_elem, namespace + "messaging.checks.MsgCheck"
            )
            helpers.convert_mapping_to_xml(
                check_elem, check, check_mapping, fail_required=True
            )
def timed(registry, xml_parent, data):
    """yaml: timed
    Trigger builds at certain times.
    :Parameter: when to run the job (cron syntax)
    Example::
      triggers:
        - timed: "@midnight"
    """
    trigger = XML.SubElement(xml_parent, "hudson.triggers.TimerTrigger")
    XML.SubElement(trigger, "spec").text = data
def bitbucket(registry, xml_parent, data):
    """yaml: bitbucket
    Trigger a job when bitbucket repository is pushed to.
    Requires the Jenkins :jenkins-plugins:`BitBucket Plugin
    <bitbucket>`.
    Example:
    .. literalinclude:: /../../tests/triggers/fixtures/bitbucket.yaml
    """
    # The trigger carries no configuration beyond an empty schedule.
    trigger = XML.SubElement(
        xml_parent, "com.cloudbees.jenkins.plugins.BitBucketTrigger"
    )
    XML.SubElement(trigger, "spec").text = ""
def github(registry, xml_parent, data):
    """yaml: github
    Trigger a job when github repository is pushed to.
    Requires the Jenkins :jenkins-plugins:`GitHub Plugin <github>`.
    Example::
      triggers:
        - github
    """
    # The trigger carries no configuration beyond an empty schedule.
    trigger = XML.SubElement(xml_parent, "com.cloudbees.jenkins.GitHubPushTrigger")
    XML.SubElement(trigger, "spec").text = ""
def github_pull_request(registry, xml_parent, data):
    """yaml: github-pull-request
    Build pull requests in github and report results.
    Requires the Jenkins :jenkins-plugins:`GitHub Pull Request Builder Plugin
    <ghprb>`.
    :arg list admin-list: the users with admin rights (optional)
    :arg list white-list: users whose pull requests build (optional)
    :arg list org-list: orgs whose users should be white listed (optional)
    :arg bool allow-whitelist-orgs-as-admins: members of white listed orgs
        will have admin rights. (default false)
    :arg str cron: cron syntax of when to run (optional)
    :arg str trigger-phrase: when filled, commenting this phrase
        in the pull request will trigger a build (optional)
    :arg bool only-trigger-phrase: only commenting the trigger phrase
        in the pull request will trigger a build (default false)
    :arg str skip-build-phrase: when filled, adding this phrase to
        the pull request title or body will not trigger a build (optional)
    :arg list black-list-commit-author: When filled, pull request commits from this user(s)
        will not trigger a build (optional)
    :arg str black-list-labels: list of GitHub labels for which the build
        should not be triggered (optional)
    :arg str white-list-labels: list of GitHub labels for which the build
        should only be triggered. (Leave blank for 'any') (optional)
    :arg bool github-hooks: use github hook (default false)
    :arg bool permit-all: build every pull request automatically
        without asking (default false)
    :arg bool auto-close-on-fail: close failed pull request automatically
        (default false)
    :arg bool display-build-errors-on-downstream-builds: Display build errors on downstream builds
        (default false)
    :arg list white-list-target-branches: Adding branches to this whitelist
        allows you to selectively test pull requests destined for these
        branches only. Supports regular expressions (e.g. 'master',
        'feature-.*'). (optional)
    :arg list black-list-target-branches: Adding branches to this blacklist
        allows you to selectively prevent pull requests builds destined for
        these branches. Supports regular expressions (e.g. 'master',
        'feature-.*'). (optional)
    :arg str auth-id: the auth id to use (optional)
    :arg str build-desc-template: the template for build descriptions in
        jenkins (optional)
    :arg str status-context: the context to include on PR status comments
        (optional)
    :arg str triggered-status: the status message to set when the build has
        been triggered (optional)
    :arg str started-status: the status comment to set when the build has
        been started (optional)
    :arg str status-url: the status URL to set (optional)
    :arg bool status-add-test-results: add test result one-liner to status
        message (optional)
    :arg str success-status: the status message to set if the job succeeds
        (optional)
    :arg str failure-status: the status message to set if the job fails
        (optional)
    :arg str error-status: the status message to set if the job errors
        (optional)
    :arg str success-comment: comment to add to the PR on a successful job
        (optional)
    :arg str failure-comment: comment to add to the PR on a failed job
        (optional)
    :arg str error-comment: comment to add to the PR on an errored job
        (optional)
    :arg bool cancel-builds-on-update: cancel existing builds when a PR is
        updated (optional)
    :arg str comment-file: Extends the standard build comment message on
        github with a custom message file. (optional)
    :arg bool no-commit-status: Enables "Do not update commit status"
    :arg list included-regions: Each inclusion uses regular expression pattern
        matching, and must be separated by a new line. An empty list implies
        that everything is included. (optional)
    :arg list excluded-regions: Each exclusion uses regular expression pattern
        matching, and must be separated by a new line. Exclusions take
        precedence over inclusions, if there is an overlap between included
        and excluded regions. (optional)
    Full Example:
    .. literalinclude::
       /../../tests/triggers/fixtures/github-pull-request-full.yaml
       :language: yaml
    Minimal Example:
    .. literalinclude::
       /../../tests/triggers/fixtures/github-pull-request-minimal.yaml
       :language: yaml
    """
    ghprb = XML.SubElement(xml_parent, "org.jenkinsci.plugins.ghprb." "GhprbTrigger")
    # Simple (yaml option, plugin XML tag, default) triples; written out
    # further below via convert_mapping_to_xml with fail_required=False.
    mapping = [
        (
            "allow-whitelist-orgs-as-admins",
            "allowMembersOfWhitelistedOrgsAsAdmin",
            False,
        ),
        ("trigger-phrase", "triggerPhrase", ""),
        ("skip-build-phrase", "skipBuildPhrase", ""),
        ("only-trigger-phrase", "onlyTriggerPhrase", False),
        ("github-hooks", "useGitHubHooks", False),
        ("permit-all", "permitAll", False),
        ("auto-close-on-fail", "autoCloseFailedPullRequests", False),
        (
            "display-build-errors-on-downstream-builds",
            "displayBuildErrorsOnDownstreamBuilds",
            False,
        ),
    ]
    XML.SubElement(ghprb, "configVersion").text = "3"
    # The plugin stores the schedule in both <spec> and <cron>.
    cron_string = data.get("cron", "") or ""
    XML.SubElement(ghprb, "spec").text = cron_string
    XML.SubElement(ghprb, "cron").text = cron_string
    # The list-valued options are serialized as newline-separated strings.
    admin_string = "\n".join(data.get("admin-list", []))
    XML.SubElement(ghprb, "adminlist").text = admin_string
    white_string = "\n".join(data.get("white-list", []))
    XML.SubElement(ghprb, "whitelist").text = white_string
    org_string = "\n".join(data.get("org-list", []))
    XML.SubElement(ghprb, "orgslist").text = org_string
    # NOTE(review): joined with spaces, unlike the newline-joined lists
    # above -- confirm this matches the plugin's expected format.
    black_list_commit_author_string = " ".join(data.get("black-list-commit-author", ""))
    XML.SubElement(
        ghprb, "blackListCommitAuthor"
    ).text = black_list_commit_author_string
    white_list_labels_string = "\n".join(data.get("white-list-labels", []))
    XML.SubElement(ghprb, "whiteListLabels").text = white_list_labels_string
    black_list_labels_string = "\n".join(data.get("black-list-labels", []))
    XML.SubElement(ghprb, "blackListLabels").text = black_list_labels_string
    excluded_regions_string = "\n".join(data.get("excluded-regions", []))
    XML.SubElement(ghprb, "excludedRegions").text = excluded_regions_string
    included_regions_string = "\n".join(data.get("included-regions", []))
    XML.SubElement(ghprb, "includedRegions").text = included_regions_string
    # <buildDescTemplate> is always present; it only has text when the
    # option was supplied.
    build_desc_template = data.get("build-desc-template", "")
    if build_desc_template:
        XML.SubElement(ghprb, "buildDescTemplate").text = str(build_desc_template)
    else:
        XML.SubElement(ghprb, "buildDescTemplate")
    helpers.convert_mapping_to_xml(ghprb, data, mapping, fail_required=False)
    # Branch white/black lists: the wrapper element is always emitted,
    # with one GhprbBranch child per configured branch pattern.
    white_list_target_branches = data.get("white-list-target-branches", [])
    ghprb_wltb = XML.SubElement(ghprb, "whiteListTargetBranches")
    if white_list_target_branches:
        for branch in white_list_target_branches:
            be = XML.SubElement(
                ghprb_wltb, "org.jenkinsci.plugins." "ghprb.GhprbBranch"
            )
            XML.SubElement(be, "branch").text = str(branch)
    black_list_target_branches = data.get("black-list-target-branches", [])
    ghprb_bltb = XML.SubElement(ghprb, "blackListTargetBranches")
    if black_list_target_branches:
        for branch in black_list_target_branches:
            be = XML.SubElement(
                ghprb_bltb, "org.jenkinsci.plugins." "ghprb.GhprbBranch"
            )
            XML.SubElement(be, "branch").text = str(branch)
    auth_id = data.get("auth-id", "")
    if auth_id:
        XML.SubElement(ghprb, "gitHubAuthId").text = str(auth_id)
    # PR status update fields
    status_context = data.get("status-context", "")
    triggered_status = data.get("triggered-status", "")
    started_status = data.get("started-status", "")
    status_url = data.get("status-url", "")
    status_add_test_results = data.get("status-add-test-results", False)
    success_status = data.get("success-status", "")
    failure_status = data.get("failure-status", "")
    error_status = data.get("error-status", "")
    # is status handling is required?
    requires_status = (
        status_context
        or triggered_status
        or started_status
        or status_url
        or status_add_test_results
        or success_status
        or failure_status
        or error_status
    )
    # is status message handling required?
    requires_status_message = success_status or failure_status or error_status
    # is comment handling required?
    success_comment = data.get("success-comment", "")
    failure_comment = data.get("failure-comment", "")
    error_comment = data.get("error-comment", "")
    requires_job_comment = success_comment or failure_comment or error_comment
    # When the value of cancel-builds-on-update comes from deep_formatter,
    # the value is of type 'str', otherwise the value is of type 'bool'
    cancel_builds_on_update = (
        str(data.get("cancel-builds-on-update", False)).lower() == "true"
    )
    comment_file = data.get("comment-file", "")
    no_commit_status = data.get("no-commit-status", False)
    # We want to have only one 'extensions' subelement, even if status
    # handling, comment handling and other extensions are enabled.
    if (
        requires_status
        or requires_job_comment
        or cancel_builds_on_update
        or comment_file
        or no_commit_status
    ):
        extensions = XML.SubElement(ghprb, "extensions")
    # Both comment and status elements have this same type. Using a const is
    # much easier to read than repeating the tokens for this class each time
    # it's used
    comment_type = "org.jenkinsci.plugins.ghprb.extensions.comments."
    comment_type = comment_type + "GhprbBuildResultMessage"
    if requires_status:
        simple_status = XML.SubElement(
            extensions,
            "org.jenkinsci.plugins" ".ghprb.extensions.status." "GhprbSimpleStatus",
        )
        # The four plain-text status elements are always created; only
        # those with configured values get text.
        commit_status_context_element = XML.SubElement(
            simple_status, "commitStatusContext"
        )
        triggered_status_element = XML.SubElement(simple_status, "triggeredStatus")
        started_status_element = XML.SubElement(simple_status, "startedStatus")
        status_url_element = XML.SubElement(simple_status, "statusUrl")
        if status_context:
            commit_status_context_element.text = str(status_context)
        if triggered_status:
            triggered_status_element.text = str(triggered_status)
        if started_status:
            started_status_element.text = str(started_status)
        if status_url:
            status_url_element.text = str(status_url)
        XML.SubElement(simple_status, "addTestResults").text = str(
            status_add_test_results
        ).lower()
        # Per-result completion messages (SUCCESS/FAILURE/ERROR).
        if requires_status_message:
            completed_elem = XML.SubElement(simple_status, "completedStatus")
            if success_status:
                success_elem = XML.SubElement(completed_elem, comment_type)
                XML.SubElement(success_elem, "message").text = str(success_status)
                XML.SubElement(success_elem, "result").text = "SUCCESS"
            if failure_status:
                failure_elem = XML.SubElement(completed_elem, comment_type)
                XML.SubElement(failure_elem, "message").text = str(failure_status)
                XML.SubElement(failure_elem, "result").text = "FAILURE"
            if error_status:
                error_elem = XML.SubElement(completed_elem, comment_type)
                XML.SubElement(error_elem, "message").text = str(error_status)
                XML.SubElement(error_elem, "result").text = "ERROR"
    # job comment handling
    if requires_job_comment:
        build_status = XML.SubElement(
            extensions,
            "org.jenkinsci.plugins.ghprb.extensions" ".comments." "GhprbBuildStatus",
        )
        messages_elem = XML.SubElement(build_status, "messages")
        if success_comment:
            success_comment_elem = XML.SubElement(messages_elem, comment_type)
            XML.SubElement(success_comment_elem, "message").text = str(success_comment)
            XML.SubElement(success_comment_elem, "result").text = "SUCCESS"
        if failure_comment:
            failure_comment_elem = XML.SubElement(messages_elem, comment_type)
            XML.SubElement(failure_comment_elem, "message").text = str(failure_comment)
            XML.SubElement(failure_comment_elem, "result").text = "FAILURE"
        if error_comment:
            error_comment_elem = XML.SubElement(messages_elem, comment_type)
            XML.SubElement(error_comment_elem, "message").text = str(error_comment)
            XML.SubElement(error_comment_elem, "result").text = "ERROR"
    # The remaining extensions are marker/value elements under the shared
    # <extensions> node.
    if cancel_builds_on_update:
        XML.SubElement(
            extensions,
            "org.jenkinsci.plugins.ghprb.extensions." "build.GhprbCancelBuildsOnUpdate",
        )
    if comment_file:
        comment_file_tag = XML.SubElement(
            extensions,
            "org.jenkinsci.plugins.ghprb.extensions." "comments.GhprbCommentFile",
        )
        comment_file_path_elem = XML.SubElement(comment_file_tag, "commentFilePath")
        comment_file_path_elem.text = str(comment_file)
    if no_commit_status:
        XML.SubElement(
            extensions,
            "org.jenkinsci.plugins.ghprb.extensions." "status.GhprbNoCommitStatus",
        )
def gitlab_merge_request(registry, xml_parent, data):
    """yaml: gitlab-merge-request
    Build merge requests in gitlab and report results.
    Requires the Jenkins :jenkins-plugins:`Gitlab MergeRequest Builder Plugin
    <ghprb>`.
    :arg str cron: Cron syntax of when to run (required)
    :arg str project-path: Gitlab-relative path to project (required)
    :arg str target-branch-regex: Allow execution of this job for certain
        branches only (default ''). Requires Gitlab MergeRequest Builder
        Plugin >= 2.0.0
    :arg str use-http-url: Use the HTTP(S) URL to fetch/clone repository
        (default false)
    :arg str assignee-filter: Only MRs with this assigned user will
        trigger the build automatically (default 'jenkins')
    :arg str tag-filter: Only MRs with this label will trigger the build
        automatically (default 'Build')
    :arg str trigger-comment: Force build if this comment is the last
        in merge reguest (default '')
    :arg str publish-build-progress-messages: Publish build progress
        messages (except build failed) (default true)
        .. deprecated:: 2.0.0
    :arg str auto-close-failed: On failure, auto close the request
        (default false)
    :arg str auto-merge-passed: On success, auto merge the request
        (default false)
    Example (version < 2.0.0):
    .. literalinclude:: \
    /../../tests/triggers/fixtures/gitlab-merge-request001.yaml
    Example (version >= 2.0.0):
    .. literalinclude:: \
    /../../tests/triggers/fixtures/gitlab-merge-request002.yaml
    """
    trigger = XML.SubElement(
        xml_parent, "org.jenkinsci.plugins.gitlab.GitlabBuildTrigger"
    )
    plugin_info = registry.get_plugin_info("Gitlab Merge Request Builder")
    # When the installed version is unknown, assume the newest plugin and
    # therefore its current config format.
    plugin_ver = pkg_resources.parse_version(
        plugin_info.get("version", str(sys.maxsize))
    )
    modern_plugin = plugin_ver >= pkg_resources.parse_version("2.0.0")
    if modern_plugin:
        mapping = [
            ("cron", "spec", None),
            ("project-path", "projectPath", None),
            ("target-branch-regex", "targetBranchRegex", ""),
            ("use-http-url", "useHttpUrl", False),
            ("assignee-filter", "assigneeFilter", "jenkins"),
            ("tag-filter", "tagFilter", "Build"),
            ("trigger-comment", "triggerComment", ""),
            ("auto-close-failed", "autoCloseFailed", False),
            ("auto-merge-passed", "autoMergePassed", False),
        ]
    else:
        # Plugin < 2.0.0: a design limitation in GitlabBuildTrigger means
        # both 'spec' and '__cron' must carry the same value for either to
        # take effect, and every field except spec is prefixed with
        # double underscores.
        mapping = [
            ("cron", "spec", None),
            ("cron", "__cron", None),
            ("project-path", "__projectPath", None),
            ("use-http-url", "__useHttpUrl", False),
            ("assignee-filter", "__assigneeFilter", "jenkins"),
            ("tag-filter", "__tagFilter", "Build"),
            ("trigger-comment", "__triggerComment", ""),
            ("publish-build-progress-messages", "__publishBuildProgressMessages", True),
            ("auto-close-failed", "__autoCloseFailed", False),
            ("auto-merge-passed", "__autoMergePassed", False),
        ]
    helpers.convert_mapping_to_xml(trigger, data, mapping, True)
def gitlab(registry, xml_parent, data):
    """yaml: gitlab
    Makes Jenkins act like a GitLab CI server.
    Requires the Jenkins :jenkins-plugins:`GitLab Plugin <gitlab-plugin>`.
    :arg bool trigger-push: Build on Push Events (default true)
    :arg bool trigger-merge-request: Build on Merge Request Events (default
        true)
    :arg bool trigger-accepted-merge-request: Build on Accepted Merge Request
        Events (>= 1.4.6) (default false)
    :arg bool trigger-closed-merge-request: Build on Closed Merge Request
        Events (>= 1.4.6) (default false)
    :arg str trigger-open-merge-request-push: Rebuild open Merge Requests
        on Push Events.
        :trigger-open-merge-request-push values (< 1.1.26):
            * **true** (default)
            * **false**
        :trigger-open-merge-request-push values (>= 1.1.26):
            * **never** (default)
            * **source**
            * **both**
    :arg bool trigger-note: Build when comment is added with defined phrase
        (>= 1.2.4) (default true)
    :arg str note-regex: Phrase that triggers the build (>= 1.2.4) (default
        'Jenkins please retry a build')
    :arg bool ci-skip: Enable skipping builds of commits that contain
        [ci-skip] in the commit message (default true)
    :arg bool wip-skip: Enable skipping builds of WIP Merge Requests (>= 1.2.4)
        (default true)
    :arg bool set-build-description: Set build description to build cause
        (eg. Merge request or Git Push) (default true)
    :arg bool cancel-pending-builds-on-update: Cancel pending merge request
        builds on update (default false)
    :arg str pending-build-name: Set the pending merge request build name (optional)
    :arg bool add-note-merge-request: Add note with build status on
        merge requests (default true)
    :arg bool add-vote-merge-request: Vote added to note with build status
        on merge requests (>= 1.1.27) (default true)
    :arg bool accept-merge-request-on-success: Automatically accept the Merge
        Request if the build is successful (>= 1.1.27) (default false)
    :arg bool add-ci-message: Add CI build status (1.1.28 - 1.2.0) (default
        false)
    :arg bool allow-all-branches: Allow all branches (Ignoring Filtered
        Branches) (< 1.1.29) (default false)
    :arg str branch-filter-type: Filter branches that can trigger a build.
        Valid values and their additional attributes are described in the
        `branch filter type`_ table (>= 1.1.29) (default 'All').
    :arg list include-branches: Defined list of branches to include
        (default [])
    :arg list exclude-branches: Defined list of branches to exclude
        (default [])
    :arg str target-branch-regex: Regular expression to select branches
    :arg str secret-token: Secret token for build trigger
    .. _`branch filter type`:
    ================== ====================================================
    Branch filter type Description
    ================== ====================================================
    All                All branches are allowed to trigger this job.
    NameBasedFilter    Filter branches by name.
                       List source branches that are allowed to trigger a
                       build from a Push event or a Merge Request event. If
                       both fields are left empty, all branches are allowed
                       to trigger this job. For Merge Request events only
                       the target branch name is filtered out by the
                       **include-branches** and **exclude-branches** lists.
    RegexBasedFilter   Filter branches by regex
                       The target branch regex allows you to limit the
                       execution of this job to certain branches. Any
                       branch matching the specified pattern in
                       **target-branch-regex** triggers the job. No
                       filtering is performed if the field is left empty.
    ================== ====================================================
    Example (version < 1.1.26):
    .. literalinclude:: /../../tests/triggers/fixtures/gitlab001.yaml
       :language: yaml
    Minimal example (version >= 1.1.26):
    .. literalinclude:: /../../tests/triggers/fixtures/gitlab005.yaml
       :language: yaml
    Full example (version >= 1.1.26):
    .. literalinclude:: /../../tests/triggers/fixtures/gitlab004.yaml
       :language: yaml
    """
    # Small helper: append a child element with the given text.
    def _add_xml(elem, name, value):
        XML.SubElement(elem, name).text = value
    gitlab = XML.SubElement(
        xml_parent, "com.dabsquared.gitlabjenkins.GitLabPushTrigger"
    )
    plugin_info = registry.get_plugin_info("GitLab Plugin")
    # Note: Assume latest version of plugin is preferred config format
    plugin_ver = pkg_resources.parse_version(
        plugin_info.get("version", str(sys.maxsize))
    )
    # >= 1.1.26 the option became an enumerated string; before that it
    # was a plain boolean.
    valid_merge_request = ["never", "source", "both"]
    if plugin_ver >= pkg_resources.parse_version("1.1.26"):
        mapping = [
            (
                "trigger-open-merge-request-push",
                "triggerOpenMergeRequestOnPush",
                "never",
                valid_merge_request,
            )
        ]
        helpers.convert_mapping_to_xml(gitlab, data, mapping, fail_required=True)
    else:
        mapping = [
            ("trigger-open-merge-request-push", "triggerOpenMergeRequestOnPush", True)
        ]
        helpers.convert_mapping_to_xml(gitlab, data, mapping, fail_required=True)
    # Branch filter: the element name and the accepted values changed in
    # plugin 1.2.0 ('All' used to be spelled as the empty string).
    if plugin_ver < pkg_resources.parse_version("1.2.0"):
        if data.get("branch-filter-type", "") == "All":
            data["branch-filter-type"] = ""
        valid_filters = ["", "NameBasedFilter", "RegexBasedFilter"]
        mapping = [("branch-filter-type", "branchFilterName", "", valid_filters)]
        helpers.convert_mapping_to_xml(gitlab, data, mapping, fail_required=True)
    else:
        valid_filters = ["All", "NameBasedFilter", "RegexBasedFilter"]
        mapping = [("branch-filter-type", "branchFilterType", "All", valid_filters)]
        helpers.convert_mapping_to_xml(gitlab, data, mapping, fail_required=True)
    XML.SubElement(gitlab, "spec").text = ""
    # Version-independent simple options.
    mapping = [
        ("trigger-push", "triggerOnPush", True),
        ("trigger-merge-request", "triggerOnMergeRequest", True),
        ("trigger-accepted-merge-request", "triggerOnAcceptedMergeRequest", False),
        ("trigger-closed-merge-request", "triggerOnClosedMergeRequest", False),
        ("trigger-note", "triggerOnNoteRequest", True),
        ("note-regex", "noteRegex", "Jenkins please retry a build"),
        ("ci-skip", "ciSkip", True),
        ("wip-skip", "skipWorkInProgressMergeRequest", True),
        ("set-build-description", "setBuildDescription", True),
        ("cancel-pending-builds-on-update", "cancelPendingBuildsOnUpdate", False),
        ("add-note-merge-request", "addNoteOnMergeRequest", True),
        ("add-vote-merge-request", "addVoteOnMergeRequest", True),
        ("accept-merge-request-on-success", "acceptMergeRequestOnSuccess", False),
        ("add-ci-message", "addCiMessage", False),
        ("allow-all-branches", "allowAllBranches", False),
        ("target-branch-regex", "targetBranchRegex", ""),
        ("secret-token", "secretToken", ""),
    ]
    helpers.convert_mapping_to_xml(gitlab, data, mapping, fail_required=True)
    # Branch include/exclude lists are serialized as comma-separated text.
    list_mapping = (
        ("include-branches", "includeBranchesSpec", []),
        ("exclude-branches", "excludeBranchesSpec", []),
    )
    for yaml_name, xml_name, default_val in list_mapping:
        value = ", ".join(data.get(yaml_name, default_val))
        _add_xml(gitlab, xml_name, value)
    # pending-build-name is only written when supplied (no default).
    optional_mapping = (("pending-build-name", "pendingBuildName", None),)
    helpers.convert_mapping_to_xml(gitlab, data, optional_mapping, fail_required=False)
def gogs(registry, xml_parent, data):
    """yaml: gogs
    Trigger a job when gogs repository is pushed to.
    Requires the Jenkins :jenkins-plugins:`Gogs Plugin <gogs-webhook>`.
    Example:
    .. literalinclude::
       /../../tests/triggers/fixtures/gogs.yaml
       :language: yaml
    """
    # The trigger only needs an (empty) <spec> element to be present.
    trigger = XML.SubElement(xml_parent, "org.jenkinsci.plugins.gogs.GogsTrigger")
    XML.SubElement(trigger, "spec")
def build_result(registry, xml_parent, data):
    """yaml: build-result
    Configure jobB to monitor jobA build result. A build is scheduled if there
    is a new build result that matches your criteria (unstable, failure, ...).
    Requires the Jenkins :jenkins-plugins:`BuildResultTrigger Plugin
    <buildresult-trigger>`.
    :arg list groups: List groups of jobs and results to monitor for
    :arg list jobs: The jobs to monitor (required)
    :arg list results: Build results to monitor for (default success)
    :arg bool combine: Combine all job information. A build will be
        scheduled only if all conditions are met (default false)
    :arg str cron: The cron syntax with which to poll the jobs for the
        supplied result (default '')
    Full Example:
    .. literalinclude::
       /../../tests/triggers/fixtures/build-result-full.yaml
       :language: yaml
    Minimal Example:
    .. literalinclude::
       /../../tests/triggers/fixtures/build-result-minimal.yaml
       :language: yaml
    """
    trigger = XML.SubElement(
        xml_parent, "org.jenkinsci.plugins.buildresulttrigger.BuildResultTrigger"
    )
    trigger.set("plugin", "buildresult-trigger")
    helpers.convert_mapping_to_xml(
        trigger,
        data,
        [("cron", "spec", ""), ("combine", "combinedJobs", False)],
        fail_required=True,
    )
    jobs_info = XML.SubElement(trigger, "jobsInfo")
    # yaml result name -> plugin result constant (doubles as the valid
    # value set for convert_mapping_to_xml below).
    result_dict = {
        "success": "SUCCESS",
        "unstable": "UNSTABLE",
        "failure": "FAILURE",
        "not-built": "NOT_BUILT",
        "aborted": "ABORTED",
    }
    for group in data["groups"]:
        info = XML.SubElement(
            jobs_info,
            "org.jenkinsci.plugins."
            "buildresulttrigger.model."
            "BuildResultTriggerInfo",
        )
        joined_jobs = ",".join(group["jobs"])
        helpers.convert_mapping_to_xml(
            info, group, [("", "jobNames", joined_jobs, group)], fail_required=True
        )
        checked_results = XML.SubElement(info, "checkedResults")
        for result in group.get("results", ["success"]):
            checked = XML.SubElement(
                checked_results,
                "org.jenkinsci.plugins.buildresulttrigger.model.CheckedResult",
            )
            helpers.convert_mapping_to_xml(
                checked, result_dict, [("", "checked", result, result_dict)],
                fail_required=True,
            )
def reverse(registry, xml_parent, data):
    """yaml: reverse
    This trigger can be configured in the UI using the checkbox with the
    following text: 'Build after other projects are built'.
    Set up a trigger so that when some other projects finish building, a new
    build is scheduled for this project. This is convenient for running an
    extensive test after a build is complete, for example.
    This configuration complements the "Build other projects" section in the
    "Post-build Actions" of an upstream project, but is preferable when you
    want to configure the downstream project.
    :arg str jobs: List of jobs to watch. Can be either a comma separated
      list or a list.
    :arg str result: Build results to monitor for between the following
      options: success, unstable and failure. (default 'success').
    Example:
    .. literalinclude:: /../../tests/triggers/fixtures/reverse.yaml
    Example List:
    .. literalinclude:: /../../tests/triggers/fixtures/reverse-list.yaml
    """
    trigger = XML.SubElement(xml_parent, "jenkins.triggers.ReverseBuildTrigger")
    XML.SubElement(trigger, "spec").text = ""
    # Accept either a comma-separated string or a YAML list of job names.
    jobs = data.get("jobs")
    if isinstance(jobs, list):
        jobs = ",".join(jobs)
    XML.SubElement(trigger, "upstreamProjects").text = jobs
    threshold_elem = XML.SubElement(trigger, "threshold")
    result = str(data.get("result", "success")).upper()
    supported_thresholds = ["SUCCESS", "UNSTABLE", "FAILURE"]
    if result not in supported_thresholds:
        raise jenkins_jobs.errors.JenkinsJobsException(
            "Choice should be one of the following options: %s."
            % ", ".join(supported_thresholds)
        )
    # Serialize the full hudson.model.Result descriptor for the threshold.
    threshold = hudson_model.THRESHOLDS[result]
    XML.SubElement(threshold_elem, "name").text = threshold["name"]
    XML.SubElement(threshold_elem, "ordinal").text = threshold["ordinal"]
    XML.SubElement(threshold_elem, "color").text = threshold["color"]
    XML.SubElement(threshold_elem, "completeBuild").text = str(
        threshold["complete"]
    ).lower()
def monitor_folders(registry, xml_parent, data):
    """yaml: monitor-folders
    Configure Jenkins to monitor folders.
    Requires the Jenkins :jenkins-plugins:`Filesystem Trigger Plugin
    <fstrigger>`.

    :arg str path: Folder path to poll. (default '')
    :arg list includes: Fileset includes setting that specifies the list of
        includes files. A plain comma separated string is accepted as well.
        Basedir of the fileset is relative to the workspace
        root. If no value is set, all files are used. (default '')
    :arg str excludes: The 'excludes' pattern. A file that matches this mask
        will not be polled even if it matches the mask specified in 'includes'
        section. (default '')
    :arg bool check-modification-date: Check last modification date.
        (default true)
    :arg bool check-content: Check content. (default true)
    :arg bool check-fewer: Check fewer files (default true)
    :arg str cron: cron syntax of when to run (default '')

    Full Example:

    .. literalinclude::
        /../../tests/triggers/fixtures/monitor-folders-full.yaml
       :language: yaml

    Minimal Example:

    .. literalinclude::
        /../../tests/triggers/fixtures/monitor-folders-minimal.yaml
       :language: yaml
    """
    ft = XML.SubElement(
        xml_parent, ("org.jenkinsci.plugins.fstrigger." "triggers.FolderContentTrigger")
    )
    ft.set("plugin", "fstrigger")
    mappings = [("path", "path", ""), ("cron", "spec", "")]
    helpers.convert_mapping_to_xml(ft, data, mappings, fail_required=True)
    includes = data.get("includes", "")
    # Only join when given a list: joining a bare string would previously
    # insert a comma between every single character of it.
    if isinstance(includes, list):
        includes = ",".join(includes)
    XML.SubElement(ft, "includes").text = includes
    XML.SubElement(ft, "excludes").text = data.get("excludes", "")
    # The plugin persists these as negated "exclude..." flags, hence the
    # inversion of the user-facing booleans.
    XML.SubElement(ft, "excludeCheckLastModificationDate").text = str(
        not data.get("check-modification-date", True)
    ).lower()
    XML.SubElement(ft, "excludeCheckContent").text = str(
        not data.get("check-content", True)
    ).lower()
    XML.SubElement(ft, "excludeCheckFewerOrMoreFiles").text = str(
        not data.get("check-fewer", True)
    ).lower()
def monitor_files(registry, xml_parent, data):
    """yaml: monitor-files
    Configure Jenkins to monitor files.
    Requires the Jenkins :jenkins-plugins:`Filesystem Trigger Plugin
    <fstrigger>`.
    :arg list files: List of files to monitor
        :File:
            * **path** (`str`) -- File path to monitor. You can use a pattern
              that specifies a set of files if you don't know the real file
              path. (required)
            * **strategy** (`str`) -- Choose your strategy if there is more
              than one matching file. Can be one of Ignore file ('IGNORE') or
              Use the most recent ('LATEST'). (default 'LATEST')
            * **check-content** (`list`) -- List of content changes of the
              file to monitor
                :Content Nature:
                    * **simple** (`bool`) -- Trigger on change in content of
                      the specified file (whatever the type file).
                      (default false)
                    * **jar** (`bool`) -- Trigger on change in content of the
                      specified JAR file. (default false)
                    * **tar** (`bool`) -- Trigger on change in content of the
                      specified Tar file. (default false)
                    * **zip** (`bool`) -- Trigger on change in content of the
                      specified ZIP file. (default false)
                    * **source-manifest** (`list`) -- Trigger on change to
                      MANIFEST files.
                        :MANIFEST File:
                            * **keys** (`list`) -- List of keys to inspect.
                              (optional)
                            * **all-keys** (`bool`) -- If true, take into
                              account all keys. (default true)
                    * **jar-manifest** (`list`) -- Trigger on change to
                      MANIFEST files (contained in jar files).
                        :MANIFEST File:
                            * **keys** (`list`) -- List of keys to inspect.
                              (optional)
                            * **all-keys** (`bool`) -- If true, take into
                              account all keys. (default true)
                    * **properties** (`list`) -- Monitor the contents of the
                      properties file.
                        :Properties File:
                            * **keys** (`list`) -- List of keys to inspect.
                              (optional)
                            * **all-keys** (`bool`) -- If true, take into
                              account all keys. (default true)
                    * **xml** (`list str`) -- Trigger on change to the listed
                      XPath expressions.
                    * **text** (`list str`) -- Trigger on change to the listed
                      regular expressions.
            * **ignore-modificaton-date** (`bool`) -- If true, ignore the file
              modification date. Only valid when content changes of the file
              are being monitored. (default true)
    :arg str cron: cron syntax of when to run (default '')
    Minimal Example:
    .. literalinclude::
        /../../tests/triggers/fixtures/monitor-files-minimal.yaml
       :language: yaml
    Full Example:
    .. literalinclude::
        /../../tests/triggers/fixtures/monitor-files-full.yaml
       :language: yaml
    """
    ft_prefix = "org.jenkinsci.plugins.fstrigger.triggers."
    valid_strategies = ["LATEST", "IGNORE"]
    # Dispatch table keyed on the YAML content-type name.
    # Single-element entries ([ClassName]) are manifest/properties types that
    # take 'keys'/'all-keys' settings; four-element entries are
    # [class name, outer tag, entry tag, sub-entries] consumed by
    # build_content_type (a helper defined elsewhere in this module).
    # sub-entries of None means "use the user-supplied list as the entries".
    valid_content_types = {
        "simple": ["Simple", "", "", []],
        "jar": ["JAR", "", "", []],
        "tar": ["Tar", "", "", []],
        "zip": ["ZIP", "", "", []],
        "source-manifest": ["SourceManifest"],
        "jar-manifest": ["JARManifest"],
        "properties": ["Properties"],
        "xml": ["XML", "expressions", "expression", None],
        "text": ["Text", "regexElements", "regex", None],
    }
    ft = XML.SubElement(xml_parent, ft_prefix + "FileNameTrigger")
    XML.SubElement(ft, "spec").text = str(data.get("cron", ""))
    files = data.get("files", [])
    if not files:
        raise JenkinsJobsException("At least one file must be provided")
    files_tag = XML.SubElement(ft, "fileInfo")
    for file_info in files:
        file_tag = XML.SubElement(files_tag, ft_prefix + "FileNameTriggerInfo")
        check_content = file_info.get("check-content", [])
        files_mapping = [
            ("path", "filePathPattern", None),
            ("strategy", "strategy", "LATEST", valid_strategies),
            # Derived flag, not user-supplied: content inspection is enabled
            # whenever any check-content entries exist.
            ("", "inspectingContentFile", bool(check_content)),
        ]
        helpers.convert_mapping_to_xml(
            file_tag, file_info, files_mapping, fail_required=True
        )
        base_content_tag = XML.SubElement(file_tag, "contentFileTypes")
        for content in check_content:
            # Each entry is a single-key dict; the key names the content type.
            type_name = next(iter(content.keys()))
            if type_name not in valid_content_types:
                raise InvalidAttributeError(
                    "check-content", type_name, valid_content_types.keys()
                )
            content_type = valid_content_types.get(type_name)
            if len(content_type) == 1:
                # Manifest/properties types: one element per user entry, with
                # optional keys2Inspect/allKeys settings.
                class_name = "{0}filecontent.{1}FileContent".format(
                    ft_prefix, content_type[0]
                )
                content_data = content.get(type_name)
                if not content_data:
                    raise JenkinsJobsException(
                        "Need to specify something " "under " + type_name
                    )
                for entry in content_data:
                    content_tag = XML.SubElement(base_content_tag, class_name)
                    keys = entry.get("keys", [])
                    if keys:
                        XML.SubElement(content_tag, "keys2Inspect").text = ",".join(
                            keys
                        )
                    XML.SubElement(content_tag, "allKeys").text = str(
                        entry.get("all-keys", True)
                    ).lower()
            else:
                # simple/jar/tar/zip (empty sub-entries) or xml/text (entries
                # taken from the user-supplied list when sub_entries is None).
                if content[type_name]:
                    sub_entries = content_type[3]
                    if sub_entries is None:
                        sub_entries = content[type_name]
                    build_content_type(
                        base_content_tag,
                        sub_entries,
                        ft_prefix + "filecontent",
                        "FileContent",
                        "FileContentEntry",
                        *content_type[0:3]
                    )
        if bool(check_content):
            XML.SubElement(file_tag, "doNotCheckLastModificationDate").text = str(
                file_info.get("ignore-modificaton-date", True)
            ).lower()
def ivy(registry, xml_parent, data):
    """yaml: ivy
    Poll with an Ivy script.
    Requires the Jenkins :jenkins-plugins:`IvyTrigger Plugin
    <ivytrigger>`.

    :arg str path: Path of the ivy file. (optional)
    :arg str settings-path: Ivy Settings Path. (optional)
    :arg list str properties-file: List of properties file path. Properties
        will be injected as variables in the ivy settings file. (optional)
    :arg str properties-content: Properties content. Properties will be
        injected as variables in the ivy settings file. (optional)
    :arg bool debug: Active debug mode on artifacts resolution. (default false)
    :arg download-artifacts: Download artifacts for dependencies to see if they
        have changed. (default true)
    :arg bool enable-concurrent: Enable Concurrent Build. (default false)
    :arg str label: Restrict where the polling should run. (default '')
    :arg str cron: cron syntax of when to run (default '')

    Example:

    .. literalinclude:: /../../tests/triggers/fixtures/ivy.yaml
    """
    trigger = XML.SubElement(
        xml_parent, "org.jenkinsci.plugins.ivytrigger.IvyTrigger"
    )
    ivy_mapping = [
        ("path", "ivyPath", None),
        ("settings-path", "ivySettingsPath", None),
        ("properties-content", "propertiesContent", None),
        ("debug", "debug", False),
        ("download-artifacts", "downloadArtifacts", True),
        ("enable-concurrent", "enableConcurrentBuild", False),
        ("cron", "spec", ""),
    ]
    helpers.convert_mapping_to_xml(trigger, data, ivy_mapping, fail_required=False)
    # Multiple properties files are stored semicolon separated.
    XML.SubElement(trigger, "propertiesFilePath").text = ";".join(
        data.get("properties-file", [])
    )
    label = data.get("label")
    XML.SubElement(trigger, "labelRestriction").text = str(bool(label)).lower()
    if label:
        XML.SubElement(trigger, "triggerLabel").text = label
def script(registry, xml_parent, data):
    """yaml: script
    Triggers the job using shell or batch script.
    Requires the Jenkins :jenkins-plugins:`ScriptTrigger Plugin <scripttrigger>`.

    :arg str label: Restrict where the polling should run. (default '')
    :arg str script: A shell or batch script. (default '')
    :arg str script-file-path: A shell or batch script path. (default '')
    :arg str cron: cron syntax of when to run (default '')
    :arg bool enable-concurrent: Enables triggering concurrent builds.
        (default false)
    :arg int exit-code: If the exit code of the script execution returns this
        expected exit code, a build is scheduled. (default 0)

    Full Example:

    .. literalinclude:: /../../tests/triggers/fixtures/script-full.yaml
       :language: yaml

    Minimal Example:

    .. literalinclude:: /../../tests/triggers/fixtures/script-minimal.yaml
       :language: yaml
    """
    trigger = XML.SubElement(
        xml_parent, "org.jenkinsci.plugins.scripttrigger.ScriptTrigger"
    )
    trigger.set("plugin", "scripttrigger")
    label = data.get("label")
    script_mapping = [
        ("script", "script", ""),
        ("script-file-path", "scriptFilePath", ""),
        ("cron", "spec", ""),
        ("enable-concurrent", "enableConcurrentBuild", False),
        ("exit-code", "exitCode", 0),
        # Derived from 'label', not read from the YAML directly.
        ("", "labelRestriction", bool(label)),
        ("", "triggerLabel", label),
    ]
    helpers.convert_mapping_to_xml(trigger, data, script_mapping, fail_required=False)
def groovy_script(registry, xml_parent, data):
    """yaml: groovy-script
    Triggers the job using a groovy script.
    Requires the Jenkins :jenkins-plugins:`ScriptTrigger Plugin
    <scripttrigger>`.

    :arg bool system-script: If true, run the groovy script as a system script,
        the script will have access to the same variables as the Groovy Console.
        If false, run the groovy script on the executor node, the script will not
        have access to the hudson or job model. (default false)
    :arg str script: Content of the groovy script. If the script result is
        evaluated to true, a build is scheduled. (default '')
    :arg str script-file-path: Groovy script path. (default '')
    :arg str property-file-path: Property file path. All properties will be set
        as parameters for the triggered build. (default '')
    :arg bool enable-concurrent: Enable concurrent build. (default false)
    :arg str label: Restrict where the polling should run. (default '')
    :arg str cron: cron syntax of when to run (default '')

    Full Example:

    .. literalinclude:: /../../tests/triggers/fixtures/groovy-script-full.yaml
       :language: yaml

    Minimal Example:

    .. literalinclude::
        /../../tests/triggers/fixtures/groovy-script-minimal.yaml
       :language: yaml
    """
    trigger = XML.SubElement(
        xml_parent,
        "org.jenkinsci.plugins.scripttrigger.groovy.GroovyScriptTrigger",
    )
    trigger.set("plugin", "scripttrigger")
    label = data.get("label")
    groovy_mapping = [
        ("system-script", "groovySystemScript", False),
        ("script", "groovyExpression", ""),
        ("script-file-path", "groovyFilePath", ""),
        ("property-file-path", "propertiesFilePath", ""),
        ("enable-concurrent", "enableConcurrentBuild", False),
        ("cron", "spec", ""),
        # Derived from 'label', not read from the YAML directly.
        ("", "labelRestriction", bool(label)),
        ("", "triggerLabel", label),
    ]
    helpers.convert_mapping_to_xml(trigger, data, groovy_mapping, fail_required=False)
def rabbitmq(registry, xml_parent, data):
    """yaml: rabbitmq
    This plugin triggers build using remote build message in RabbitMQ queue.
    Requires the Jenkins :jenkins-plugins:`RabbitMQ Build Trigger Plugin
    <rabbitmq-build-trigger>`.

    :arg str token: the build token expected in the message queue (required)
    :arg list filters: list of filters to apply (optional)

        :Filter:
            * **field** (`str`) - Some field in message (required)
            * **value** (`str`) - value of specified field (required)

    Example:

    .. literalinclude:: /../../tests/triggers/fixtures/rabbitmq.yaml
       :language: yaml

    Example with filters:

    .. literalinclude:: /../../tests/triggers/fixtures/rabbitmq-filters.yaml
       :language: yaml
    """
    prefix = "org.jenkinsci.plugins.rabbitmqbuildtrigger."
    trigger = XML.SubElement(xml_parent, prefix + "RemoteBuildTrigger")
    # The <filters> element is only emitted when filters are configured.
    filter_list = data.get("filters", [])
    if filter_list:
        filter_mapping = [("field", "field", None), ("value", "value", None)]
        filters_elem = XML.SubElement(trigger, "filters")
        for filter_entry in filter_list:
            filter_elem = XML.SubElement(filters_elem, prefix + "Filter")
            helpers.convert_mapping_to_xml(
                filter_elem, filter_entry, filter_mapping, fail_required=True
            )
    helpers.convert_mapping_to_xml(
        trigger,
        data,
        [("", "spec", ""), ("token", "remoteBuildToken", None)],
        fail_required=True,
    )
def parameterized_timer(parser, xml_parent, data):
    """yaml: parameterized-timer
    Trigger builds with parameters at certain times.
    Requires the Jenkins :jenkins-plugins:`Parameterized Scheduler Plugin
    <parameterized-scheduler>`.

    :arg str cron: cron syntax of when to run and with which parameters
        (required)

    Example:

    .. literalinclude::
        /../../tests/triggers/fixtures/parameterized-timer001.yaml
       :language: yaml
    """
    trigger = XML.SubElement(
        xml_parent,
        "org.jenkinsci.plugins.parameterizedscheduler."
        "ParameterizedTimerTrigger",
    )
    timer_mapping = [
        # 'spec' is always emitted empty; the schedule lives in
        # parameterizedSpecification.
        ("", "spec", ""),
        ("cron", "parameterizedSpecification", None),
    ]
    helpers.convert_mapping_to_xml(trigger, data, timer_mapping, fail_required=True)
def jira_changelog(registry, xml_parent, data):
    """yaml: jira-changelog
    Sets up a trigger that listens to JIRA issue changes.
    Requires the Jenkins :jenkins-plugins:`JIRA Trigger Plugin
    <jira-trigger>`.

    :arg str jql-filter: Must match updated issues to trigger a build.
        (default '')
    :arg list changelog-matchers: Each matcher must set **field-type** to
        either ``CUSTOM`` or ``JIRA``.

        :Custom Field Matcher:
            * **custom-field-name** (`str`) -- The custom field
              name that has been changed during the issue update.
              (default '')
            * **compare-new-value** (`bool`) -- Compare the
              new value of the updated field. (default false)
            * **new-value** (`str`) -- The new value of the updated field.
              (default '')
            * **compare-old-value** (`bool`) -- Compare the
              old value of the updated field. (default false)
            * **old-value** (`str`) -- The value
              before the field is updated. (default '')

        :JIRA Field Matcher:
            * **jira-field-ID** (`str`) -- The JIRA Field ID that
              has been changed during the issue update. (default '')
            * **compare-new-value** (`bool`) -- Compare the new value
              of the updated field. (default false)
            * **new-value** (`str`) -- The new value of the updated field.
              (default '')
            * **compare-old-value** (`bool`) -- Compare the old value
              of the updated field. (default false)
            * **old-value** (`str`) -- The value before
              the field is updated. (default '')

    :arg list parameter-mapping:

        :Issue Attribute Path:
            * **jenkins-parameter** (`str`) -- Jenkins parameter name
              (default '')
            * **issue-attribute-path** (`str`) -- Attribute path (default '')

    Minimal Example:

    .. literalinclude::
        /../../tests/triggers/fixtures/jira-changelog-minimal.yaml
       :language: yaml

    Full Example:

    .. literalinclude::
        /../../tests/triggers/fixtures/jira-changelog-full.yaml
       :language: yaml
    """
    jcht = XML.SubElement(
        xml_parent, "com.ceilfors.jenkins.plugins." "jiratrigger.JiraChangelogTrigger"
    )
    jcht.set("plugin", "jira-trigger")
    mapping = [("jql-filter", "jqlFilter", "")]
    helpers.convert_mapping_to_xml(jcht, data, mapping, fail_required=True)
    changelog = XML.SubElement(jcht, "changelogMatchers")
    # Common sub-element mapping shared by both matcher flavours.
    mappings = [
        ("field", "field", ""),
        ("new-value", "newValue", ""),
        ("old-value", "oldValue", ""),
        ("compare-new-value", "comparingNewValue", False),
        ("compare-old-value", "comparingOldValue", False),
    ]
    valid_field_types = ["CUSTOM", "JIRA"]
    for matcher in data.get("changelog-matchers", []):
        fieldtype = matcher.get("field-type")
        if fieldtype == "CUSTOM":
            parent_tag = XML.SubElement(
                changelog,
                "com.ceilfors.jenkins."
                "plugins.jiratrigger.changelog."
                "CustomFieldChangelogMatcher",
            )
            XML.SubElement(parent_tag, "fieldType").text = "CUSTOM"
        elif fieldtype == "JIRA":
            parent_tag = XML.SubElement(
                changelog,
                "com.ceilfors.jenkins."
                "plugins.jiratrigger.changelog."
                "JiraFieldChangelogMatcher",
            )
            XML.SubElement(parent_tag, "fieldType").text = "JIRA"
        else:
            # Fail explicitly: an unknown/missing field-type previously raised
            # UnboundLocalError on the first matcher, or silently merged the
            # matcher's settings into the preceding matcher's element.
            raise InvalidAttributeError("field-type", fieldtype, valid_field_types)
        helpers.convert_mapping_to_xml(
            parent_tag, matcher, mappings, fail_required=True
        )
    param = XML.SubElement(jcht, "parameterMappings")
    parameter_mappings = [
        ("jenkins-parameter", "jenkinsParameter", ""),
        ("issue-attribute-path", "issueAttributePath", ""),
    ]
    for parameter in data.get("parameter-mapping", []):
        parent = XML.SubElement(
            param,
            "com.ceilfors.jenkins.plugins."
            "jiratrigger.parameter."
            "IssueAttributePathParameterMapping",
        )
        helpers.convert_mapping_to_xml(
            parent, parameter, parameter_mappings, fail_required=True
        )
def jira_comment_trigger(registry, xml_parent, data):
    """yaml: jira-comment-trigger
    Trigger builds when a comment is added to JIRA.
    Requires the Jenkins :jenkins-plugins:`JIRA Trigger Plugin
    <jira-trigger>`.

    :arg str jql-filter: Must match updated issues to trigger a build.
        (default '')
    :arg str comment-pattern: Triggers build only when the comment added to
        JIRA matches pattern (default '(?i)build this please')
    :arg list parameter-mapping:

        :Issue Attribute Path:
            * **jenkins-parameter** (`str`) -- Jenkins parameter name
              (default '')
            * **issue-attribute-path** (`str`) -- Attribute path (default '')

    Minimal Example:

    .. literalinclude::
        /../../tests/triggers/fixtures/jira-comment-trigger-minimal.yaml
       :language: yaml

    Full Example:

    .. literalinclude::
        /../../tests/triggers/fixtures/jira-comment-trigger-full.yaml
       :language: yaml
    """
    trigger = XML.SubElement(
        xml_parent, "com.ceilfors.jenkins.plugins." "jiratrigger.JiraCommentTrigger"
    )
    trigger.set("plugin", "jira-trigger")
    helpers.convert_mapping_to_xml(
        trigger,
        data,
        [
            ("jql-filter", "jqlFilter", ""),
            ("comment-pattern", "commentPattern", "(?i)build this please"),
        ],
        fail_required=True,
    )
    mappings_node = XML.SubElement(trigger, "parameterMappings")
    # Mapping is loop invariant; build it once.
    attribute_mapping = [
        ("jenkins-parameter", "jenkinsParameter", ""),
        ("issue-attribute-path", "issueAttributePath", ""),
    ]
    for entry in data.get("parameter-mapping", []):
        entry_node = XML.SubElement(
            mappings_node,
            "com.ceilfors.jenkins.plugins."
            "jiratrigger.parameter."
            "IssueAttributePathParameterMapping",
        )
        helpers.convert_mapping_to_xml(
            entry_node, entry, attribute_mapping, fail_required=True
        )
def stash_pull_request(registry, xml_parent, data):
    """yaml: stash-pull-request
    Trigger builds via Stash/Bitbucket Server Pull Requests.
    Requires the Jenkins :jenkins-plugins:`Stash Pull Request Builder Plugin
    <stash-pullrequest-builder>`.
    :arg str cron: cron syntax of when to run (required)
    :arg str stash-host: The HTTP or HTTPS URL of the Stash host (NOT ssh).
        e.g.: https://example.com (required)
    :arg str credentials-id: Jenkins credential set to use. (required)
    :arg str project: Abbreviated project code. e.g.: PRJ or ~user (required)
    :arg str repository: Stash Repository Name. e.g.: Repo (required)
    :arg str ci-skip-phrases: CI Skip Phrases. (default 'NO TEST')
    :arg str ci-build-phrases: CI Build Phrases. (default 'test this please')
    :arg str target-branches: Target branches to filter. (default '')
    :arg bool ignore-ssl: Ignore SSL certificates for Stash host.
        (default false)
    :arg bool check-destination-commit: Rebuild if destination branch
        changes. (default false)
    :arg bool check-mergable: Build only if PR is mergeable. (default false)
    :arg bool merge-on-success: Merge PR if build is successful.
        (default false)
    :arg bool check-not-conflicted: Build only if Stash reports no conflicts.
        (default true)
    :arg bool only-build-on-comment: Only build when asked (with test
        phrase). (default false)
    :arg bool delete-previous-build-finish-comments: Keep PR comment only for
        most recent Build. (default false)
    :arg bool cancel-outdated-jobs: Cancel outdated jobs. (default false)
    Minimal Example:
    .. literalinclude::
        /../../tests/triggers/fixtures/stash-pull-request-minimal.yaml
       :language: yaml
    Full Example:
    .. literalinclude::
        /../../tests/triggers/fixtures/stash-pull-request-full.yaml
       :language: yaml
    """
    pr_trigger = XML.SubElement(
        xml_parent, "stashpullrequestbuilder.stashpullrequestbuilder.StashBuildTrigger"
    )
    pr_trigger.set("plugin", "stash-pullrequest-builder")
    mappings = [
        ("cron", "spec", None),  # Spec needs to be set to the same as cron
        ("cron", "cron", None),
        ("stash-host", "stashHost", None),
        ("credentials-id", "credentialsId", None),
        ("project", "projectCode", None),
        ("repository", "repositoryName", None),
        ("ci-skip-phrases", "ciSkipPhrases", "NO TEST"),
        ("ci-build-phrases", "ciBuildPhrases", "test this please"),
        ("target-branches", "targetBranchesToBuild", ""),
        ("ignore-ssl", "ignoreSsl", False),
        ("check-destination-commit", "checkDestinationCommit", False),
        ("check-mergable", "checkMergeable", False),
        ("merge-on-success", "mergeOnSuccess", False),
        # NOTE: unlike the other booleans this one defaults to True, which is
        # what the docstring above documents.
        ("check-not-conflicted", "checkNotConflicted", True),
        ("only-build-on-comment", "onlyBuildOnComment", False),
        (
            "delete-previous-build-finish-comments",
            "deletePreviousBuildFinishComments",
            False,
        ),
        ("cancel-outdated-jobs", "cancelOutdatedJobsEnabled", False),
    ]
    helpers.convert_mapping_to_xml(pr_trigger, data, mappings, fail_required=True)
def generic_webhook_trigger(registry, xml_parent, data):
    """yaml: generic-webhook-trigger
    Generic webhook trigger. Trigger when a set of parameters are submitted.
    Requires the Jenkins :jenkins-plugins:`Generic Webhook Trigger
    <generic-webhook-trigger>`.
    :arg str token: A token to use to trigger the job. (default '')
    :arg bool print-post-content: Print post content in job log.
    :arg bool print-contrib-var: Print contributed variables in job log.
    :arg bool silent-response: Avoid responding with information about
        triggered jobs.
    :arg str cause: This will be displayed in any triggered job.
    :arg str regex-filter-expression: Regular expression to test on the
        evaluated text specified in regex-filter-text
    :arg str regex-filter-text: Text to test for the given
        regexp-filter-expression.
    :arg list post-content-params: Parameters to use from posted JSON/XML
        :post-content-params: * **type** (`str`) -- JSONPath or XPath
            * **key** (`str`) -- Variable name
            * **value** (`str`) -- Expression to evaluate in POST content.
              Use JSONPath for JSON or XPath for XML.
            * **regex-filter** (`str`) -- Anything in the evaluated value,
              matching this regular expression, will be removed. (optional)
            * **default-value** (`str`) -- This value will be used if
              expression does not match anything. (optional)
    :arg list request-params: Parameters to use passed in as request arguments
        :request-params: * **key** (`str`) -- Name of request parameter
            * **regex-filter** (`str`) -- Anything in the evaluated value,
              matching this regular expression, will be removed. (optional)
    :arg list header-params: Parameters to use passed in as headers
        :header-params: * **key** (`str`) -- Name of request header in
              lowercase. Resulting variable name has '_' instead of '-'
              characters.
            * **regex-filter** (`str`) -- Anything in the evaluated value,
              matching this regular expression, will be removed. (optional)
    Example:
    .. literalinclude::
        /../../tests/triggers/fixtures/generic-webhook-trigger-full.yaml
    """
    namespace = "org.jenkinsci.plugins.gwt."
    gwtrig = XML.SubElement(xml_parent, namespace + "GenericTrigger")
    gwtrig.set("plugin", "generic-webhook-trigger")
    # Always emit an (empty) spec element; the plugin expects it.
    XML.SubElement(gwtrig, "spec")
    # Generic Variables (Post content parameters in UI)
    # NOTE(review): the try/except AttributeError blocks below appear to guard
    # against 'data' not being a mapping (data.get would raise
    # AttributeError, e.g. when the trigger is used without arguments) --
    # confirm before restructuring.
    try:
        if data.get("post-content-params"):
            gen_vars = XML.SubElement(gwtrig, "genericVariables")
            mappings = [
                ("type", "expressionType", "", ["JSONPath", "XPath"]),
                ("key", "key", ""),
                ("value", "value", ""),
                ("regex-filter", "regexpFilter", ""),
                ("default-value", "defaultValue", ""),
            ]
            for gen_var_list in data.get("post-content-params"):
                gen_var_tag = XML.SubElement(gen_vars, namespace + "GenericVariable")
                helpers.convert_mapping_to_xml(
                    gen_var_tag, gen_var_list, mappings, fail_required=True
                )
    except AttributeError:
        pass
    # This is dropped here in the middle as that's how the jenkins config is
    # done. It probably doesn't need to be, but since this is the first
    # swing..
    mapping = [
        ("regex-filter-text", "regexpFilterText", ""),
        ("regex-filter-expression", "regexpFilterExpression", ""),
    ]
    helpers.convert_mapping_to_xml(gwtrig, data, mapping, fail_required=False)
    # Generic Request Variables (Request parameters in UI)
    try:
        if data.get("request-params"):
            gen_req_vars = XML.SubElement(gwtrig, "genericRequestVariables")
            mappings = [("key", "key", ""), ("regex-filter", "regexpFilter", "")]
            for gen_req_list in data.get("request-params"):
                gen_req_tag = XML.SubElement(
                    gen_req_vars, namespace + "GenericRequestVariable"
                )
                helpers.convert_mapping_to_xml(
                    gen_req_tag, gen_req_list, mappings, fail_required=False
                )
    except AttributeError:
        pass
    # Generic Header Variables (Header parameters in UI)
    try:
        if data.get("header-params"):
            gen_header_vars = XML.SubElement(gwtrig, "genericHeaderVariables")
            mappings = [("key", "key", ""), ("regex-filter", "regexpFilter", "")]
            for gen_header_list in data.get("header-params"):
                gen_header_tag = XML.SubElement(
                    gen_header_vars, namespace + "GenericHeaderVariable"
                )
                helpers.convert_mapping_to_xml(
                    gen_header_tag, gen_header_list, mappings, fail_required=False
                )
    except AttributeError:
        pass
    mapping = [
        ("print-post-content", "printPostContent", False),
        ("print-contrib-var", "printContributedVariables", False),
        ("cause", "causeString", ""),
        ("token", "token", ""),
        ("silent-response", "silentResponse", False),
    ]
    # This should cover all the top level
    helpers.convert_mapping_to_xml(gwtrig, data, mapping, fail_required=False)
def artifactory(registry, xml_parent, data):
    """yaml: artifactory
    Artifactory trigger. Trigger if files are added or modified in configured
    path(s) to watch on chosen Artifactory server.
    Requires the Jenkins :jenkins-plugins:`Artifactory Plugin <Artifactory>`.

    :arg str artifactory-server: Artifactory server where the configured
        path(s) are monitored from. Available Artifactory servers must be
        configured on Jenkins Global Configuration in advance. (default '')
    :arg str schedule: cron syntax of when to poll. (default '')
    :arg str paths: Paths in Artifactory to poll for changes. Multiple
        paths can be configured by the ';' separator. (default '')

    Example with Single Path to Monitor:

    .. literalinclude::
        /../../tests/triggers/fixtures/artifactory-trigger-single-path.yaml
       :language: yaml

    Example with Multiple Paths to Monitor:

    .. literalinclude::
        /../../tests/triggers/fixtures/artifactory-trigger-multi-path.yaml
       :language: yaml
    """
    trigger = XML.SubElement(
        xml_parent, "org.jfrog.hudson.trigger.ArtifactoryTrigger"
    )
    trigger.set("plugin", "artifactory")
    trigger_mapping = [
        ("schedule", "spec", ""),
        ("paths", "paths", ""),
        # Always emitted empty; not user configurable.
        ("", "branches", ""),
        ("", "lastModified", ""),
    ]
    helpers.convert_mapping_to_xml(trigger, data, trigger_mapping, fail_required=True)
    details = XML.SubElement(trigger, "details")
    helpers.convert_mapping_to_xml(
        details,
        data,
        [
            ("artifactory-server", "artifactoryName", None),
            ("", "stagingPlugin", ""),
        ],
        fail_required=True,
    )
class Triggers(jenkins_jobs.modules.base.Base):
    """Jenkins Job Builder module that renders the ``triggers`` section."""

    sequence = 50
    component_type = "trigger"
    component_list_type = "triggers"
    def gen_xml(self, xml_parent, data):
        """Render all configured triggers into *xml_parent*.

        For freestyle-style projects the triggers live in a top-level
        ``<triggers class="vector">`` element; for pipeline projects they are
        nested inside a ``PipelineTriggersJobProperty`` under ``<properties>``.
        """
        triggers = data.get("triggers", [])
        if not triggers:
            # Nothing configured: emit no triggers element at all.
            return
        if data.get("project-type", "freestyle") != "pipeline":
            trig_e = XML.SubElement(xml_parent, "triggers", {"class": "vector"})
        else:
            # Reuse an existing <properties> element if one was already built.
            properties = xml_parent.find("properties")
            if properties is None:
                properties = XML.SubElement(xml_parent, "properties")
            pipeline_trig_prop = XML.SubElement(
                properties,
                "org.jenkinsci.plugins.workflow.job.properties.PipelineTriggersJobProperty",
            )
            trig_e = XML.SubElement(pipeline_trig_prop, "triggers")
        # Delegate each entry to the registered trigger component.
        for trigger in triggers:
            self.registry.dispatch("trigger", trig_e, trigger)
| 41.338977 | 98 | 0.612093 |
import logging
import pkg_resources
import re
import sys
import xml.etree.ElementTree as XML
import six
from jenkins_jobs.errors import InvalidAttributeError
from jenkins_jobs.errors import JenkinsJobsException
from jenkins_jobs.errors import MissingAttributeError
import jenkins_jobs.modules.base
from jenkins_jobs.modules import hudson_model
import jenkins_jobs.modules.helpers as helpers
logger = logging.getLogger(str(__name__))
def gerrit_handle_legacy_configuration(data):
    """Mutate *data* in place, converting deprecated gerrit trigger keys.

    CamelCase keys (e.g. ``triggerOnPatchsetUploadedEvent``) are renamed to
    their hyphenized form, and the old ``trigger-on-*`` boolean flags are
    folded into the new ``trigger-on`` list.  Deprecation warnings are logged
    for every converted key.
    """
    hyphenizer = re.compile("[A-Z]")
    def hyphenize(attr):
        # e.g. "projectPattern" -> "project-pattern"
        return hyphenizer.sub(lambda x: "-%s" % x.group(0).lower(), attr)
    def convert_dict(d, old_keys):
        # Rename each present old_key to its hyphenized form, warning once per key.
        for old_key in old_keys:
            if old_key in d:
                new_key = hyphenize(old_key)
                logger.warning(
                    "'%s' is deprecated and will be removed after "
                    "1.0.0, please use '%s' instead",
                    old_key,
                    new_key,
                )
                d[new_key] = d[old_key]
                del d[old_key]
    convert_dict(
        data,
        [
            "triggerOnPatchsetUploadedEvent",
            "triggerOnChangeAbandonedEvent",
            "triggerOnChangeMergedEvent",
            "triggerOnChangeRestoredEvent",
            "triggerOnCommentAddedEvent",
            "triggerOnDraftPublishedEvent",
            "triggerOnRefUpdatedEvent",
            "triggerApprovalCategory",
            "triggerApprovalValue",
            "overrideVotes",
            "gerritBuildSuccessfulVerifiedValue",
            "gerritBuildFailedVerifiedValue",
            "failureMessage",
            "skipVote",
        ],
    )
    for project in data.get("projects", []):
        convert_dict(
            project,
            [
                "projectCompareType",
                "projectPattern",
                "branchCompareType",
                "branchPattern",
            ],
        )
    # Use the same mapping type as the YAML loader produced so key order and
    # behavior are preserved when rebuilding events.
    mapping_obj_type = type(data)
    old_format_events = mapping_obj_type(
        (key, should_register)
        for key, should_register in six.iteritems(data)
        if key.startswith("trigger-on-")
    )
    trigger_on = data.setdefault("trigger-on", [])
    if old_format_events:
        logger.warning(
            "The events: %s; which you used is/are deprecated. "
            "Please use 'trigger-on' instead.",
            ", ".join(old_format_events),
        )
    # Mixing both formats is ambiguous, so refuse it outright.
    if old_format_events and trigger_on:
        raise JenkinsJobsException(
            "Both, the new format (trigger-on) and old format (trigger-on-*) "
            "gerrit events format found. Please use either the new or the old "
            "format of trigger events definition."
        )
    # Only events whose old flag was truthy are carried over.
    trigger_on.extend(
        event_name[len("trigger-on-") :]
        for event_name, should_register in six.iteritems(old_format_events)
        if should_register
    )
    for idx, event in enumerate(trigger_on):
        if event == "comment-added-event":
            # The plain string form of comment-added-event is expanded into
            # the dict form, pulling the approval settings from the legacy
            # top-level keys.
            trigger_on[idx] = events = mapping_obj_type()
            try:
                events["comment-added-event"] = mapping_obj_type(
                    (
                        ("approval-category", data["trigger-approval-category"]),
                        ("approval-value", data["trigger-approval-value"]),
                    )
                )
            except KeyError:
                raise JenkinsJobsException(
                    "The comment-added-event trigger requires which approval "
                    "category and value you want to trigger the job. "
                    "It should be specified by the approval-category "
                    "and approval-value properties."
                )
def build_gerrit_triggers(xml_parent, data, plugin_ver):
    """Build the ``triggerOnEvents`` section of a Gerrit trigger.

    Entries in ``data['trigger-on']`` are either plain strings naming a
    simple event, or single-key dicts for events that carry extra settings
    (``patchset-created-event``, ``comment-added-event``,
    ``comment-added-contains-event``).

    :param xml_parent: element the ``triggerOnEvents`` node is appended to
    :param data: trigger configuration mapping
    :param plugin_ver: parsed Gerrit Trigger plugin version, used to gate
        options only available from 2.32.0 on
    :raises JenkinsJobsException: on an unknown simple event name
    """
    available_simple_triggers = {
        "change-abandoned-event": "PluginChangeAbandonedEvent",
        "change-merged-event": "PluginChangeMergedEvent",
        "change-restored-event": "PluginChangeRestoredEvent",
        "draft-published-event": "PluginDraftPublishedEvent",
        "patchset-uploaded-event": "PluginPatchsetCreatedEvent",
        "patchset-created-event": "PluginPatchsetCreatedEvent",
        "private-state-changed-event": "PluginPrivateStateChangedEvent",
        "ref-updated-event": "PluginRefUpdatedEvent",
        "topic-changed-event": "PluginTopicChangedEvent",
        "wip-state-changed-event": "PluginWipStateChangedEvent",
    }
    tag_namespace = (
        "com.sonyericsson.hudson.plugins.gerrit.trigger." "hudsontrigger.events"
    )
    trigger_on_events = XML.SubElement(xml_parent, "triggerOnEvents")
    for event in data.get("trigger-on", []):
        if isinstance(event, six.string_types):
            tag_name = available_simple_triggers.get(event)
            if event == "patchset-uploaded-event":
                logger.warning(
                    "'%s' is deprecated. Use 'patchset-created-event' "
                    "format instead.",
                    event,
                )
            if not tag_name:
                # dict views cannot be concatenated with '+' on Python 3;
                # materialize the keys first.
                known = ", ".join(
                    list(available_simple_triggers.keys())
                    + ["comment-added-event", "comment-added-contains-event"]
                )
                msg = (
                    "The event '%s' under 'trigger-on' is not one of the " "known: %s."
                ) % (event, known)
                raise JenkinsJobsException(msg)
            XML.SubElement(trigger_on_events, "%s.%s" % (tag_namespace, tag_name))
        else:
            # Dict form: each recognized key adds its own event element.
            if "patchset-created-event" in event.keys():
                pce = event["patchset-created-event"]
                pc = XML.SubElement(
                    trigger_on_events,
                    "%s.%s" % (tag_namespace, "PluginPatchsetCreatedEvent"),
                )
                mapping = [
                    ("exclude-drafts", "excludeDrafts", False),
                    ("exclude-trivial-rebase", "excludeTrivialRebase", False),
                    ("exclude-no-code-change", "excludeNoCodeChange", False),
                    ("exclude-private", "excludePrivateState", False),
                    ("exclude-wip", "excludeWipState", False),
                ]
                # Only supported by plugin >= 2.32.0.
                if plugin_ver >= pkg_resources.parse_version("2.32.0"):
                    mapping.append(
                        (
                            "commit-message-contains-regex",
                            "commitMessageContainsRegEx",
                            "",
                        )
                    )
                helpers.convert_mapping_to_xml(pc, pce, mapping, fail_required=True)
            if "comment-added-event" in event.keys():
                comment_added_event = event["comment-added-event"]
                cadded = XML.SubElement(
                    trigger_on_events,
                    "%s.%s" % (tag_namespace, "PluginCommentAddedEvent"),
                )
                mapping = [
                    ("approval-category", "verdictCategory", None),
                    ("approval-value", "commentAddedTriggerApprovalValue", None),
                ]
                helpers.convert_mapping_to_xml(
                    cadded, comment_added_event, mapping, fail_required=True
                )
            if "comment-added-contains-event" in event.keys():
                comment_added_event = event["comment-added-contains-event"]
                caddedc = XML.SubElement(
                    trigger_on_events,
                    "%s.%s" % (tag_namespace, "PluginCommentAddedContainsEvent"),
                )
                XML.SubElement(
                    caddedc, "commentAddedCommentContains"
                ).text = comment_added_event["comment-contains-value"]
def build_gerrit_skip_votes(xml_parent, data, plugin_ver):
    """Append the Gerrit trigger ``skipVote`` section to *xml_parent*.

    One boolean child element is written per supported build outcome,
    looked up in the ``skip-vote`` dictionary of *data* and defaulting to
    ``False``.  The ``aborted`` outcome is only emitted for Gerrit Trigger
    plugin versions >= 2.32.0.
    """
    result_tags = [
        ("successful", "onSuccessful"),
        ("failed", "onFailed"),
        ("unstable", "onUnstable"),
        ("notbuilt", "onNotBuilt"),
    ]
    # Newer plugin releases grew a vote-skip option for aborted builds.
    if plugin_ver >= pkg_resources.parse_version("2.32.0"):
        result_tags.append(("aborted", "onAborted"))
    settings = data.get("skip-vote", {})
    skip_vote_node = XML.SubElement(xml_parent, "skipVote")
    for yaml_key, xml_tag in result_tags:
        enabled = settings.get(yaml_key, False)
        XML.SubElement(skip_vote_node, xml_tag).text = str(enabled).lower()
def build_cancellation_policy(xml_parent, data, plugin_ver):
    """Append the Gerrit trigger ``buildCancellationPolicy`` section.

    The section is only understood by Gerrit Trigger plugin >= 2.32.0 and
    is skipped entirely for earlier versions.  Each option defaults to
    ``False`` when absent from the ``build-cancellation-policy`` dictionary
    in *data*; the ``enabled`` flag is always written as ``true``.
    """
    if plugin_ver < pkg_resources.parse_version("2.32.0"):
        return
    policy = data.get("build-cancellation-policy", {})
    policy_node = XML.SubElement(xml_parent, "buildCancellationPolicy")
    XML.SubElement(policy_node, "enabled").text = "true"
    for yaml_key, xml_tag in (
        ("abort-new-patchsets", "abortNewPatchsets"),
        ("abort-manual-patchsets", "abortManualPatchsets"),
        ("abort-same-topic", "abortSameTopic"),
    ):
        flag = policy.get(yaml_key, False)
        XML.SubElement(policy_node, xml_tag).text = str(flag).lower()
def build_gerrit_parameter_modes(xml_parent, data, plugin_ver):
    """Emit the Gerrit trigger parameter-encoding settings.

    Before plugin 2.18.0 only the boolean ``no-name-and-email`` and
    ``readable-message`` flags exist; per-parameter ``*-parameter-mode``
    keys are warned about and ignored.  From 2.18.0 on, each parameter has
    an explicit NONE/PLAIN/BASE64 mode, with defaults chosen so the
    deprecated booleans keep selecting the behaviour they used to.
    """
    if plugin_ver < pkg_resources.parse_version("2.18.0"):
        # Per-parameter modes do not exist yet; warn if the user set one.
        for parameter_name in (
            "commit-message",
            "name-and-email",
            "change-subject",
            "comment-text",
        ):
            parameter_mode = "{}-parameter-mode".format(parameter_name)
            if parameter_mode in data:
                logger.warning(
                    "Gerrit Trigger property '{}' is not supported in this "
                    "plugin version".format(parameter_mode)
                )
        deprecated_mappings = (
            ("no-name-and-email", "noNameAndEmailParameters", False),
            ("readable-message", "readableMessage", False),
        )
        helpers.convert_mapping_to_xml(
            xml_parent, data, deprecated_mappings, fail_required=True
        )
    else:
        # The old booleans still influence the new modes' defaults below,
        # but are themselves deprecated.
        readable_message = data.get("readable-message")
        if readable_message is not None:
            logger.warning("Gerrit Trigger property 'readable-message' is deprecated")
        no_name_and_email = data.get("no-name-and-email")
        if no_name_and_email is not None:
            logger.warning("Gerrit Trigger property 'no-name-and-email' is deprecated")
        allowed_parameter_modes = ["NONE", "PLAIN", "BASE64"]
        new_mappings = (
            (
                "commit-message-parameter-mode",
                "commitMessageParameterMode",
                "BASE64" if readable_message is not True else "PLAIN",
                allowed_parameter_modes,
            ),
            (
                "name-and-email-parameter-mode",
                "nameAndEmailParameterMode",
                "PLAIN" if no_name_and_email is not True else "NONE",
                allowed_parameter_modes,
            ),
            (
                "change-subject-parameter-mode",
                "changeSubjectParameterMode",
                "PLAIN",
                allowed_parameter_modes,
            ),
            (
                "comment-text-parameter-mode",
                "commentTextParameterMode",
                "BASE64",
                allowed_parameter_modes,
            ),
        )
        helpers.convert_mapping_to_xml(
            xml_parent, data, new_mappings, fail_required=True
        )
def gerrit(registry, xml_parent, data):
    """Trigger on events from the Gerrit Trigger plugin.

    Emits the ``GerritTrigger`` configuration: watched projects with
    branch/file-path/topic patterns, vote skipping, the cancellation
    policy, parameter encoding modes, the trigger events, and optional
    vote-value and result-message overrides.  Several sections are gated
    on the detected plugin version.
    """

    def get_compare_type(xml_tag, compare_type):
        """Validate a pattern comparison style (shared by project, branch,
        file-path and topic entries) and return it unchanged."""
        valid_compare_types = ["PLAIN", "ANT", "REG_EXP"]
        if compare_type not in valid_compare_types:
            raise InvalidAttributeError(xml_tag, compare_type, valid_compare_types)
        return compare_type

    # Rewrite deprecated keys in *data* in place before processing.
    gerrit_handle_legacy_configuration(data)
    plugin_info = registry.get_plugin_info("Gerrit Trigger")
    # When the plugin version is unknown, assume the newest config format.
    plugin_ver = pkg_resources.parse_version(
        plugin_info.get("version", str(sys.maxsize))
    )
    projects = data.get("projects", [])
    gtrig = XML.SubElement(
        xml_parent,
        "com.sonyericsson.hudson.plugins.gerrit.trigger." "hudsontrigger.GerritTrigger",
    )
    XML.SubElement(gtrig, "spec")
    gprojects = XML.SubElement(gtrig, "gerritProjects")
    # One GerritProject element per watched project, each with its own
    # branch / file-path / forbidden-file-path / topic pattern lists.
    for project in projects:
        gproj = XML.SubElement(
            gprojects,
            "com.sonyericsson.hudson.plugins.gerrit."
            "trigger.hudsontrigger.data.GerritProject",
        )
        XML.SubElement(gproj, "compareType").text = get_compare_type(
            "project-compare-type", project.get("project-compare-type", "PLAIN")
        )
        XML.SubElement(gproj, "pattern").text = project["project-pattern"]
        branches = XML.SubElement(gproj, "branches")
        project_branches = project.get("branches", [])
        # Project-level branch keys are a deprecated shorthand for a
        # single-entry branches list; a populated branches section wins.
        if "branch-compare-type" in project and "branch-pattern" in project:
            warning = (
                "branch-compare-type and branch-pattern at project "
                "level are deprecated and support will be removed "
                "in a later version of Jenkins Job Builder; "
            )
            if project_branches:
                warning += "discarding values and using values from " "branches section"
            else:
                warning += "please use branches section instead"
            logger.warning(warning)
        if not project_branches:
            project_branches = [
                {
                    "branch-compare-type": project.get("branch-compare-type", "PLAIN"),
                    "branch-pattern": project["branch-pattern"],
                }
            ]
        for branch in project_branches:
            gbranch = XML.SubElement(
                branches,
                "com.sonyericsson.hudson.plugins."
                "gerrit.trigger.hudsontrigger.data.Branch",
            )
            XML.SubElement(gbranch, "compareType").text = get_compare_type(
                "branch-compare-type", branch.get("branch-compare-type", "PLAIN")
            )
            XML.SubElement(gbranch, "pattern").text = branch["branch-pattern"]
        project_file_paths = project.get("file-paths", [])
        if project_file_paths:
            fps_tag = XML.SubElement(gproj, "filePaths")
            for file_path in project_file_paths:
                fp_tag = XML.SubElement(
                    fps_tag,
                    "com.sonyericsson.hudson.plugins."
                    "gerrit.trigger.hudsontrigger.data."
                    "FilePath",
                )
                XML.SubElement(fp_tag, "compareType").text = get_compare_type(
                    "compare-type", file_path.get("compare-type", "PLAIN")
                )
                XML.SubElement(fp_tag, "pattern").text = file_path["pattern"]
        project_forbidden_file_paths = project.get("forbidden-file-paths", [])
        if project_forbidden_file_paths:
            ffps_tag = XML.SubElement(gproj, "forbiddenFilePaths")
            for forbidden_file_path in project_forbidden_file_paths:
                ffp_tag = XML.SubElement(
                    ffps_tag,
                    "com.sonyericsson.hudson.plugins."
                    "gerrit.trigger.hudsontrigger.data."
                    "FilePath",
                )
                XML.SubElement(ffp_tag, "compareType").text = get_compare_type(
                    "compare-type", forbidden_file_path.get("compare-type", "PLAIN")
                )
                XML.SubElement(ffp_tag, "pattern").text = forbidden_file_path["pattern"]
        topics = project.get("topics", [])
        if topics:
            topics_tag = XML.SubElement(gproj, "topics")
            for topic in topics:
                topic_tag = XML.SubElement(
                    topics_tag,
                    "com.sonyericsson.hudson.plugins."
                    "gerrit.trigger.hudsontrigger.data."
                    "Topic",
                )
                XML.SubElement(topic_tag, "compareType").text = get_compare_type(
                    "compare-type", topic.get("compare-type", "PLAIN")
                )
                XML.SubElement(topic_tag, "pattern").text = topic["pattern"]
        XML.SubElement(gproj, "disableStrictForbiddenFileVerification").text = str(
            project.get("disable-strict-forbidden-file-verification", False)
        ).lower()
    build_gerrit_skip_votes(gtrig, data, plugin_ver)
    if "build-cancellation-policy" in data:
        build_cancellation_policy(gtrig, data, plugin_ver)
    general_mappings = [
        ("silent", "silentMode", False),
        ("silent-start", "silentStartMode", False),
        ("escape-quotes", "escapeQuotes", True),
        ("dependency-jobs", "dependencyJobsNames", ""),
    ]
    helpers.convert_mapping_to_xml(gtrig, data, general_mappings, fail_required=True)
    build_gerrit_parameter_modes(gtrig, data, plugin_ver)
    notification_levels = ["NONE", "OWNER", "OWNER_REVIEWERS", "ALL", "SERVER_DEFAULT"]
    notification_level = data.get("notification-level", "SERVER_DEFAULT")
    if notification_level not in notification_levels:
        raise InvalidAttributeError(
            "notification-level", notification_level, notification_levels
        )
    # SERVER_DEFAULT is represented by an empty element in the plugin XML.
    if notification_level == "SERVER_DEFAULT":
        XML.SubElement(gtrig, "notificationLevel").text = ""
    else:
        XML.SubElement(gtrig, "notificationLevel").text = notification_level
    XML.SubElement(gtrig, "dynamicTriggerConfiguration").text = str(
        data.get("dynamic-trigger-enabled", False)
    ).lower()
    XML.SubElement(gtrig, "triggerConfigURL").text = str(
        data.get("dynamic-trigger-url", "")
    )
    if data.get("dynamic-trigger-enabled", False) is False:
        XML.SubElement(gtrig, "dynamicGerritProjects").set("class", "empty-list")
    XML.SubElement(gtrig, "triggerInformationAction").text = str(
        data.get("trigger-information-action", "")
    )
    # This option only existed in plugin versions [2.11.0, 2.14.0).
    if (plugin_ver >= pkg_resources.parse_version("2.11.0")) and (
        plugin_ver < pkg_resources.parse_version("2.14.0")
    ):
        XML.SubElement(gtrig, "allowTriggeringUnreviewedPatches").text = str(
            data.get("trigger-for-unreviewed-patches", False)
        ).lower()
    elif "trigger-for-unreviewed-patches" in data:
        logger.warning(
            "Gerrit Trigger property 'trigger-for-unreviewed-patches' is not "
            "supported in this plugin version"
        )
    build_gerrit_triggers(gtrig, data, plugin_ver)
    # Per-result vote overrides are only written when override-votes is
    # explicitly enabled; unset values are omitted.
    override = str(data.get("override-votes", False)).lower()
    if override == "true":
        votes = [
            ("gerrit-build-started-verified-value", "gerritBuildStartedVerifiedValue"),
            (
                "gerrit-build-successful-verified-value",
                "gerritBuildSuccessfulVerifiedValue",
            ),
            ("gerrit-build-failed-verified-value", "gerritBuildFailedVerifiedValue"),
            (
                "gerrit-build-unstable-verified-value",
                "gerritBuildUnstableVerifiedValue",
            ),
            (
                "gerrit-build-notbuilt-verified-value",
                "gerritBuildNotBuiltVerifiedValue",
            ),
            (
                "gerrit-build-started-codereview-value",
                "gerritBuildStartedCodeReviewValue",
            ),
            (
                "gerrit-build-successful-codereview-value",
                "gerritBuildSuccessfulCodeReviewValue",
            ),
            (
                "gerrit-build-failed-codereview-value",
                "gerritBuildFailedCodeReviewValue",
            ),
            (
                "gerrit-build-unstable-codereview-value",
                "gerritBuildUnstableCodeReviewValue",
            ),
            (
                "gerrit-build-notbuilt-codereview-value",
                "gerritBuildNotBuiltCodeReviewValue",
            ),
        ]
        if plugin_ver >= pkg_resources.parse_version("2.31.0"):
            votes.append(
                (
                    "gerrit-build-aborted-verified-value",
                    "gerritBuildAbortedVerifiedValue",
                )
            )
            votes.append(
                (
                    "gerrit-build-aborted-codereview-value",
                    "gerritBuildAbortedCodeReviewValue",
                )
            )
        for yamlkey, xmlkey in votes:
            if data.get(yamlkey) is not None:
                XML.SubElement(gtrig, xmlkey).text = str(int(data.get(yamlkey)))
    message_mappings = [
        ("start-message", "buildStartMessage", ""),
        ("failure-message", "buildFailureMessage", ""),
        ("successful-message", "buildSuccessfulMessage", ""),
        ("unstable-message", "buildUnstableMessage", ""),
        ("notbuilt-message", "buildNotBuiltMessage", ""),
        ("failure-message-file", "buildUnsuccessfulFilepath", ""),
        ("custom-url", "customUrl", ""),
        ("server-name", "serverName", "__ANY__"),
    ]
    if plugin_ver >= pkg_resources.parse_version("2.31.0"):
        message_mappings.append(("aborted-message", "buildAbortedMessage", ""))
    helpers.convert_mapping_to_xml(gtrig, data, message_mappings, fail_required=True)
def dockerhub_notification(registry, xml_parent, data):
    """Trigger a build when Docker Hub sends a push notification.

    Requires the Jenkins DockerHub Notification plugin.  When
    ``referenced-image`` is truthy, the trigger fires for any image used by
    the job; explicit image names may be listed under ``repositories``.
    """
    trigger = XML.SubElement(
        xml_parent,
        "org.jenkinsci.plugins.registry.notification.DockerHubTrigger",
    )
    trigger.set("plugin", "dockerhub-notification")
    options = XML.SubElement(trigger, "options", {"class": "vector"})
    if data.get("referenced-image"):
        XML.SubElement(
            options,
            "org.jenkinsci.plugins.registry.notification"
            ".opt.impl.TriggerForAllUsedInJob",
        )
    repositories = data.get("repositories", [])
    if repositories:
        names_option = XML.SubElement(
            options,
            "org.jenkinsci.plugins.registry.notification"
            ".opt.impl.TriggerOnSpecifiedImageNames",
        )
        names = XML.SubElement(names_option, "repoNames")
        for name in repositories:
            XML.SubElement(names, "string").text = name
def pollscm(registry, xml_parent, data):
    """Poll the SCM for changes on a cron schedule.

    *data* is normally a dictionary with ``cron`` (required) and
    ``ignore-post-commit-hooks`` (optional, default ``False``).  A bare
    string is still accepted as the cron spec for backward compatibility,
    with a deprecation warning.
    """
    try:
        cron = data["cron"]
        ignore_hooks = str(data.get("ignore-post-commit-hooks", False)).lower()
    except KeyError as exc:
        raise MissingAttributeError(exc)
    except TypeError:
        # Legacy format: the trigger value itself is the cron string.
        logger.warning(
            "Your pollscm usage is deprecated, please use"
            " the syntax described in the documentation"
            " instead"
        )
        cron = data
        ignore_hooks = "false"
    # An empty string is a valid (hook-only) schedule; other falsy values
    # such as None are rejected.
    if not cron and cron != "":
        raise InvalidAttributeError("cron", cron)
    trigger = XML.SubElement(xml_parent, "hudson.triggers.SCMTrigger")
    XML.SubElement(trigger, "spec").text = cron
    XML.SubElement(trigger, "ignorePostCommitHooks").text = ignore_hooks
def build_content_type(
    xml_parent,
    entries,
    namespace,
    collection_suffix,
    entry_suffix,
    prefix,
    collection_name,
    element_name,
):
    """Emit one content-type section for the URL/file triggers.

    Creates ``<namespace.prefix+collection_suffix>`` under *xml_parent*;
    when *entries* is non-empty a *collection_name* child is added holding
    one ``<namespace.prefix+entry_suffix>`` element (with an
    *element_name* text child) per entry.
    """
    section_tag = "{0}.{1}{2}".format(namespace, prefix, collection_suffix)
    section = XML.SubElement(xml_parent, section_tag)
    if not entries:
        return
    container = XML.SubElement(section, collection_name)
    entry_tag = "{0}.{1}{2}".format(namespace, prefix, entry_suffix)
    for value in entries:
        item = XML.SubElement(container, entry_tag)
        XML.SubElement(item, element_name).text = value
def pollurl(registry, xml_parent, data):
    """Trigger when the content behind one or more URLs changes.

    Requires the Jenkins URLTrigger plugin.  Each entry under ``urls`` may
    check the HTTP status code, the ETag and the last-modification date,
    and may additionally inspect the response body as simple/JSON/TEXT/XML
    content.

    :raises JenkinsJobsException: when ``urls`` is empty or a
        ``check-content`` type is unknown.
    """
    namespace = "org.jenkinsci.plugins.urltrigger."
    # name -> [class suffix, collection tag, entry tag, fixed sub-entries]
    # (None means the sub-entries come from the user's configuration).
    valid_content_types = {
        "simple": ["Simple", "", "", []],
        "json": ["JSON", "jsonPaths", "jsonPath", None],
        "text": ["TEXT", "regExElements", "regEx", None],
        "xml": ["XML", "xPaths", "xPath", None],
    }
    urltrig = XML.SubElement(xml_parent, namespace + "URLTrigger")
    node = data.get("polling-node")
    XML.SubElement(urltrig, "spec").text = data.get("cron", "")
    XML.SubElement(urltrig, "labelRestriction").text = str(bool(node)).lower()
    if node:
        XML.SubElement(urltrig, "triggerLabel").text = node
    entries = XML.SubElement(urltrig, "entries")
    urls = data.get("urls", [])
    if not urls:
        raise JenkinsJobsException("At least one url must be provided")
    # Shared options, copied per URL: previously a single list was mutated
    # inside the loop, so the statusCode mapping added for one entry with
    # 'check-status' leaked into every subsequent entry.
    base_mapping = [
        ("proxy", "proxyActivated", False),
        ("timeout", "timeout", 300),
        ("check-etag", "checkETag", False),
        ("check-date", "checkLastModificationDate", False),
    ]
    for url in urls:
        entry = XML.SubElement(entries, namespace + "URLTriggerEntry")
        XML.SubElement(entry, "url").text = url["url"]
        if "username" in url:
            XML.SubElement(entry, "username").text = url["username"]
        if "password" in url:
            XML.SubElement(entry, "password").text = url["password"]
        mapping = list(base_mapping)
        if "check-status" in url:
            XML.SubElement(entry, "checkStatus").text = "true"
            mapping.append(("check-status", "statusCode", ""))
        else:
            XML.SubElement(entry, "checkStatus").text = "false"
            XML.SubElement(entry, "statusCode").text = "200"
        helpers.convert_mapping_to_xml(entry, url, mapping, fail_required=False)
        check_content = url.get("check-content", [])
        XML.SubElement(entry, "inspectingContent").text = str(
            bool(check_content)
        ).lower()
        content_types = XML.SubElement(entry, "contentTypes")
        # NOTE(review): this loop previously rebound ``entry``, shadowing
        # the URLTriggerEntry element above; renamed to ``content``.
        for content in check_content:
            type_name = next(iter(content.keys()))
            if type_name not in valid_content_types:
                raise JenkinsJobsException(
                    "check-content must be one of : %s"
                    % ", ".join(valid_content_types.keys())
                )
            content_type = valid_content_types.get(type_name)
            if content[type_name]:
                sub_entries = content_type[3]
                if sub_entries is None:
                    sub_entries = content[type_name]
                build_content_type(
                    content_types,
                    sub_entries,
                    namespace + "content",
                    "ContentType",
                    "ContentEntry",
                    *content_type[0:3]
                )
def jms_messaging(registry, xml_parent, data):
    """Trigger a build when a matching CI message is received.

    Requires the Jenkins JMS Messaging plugin.  An optional
    ``override-topic`` replaces the provider's default topic, and each
    entry under ``checks`` adds a field/expected-value message filter.
    """
    prefix = "com.redhat.jenkins.plugins.ci."
    trigger = XML.SubElement(xml_parent, prefix + "CIBuildTrigger")
    if "override-topic" in data:
        overrides = XML.SubElement(trigger, "overrides")
        XML.SubElement(overrides, "topic").text = str(data["override-topic"])
    helpers.convert_mapping_to_xml(
        trigger,
        data,
        [
            ("spec", "spec", ""),
            ("no-squash", "noSquash", False),
            ("selector", "selector", ""),
            ("provider-name", "providerName", ""),
        ],
        fail_required=True,
    )
    checks = data.get("checks", [])
    if len(checks) > 0:
        checks_node = XML.SubElement(trigger, "checks")
        check_mapping = [
            ("field", "field", ""),
            ("expected-value", "expectedValue", ""),
        ]
        for check in checks:
            check_node = XML.SubElement(
                checks_node, prefix + "messaging.checks.MsgCheck"
            )
            helpers.convert_mapping_to_xml(
                check_node, check, check_mapping, fail_required=True
            )
def timed(registry, xml_parent, data):
    """Trigger builds on a fixed schedule; *data* is the cron spec string."""
    timer = XML.SubElement(xml_parent, "hudson.triggers.TimerTrigger")
    XML.SubElement(timer, "spec").text = data
def bitbucket(registry, xml_parent, data):
    """Trigger a build when Bitbucket sends a push webhook (no options)."""
    trigger = XML.SubElement(
        xml_parent, "com.cloudbees.jenkins.plugins.BitBucketTrigger"
    )
    XML.SubElement(trigger, "spec").text = ""
def github(registry, xml_parent, data):
    """Trigger a build when GitHub sends a push webhook (no options)."""
    trigger = XML.SubElement(xml_parent, "com.cloudbees.jenkins.GitHubPushTrigger")
    XML.SubElement(trigger, "spec").text = ""
def github_pull_request(registry, xml_parent, data):
    """Trigger builds from GitHub pull requests (GHPRB plugin).

    Emits the ``GhprbTrigger`` configuration: admin/white/black lists,
    label and region filters, target-branch white/black lists, and the
    optional extensions block for commit statuses, result comments,
    build cancellation, comment files and status suppression.
    """
    ghprb = XML.SubElement(xml_parent, "org.jenkinsci.plugins.ghprb." "GhprbTrigger")
    mapping = [
        (
            "allow-whitelist-orgs-as-admins",
            "allowMembersOfWhitelistedOrgsAsAdmin",
            False,
        ),
        ("trigger-phrase", "triggerPhrase", ""),
        ("skip-build-phrase", "skipBuildPhrase", ""),
        ("only-trigger-phrase", "onlyTriggerPhrase", False),
        ("github-hooks", "useGitHubHooks", False),
        ("permit-all", "permitAll", False),
        ("auto-close-on-fail", "autoCloseFailedPullRequests", False),
        (
            "display-build-errors-on-downstream-builds",
            "displayBuildErrorsOnDownstreamBuilds",
            False,
        ),
    ]
    XML.SubElement(ghprb, "configVersion").text = "3"
    # The plugin stores the cron spec under both tags.
    cron_string = data.get("cron", "") or ""
    XML.SubElement(ghprb, "spec").text = cron_string
    XML.SubElement(ghprb, "cron").text = cron_string
    # List-valued options are serialised as newline-joined strings
    # (space-joined for black-list-commit-author).
    admin_string = "\n".join(data.get("admin-list", []))
    XML.SubElement(ghprb, "adminlist").text = admin_string
    white_string = "\n".join(data.get("white-list", []))
    XML.SubElement(ghprb, "whitelist").text = white_string
    org_string = "\n".join(data.get("org-list", []))
    XML.SubElement(ghprb, "orgslist").text = org_string
    black_list_commit_author_string = " ".join(data.get("black-list-commit-author", ""))
    XML.SubElement(
        ghprb, "blackListCommitAuthor"
    ).text = black_list_commit_author_string
    white_list_labels_string = "\n".join(data.get("white-list-labels", []))
    XML.SubElement(ghprb, "whiteListLabels").text = white_list_labels_string
    black_list_labels_string = "\n".join(data.get("black-list-labels", []))
    XML.SubElement(ghprb, "blackListLabels").text = black_list_labels_string
    excluded_regions_string = "\n".join(data.get("excluded-regions", []))
    XML.SubElement(ghprb, "excludedRegions").text = excluded_regions_string
    included_regions_string = "\n".join(data.get("included-regions", []))
    XML.SubElement(ghprb, "includedRegions").text = included_regions_string
    build_desc_template = data.get("build-desc-template", "")
    if build_desc_template:
        XML.SubElement(ghprb, "buildDescTemplate").text = str(build_desc_template)
    else:
        XML.SubElement(ghprb, "buildDescTemplate")
    helpers.convert_mapping_to_xml(ghprb, data, mapping, fail_required=False)
    white_list_target_branches = data.get("white-list-target-branches", [])
    ghprb_wltb = XML.SubElement(ghprb, "whiteListTargetBranches")
    if white_list_target_branches:
        for branch in white_list_target_branches:
            be = XML.SubElement(
                ghprb_wltb, "org.jenkinsci.plugins." "ghprb.GhprbBranch"
            )
            XML.SubElement(be, "branch").text = str(branch)
    black_list_target_branches = data.get("black-list-target-branches", [])
    ghprb_bltb = XML.SubElement(ghprb, "blackListTargetBranches")
    if black_list_target_branches:
        for branch in black_list_target_branches:
            be = XML.SubElement(
                ghprb_bltb, "org.jenkinsci.plugins." "ghprb.GhprbBranch"
            )
            XML.SubElement(be, "branch").text = str(branch)
    auth_id = data.get("auth-id", "")
    if auth_id:
        XML.SubElement(ghprb, "gitHubAuthId").text = str(auth_id)
    # Any status-, comment- or cancellation-related option forces the
    # <extensions> block below.
    status_context = data.get("status-context", "")
    triggered_status = data.get("triggered-status", "")
    started_status = data.get("started-status", "")
    status_url = data.get("status-url", "")
    status_add_test_results = data.get("status-add-test-results", False)
    success_status = data.get("success-status", "")
    failure_status = data.get("failure-status", "")
    error_status = data.get("error-status", "")
    requires_status = (
        status_context
        or triggered_status
        or started_status
        or status_url
        or status_add_test_results
        or success_status
        or failure_status
        or error_status
    )
    requires_status_message = success_status or failure_status or error_status
    success_comment = data.get("success-comment", "")
    failure_comment = data.get("failure-comment", "")
    error_comment = data.get("error-comment", "")
    requires_job_comment = success_comment or failure_comment or error_comment
    cancel_builds_on_update = (
        str(data.get("cancel-builds-on-update", False)).lower() == "true"
    )
    comment_file = data.get("comment-file", "")
    no_commit_status = data.get("no-commit-status", False)
    if (
        requires_status
        or requires_job_comment
        or cancel_builds_on_update
        or comment_file
        or no_commit_status
    ):
        extensions = XML.SubElement(ghprb, "extensions")
        comment_type = "org.jenkinsci.plugins.ghprb.extensions.comments."
        comment_type = comment_type + "GhprbBuildResultMessage"
        # commit status handling
        if requires_status:
            simple_status = XML.SubElement(
                extensions,
                "org.jenkinsci.plugins" ".ghprb.extensions.status." "GhprbSimpleStatus",
            )
            commit_status_context_element = XML.SubElement(
                simple_status, "commitStatusContext"
            )
            triggered_status_element = XML.SubElement(simple_status, "triggeredStatus")
            started_status_element = XML.SubElement(simple_status, "startedStatus")
            status_url_element = XML.SubElement(simple_status, "statusUrl")
            if status_context:
                commit_status_context_element.text = str(status_context)
            if triggered_status:
                triggered_status_element.text = str(triggered_status)
            if started_status:
                started_status_element.text = str(started_status)
            if status_url:
                status_url_element.text = str(status_url)
            XML.SubElement(simple_status, "addTestResults").text = str(
                status_add_test_results
            ).lower()
            if requires_status_message:
                completed_elem = XML.SubElement(simple_status, "completedStatus")
                if success_status:
                    success_elem = XML.SubElement(completed_elem, comment_type)
                    XML.SubElement(success_elem, "message").text = str(success_status)
                    XML.SubElement(success_elem, "result").text = "SUCCESS"
                if failure_status:
                    failure_elem = XML.SubElement(completed_elem, comment_type)
                    XML.SubElement(failure_elem, "message").text = str(failure_status)
                    XML.SubElement(failure_elem, "result").text = "FAILURE"
                if error_status:
                    error_elem = XML.SubElement(completed_elem, comment_type)
                    XML.SubElement(error_elem, "message").text = str(error_status)
                    XML.SubElement(error_elem, "result").text = "ERROR"
        # job comment handling
        if requires_job_comment:
            build_status = XML.SubElement(
                extensions,
                "org.jenkinsci.plugins.ghprb.extensions" ".comments." "GhprbBuildStatus",
            )
            messages_elem = XML.SubElement(build_status, "messages")
            if success_comment:
                success_comment_elem = XML.SubElement(messages_elem, comment_type)
                XML.SubElement(success_comment_elem, "message").text = str(success_comment)
                XML.SubElement(success_comment_elem, "result").text = "SUCCESS"
            if failure_comment:
                failure_comment_elem = XML.SubElement(messages_elem, comment_type)
                XML.SubElement(failure_comment_elem, "message").text = str(failure_comment)
                XML.SubElement(failure_comment_elem, "result").text = "FAILURE"
            if error_comment:
                error_comment_elem = XML.SubElement(messages_elem, comment_type)
                XML.SubElement(error_comment_elem, "message").text = str(error_comment)
                XML.SubElement(error_comment_elem, "result").text = "ERROR"
        if cancel_builds_on_update:
            XML.SubElement(
                extensions,
                "org.jenkinsci.plugins.ghprb.extensions." "build.GhprbCancelBuildsOnUpdate",
            )
        if comment_file:
            comment_file_tag = XML.SubElement(
                extensions,
                "org.jenkinsci.plugins.ghprb.extensions." "comments.GhprbCommentFile",
            )
            comment_file_path_elem = XML.SubElement(comment_file_tag, "commentFilePath")
            comment_file_path_elem.text = str(comment_file)
        if no_commit_status:
            XML.SubElement(
                extensions,
                "org.jenkinsci.plugins.ghprb.extensions." "status.GhprbNoCommitStatus",
            )
def gitlab_merge_request(registry, xml_parent, data):
    """Build merge requests via the GitLab Merge Request Builder plugin.

    The YAML-to-XML mapping differs between plugin series: >= 2.0.0 uses
    plain tag names, while older releases need underscore-prefixed tags
    (and both ``spec`` and ``__cron``) — see the inline notes below.
    """
    ghprb = XML.SubElement(
        xml_parent, "org.jenkinsci.plugins.gitlab." "GitlabBuildTrigger"
    )
    plugin_info = registry.get_plugin_info("Gitlab Merge Request Builder")
    # Note: Assume latest version of plugin is preferred config format
    plugin_ver = pkg_resources.parse_version(
        plugin_info.get("version", str(sys.maxsize))
    )
    if plugin_ver >= pkg_resources.parse_version("2.0.0"):
        mapping = [
            ("cron", "spec", None),
            ("project-path", "projectPath", None),
            ("target-branch-regex", "targetBranchRegex", ""),
            ("use-http-url", "useHttpUrl", False),
            ("assignee-filter", "assigneeFilter", "jenkins"),
            ("tag-filter", "tagFilter", "Build"),
            ("trigger-comment", "triggerComment", ""),
            ("auto-close-failed", "autoCloseFailed", False),
            ("auto-merge-passed", "autoMergePassed", False),
        ]
    else:
        # The plugin version is < 2.0.0
        # Because of a design limitation in the GitlabBuildTrigger Jenkins
        # plugin both 'spec' and '__cron' have to be set to the same value to
        # have them take effect. Also, cron and projectPath are prefixed with
        # underscores in the plugin, but spec is not.
        mapping = [
            ("cron", "spec", None),
            ("cron", "__cron", None),
            ("project-path", "__projectPath", None),
            ("use-http-url", "__useHttpUrl", False),
            ("assignee-filter", "__assigneeFilter", "jenkins"),
            ("tag-filter", "__tagFilter", "Build"),
            ("trigger-comment", "__triggerComment", ""),
            ("publish-build-progress-messages", "__publishBuildProgressMessages", True),
            ("auto-close-failed", "__autoCloseFailed", False),
            ("auto-merge-passed", "__autoMergePassed", False),
        ]
    helpers.convert_mapping_to_xml(ghprb, data, mapping, True)
def gitlab(registry, xml_parent, data):
    """Trigger on GitLab push/merge-request/note events (GitLab plugin).

    Several options changed shape across plugin versions:
    ``trigger-open-merge-request-push`` became an enum in 1.1.26 and the
    branch filter key/valid values changed in 1.2.0 — both are handled
    below based on the detected version.
    """

    def _add_xml(elem, name, value):
        # Small helper for the comma-joined branch list elements.
        XML.SubElement(elem, name).text = value

    gitlab = XML.SubElement(
        xml_parent, "com.dabsquared.gitlabjenkins.GitLabPushTrigger"
    )
    plugin_info = registry.get_plugin_info("GitLab Plugin")
    # Note: Assume latest version of plugin is preferred config format
    plugin_ver = pkg_resources.parse_version(
        plugin_info.get("version", str(sys.maxsize))
    )
    valid_merge_request = ["never", "source", "both"]
    if plugin_ver >= pkg_resources.parse_version("1.1.26"):
        # >= 1.1.26: enum-valued option.
        mapping = [
            (
                "trigger-open-merge-request-push",
                "triggerOpenMergeRequestOnPush",
                "never",
                valid_merge_request,
            )
        ]
        helpers.convert_mapping_to_xml(gitlab, data, mapping, fail_required=True)
    else:
        # < 1.1.26: plain boolean option.
        mapping = [
            ("trigger-open-merge-request-push", "triggerOpenMergeRequestOnPush", True)
        ]
        helpers.convert_mapping_to_xml(gitlab, data, mapping, fail_required=True)
    if plugin_ver < pkg_resources.parse_version("1.2.0"):
        # < 1.2.0: tag is branchFilterName and "All" is spelled "".
        if data.get("branch-filter-type", "") == "All":
            data["branch-filter-type"] = ""
        valid_filters = ["", "NameBasedFilter", "RegexBasedFilter"]
        mapping = [("branch-filter-type", "branchFilterName", "", valid_filters)]
        helpers.convert_mapping_to_xml(gitlab, data, mapping, fail_required=True)
    else:
        valid_filters = ["All", "NameBasedFilter", "RegexBasedFilter"]
        mapping = [("branch-filter-type", "branchFilterType", "All", valid_filters)]
        helpers.convert_mapping_to_xml(gitlab, data, mapping, fail_required=True)
    XML.SubElement(gitlab, "spec").text = ""
    mapping = [
        ("trigger-push", "triggerOnPush", True),
        ("trigger-merge-request", "triggerOnMergeRequest", True),
        ("trigger-accepted-merge-request", "triggerOnAcceptedMergeRequest", False),
        ("trigger-closed-merge-request", "triggerOnClosedMergeRequest", False),
        ("trigger-note", "triggerOnNoteRequest", True),
        ("note-regex", "noteRegex", "Jenkins please retry a build"),
        ("ci-skip", "ciSkip", True),
        ("wip-skip", "skipWorkInProgressMergeRequest", True),
        ("set-build-description", "setBuildDescription", True),
        ("cancel-pending-builds-on-update", "cancelPendingBuildsOnUpdate", False),
        ("add-note-merge-request", "addNoteOnMergeRequest", True),
        ("add-vote-merge-request", "addVoteOnMergeRequest", True),
        ("accept-merge-request-on-success", "acceptMergeRequestOnSuccess", False),
        ("add-ci-message", "addCiMessage", False),
        ("allow-all-branches", "allowAllBranches", False),
        ("target-branch-regex", "targetBranchRegex", ""),
        ("secret-token", "secretToken", ""),
    ]
    helpers.convert_mapping_to_xml(gitlab, data, mapping, fail_required=True)
    list_mapping = (
        ("include-branches", "includeBranchesSpec", []),
        ("exclude-branches", "excludeBranchesSpec", []),
    )
    for yaml_name, xml_name, default_val in list_mapping:
        value = ", ".join(data.get(yaml_name, default_val))
        _add_xml(gitlab, xml_name, value)
    # Only written when explicitly configured.
    optional_mapping = (("pending-build-name", "pendingBuildName", None),)
    helpers.convert_mapping_to_xml(gitlab, data, optional_mapping, fail_required=False)
def gogs(registry, xml_parent, data):
    """Trigger a build when Gogs sends a push webhook (no options)."""
    trigger = XML.SubElement(xml_parent, "org.jenkinsci.plugins.gogs.GogsTrigger")
    XML.SubElement(trigger, "spec")
def build_result(registry, xml_parent, data):
    """Trigger when other jobs finish with selected results.

    Requires the Jenkins BuildResultTrigger plugin.  Each entry in
    ``groups`` names a set of jobs and the build results (default:
    ``success``) that should fire this trigger.
    """
    brt = XML.SubElement(
        xml_parent, "org.jenkinsci.plugins." "buildresulttrigger.BuildResultTrigger"
    )
    brt.set("plugin", "buildresult-trigger")
    mapping = [("cron", "spec", ""), ("combine", "combinedJobs", False)]
    helpers.convert_mapping_to_xml(brt, data, mapping, fail_required=True)
    jobs_info = XML.SubElement(brt, "jobsInfo")
    # YAML result names -> plugin result constants; doubles as the valid
    # value set for the mapping below.
    result_dict = {
        "success": "SUCCESS",
        "unstable": "UNSTABLE",
        "failure": "FAILURE",
        "not-built": "NOT_BUILT",
        "aborted": "ABORTED",
    }
    for group in data["groups"]:
        brti = XML.SubElement(
            jobs_info,
            "org.jenkinsci.plugins."
            "buildresulttrigger.model."
            "BuildResultTriggerInfo",
        )
        # Empty YAML key: the precomputed joined string is used as the
        # default value for the jobNames element.
        jobs_string = ",".join(group["jobs"])
        mapping = [("", "jobNames", jobs_string, group)]
        helpers.convert_mapping_to_xml(brti, group, mapping, fail_required=True)
        checked_results = XML.SubElement(brti, "checkedResults")
        for result in group.get("results", ["success"]):
            model_checked = XML.SubElement(
                checked_results,
                "org.jenkinsci." "plugins.buildresulttrigger.model." "CheckedResult",
            )
            mapping = [("", "checked", result, result_dict)]
            helpers.convert_mapping_to_xml(
                model_checked, result_dict, mapping, fail_required=True
            )
def reverse(registry, xml_parent, data):
    """Trigger this job after the listed upstream projects are built.

    ``jobs`` may be a comma-separated string or a list of job names;
    ``result`` (default ``success``) selects the minimum upstream result
    that fires the trigger.
    """
    valid_results = ["SUCCESS", "UNSTABLE", "FAILURE"]
    trigger = XML.SubElement(xml_parent, "jenkins.triggers.ReverseBuildTrigger")
    XML.SubElement(trigger, "spec").text = ""
    upstream = data.get("jobs")
    if isinstance(upstream, list):
        upstream = ",".join(upstream)
    XML.SubElement(trigger, "upstreamProjects").text = upstream
    threshold = XML.SubElement(trigger, "threshold")
    result = str(data.get("result", "success")).upper()
    if result not in valid_results:
        raise jenkins_jobs.errors.JenkinsJobsException(
            "Choice should be one of the following options: %s."
            % ", ".join(valid_results)
        )
    threshold_info = hudson_model.THRESHOLDS[result]
    XML.SubElement(threshold, "name").text = threshold_info["name"]
    XML.SubElement(threshold, "ordinal").text = threshold_info["ordinal"]
    XML.SubElement(threshold, "color").text = threshold_info["color"]
    XML.SubElement(threshold, "completeBuild").text = str(
        threshold_info["complete"]
    ).lower()
def monitor_folders(registry, xml_parent, data):
    """Trigger when the content of a folder changes (fstrigger plugin).

    The plugin stores each check as an *exclusion* flag, so every
    ``check-*`` option (default ``True``) is negated before being written.
    """
    trigger = XML.SubElement(
        xml_parent,
        "org.jenkinsci.plugins.fstrigger.triggers.FolderContentTrigger",
    )
    trigger.set("plugin", "fstrigger")
    helpers.convert_mapping_to_xml(
        trigger,
        data,
        [("path", "path", ""), ("cron", "spec", "")],
        fail_required=True,
    )
    XML.SubElement(trigger, "includes").text = ",".join(data.get("includes", ""))
    XML.SubElement(trigger, "excludes").text = data.get("excludes", "")
    for yaml_key, xml_tag in (
        ("check-modification-date", "excludeCheckLastModificationDate"),
        ("check-content", "excludeCheckContent"),
        ("check-fewer", "excludeCheckFewerOrMoreFiles"),
    ):
        XML.SubElement(trigger, xml_tag).text = str(
            not data.get(yaml_key, True)
        ).lower()
def monitor_files(registry, xml_parent, data):
    """Trigger when one or more monitored files change (fstrigger plugin).

    Each ``files`` entry names a path pattern, a poll strategy and an
    optional list of ``check-content`` inspections (archive content,
    manifest/properties keys, XML/Text expressions).

    :raises JenkinsJobsException: when ``files`` is empty or a key-based
        content entry has no configuration.
    :raises InvalidAttributeError: for an unknown ``check-content`` type
        or an invalid ``strategy``.
    """
    ft_prefix = "org.jenkinsci.plugins.fstrigger.triggers."
    valid_strategies = ["LATEST", "IGNORE"]
    # name -> [class suffix, collection tag, entry tag, fixed sub-entries];
    # single-element lists are the key-based (manifest/properties) types.
    valid_content_types = {
        "simple": ["Simple", "", "", []],
        "jar": ["JAR", "", "", []],
        "tar": ["Tar", "", "", []],
        "zip": ["ZIP", "", "", []],
        "source-manifest": ["SourceManifest"],
        "jar-manifest": ["JARManifest"],
        "properties": ["Properties"],
        "xml": ["XML", "expressions", "expression", None],
        "text": ["Text", "regexElements", "regex", None],
    }
    ft = XML.SubElement(xml_parent, ft_prefix + "FileNameTrigger")
    XML.SubElement(ft, "spec").text = str(data.get("cron", ""))
    files = data.get("files", [])
    if not files:
        raise JenkinsJobsException("At least one file must be provided")
    files_tag = XML.SubElement(ft, "fileInfo")
    for file_info in files:
        file_tag = XML.SubElement(files_tag, ft_prefix + "FileNameTriggerInfo")
        check_content = file_info.get("check-content", [])
        files_mapping = [
            ("path", "filePathPattern", None),
            ("strategy", "strategy", "LATEST", valid_strategies),
            ("", "inspectingContentFile", bool(check_content)),
        ]
        helpers.convert_mapping_to_xml(
            file_tag, file_info, files_mapping, fail_required=True
        )
        base_content_tag = XML.SubElement(file_tag, "contentFileTypes")
        for content in check_content:
            type_name = next(iter(content.keys()))
            if type_name not in valid_content_types:
                raise InvalidAttributeError(
                    "check-content", type_name, valid_content_types.keys()
                )
            content_type = valid_content_types.get(type_name)
            if len(content_type) == 1:
                # Key-based content types (manifests / properties files).
                class_name = "{0}filecontent.{1}FileContent".format(
                    ft_prefix, content_type[0]
                )
                content_data = content.get(type_name)
                if not content_data:
                    raise JenkinsJobsException(
                        "Need to specify something " "under " + type_name
                    )
                for entry in content_data:
                    content_tag = XML.SubElement(base_content_tag, class_name)
                    keys = entry.get("keys", [])
                    if keys:
                        XML.SubElement(content_tag, "keys2Inspect").text = ",".join(
                            keys
                        )
                    XML.SubElement(content_tag, "allKeys").text = str(
                        entry.get("all-keys", True)
                    ).lower()
            else:
                if content[type_name]:
                    sub_entries = content_type[3]
                    if sub_entries is None:
                        sub_entries = content[type_name]
                    build_content_type(
                        base_content_tag,
                        sub_entries,
                        ft_prefix + "filecontent",
                        "FileContent",
                        "FileContentEntry",
                        *content_type[0:3]
                    )
        if bool(check_content):
            # Accept the correctly-spelled key while staying backward
            # compatible with the historical "ignore-modificaton-date" typo.
            ignore_mod_date = file_info.get(
                "ignore-modification-date",
                file_info.get("ignore-modificaton-date", True),
            )
            XML.SubElement(file_tag, "doNotCheckLastModificationDate").text = str(
                ignore_mod_date
            ).lower()
def ivy(registry, xml_parent, data):
    """Register an IvyTrigger element (ivytrigger plugin) built from *data*.

    Emits the trigger's option mapping, the ';'-joined properties-file list,
    and the optional label restriction.
    """
    trigger = XML.SubElement(
        xml_parent, "org.jenkinsci.plugins.ivytrigger.IvyTrigger"
    )
    # Entry order fixes the order of the generated child elements.
    ivy_mapping = [
        ("path", "ivyPath", None),
        ("settings-path", "ivySettingsPath", None),
        ("properties-content", "propertiesContent", None),
        ("debug", "debug", False),
        ("download-artifacts", "downloadArtifacts", True),
        ("enable-concurrent", "enableConcurrentBuild", False),
        ("cron", "spec", ""),
    ]
    helpers.convert_mapping_to_xml(trigger, data, ivy_mapping, fail_required=False)
    # Multiple properties files are serialized as one ';'-separated string.
    XML.SubElement(trigger, "propertiesFilePath").text = ";".join(
        data.get("properties-file", [])
    )
    restrict_label = data.get("label")
    XML.SubElement(trigger, "labelRestriction").text = (
        "true" if restrict_label else "false"
    )
    if restrict_label:
        XML.SubElement(trigger, "triggerLabel").text = restrict_label
def script(registry, xml_parent, data):
    """Register a ScriptTrigger element (scripttrigger plugin) built from *data*."""
    trigger = XML.SubElement(
        xml_parent, "org.jenkinsci.plugins.scripttrigger.ScriptTrigger"
    )
    trigger.set("plugin", "scripttrigger")
    restrict_label = data.get("label")
    # Entries with an empty yaml key are emitted unconditionally with the
    # value computed here (label restriction flag and the label itself).
    trigger_mapping = [
        ("script", "script", ""),
        ("script-file-path", "scriptFilePath", ""),
        ("cron", "spec", ""),
        ("enable-concurrent", "enableConcurrentBuild", False),
        ("exit-code", "exitCode", 0),
        ("", "labelRestriction", bool(restrict_label)),
        ("", "triggerLabel", restrict_label),
    ]
    helpers.convert_mapping_to_xml(trigger, data, trigger_mapping, fail_required=False)
def groovy_script(registry, xml_parent, data):
    """Register a GroovyScriptTrigger element (scripttrigger plugin) built from *data*."""
    trigger = XML.SubElement(
        xml_parent, "org.jenkinsci.plugins.scripttrigger.groovy.GroovyScriptTrigger"
    )
    trigger.set("plugin", "scripttrigger")
    restrict_label = data.get("label")
    # Entry order fixes the order of the generated child elements; the last
    # two entries are always emitted from the label computed above.
    trigger_mapping = [
        ("system-script", "groovySystemScript", False),
        ("script", "groovyExpression", ""),
        ("script-file-path", "groovyFilePath", ""),
        ("property-file-path", "propertiesFilePath", ""),
        ("enable-concurrent", "enableConcurrentBuild", False),
        ("cron", "spec", ""),
        ("", "labelRestriction", bool(restrict_label)),
        ("", "triggerLabel", restrict_label),
    ]
    helpers.convert_mapping_to_xml(trigger, data, trigger_mapping, fail_required=False)
def rabbitmq(registry, xml_parent, data):
    """Register a RemoteBuildTrigger element (rabbitmq-build-trigger plugin).

    Optional message ``filters`` become a <filters> list of Filter entries;
    ``token`` is mapped with no default (fail_required=True).
    """
    prefix = "org.jenkinsci.plugins.rabbitmqbuildtrigger."
    trigger = XML.SubElement(xml_parent, prefix + "RemoteBuildTrigger")
    filter_entries = data.get("filters", [])
    if filter_entries:
        filters_elem = XML.SubElement(trigger, "filters")
        entry_mapping = [("field", "field", None), ("value", "value", None)]
        for entry in filter_entries:
            entry_elem = XML.SubElement(filters_elem, prefix + "Filter")
            helpers.convert_mapping_to_xml(
                entry_elem, entry, entry_mapping, fail_required=True
            )
    # ``spec`` is always written (empty); the remote build token is required.
    trigger_mapping = [("", "spec", ""), ("token", "remoteBuildToken", None)]
    helpers.convert_mapping_to_xml(trigger, data, trigger_mapping, fail_required=True)
def parameterized_timer(parser, xml_parent, data):
    """Register a ParameterizedTimerTrigger element (parameterized-scheduler plugin)."""
    trigger = XML.SubElement(
        xml_parent,
        "org.jenkinsci.plugins.parameterizedscheduler.ParameterizedTimerTrigger",
    )
    # ``spec`` stays empty; the cron expression goes into
    # parameterizedSpecification and has no default (required).
    timer_mapping = [("", "spec", ""), ("cron", "parameterizedSpecification", None)]
    helpers.convert_mapping_to_xml(trigger, data, timer_mapping, fail_required=True)
def jira_changelog(registry, xml_parent, data):
    """Register a JiraChangelogTrigger element (jira-trigger plugin).

    Builds the JQL filter, one changelog matcher per entry in
    ``changelog-matchers`` (field-type must be CUSTOM or JIRA), and one
    issue-attribute parameter mapping per entry in ``parameter-mapping``.

    :raises InvalidAttributeError: if a matcher's ``field-type`` is not one
        of the supported values.
    """
    jcht = XML.SubElement(
        xml_parent, "com.ceilfors.jenkins.plugins." "jiratrigger.JiraChangelogTrigger"
    )
    jcht.set("plugin", "jira-trigger")
    mapping = [("jql-filter", "jqlFilter", "")]
    helpers.convert_mapping_to_xml(jcht, data, mapping, fail_required=True)
    changelog = XML.SubElement(jcht, "changelogMatchers")
    # Map each supported field-type to the plugin matcher class it selects.
    matcher_classes = {
        "CUSTOM": "CustomFieldChangelogMatcher",
        "JIRA": "JiraFieldChangelogMatcher",
    }
    mappings = [
        ("field", "field", ""),
        ("new-value", "newValue", ""),
        ("old-value", "oldValue", ""),
        ("compare-new-value", "comparingNewValue", False),
        ("compare-old-value", "comparingOldValue", False),
    ]
    for matcher in data.get("changelog-matchers", []):
        fieldtype = matcher.get("field-type")
        if fieldtype not in matcher_classes:
            # Previously an unrecognized field-type either raised an opaque
            # UnboundLocalError (first iteration) or silently attached the
            # matcher fields to the *previous* matcher's element; fail with
            # a clear configuration error instead.
            raise InvalidAttributeError(
                "field-type", fieldtype, matcher_classes.keys()
            )
        parent_tag = XML.SubElement(
            changelog,
            "com.ceilfors.jenkins."
            "plugins.jiratrigger.changelog." + matcher_classes[fieldtype],
        )
        XML.SubElement(parent_tag, "fieldType").text = fieldtype
        helpers.convert_mapping_to_xml(
            parent_tag, matcher, mappings, fail_required=True
        )
    param = XML.SubElement(jcht, "parameterMappings")
    parameter_mappings = [
        ("jenkins-parameter", "jenkinsParameter", ""),
        ("issue-attribute-path", "issueAttributePath", ""),
    ]
    for parameter in data.get("parameter-mapping", []):
        parent = XML.SubElement(
            param,
            "com.ceilfors.jenkins.plugins."
            "jiratrigger.parameter."
            "IssueAttributePathParameterMapping",
        )
        helpers.convert_mapping_to_xml(
            parent, parameter, parameter_mappings, fail_required=True
        )
def jira_comment_trigger(registry, xml_parent, data):
    """Register a JiraCommentTrigger element (jira-trigger plugin).

    Emits the JQL filter / comment pattern and one
    IssueAttributePathParameterMapping per ``parameter-mapping`` entry.
    """
    trigger = XML.SubElement(
        xml_parent, "com.ceilfors.jenkins.plugins." "jiratrigger.JiraCommentTrigger"
    )
    trigger.set("plugin", "jira-trigger")
    trigger_mapping = [
        ("jql-filter", "jqlFilter", ""),
        ("comment-pattern", "commentPattern", "(?i)build this please"),
    ]
    helpers.convert_mapping_to_xml(trigger, data, trigger_mapping, fail_required=True)
    mappings_elem = XML.SubElement(trigger, "parameterMappings")
    # The per-entry mapping is constant, so build it once outside the loop.
    entry_mapping = [
        ("jenkins-parameter", "jenkinsParameter", ""),
        ("issue-attribute-path", "issueAttributePath", ""),
    ]
    for entry in data.get("parameter-mapping", []):
        entry_elem = XML.SubElement(
            mappings_elem,
            "com.ceilfors.jenkins.plugins."
            "jiratrigger.parameter."
            "IssueAttributePathParameterMapping",
        )
        helpers.convert_mapping_to_xml(
            entry_elem, entry, entry_mapping, fail_required=True
        )
def stash_pull_request(registry, xml_parent, data):
    """Register a StashBuildTrigger element (stash-pullrequest-builder plugin).

    All options come from *data*; entries with a ``None`` default and
    ``fail_required=True`` are mandatory (cron, stash-host, credentials-id,
    project, repository).
    """
    pr_trigger = XML.SubElement(
        xml_parent, "stashpullrequestbuilder.stashpullrequestbuilder.StashBuildTrigger"
    )
    pr_trigger.set("plugin", "stash-pullrequest-builder")
    mappings = [
        ("cron", "spec", None),  # Spec needs to be set to the same as cron
        ("cron", "cron", None),
        ("stash-host", "stashHost", None),
        ("credentials-id", "credentialsId", None),
        ("project", "projectCode", None),
        ("repository", "repositoryName", None),
        ("ci-skip-phrases", "ciSkipPhrases", "NO TEST"),
        ("ci-build-phrases", "ciBuildPhrases", "test this please"),
        ("target-branches", "targetBranchesToBuild", ""),
        ("ignore-ssl", "ignoreSsl", False),
        ("check-destination-commit", "checkDestinationCommit", False),
        ("check-mergable", "checkMergeable", False),
        ("merge-on-success", "mergeOnSuccess", False),
        ("check-not-conflicted", "checkNotConflicted", True),
        ("only-build-on-comment", "onlyBuildOnComment", False),
        (
            "delete-previous-build-finish-comments",
            "deletePreviousBuildFinishComments",
            False,
        ),
        ("cancel-outdated-jobs", "cancelOutdatedJobsEnabled", False),
    ]
    helpers.convert_mapping_to_xml(pr_trigger, data, mappings, fail_required=True)
def _gwt_add_variable_section(
    parent, data, param_key, container_tag, entry_tag, entry_mappings, fail_required
):
    """Emit one <container_tag> list with an <entry_tag> child per entry.

    Mirrors the historical behavior exactly: if *data* has no ``.get``
    (e.g. the trigger was configured with no options at all) or the list
    is missing/empty, nothing is emitted.
    """
    try:
        entries = data.get(param_key)
    except AttributeError:
        return
    if not entries:
        return
    container = XML.SubElement(parent, container_tag)
    for entry in entries:
        entry_elem = XML.SubElement(container, entry_tag)
        helpers.convert_mapping_to_xml(
            entry_elem, entry, entry_mappings, fail_required=fail_required
        )


def generic_webhook_trigger(registry, xml_parent, data):
    """Register a GenericTrigger element (generic-webhook-trigger plugin).

    Emits, in this order (the plugin's serialized layout): the post-content
    variables, the regex filter text/expression, the request variables, the
    header variables, and finally the top-level flags (cause, token, ...).
    The three variable sections previously duplicated the same
    try/SubElement/loop code; they now share ``_gwt_add_variable_section``.
    """
    namespace = "org.jenkinsci.plugins.gwt."
    gwtrig = XML.SubElement(xml_parent, namespace + "GenericTrigger")
    gwtrig.set("plugin", "generic-webhook-trigger")
    XML.SubElement(gwtrig, "spec")
    # Generic variables ("Post content parameters" in the UI).
    _gwt_add_variable_section(
        gwtrig,
        data,
        "post-content-params",
        "genericVariables",
        namespace + "GenericVariable",
        [
            ("type", "expressionType", "", ["JSONPath", "XPath"]),
            ("key", "key", ""),
            ("value", "value", ""),
            ("regex-filter", "regexpFilter", ""),
            ("default-value", "defaultValue", ""),
        ],
        True,
    )
    # This is dropped here in the middle as that's how the jenkins config is
    # swung: the filter text/expression sit between the variable sections.
    mapping = [
        ("regex-filter-text", "regexpFilterText", ""),
        ("regex-filter-expression", "regexpFilterExpression", ""),
    ]
    helpers.convert_mapping_to_xml(gwtrig, data, mapping, fail_required=False)
    # Generic request variables ("Request parameters" in the UI).
    _gwt_add_variable_section(
        gwtrig,
        data,
        "request-params",
        "genericRequestVariables",
        namespace + "GenericRequestVariable",
        [("key", "key", ""), ("regex-filter", "regexpFilter", "")],
        False,
    )
    # Generic header variables.
    _gwt_add_variable_section(
        gwtrig,
        data,
        "header-params",
        "genericHeaderVariables",
        namespace + "GenericHeaderVariable",
        [("key", "key", ""), ("regex-filter", "regexpFilter", "")],
        False,
    )
    # Top-level options.
    mapping = [
        ("print-post-content", "printPostContent", False),
        ("print-contrib-var", "printContributedVariables", False),
        ("cause", "causeString", ""),
        ("token", "token", ""),
        ("silent-response", "silentResponse", False),
    ]
    helpers.convert_mapping_to_xml(gwtrig, data, mapping, fail_required=False)
def artifactory(registry, xml_parent, data):
    """Register an ArtifactoryTrigger element (artifactory plugin).

    The schedule/paths options go on the trigger itself; the server name is
    required and lives in a nested <details> element.
    """
    trigger = XML.SubElement(
        xml_parent, "org.jfrog.hudson.trigger.ArtifactoryTrigger"
    )
    trigger.set("plugin", "artifactory")
    trigger_mapping = [
        ("schedule", "spec", ""),
        ("paths", "paths", ""),
        ("", "branches", ""),
        ("", "lastModified", ""),
    ]
    helpers.convert_mapping_to_xml(trigger, data, trigger_mapping, fail_required=True)
    details = XML.SubElement(trigger, "details")
    details_mapping = [
        ("artifactory-server", "artifactoryName", None),
        ("", "stagingPlugin", ""),
    ]
    helpers.convert_mapping_to_xml(details, data, details_mapping, fail_required=True)
class Triggers(jenkins_jobs.modules.base.Base):
    """Module that renders the job's ``triggers`` section.

    For pipeline jobs the trigger list nests inside a
    PipelineTriggersJobProperty under <properties>; every other project
    type gets a top-level <triggers class="vector"> element.
    """

    sequence = 50

    component_type = "trigger"
    component_list_type = "triggers"

    def gen_xml(self, xml_parent, data):
        """Create the trigger container and dispatch each configured trigger."""
        configured = data.get("triggers", [])
        if not configured:
            return
        if data.get("project-type", "freestyle") == "pipeline":
            properties = xml_parent.find("properties")
            if properties is None:
                properties = XML.SubElement(xml_parent, "properties")
            job_property = XML.SubElement(
                properties,
                "org.jenkinsci.plugins.workflow.job.properties.PipelineTriggersJobProperty",
            )
            container = XML.SubElement(job_property, "triggers")
        else:
            container = XML.SubElement(xml_parent, "triggers", {"class": "vector"})
        for trigger in configured:
            self.registry.dispatch("trigger", container, trigger)
| true | true |
f7f5ab402543350738c8f26b34c5651f4a0db17e | 1,824 | py | Python | transformer_model/masker.py | zbloss/TransformerModel | da4712fe5631accd22156f129e69c98b4ffe1146 | [
"MIT"
] | 61 | 2019-08-18T20:21:25.000Z | 2021-07-22T03:17:51.000Z | transformer_model/masker.py | zbloss/TransformerModel | da4712fe5631accd22156f129e69c98b4ffe1146 | [
"MIT"
] | 1 | 2019-08-18T20:31:51.000Z | 2019-08-19T01:11:41.000Z | transformer_model/masker.py | zbloss/TransformerModel | da4712fe5631accd22156f129e69c98b4ffe1146 | [
"MIT"
] | 10 | 2019-08-19T05:36:15.000Z | 2021-08-04T14:12:54.000Z | import tensorflow as tf
class Masker(object):
    """Collection of attention-mask builders shared across the package."""

    def __init__(self):
        """No instance state is needed; the builders are effectively static."""

    @staticmethod
    def create_padding_mask(seq):
        """Mark the zero-valued (padding) positions of ``seq``.

        :param seq: the sequence to mask
        :return: float mask shaped (batch_size, 1, 1, seq_len), 1.0 at
            padded positions, suitable for adding to attention logits
        """
        pad = tf.cast(tf.math.equal(seq, 0), tf.float32)
        # Insert the two broadcast dimensions expected by the attention logits.
        return pad[:, tf.newaxis, tf.newaxis, :]

    @staticmethod
    def create_look_ahead_mask(size):
        """Return a (size, size) mask hiding not-yet-seen (future) positions."""
        return 1 - tf.linalg.band_part(tf.ones((size, size)), -1, 0)

    def create_masks(self, inp, tar):
        """Build every mask needed for one encoder/decoder step.

        :param inp: feature tensor to mask
        :param tar: target tensor to mask
        :return: (encoder padding mask, combined decoder mask,
            decoder padding mask)
        """
        # Padding mask over the encoder input.
        enc_padding_mask = self.create_padding_mask(inp)
        # Used in the decoder's second attention block to mask encoder outputs.
        dec_padding_mask = self.create_padding_mask(inp)
        # Used in the decoder's first attention block: hide both future
        # target tokens and target padding.
        look_ahead = self.create_look_ahead_mask(tf.shape(tar)[1])
        target_padding = self.create_padding_mask(tar)
        combined_mask = tf.maximum(target_padding, look_ahead)
        return enc_padding_mask, combined_mask, dec_padding_mask
| 33.777778 | 106 | 0.63432 | import tensorflow as tf
class Masker(object):
def __init__(self):
@staticmethod
def create_padding_mask(seq):
seq = tf.cast(tf.math.equal(seq, 0), tf.float32)
return seq[:, tf.newaxis, tf.newaxis, :]
@staticmethod
def create_look_ahead_mask(size):
mask = 1 - tf.linalg.band_part(tf.ones((size, size)), -1, 0)
return mask
def create_masks(self, inp, tar):
enc_padding_mask = self.create_padding_mask(inp)
dec_padding_mask = self.create_padding_mask(inp)
look_ahead_mask = self.create_look_ahead_mask(tf.shape(tar)[1])
dec_target_padding_mask = self.create_padding_mask(tar)
combined_mask = tf.maximum(dec_target_padding_mask, look_ahead_mask)
return enc_padding_mask, combined_mask, dec_padding_mask
| true | true |
f7f5add40d638e0ae6100797e88af42c838f64a1 | 2,925 | py | Python | tests/ObjectTree.py | PJB3005/ByondToolsv3 | da80d87d474e83f13350acb730173d21e9a6e1ff | [
"MIT"
] | 2 | 2016-11-28T18:26:46.000Z | 2016-11-29T13:27:38.000Z | tests/ObjectTree.py | PJB3005/ByondToolsv3 | da80d87d474e83f13350acb730173d21e9a6e1ff | [
"MIT"
] | 3 | 2016-11-19T04:01:32.000Z | 2020-06-06T12:39:01.000Z | tests/ObjectTree.py | PJB3005/ByondToolsv3 | da80d87d474e83f13350acb730173d21e9a6e1ff | [
"MIT"
] | 2 | 2017-05-18T05:59:23.000Z | 2020-06-06T11:27:59.000Z | '''
Created on Jan 5, 2014
@author: Rob
'''
import unittest
class ObjectTreeTests(unittest.TestCase):
    """Parsing tests for ObjectTree.consumeVariable."""

    def setUp(self):
        from byond.objtree import ObjectTree
        self.tree = ObjectTree()

    def _consume(self, declaration):
        # Parse ``declaration`` with an empty path context at line 0.
        return self.tree.consumeVariable(declaration, '', 0)

    def _check_flags(self, data, declaration=True, inherited=False, special=None):
        # Flag values shared by most of the cases below.
        self.assertEqual(data.declaration, declaration)
        self.assertEqual(data.inherited, inherited)
        self.assertEqual(data.special, special)

    def test_consumeVariable_basics(self):
        name, data = self._consume('var/obj/item/weapon/chainsaw = new')
        self.assertEqual(name, 'chainsaw')
        self.assertEqual(data.type, '/obj/item/weapon')
        self.assertEqual(data.value, 'new')
        self._check_flags(data)

    def test_consumeVariable_alternate_array_declaration_01(self):
        name, data = self._consume('var/appearance_keylist[0]')
        self.assertEqual(name, 'appearance_keylist')
        self.assertEqual(data.type, '/list')
        self.assertIsNone(data.value)
        self.assertEqual(data.size, 0)
        self._check_flags(data)

    def test_consumeVariable_alternate_array_declaration_02(self):
        name, data = self._consume('var/medical[] = list()')
        self.assertEqual(name, 'medical')
        self.assertEqual(data.type, '/list')
        self.assertEqual(data.value, 'list()')
        # An unsized array declaration parses with size -1.
        self.assertEqual(data.size, -1)
        self._check_flags(data)

    def test_consumeVariable_complex_types(self):
        name, data = self._consume('var/datum/gas_mixture/air_temporary')
        self.assertEqual(name, 'air_temporary')
        self.assertEqual(data.type, '/datum/gas_mixture')
        self.assertIsNone(data.value)
        self.assertIsNone(data.size)
        self._check_flags(data)

    def test_consumeVariable_file_ref(self):
        name, data = self._consume("icon = 'butts.dmi'")
        self.assertEqual(name, 'icon')
        self.assertEqual(data.type, '/icon')
        # File refs stringify to the bare file name.
        self.assertEqual(str(data.value), 'butts.dmi')
        self.assertIsNone(data.size)
        self._check_flags(data, declaration=False)
if __name__ == "__main__":
    # Allow running this test module directly.
    unittest.main()
| 39 | 66 | 0.62735 | import unittest
class ObjectTreeTests(unittest.TestCase):
def setUp(self):
from byond.objtree import ObjectTree
self.tree = ObjectTree()
def test_consumeVariable_basics(self):
test_string = 'var/obj/item/weapon/chainsaw = new'
name, data = self.tree.consumeVariable(test_string, '', 0)
self.assertEqual(name, 'chainsaw')
self.assertEqual(data.type, '/obj/item/weapon')
self.assertEqual(data.value, 'new')
self.assertEqual(data.declaration, True)
self.assertEqual(data.inherited, False)
self.assertEqual(data.special, None)
def test_consumeVariable_alternate_array_declaration_01(self):
test_string = 'var/appearance_keylist[0]'
name, data = self.tree.consumeVariable(test_string, '', 0)
self.assertEqual(name, 'appearance_keylist')
self.assertEqual(data.type, '/list')
self.assertEqual(data.value, None)
self.assertEqual(data.size, 0)
self.assertEqual(data.declaration, True)
self.assertEqual(data.inherited, False)
self.assertEqual(data.special, None)
def test_consumeVariable_alternate_array_declaration_02(self):
test_string = 'var/medical[] = list()'
name, data = self.tree.consumeVariable(test_string, '', 0)
self.assertEqual(name, 'medical')
self.assertEqual(data.type, '/list')
self.assertEqual(data.value, 'list()')
self.assertEqual(data.size, -1)
self.assertEqual(data.declaration, True)
self.assertEqual(data.inherited, False)
self.assertEqual(data.special, None)
def test_consumeVariable_complex_types(self):
test_string = 'var/datum/gas_mixture/air_temporary'
name, data = self.tree.consumeVariable(test_string, '', 0)
self.assertEqual(name, 'air_temporary')
self.assertEqual(data.type, '/datum/gas_mixture')
self.assertEqual(data.value, None)
self.assertEqual(data.size, None)
self.assertEqual(data.declaration, True)
self.assertEqual(data.inherited, False)
self.assertEqual(data.special, None)
def test_consumeVariable_file_ref(self):
test_string = 'icon = \'butts.dmi\''
name, data = self.tree.consumeVariable(test_string, '', 0)
self.assertEqual(name, 'icon')
self.assertEqual(data.type, '/icon')
self.assertEqual(str(data.value), 'butts.dmi')
self.assertEqual(data.size, None)
self.assertEqual(data.declaration, False)
self.assertEqual(data.inherited, False)
self.assertEqual(data.special, None)
if __name__ == "__main__":
unittest.main()
| true | true |
f7f5ae86640c93cc2f1dfe6dd1ac1d60fa07919b | 343 | py | Python | PYSection508/PDFAudit.py | bengjerstad/PDF | 289a304f0c755d604de621165c5a53becc8ab11f | [
"MIT"
] | null | null | null | PYSection508/PDFAudit.py | bengjerstad/PDF | 289a304f0c755d604de621165c5a53becc8ab11f | [
"MIT"
] | null | null | null | PYSection508/PDFAudit.py | bengjerstad/PDF | 289a304f0c755d604de621165c5a53becc8ab11f | [
"MIT"
] | null | null | null | from pdfrw import PdfReader, PdfWriter
import os

# Expected default language tag (defined for reference; not used below —
# presumably intended for a future comparison against trailer.Root.Lang).
defaultlang = '(en-US)'

# Audit every PDF in the folder called 'files', printing the metadata that
# Section 508 accessibility checks care about (Lang, Title, MarkInfo).
files = os.listdir('files')
for file in files:
    print(file)
    # os.path.join keeps the script portable; the original 'files\\' + file
    # hard-coded the Windows path separator and broke on POSIX systems.
    trailer = PdfReader(os.path.join('files', file))
    print("Lang: ", trailer.Root.Lang)
    print("Title: ", trailer.Info.Title)
    print("MarkInfo: ", trailer.Root.MarkInfo)
    print("")
| 22.866667 | 44 | 0.71137 | from pdfrw import PdfReader, PdfWriter
import os
defaultlang = '(en-US)'
files = os.listdir('files')
for file in files:
print(file)
trailer = PdfReader('files\\'+file)
print("Lang: ",trailer.Root.Lang)
print("Title: ",trailer.Info.Title)
print("MarkInfo: ",trailer.Root.MarkInfo)
print("")
| true | true |
f7f5b066a218c8a3aa63aa68f035a8eeab67da95 | 998 | py | Python | tests/zolegame/room_test.py | marisabele/Zole | 19ab7b417ad54d1072b010d62a09b7ff9d7c1fd0 | [
"Apache-2.0"
] | 2 | 2017-10-31T21:45:16.000Z | 2018-12-09T15:51:48.000Z | tests/zolegame/room_test.py | marisabele/Zole | 19ab7b417ad54d1072b010d62a09b7ff9d7c1fd0 | [
"Apache-2.0"
] | null | null | null | tests/zolegame/room_test.py | marisabele/Zole | 19ab7b417ad54d1072b010d62a09b7ff9d7c1fd0 | [
"Apache-2.0"
] | null | null | null | import unittest
from zolegame.room import Room
from zolegame.player import Player
from bots.random_player import RandomPlayer
from zolegame.players import PlayerInterface
class RoomTest(unittest.TestCase):
    """Tests for Room player management and a full bot-vs-bot round."""

    def setUp(self):
        # Three human-interface players plus a fresh room per test.
        self.playerA = PlayerInterface()
        self.playerB = PlayerInterface()
        self.playerC = PlayerInterface()
        self.room = Room(['t', 'd', 'd'], 10)

    def test_addPlayers(self):
        self.room.addPlayer(self.playerA)
        self.room.addPlayer(self.playerB)
        # Two joined so far, three after the last add.
        self.assertEqual(2, len(self.room.players))
        self.room.addPlayer(self.playerC)
        self.assertEqual(3, len(self.room.players))

    def test_testPlay(self):
        # A full room of bots should be able to play a round end to end.
        for bot in (RandomPlayer(), RandomPlayer(), RandomPlayer()):
            self.room.addPlayer(bot)
        self.room.play()
| 34.413793 | 55 | 0.621242 | import unittest
from zolegame.room import Room
from zolegame.player import Player
from bots.random_player import RandomPlayer
from zolegame.players import PlayerInterface
class RoomTest(unittest.TestCase):
def setUp(self):
self.playerA = PlayerInterface()
self.playerB = PlayerInterface()
self.playerC = PlayerInterface()
self.room = Room(['t','d','d'], 10)
def test_addPlayers(self):
self.room.addPlayer(self.playerA)
self.room.addPlayer(self.playerB)
self.assertEqual(2, len(self.room.players))
self.room.addPlayer(self.playerC)
self.assertEqual(3, len(self.room.players))
def test_testPlay(self):
playerA = RandomPlayer()
playerB = RandomPlayer()
playerC = RandomPlayer()
self.room.addPlayer(playerA)
self.room.addPlayer(playerB)
self.room.addPlayer(playerC)
self.room.play()
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.