code stringlengths 22 1.05M | apis listlengths 1 3.31k | extract_api stringlengths 75 3.25M |
|---|---|---|
# -*- coding: utf-8 -*-
#
# Copyright © Spyder Project Contributors
# Licensed under the terms of the MIT License
# (see spyder/__init__.py for details)
"""
This module contains the indentation guide panel.
"""
# Third party imports
from qtpy.QtCore import Qt
from qtpy.QtGui import QPainter, QColor
from intervaltree import IntervalTree
# Local imports
from spyder.plugins.editor.utils.editor import TextBlockHelper
from spyder.api.panel import Panel
class IndentationGuide(Panel):
    """Indentation guides to easily identify nested blocks."""

    # --- Qt Overrides
    # -----------------------------------------------------------------
    def __init__(self, editor):
        """Initialize IndentationGuide panel.

        i_width(int): indentation width in characters.
        """
        Panel.__init__(self, editor)
        self.color = Qt.darkGray
        self.i_width = 4
        self.bar_offset = 0
        horizontal_scrollbar = editor.horizontalScrollBar()
        horizontal_scrollbar.valueChanged.connect(self.update_bar_position)
        horizontal_scrollbar.sliderReleased.connect(self.update)

    def update_bar_position(self, value):
        """Remember the horizontal scroll offset so guides track the text."""
        self.bar_offset = value

    def paintEvent(self, event):
        """Override Qt method.

        Draws one semi-transparent vertical line per folding region, from
        the top of the region's first block to the bottom of its last one.
        """
        painter = QPainter(self)
        color = QColor(self.color)
        color.setAlphaF(.5)
        painter.setPen(color)
        offset = self.editor.document().documentMargin() + \
            self.editor.contentOffset().x()
        folding_panel = self.editor.panels.get('FoldingPanel')
        folding_regions = folding_panel.folding_regions
        leading_whitespaces = self.editor.leading_whitespaces
        # NOTE: the previous version also read folding_panel.folding_status
        # and set a `post_update` local; both were unused and are removed.
        for line_number in folding_regions:
            end_line = folding_regions[line_number]
            start_block = self.editor.document().findBlockByNumber(
                line_number)
            end_block = self.editor.document().findBlockByNumber(end_line - 1)
            top = int(self.editor.blockBoundingGeometry(
                start_block).translated(self.editor.contentOffset()).top())
            bottom = int(self.editor.blockBoundingGeometry(
                end_block).translated(self.editor.contentOffset()).bottom())
            total_whitespace = leading_whitespaces.get(max(line_number - 1, 0))
            end_whitespace = leading_whitespaces.get(end_line - 1)
            if end_whitespace and end_whitespace != total_whitespace:
                # x position: indentation depth in character widths, shifted
                # by the current scrollbar/content offsets.
                x = (self.editor.fontMetrics().width(total_whitespace * '9') +
                     self.bar_offset + offset)
                painter.drawLine(x, top, x, bottom)

    # --- Other methods
    # -----------------------------------------------------------------
    def set_enabled(self, state):
        """Toggle edge line visibility."""
        self._enabled = state
        self.setVisible(state)
        # We need to request folding when toggling state so the lines
        # are computed when handling the folding response.
        self.editor.request_folding()

    def update_color(self):
        """Set color using syntax highlighter color for comments."""
        self.color = self.editor.highlighter.get_color_name('comment')

    def set_indentation_width(self, indentation_width):
        """Set indentation width to be used to draw indent guides."""
        self.i_width = indentation_width
| [
"qtpy.QtGui.QColor",
"qtpy.QtGui.QPainter",
"spyder.api.panel.Panel.__init__"
] | [((796, 824), 'spyder.api.panel.Panel.__init__', 'Panel.__init__', (['self', 'editor'], {}), '(self, editor)\n', (810, 824), False, 'from spyder.api.panel import Panel\n'), ((1273, 1287), 'qtpy.QtGui.QPainter', 'QPainter', (['self'], {}), '(self)\n', (1281, 1287), False, 'from qtpy.QtGui import QPainter, QColor\n'), ((1305, 1323), 'qtpy.QtGui.QColor', 'QColor', (['self.color'], {}), '(self.color)\n', (1311, 1323), False, 'from qtpy.QtGui import QPainter, QColor\n')] |
from invoke import Collection
from . import (
container,
cpython,
func,
git,
libs,
mxnet,
runtime,
)
# Aggregate the per-module task collections into the root invoke namespace.
ns = Collection(container, cpython, func, git, libs, mxnet, runtime)
| [
"invoke.Collection"
] | [((136, 199), 'invoke.Collection', 'Collection', (['container', 'cpython', 'func', 'git', 'libs', 'mxnet', 'runtime'], {}), '(container, cpython, func, git, libs, mxnet, runtime)\n', (146, 199), False, 'from invoke import Collection\n')] |
import logging
from flask import Response, make_response, request
from microraiden import HTTPHeaders as header
from flask_restful.utils import unpack
from microraiden.channel_manager import (
ChannelManager,
)
from microraiden.exceptions import (
NoOpenChannel,
InvalidBalanceProof,
InvalidBalanceAmount,
InsufficientConfirmations
)
import microraiden.constants as constants
from microraiden.proxy.resources.request_data import RequestData
from functools import wraps
from eth_utils import is_address
log = logging.getLogger(__name__)
class Paywall(object):
    """HTTP 402 paywall gate backed by a microraiden ChannelManager.

    Wraps Flask resource methods: requests without a valid balance proof
    receive a 402 response carrying the payment headers; valid requests
    are forwarded to the wrapped method.
    """

    def __init__(self,
                 channel_manager,
                 light_client_proxy=None
                 ):
        """Store the channel manager and derived addresses.

        channel_manager: a ChannelManager instance (asserted).
        light_client_proxy: optional proxy used to serve the web UI.
        """
        super().__init__()
        assert isinstance(channel_manager, ChannelManager)
        assert is_address(channel_manager.channel_manager_contract.address)
        assert is_address(channel_manager.receiver)
        self.contract_address = channel_manager.channel_manager_contract.address
        self.receiver_address = channel_manager.receiver
        self.channel_manager = channel_manager
        self.light_client_proxy = light_client_proxy

    def access(self, resource, method, *args, **kwargs):
        """Gate a request to `resource`; call `method` if payment is ok.

        Returns either a 402 paywall response (HTML or bare, depending on
        the Accept header) or the wrapped method's response with the
        paywall headers merged in (resource headers take precedence).
        """
        if self.channel_manager.node_online() is False:
            return "Ethereum node is not responding", 502
        if self.channel_manager.get_eth_balance() < constants.PROXY_BALANCE_LIMIT:
            return "Channel manager ETH balance is below limit", 502
        try:
            data = RequestData(request.headers, request.cookies)
        except ValueError as e:
            return str(e), 409
        accepts_html = (
            'text/html' in request.accept_mimetypes and
            request.accept_mimetypes.best != '*/*'
        )
        headers = {}
        price = resource.price()
        # payment required
        if price > 0:
            paywall, headers = self.paywall_check(price, data)
            if paywall and accepts_html is True:
                reply_data = resource.get_paywall(request.path)
                return self.reply_webui(reply_data, headers)
            elif paywall:
                return make_response('', 402, headers)
        # all ok, return actual content
        resp = method(request.path, *args, **kwargs)
        # merge headers, resource headers take precedence
        headers_lower = {key.lower(): value for key, value in headers.items()}
        lower_to_case = {key.lower(): key for key in headers}
        if isinstance(resp, Response):
            resource_headers = (key for key, value in resp.headers)
        else:
            data, code, resource_headers = unpack(resp)
        # Drop any paywall header the resource overrides (case-insensitive).
        for key in resource_headers:
            key_lower = key.lower()
            if key_lower in headers_lower:
                headers.pop(lower_to_case[key_lower])
        if isinstance(resp, Response):
            resp.headers.extend(headers)
            return resp
        else:
            headers.update(resource_headers)
            # BUG FIX: this previously passed `resource_headers`, silently
            # discarding the paywall headers that were just merged above.
            return make_response(str(data), code, headers)

    def paywall_check(self, price, data):
        """Check if the resource can be sent to the client.
        Returns (is_paywalled: Bool, http_headers: dict)
        """
        headers = self.generate_headers(price)
        if not data.balance_signature:
            return True, headers
        # try to get an existing channel
        try:
            channel = self.channel_manager.verify_balance_proof(
                data.sender_address, data.open_block_number,
                data.balance, data.balance_signature)
        except InsufficientConfirmations:
            log.debug('Refused payment: Insufficient confirmations (sender=%s, block=%d)' %
                      (data.sender_address, data.open_block_number))
            headers.update({header.INSUF_CONFS: "1"})
            return True, headers
        except NoOpenChannel:
            log.debug('Refused payment: Channel does not exist (sender=%s, block=%d)' %
                      (data.sender_address, data.open_block_number))
            headers.update({header.NONEXISTING_CHANNEL: "1"})
            return True, headers
        except InvalidBalanceAmount as e:
            log.debug('Refused payment: Invalid balance amount: %s (sender=%s, block=%d)' %
                      (str(e), data.sender_address, data.open_block_number))
            # Header values are strings on the wire; "1" matches the other
            # flags (previously this inconsistently used the int 1).
            headers.update({header.INVALID_PROOF: "1"})
            return True, headers
        except InvalidBalanceProof as e:
            log.debug('Refused payment: Invalid balance proof: %s (sender=%s, block=%d)' %
                      (str(e), data.sender_address, data.open_block_number))
            headers.update({header.INVALID_PROOF: "1"})
            return True, headers
        # set headers to reflect channel state
        assert channel.sender is not None
        assert channel.balance >= 0
        headers.update(
            {
                header.SENDER_ADDRESS: channel.sender,
                header.SENDER_BALANCE: channel.balance
            })
        if channel.last_signature is not None:
            headers.update({header.BALANCE_SIGNATURE: channel.last_signature})
        amount_sent = data.balance - channel.balance
        if amount_sent != 0 and amount_sent != price:
            headers[header.INVALID_AMOUNT] = "1"
            # if difference is 0, it will be handled by channel manager
            return True, headers
        # set the headers to reflect actual state of a channel
        try:
            self.channel_manager.register_payment(
                channel.sender,
                data.open_block_number,
                data.balance,
                data.balance_signature)
        except (InvalidBalanceAmount, InvalidBalanceProof):
            # balance sent to the proxy is less than in the previous proof
            return True, headers
        # all ok, return premium content
        return False, headers

    def generate_headers(self, price: int):
        """Generate basic headers that are sent back for every request."""
        assert price > 0
        headers = {
            header.GATEWAY_PATH: constants.API_PATH,
            header.RECEIVER_ADDRESS: self.receiver_address,
            header.CONTRACT_ADDRESS: self.contract_address,
            header.TOKEN_ADDRESS: self.channel_manager.get_token_address(),
            header.PRICE: price,
            'Content-Type': 'application/json'
        }
        return headers

    def reply_webui(self, reply_data='', headers: dict = None):
        """Build the 402 HTML paywall response, mirroring RDN-* headers
        into cookies for the light client.

        BUG FIX: `headers` previously defaulted to a shared mutable dict
        that was mutated on every call; it now defaults to None and the
        caller's dict is copied, not mutated.
        """
        headers = dict(headers) if headers else {}
        headers["Content-Type"] = "text/html"
        reply = make_response(reply_data, 402, headers)
        for k, v in headers.items():
            if k.startswith('RDN-'):
                reply.set_cookie(k, str(v))
        return reply
def paywall_decorator(func):
    """Flask ``Resource`` method decorator that routes every call through
    the owning resource's paywall.

    The decorated callable must be a *bound* method: the resource instance
    is recovered via ``func.__self__`` and must expose a ``paywall``
    attribute whose ``access`` method performs the payment check.

    Example:
        class MyPaywalledResource(Resource):
            method_decorators = [paywall_decorator]
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        resource = func.__self__  # instance the bound method belongs to
        return resource.paywall.access(resource, func, *args, **kwargs)
    return wrapper
| [
"logging.getLogger",
"flask_restful.utils.unpack",
"microraiden.proxy.resources.request_data.RequestData",
"eth_utils.is_address",
"functools.wraps",
"flask.make_response"
] | [((530, 557), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (547, 557), False, 'import logging\n'), ((7092, 7103), 'functools.wraps', 'wraps', (['func'], {}), '(func)\n', (7097, 7103), False, 'from functools import wraps\n'), ((802, 862), 'eth_utils.is_address', 'is_address', (['channel_manager.channel_manager_contract.address'], {}), '(channel_manager.channel_manager_contract.address)\n', (812, 862), False, 'from eth_utils import is_address\n'), ((878, 914), 'eth_utils.is_address', 'is_address', (['channel_manager.receiver'], {}), '(channel_manager.receiver)\n', (888, 914), False, 'from eth_utils import is_address\n'), ((6659, 6698), 'flask.make_response', 'make_response', (['reply_data', '(402)', 'headers'], {}), '(reply_data, 402, headers)\n', (6672, 6698), False, 'from flask import Response, make_response, request\n'), ((1509, 1554), 'microraiden.proxy.resources.request_data.RequestData', 'RequestData', (['request.headers', 'request.cookies'], {}), '(request.headers, request.cookies)\n', (1520, 1554), False, 'from microraiden.proxy.resources.request_data import RequestData\n'), ((2642, 2654), 'flask_restful.utils.unpack', 'unpack', (['resp'], {}), '(resp)\n', (2648, 2654), False, 'from flask_restful.utils import unpack\n'), ((2151, 2182), 'flask.make_response', 'make_response', (['""""""', '(402)', 'headers'], {}), "('', 402, headers)\n", (2164, 2182), False, 'from flask import Response, make_response, request\n')] |
# -*- coding: UTF-8 -*-
"""
split_by_area
===========
Script : split_by_area.py
Author : <EMAIL>
Modified: 2018-08-27
Purpose : tools for working with numpy arrays
Notes:
-----
The xs and ys form pairs with the first and last points being identical
The pairs are constructed using n-1 to ensure that you don't form a
line from identical points.
First split polygon is a sample of a multipart. Added 0, 0 and 0, 80
back in
>>> xs = [0., 0., 80., 0, 0., 100., 100., 0.]
>>> ys = [0., 30., 30., 80., 100., 100., 0., 0.]
>>> a = np.array(list(zip(xs, ys))) * 1.0 # --- must be floats
>>> v = np.array([[50., 0], [50, 100.]])
>>> ext = np.array([[0., 0], [0, 100.],[100, 100.], [100., 0.], [0., 0.]])
return a, v
References:
----------
`<https://stackoverflow.com/questions/3252194/numpy-and-line-intersections>`_.
`<https://community.esri.com/message/627051?commentID=627051#comment-627051>`
`<https://community.esri.com/message/779043-re-how-to-divide-irregular-
polygon-into-equal-areas-using-arcgis-105?commentID=779043#comment-779043>`
This is a good one
`<https://tereshenkov.wordpress.com/2017/09/10/dividing-a-polygon-into-a-given
-number-of-equal-areas-with-arcpy/>`
---------------------------------------------------------------------
"""
# ---- imports, formats, constants ----
import sys
import math
from textwrap import dedent
import numpy as np
import warnings
from arcpytools_plt import (tweet, fc_info, _poly_ext,
trans_rot, cal_area, get_polys)
import arcpy
warnings.simplefilter('ignore', FutureWarning)
# Array print formatting: bool arrays shown as ints, floats to 3 decimals.
ft = {'bool': lambda x: repr(x.astype(np.int32)),
      'float_kind': '{: 0.3f}'.format}
np.set_printoptions(edgeitems=5, linewidth=80, precision=2, suppress=True,
                    threshold=100, formatter=ft)
np.ma.masked_print_option.set_display('-')  # change to a single -
script = sys.argv[0]  # print this should you need to locate the script
# ---- Do the work or run the demo ------------------------------------------
#
# Report template filled in with the run parameters.
frmt = """
Input features.... {}
Output features... {}
Number of splits . {}
Split types ...... {}
"""
def _cut_poly(poly, p_id, step=1.0, split_axis="X", split_fac=4, SR=None):
    """Cut `poly` with evenly spaced lines; return (cuts, cutters).

    poly : arcpy Polygon
        polygon to slice
    p_id : int
        polygon id, used only in error messages
    step : number
        spacing between successive splitter lines (1.0 equates to 1%)
    split_axis : str
        "X" cuts with vertical lines, "Y" with horizontal lines
    split_fac : number
        number of areas to produce (kept for interface compatibility; the
        actual area selection happens later in `cal_area`)
    SR : arcpy.SpatialReference
        spatial reference for the constructed splitter polylines
    """
    L, B, R, T = _poly_ext(poly)
    dx = step
    dy = step
    if split_axis == "X":
        lefts = np.arange(L + dx, R + dx, dx, dtype='float')
        splitters = np.array([[[l, B - 1.0], [l, T + 1.0]] for l in lefts])
    elif split_axis == "Y":
        # BUG FIX: this branch previously tested the *global* `s_axis`
        # instead of the `split_axis` parameter, so the parameter was
        # silently ignored outside the script's own namespace.
        tops = np.arange(B + dy, T + dy, dy, dtype='float')
        splitters = np.array([[[R + 1.0, t], [L - 1.0, t]] for t in tops])
    else:
        # Previously any other value left `splitters` undefined (NameError).
        raise ValueError("split_axis must be 'X' or 'Y', got {!r}".format(split_axis))
    cutters = []
    for s in splitters:
        s = s.tolist()
        c = arcpy.Polyline(arcpy.Array([arcpy.Point(*xy) for xy in s]), SR)
        cutters.append(c)
    # ---- cut the polygon repeatedly, keeping the left piece each time
    cuts = []
    for i in cutters:
        rght = poly
        if i.crosses(poly):
            try:
                left, rght = poly.cut(i)
                if rght is None:
                    cuts.append(left)
                cuts.append(left)
                poly = rght
                rght = left
            except RuntimeError:
                tweet("Issues with poly...{}".format(p_id))
                continue
        else:
            cuts.append(rght)
    return cuts, cutters
def final_cut(cutters, poly):
    """Cut `poly` with each polyline in `cutters`; return the pieces.

    Mirrors the cutting loop of `_cut_poly`, applied to the pre-selected
    cutters that split the polygon into (near) equal areas.
    """
    cuts = []
    for cutter in cutters:
        rght = poly
        if cutter.crosses(poly):
            try:
                left, rght = poly.cut(cutter)
                if rght is None:
                    cuts.append(left)
                cuts.append(left)
                poly = rght
                rght = left
            except RuntimeError:
                # BUG FIX: this handler previously referenced the undefined
                # name `p_id`, raising NameError instead of logging.
                tweet("Issues cutting polygon...")
                continue
        else:
            cuts.append(rght)
    return cuts  # , cutters
# ---- demo and tool section -------------------------------------------------
#
# ---- demo and tool section -------------------------------------------------
#
if len(sys.argv) == 1:
    # Demo parameters: run without arguments, use the bundled geodatabase.
    testing = False
    in_pth = script.split("/")[:-2] + ["Polygon_lineTools.gdb"]
    in_fc = "/".join(in_pth) + "/shapes_mtm9"
    out_fc = "/".join(in_pth) + "/c0"
    s_axis = "Y"
    s_fac = 4
else:
    # Tool parameters: supplied by the ArcGIS toolbox dialog.
    testing = False
    in_fc = sys.argv[1]
    out_fc = sys.argv[2]
    s_fac = int(sys.argv[3])
    s_axis = sys.argv[4]
# ---- for both
#
shp_fld, oid_fld, shp_type, SR = fc_info(in_fc)
out_polys, out_ids = get_polys(in_fc)
# ---- splitting requires a projected coordinate system; bail otherwise
if SR.type == 'Projected':
    result_ = []
    for poly, p_id in zip(out_polys, out_ids):
        cuts, cutters = _cut_poly(poly, p_id, step=1,
                                  split_axis=s_axis,
                                  split_fac=4, SR=SR)
        idxs = cal_area(poly, cuts, cutters, s_fac)
        f_cutters = [cutters[i] for i in idxs]
        r = final_cut(f_cutters, poly)
        result_.extend(r)
    if not testing:
        if arcpy.Exists(out_fc):
            arcpy.Delete_management(out_fc)
        arcpy.CopyFeatures_management(result_, out_fc)
        # Carry the source polygon id onto each of its split pieces.
        out_ids = np.repeat(out_ids, s_fac)
        id_fld = np.zeros((len(result_),),
                          dtype=[("key", "<i4"), ("Old_ID", "<i4")])
        id_fld["key"] = np.arange(1, len(result_) + 1)
        id_fld["Old_ID"] = out_ids
        arcpy.da.ExtendTable(out_fc, oid_fld, id_fld, "key")
else:
    msg = """
-----------------------------------------------------------------
Input data is not in a projected coordinate system....
bailing...
-----------------------------------------------------------------
"""
    tweet(msg)
# ----------------------------------------------------------------------
# __main__ .... code section
if __name__ == "__main__":
    """Optionally...
    : - print the script source name.
    : - run the _demo
    """
| [
"arcpy.CopyFeatures_management",
"numpy.repeat",
"arcpytools_plt.fc_info",
"numpy.set_printoptions",
"arcpytools_plt.cal_area",
"arcpy.Point",
"arcpytools_plt.get_polys",
"numpy.array",
"arcpytools_plt.tweet",
"arcpy.Exists",
"arcpytools_plt._poly_ext",
"arcpy.da.ExtendTable",
"arcpy.Delete_... | [((1572, 1618), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""', 'FutureWarning'], {}), "('ignore', FutureWarning)\n", (1593, 1618), False, 'import warnings\n'), ((1713, 1820), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'edgeitems': '(5)', 'linewidth': '(80)', 'precision': '(2)', 'suppress': '(True)', 'threshold': '(100)', 'formatter': 'ft'}), '(edgeitems=5, linewidth=80, precision=2, suppress=True,\n threshold=100, formatter=ft)\n', (1732, 1820), True, 'import numpy as np\n'), ((1839, 1881), 'numpy.ma.masked_print_option.set_display', 'np.ma.masked_print_option.set_display', (['"""-"""'], {}), "('-')\n", (1876, 1881), True, 'import numpy as np\n'), ((4742, 4756), 'arcpytools_plt.fc_info', 'fc_info', (['in_fc'], {}), '(in_fc)\n', (4749, 4756), False, 'from arcpytools_plt import tweet, fc_info, _poly_ext, trans_rot, cal_area, get_polys\n'), ((4779, 4795), 'arcpytools_plt.get_polys', 'get_polys', (['in_fc'], {}), '(in_fc)\n', (4788, 4795), False, 'from arcpytools_plt import tweet, fc_info, _poly_ext, trans_rot, cal_area, get_polys\n'), ((2514, 2529), 'arcpytools_plt._poly_ext', '_poly_ext', (['poly'], {}), '(poly)\n', (2523, 2529), False, 'from arcpytools_plt import tweet, fc_info, _poly_ext, trans_rot, cal_area, get_polys\n'), ((6093, 6103), 'arcpytools_plt.tweet', 'tweet', (['msg'], {}), '(msg)\n', (6098, 6103), False, 'from arcpytools_plt import tweet, fc_info, _poly_ext, trans_rot, cal_area, get_polys\n'), ((2703, 2747), 'numpy.arange', 'np.arange', (['(L + dx)', '(R + dx)', 'dx'], {'dtype': '"""float"""'}), "(L + dx, R + dx, dx, dtype='float')\n", (2712, 2747), True, 'import numpy as np\n'), ((2765, 2820), 'numpy.array', 'np.array', (['[[[l, B - 1.0], [l, T + 1.0]] for l in lefts]'], {}), '([[[l, B - 1.0], [l, T + 1.0]] for l in lefts])\n', (2773, 2820), True, 'import numpy as np\n'), ((5218, 5254), 'arcpytools_plt.cal_area', 'cal_area', (['poly', 'cuts', 'cutters', 's_fac'], {}), '(poly, cuts, cutters, 
s_fac)\n', (5226, 5254), False, 'from arcpytools_plt import tweet, fc_info, _poly_ext, trans_rot, cal_area, get_polys\n'), ((5403, 5423), 'arcpy.Exists', 'arcpy.Exists', (['out_fc'], {}), '(out_fc)\n', (5415, 5423), False, 'import arcpy\n'), ((5479, 5525), 'arcpy.CopyFeatures_management', 'arcpy.CopyFeatures_management', (['result_', 'out_fc'], {}), '(result_, out_fc)\n', (5508, 5525), False, 'import arcpy\n'), ((5545, 5570), 'numpy.repeat', 'np.repeat', (['out_ids', 's_fac'], {}), '(out_ids, s_fac)\n', (5554, 5570), True, 'import numpy as np\n'), ((5786, 5838), 'arcpy.da.ExtendTable', 'arcpy.da.ExtendTable', (['out_fc', 'oid_fld', 'id_fld', '"""key"""'], {}), "(out_fc, oid_fld, id_fld, 'key')\n", (5806, 5838), False, 'import arcpy\n'), ((2858, 2902), 'numpy.arange', 'np.arange', (['(B + dy)', '(T + dy)', 'dy'], {'dtype': '"""float"""'}), "(B + dy, T + dy, dy, dtype='float')\n", (2867, 2902), True, 'import numpy as np\n'), ((2920, 2974), 'numpy.array', 'np.array', (['[[[R + 1.0, t], [L - 1.0, t]] for t in tops]'], {}), '([[[R + 1.0, t], [L - 1.0, t]] for t in tops])\n', (2928, 2974), True, 'import numpy as np\n'), ((5438, 5469), 'arcpy.Delete_management', 'arcpy.Delete_management', (['out_fc'], {}), '(out_fc)\n', (5461, 5469), False, 'import arcpy\n'), ((3079, 3095), 'arcpy.Point', 'arcpy.Point', (['*xy'], {}), '(*xy)\n', (3090, 3095), False, 'import arcpy\n')] |
from scipy import linalg
from sklearn.decomposition import PCA
from scipy.optimize import linear_sum_assignment as linear_assignment
import numpy as np
"""
A function that takes a list of clusters, and a list of centroids for each cluster, and outputs the N max closest images in each cluster to its centroids
"""
def closest_to_centroid(clusters, centroids, nb_closest=20):
    """For each cluster, flag the `nb_closest` members nearest its centroid.

    Returns one boolean mask per cluster, aligned with the cluster's
    original member order: True marks a member whose distance to the
    centroid is among the `nb_closest` smallest.
    """
    output = [[] for _ in range(len(centroids))]
    for i in range(len(centroids)):
        centroid = centroids[i]
        cluster = clusters[i]
        try:
            # Move GPU tensors to the CPU first (torch-style objects).
            cluste_temp = [x.cpu() if x.is_cuda else x for x in cluster]
        except AttributeError:
            # Plain lists/arrays have no .is_cuda; use them as-is.
            # (Previously a bare `except:` which also hid real errors.)
            cluste_temp = cluster
        cluster = [list(x) for x in cluste_temp]
        if len(cluster) > nb_closest:
            # Reduce dimensionality before computing distances; the PCA is
            # only needed (and now only constructed) on this branch.
            nb_components = 7 if len(cluster) > 10 else len(cluster) - 1
            pca = PCA(n_components=nb_components)
            cluster = pca.fit_transform(cluster)
            centroid = pca.transform(centroid.reshape(1, -1))
        distances = [linalg.norm(x - centroid) for x in cluster]
        # BUG FIX: the old code aliased `duplicate_distances = distances`,
        # so the in-place sort destroyed the original order and the mask no
        # longer lined up with the cluster members. Copy before sorting.
        original_order = list(distances)
        distances.sort()
        if len(distances) >= nb_closest:
            distances = distances[:nb_closest]
        output[i] = [x in distances for x in original_order]
    return output
def cluster_acc(y_true, y_pred):
    """
    Compute clustering accuracy via an optimal one-to-one label matching.
    Requires scikit-learn/scipy installed.
    # Arguments
        y_true: true labels, numpy.array with shape `(n_samples,)`
        y_pred: predicted labels, numpy.array with shape `(n_samples,)`
    # Return
        accuracy, in [0,1]
    """
    y_true = y_true.astype(np.int64)
    assert y_pred.size == y_true.size
    n_labels = max(y_pred.max(), y_true.max()) + 1
    # Contingency table: counts[p, t] = samples predicted p with truth t.
    counts = np.zeros((n_labels, n_labels), dtype=np.int64)
    for pred, true in zip(y_pred, y_true):
        counts[pred, true] += 1
    # Hungarian algorithm on the negated counts finds the best mapping.
    row_ind, col_ind = linear_assignment(counts.max() - counts)
    matched = sum(counts[i, j] for i, j in zip(list(row_ind), list(col_ind)))
    return matched * 1.0 / y_pred.size
| [
"sklearn.decomposition.PCA",
"numpy.zeros",
"scipy.linalg.norm"
] | [((1792, 1824), 'numpy.zeros', 'np.zeros', (['(D, D)'], {'dtype': 'np.int64'}), '((D, D), dtype=np.int64)\n', (1800, 1824), True, 'import numpy as np\n'), ((809, 840), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': 'nb_components'}), '(n_components=nb_components)\n', (812, 840), False, 'from sklearn.decomposition import PCA\n'), ((1059, 1084), 'scipy.linalg.norm', 'linalg.norm', (['(x - centroid)'], {}), '(x - centroid)\n', (1070, 1084), False, 'from scipy import linalg\n')] |
from django.conf import settings
from django.conf.urls import patterns, include, url
from django.conf.urls.static import static
from django.views.generic import TemplateView
from phantastes import views
from django.contrib import admin
# Route table: site views plus the bundled third-party apps.
urlpatterns = patterns(
    "",
    url(r"^$", views.index, name="home"),
    url(r"^forum/", include("spirit.urls")),
    url(r"^admin/", include(admin.site.urls)),
    url(r"^account/", include("account.urls")),
    url(r"^profile/", include("profiles.urls", namespace="profiles")),
    url(r"^polls/", include("polls.urls", namespace="polls")),
    url(r"^readings/", include("readings.urls", namespace="readings")),
    url(r"^about/$", views.about, name="about"),
    url(r"^chat/", include("djangoChat.urls", namespace="djangoChat")),
)

# Serve user-uploaded media files (development convenience).
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| [
"django.conf.urls.include",
"django.conf.urls.static.static",
"django.conf.urls.url"
] | [((798, 859), 'django.conf.urls.static.static', 'static', (['settings.MEDIA_URL'], {'document_root': 'settings.MEDIA_ROOT'}), '(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n', (804, 859), False, 'from django.conf.urls.static import static\n'), ((275, 310), 'django.conf.urls.url', 'url', (['"""^$"""', 'views.index'], {'name': '"""home"""'}), "('^$', views.index, name='home')\n", (278, 310), False, 'from django.conf.urls import patterns, include, url\n'), ((663, 705), 'django.conf.urls.url', 'url', (['"""^about/$"""', 'views.about'], {'name': '"""about"""'}), "('^about/$', views.about, name='about')\n", (666, 705), False, 'from django.conf.urls import patterns, include, url\n'), ((333, 355), 'django.conf.urls.include', 'include', (['"""spirit.urls"""'], {}), "('spirit.urls')\n", (340, 355), False, 'from django.conf.urls import patterns, include, url\n'), ((378, 402), 'django.conf.urls.include', 'include', (['admin.site.urls'], {}), '(admin.site.urls)\n', (385, 402), False, 'from django.conf.urls import patterns, include, url\n'), ((427, 450), 'django.conf.urls.include', 'include', (['"""account.urls"""'], {}), "('account.urls')\n", (434, 450), False, 'from django.conf.urls import patterns, include, url\n'), ((475, 521), 'django.conf.urls.include', 'include', (['"""profiles.urls"""'], {'namespace': '"""profiles"""'}), "('profiles.urls', namespace='profiles')\n", (482, 521), False, 'from django.conf.urls import patterns, include, url\n'), ((544, 584), 'django.conf.urls.include', 'include', (['"""polls.urls"""'], {'namespace': '"""polls"""'}), "('polls.urls', namespace='polls')\n", (551, 584), False, 'from django.conf.urls import patterns, include, url\n'), ((610, 656), 'django.conf.urls.include', 'include', (['"""readings.urls"""'], {'namespace': '"""readings"""'}), "('readings.urls', namespace='readings')\n", (617, 656), False, 'from django.conf.urls import patterns, include, url\n'), ((727, 777), 'django.conf.urls.include', 'include', 
(['"""djangoChat.urls"""'], {'namespace': '"""djangoChat"""'}), "('djangoChat.urls', namespace='djangoChat')\n", (734, 777), False, 'from django.conf.urls import patterns, include, url\n')] |
import copy
import time
from datetime import datetime
import binascii
import graphviz
import random
from .models.enums.start_location import StartLocation
from .models.enums.goal import Goal
from .models.enums.statue_req import StatueReq
from .models.enums.entrance_shuffle import EntranceShuffle
from .models.enums.enemizer import Enemizer
from .models.enums.logic import Logic
from .models.randomizer_data import RandomizerData
MAX_INVENTORY = 15  # NOTE(review): presumably the player's inventory cap -- confirm against usage
PROGRESS_ADJ = [1.5, 1.25, 1, 0.75]  # Required items are more likely to be placed in easier modes
MAX_CYCLES = 100  # NOTE(review): presumably an iteration cap for placement/solver loops -- confirm
INACCESSIBLE = 9999  # NOTE(review): presumably a sentinel "distance" for unreachable nodes -- confirm
class World:
# Assigns item to location
def fill_item(self, item, location=-1,test=False,override_restrictions=False,print_log=False):
if location == -1:
return False
elif self.item_locations[location][2]:
if print_log:
print("ERROR: Attempted to place an item in a full location")
return False
elif item in self.item_locations[location][4] and not override_restrictions:
if print_log:
print("ERROR: Attempt to place item in a restricted location:",[self.item_pool[item][3],self.item_locations[location][9]])
return False
elif test:
return True
self.item_pool[item][0] -= 1
self.item_locations[location][2] = True
self.item_locations[location][3] = item
if print_log:
print(" ",self.item_pool[item][3],"->",self.item_locations[location][9])
if self.is_accessible(self.item_locations[location][0]):
self.items_collected.append(item)
if location in self.open_locations[0]:
self.open_locations[0].remove(location)
elif location in self.open_locations[1]:
self.open_locations[1].remove(location)
self.placement_log.append([item, location])
#if self.item_locations[location][1] == 2:
# self.check_logic()
return True
# Removes an assigned item and returns it to item pool
def unfill_item(self, location=-1, print_log=False):
if location == -1:
return -1
elif not self.item_locations[location][2]:
return -1
item = self.item_locations[location][3]
self.item_locations[location][2] = False
self.item_locations[location][3] = 0
self.item_pool[item][0] += 1
if print_log:
print(" ",self.item_pool[item][3],"<-",self.item_locations[location][9],"removed")
if self.is_accessible(self.item_locations[location][0]):
if item in self.items_collected:
self.items_collected.remove(item)
type = self.item_pool[item][1]
if location not in self.open_locations[type-1]:
self.open_locations[type-1].append(location)
for x in self.placement_log:
if x[1] == location:
self.placement_log.remove(x)
return item
# Converts item pool into list of unique items, returns list
def list_item_pool(self, type=0, items=[], progress_type=0):
item_list = []
for x in self.item_pool:
if not items or x in items:
if not type or type == self.item_pool[x][1]:
if not progress_type or progress_type == self.item_pool[x][5]:
i = 0
while i < self.item_pool[x][0]:
item_list.append(x)
i += 1
return item_list
# Returns a list of unfilled item locations
def list_item_locations(self):
locations = []
for x in self.item_locations:
locations.append(x)
return locations
# Returns list of graph edges
def list_logic(self):
edges = []
for x in self.logic:
edges.append(x)
return edges
# Checks if one list is contained inside another list
def is_sublist(self, list, sublist):
if sublist == []:
return True
elif sublist == list:
return True
elif len(sublist) > len(list):
return False
l = list[:]
for x in sublist:
if x in l:
l.remove(x)
else:
return False
return True
# Returns lists of accessible item, ability, and statue locations
def find_open_locations(self):
# Accessible open location for items, abilities, and Mystic Statues
locations = [[], [], [], []]
for x in self.item_locations:
region = self.item_locations[x][0]
type = self.item_locations[x][1]
if self.graph[region][0] and not self.item_locations[x][2]:
locations[type - 1].append(x)
self.open_locations[0] = locations[0][:]
self.open_locations[1] = locations[1][:]
return locations
# Returns graph node of an item location
def location_node(self, location_id=-1,print_log=False):
if location_id not in self.item_locations:
if print_log:
print("ERROR: Invalid item location", location_id)
return False
else:
return self.item_locations[location_id][0]
# Returns whether an item location is already filled with an item
def is_filled(self, location_id=-1,print_log=False):
if location_id not in self.item_locations:
if print_log:
print("ERROR: Invalid item location", location_id)
return False
else:
return self.item_locations[location_id][2]
# Zeroes out accessible flags for all world regions
def is_accessible(self, node_id=-1):
if node_id not in self.graph:
return False
elif self.graph[node_id][0]:
return True
else:
return False
# Zeroes out accessible flags for all world regions
def unsolve(self,reset_graph=False):
for x in self.graph:
self.graph[x][0] = False
if reset_graph:
self.graph[x][4] = 0
self.graph[x][8].clear()
self.graph[x][9].clear()
self.graph[x][10] = self.graph[x][1][:]
for x in self.logic:
if self.logic[x][0] == 1:
self.logic[x][0] = 0
return True
# Resets collected items and other traversal data
def reset_progress(self,reset_graph=False):
self.visited.clear()
self.items_collected.clear()
self.item_destinations.clear()
self.open_locations = [[],[]]
self.open_edges = []
self.unsolve(reset_graph)
return True
# Finds every accessible node in the graph
# Collects items into self.items_collected, edges into self.open_edges
def traverse(self,to_visit=[],test=False,print_log=False):
if print_log:
print(" Beginning traversal...")
visited = []
new_items = []
if not to_visit:
to_visit.append(0)
while to_visit:
node = to_visit.pop(0)
visited.append(node)
if print_log:
print(" Visiting:",self.graph[node][5])
# If we haven't been here yet...
if not self.graph[node][0]:
# Get the newly-accessible items and record open item/ability locations
new_items += self.visit_node(node,test,print_log)
# Queue up newly-accessible places to visit
for x in self.graph[node][10]:
if x != node and not self.is_accessible(x) and x not in to_visit+visited:
to_visit.insert(0,x)
if print_log:
print(" -Discovered:",self.graph[x][5])
# If we've run out of places to visit, check if logic has opened up any new nodes
if not to_visit:
open_edges = self.get_open_edges(visited)
bad_edges = []
if print_log:
print(" Ran out of places - updating logic:")
for edge in open_edges:
dest = self.logic[edge][2]
if self.check_edge(edge,[],False) and dest not in to_visit:
self.logic[edge][0] = 1
to_visit.append(dest)
if print_log:
print(" -Discovered:",self.graph[dest][5])
else:
bad_edges.append(edge)
if not test:
self.open_edges = bad_edges
return [visited,new_items]
# Return list of logic edges that originate in an accessible node and end in an inaccessible node
def get_open_edges(self,nodes=[]):
test_edges = self.open_edges[:]
open_edges = []
for x in nodes:
if not self.is_accessible(x):
test_edges += self.graph[x][12]
for edge in test_edges:
origin = self.logic[edge][1]
dest = self.logic[edge][2]
if self.logic[edge][0] >= 0 and not self.is_accessible(dest) and dest not in nodes:
open_edges.append(edge)
return open_edges
# Visit a node, update graph info, return new items collected
def visit_node(self,node,test=False,print_log=False):
if not test and not self.graph[node][0]:
self.graph[node][0] = True
self.visited.append(node)
self.item_destinations += self.graph[node][6]
self.open_edges += self.graph[node][12]
return self.collect_items(node,test,print_log)
# Collect all items in given node
def collect_items(self,node=-1,test=False,print_log=False):
if node not in self.graph:
return False
items_found = []
for location in self.graph[node][11]:
if self.item_locations[location][2]:
items_found.append(self.item_locations[location][3])
if not test:
self.items_collected.append(self.item_locations[location][3])
if print_log:
print(" -Collected:",self.item_pool[self.item_locations[location][3]][3])
elif self.item_locations[location][1] == 1 and not test:
self.open_locations[0].append(location)
if print_log:
print(" -Discovered:",self.item_locations[location][9])
elif self.item_locations[location][1] == 2 and not test:
self.open_locations[1].append(location)
if print_log:
print(" -Discovered:",self.item_locations[location][9])
return items_found
# Returns full list of accessible locations
def accessible_locations(self, item_locations):
accessible = []
for x in item_locations:
region = self.item_locations[x][0]
if self.is_accessible(region):
accessible.append(x)
return accessible
# Returns full list of inaccessible locations
def inaccessible_locations(self, item_locations):
inaccessible = []
for x in item_locations:
region = self.item_locations[x][0]
if not self.is_accessible(region):
inaccessible.append(x)
return inaccessible
    # Fill a list of items randomly in a list of locations
    def random_fill(self, items=[], item_locations=[], accessible=True, print_log=False):
        """Place each item into the first compatible location in item_locations.

        items: item IDs to place (the list is copied, not mutated).
        item_locations: candidate location IDs (copied, not mutated).
        accessible: if True, locations in the INACCESSIBLE region are skipped.
        Returns True even if some items could not be placed (unplaced items
        are silently dropped); returns False only when item_locations is
        empty while items is not.
        """
        if not items:
            return True
        elif not item_locations:
            return False
        to_place = items[:]
        to_fill = item_locations[:]
        while to_place:
            item = to_place.pop(0)
            item_type = self.item_pool[item][1]
            placed = False
            i = 0  # (unused)
            for dest in to_fill:
                if not placed:
                    region = self.item_locations[dest][0]
                    location_type = self.item_locations[dest][1]
                    filled = self.item_locations[dest][2]
                    restrictions = self.item_locations[dest][4]
                    # Location must be empty, type-compatible, and not restrict this item.
                    if not filled and item_type == location_type and item not in restrictions:
                        if not accessible or region != INACCESSIBLE:
                            if self.fill_item(item, dest, False, False, print_log):
                                to_fill.remove(dest)
                                placed = True
        return True
    # Place list of items into random accessible locations
    def forward_fill(self, items=[], item_locations=[], test=False, override_restrictions=False, print_log=False):
        """Place each item into an empty, accessible location of its type.

        items: item IDs to place.  NOTE(review): on a failed placement this
            list is appended to in place (see below) — confirm callers expect
            their list to be mutated.
        item_locations: candidate location IDs; only empty locations in
            accessible regions are used, bucketed by location type (1-3).
        Returns True on success; False when any item cannot be placed.
        """
        if not items:
            return True
        elif not item_locations:
            if print_log:
                print("ERROR: No item locations given")
            return False
        to_place = items[:]
        to_fill =[[],[],[]]
        # Bucket usable (empty + accessible) locations by their type.
        for loc in item_locations:
            if not self.item_locations[loc][2] and self.is_accessible(self.item_locations[loc][0]):
                loc_type = self.item_locations[loc][1]
                to_fill[loc_type-1].append(loc)
        quarantine = [[],[],[]]
        filled_locations = []
        while to_place:
            item = to_place.pop(0)
            item_type = self.item_pool[item][1]
            filled = False
            # Try each remaining location of the matching type until one accepts.
            while not filled and to_fill[item_type-1]:
                location = to_fill[item_type-1].pop(0)
                if self.fill_item(item,location,test,override_restrictions,print_log):
                    filled = True
                    filled_locations.append(location)
                    # Rejected locations get another chance for later items.
                    to_fill[item_type-1] += quarantine[item_type-1]
                else:
                    quarantine[item_type-1].append(location)
                    # NOTE(review): this re-queues the item on the *caller's*
                    # items list, not on to_place, so the copy is never
                    # re-attempted by this loop — confirm intent.
                    items.append(item)
            if not filled:
                if print_log:
                    print("ERROR: Not enough room to place items")
                return False
        return True
# Convert a prerequisite to a list of items needed to fulfill it
def items_needed(self, edge=0):
if not edge:
return []
prereq = []
for req in self.logic[edge][4]:
item = req[0]
ct = req[1]
i = 0
while i < ct:
prereq.append(item)
i += 1
if not self.items_collected:
return prereq
prereq_new = []
items_new = self.items_collected[:]
while prereq:
x = prereq.pop(0)
if x in items_new:
items_new.remove(x)
else:
prereq_new.append(x)
return prereq_new
    # Returns list of item combinations that grant progression
    # Returns progression list in the following categories: [[available],[not enough room],[too many inventory items]]
    def progression_list(self,open_edges=[]):
        """Classify prerequisite sets of the open logic edges.

        For every open edge whose outstanding prerequisites exist in the
        remaining item pool, the prereq set is sorted into one of three
        buckets: [0] placeable now, [1] not enough open locations to hold
        it, [2] would overflow the inventory (MAX_INVENTORY).  If all three
        buckets end up empty, prereq sets rejected only for Dark Space
        access are promoted into bucket [0] as a fallback.
        """
        if not open_edges:
            open_edges = self.get_open_edges()
        all_items = self.list_item_pool(1)
        #open_locations = self.find_open_locations()
        open_locations = len(self.open_locations[0])  # (unused)
        prereq_list = [[],[],[]] # [[available],[not enough room],[too many inventory items]]
        ds_list = []
        for edge in open_edges:
            prereq = self.items_needed(edge)
            if prereq and prereq not in prereq_list[0] and self.is_sublist(all_items, prereq):
                # Dry-run fill: can the prereq items physically fit right now?
                if prereq not in prereq_list[1] and not self.forward_fill(prereq,self.open_locations[0],True,self.logic_mode == "Chaos"):
                    prereq_list[1].append(prereq)
                elif prereq not in prereq_list[2]:
                    dest = self.logic[edge][2]
                    # Test-traverse from the edge's destination to see what opens up.
                    traverse_result = self.traverse([dest],True)
                    new_nodes = traverse_result[0]
                    start_items_temp = self.items_collected[:] + prereq + traverse_result[1]
                    item_destinations_temp = self.item_destinations[:]
                    for x in new_nodes:
                        item_destinations_temp += self.graph[x][6]
                    inv_temp = self.get_inventory(start_items_temp,item_destinations_temp)
                    if len(inv_temp) <= MAX_INVENTORY:
                        if self.entrance_shuffle == "None" or self.check_ds_access(dest,False,start_items_temp):
                            prereq_list[0].append(prereq)
                        else:
                            # Viable except for Dark Space access.
                            ds_list.append(prereq)
                    else:
                        prereq_list[2].append(prereq)
        if prereq_list == [[],[],[]]:
            prereq_list[0] += ds_list
        return prereq_list
# Find and clear non-progression item to make room for progression item
def make_room(self, progression_result, print_log=False):
# For inventory bottlenecks, remove one inventory item and try again
if not progression_result[1] and progression_result[2]:
return self.remove_nonprog(1,0,True,print_log)
success = False
for node in self.visited:
if not success:
for x in self.graph[node][11]:
if self.is_filled(x) and self.item_pool[self.item_locations[x][3]][5]>1:
if self.unfill_item(x,print_log):
success = True
return success
#### THIS IS OLD, OBSELETE CODE
# non_prog_locations = [[],[]]
# open_locations = len(self.open_locations[0])
# open_abilities = len(self.open_locations[1])
# unfilled = []
# min_prereqs = []
# min_item_ct = 0
# min_ability_ct = 0
# progression_list = progression_result[1][:]
# while progression_list:
# prereq = progression_list.pop(0)
# items_needed = -open_locations
# abilities_needed = -open_abilities
# for x in prereq:
# if self.item_pool[x][1] == 1:
# items_needed += 1
# elif self.item_pool[x][1] == 2:
# abilities_needed += 1
# items_needed = max(0,items_needed)
# abilities_needed = max(0,abilities_needed)
# if not min_prereqs or min_item_ct+min_ability_ct > items_needed + abilities_needed:
# min_prereqs = [prereq]
# min_item_ct = items_needed
# min_ability_ct = abilities_needed
# elif min_prereqs and min_item_ct == items_needed and min_ability_ct == abilities_needed:
# min_prereqs.append(prereq)
#
# if not self.remove_nonprog(min_item_ct,min_ability_ct,False,print_log):
# if print_log:
# print("ERROR: Could not make room")
# return False
#
# return min_prereqs
# Remove an accessible non-progression item to make room for a progression item
def remove_nonprog(self,item_ct=0,ability_ct=0,inv=False,print_log=False):
junk_locations = [[],[]]
quest_locations = [[],[]]
for location in self.item_locations:
if self.item_locations[location][2] and self.is_accessible(self.item_locations[location][0]):
item = self.item_locations[location][3]
type = self.item_pool[item][1]
prog_type = self.item_pool[item][5]
inv_type = self.item_pool[item][4]
if type <= 2:
if prog_type == 2:
quest_locations[type-1].append(location)
elif prog_type == 3:
if not inv or inv_type:
junk_locations[type-1].append(location)
random.shuffle(junk_locations[0])
random.shuffle(junk_locations[1])
random.shuffle(quest_locations[0])
random.shuffle(quest_locations[1])
quest = False
type = 1
locations = junk_locations[0]
count = item_ct
done = False
items_removed = []
while not done:
if not count and type == 1:
type == 2
count = ability_ct
quest = False
locations = junk_locations[1]
if not count and type == 2:
done = True
else:
if not locations and not quest:
quest = True
locations = quest_locations[type-1]
if not locations:
if print_log:
print("ERROR: Not enough room")
return False
location = locations.pop(0)
items_removed.append(self.unfill_item(location))
count -= 1
if print_log:
print(" Removed these items:",items_removed)
return items_removed
# Converts a progression list into a normalized Monte Carlo distribution
def monte_carlo(self, progression_ls=[], start_items=[]):
if not progression_ls:
return []
progression = progression_ls[:]
items = self.list_item_pool(1)
abilities = self.list_item_pool(2)
all_items = items + abilities
sum_items = len(items)
sum_abilities = len(abilities)
probability = []
monte_carlo = []
sum_prob = 0
sum_edges = 0
probabilities = []
idx = 0
while progression:
current_prereq = progression.pop(0)
prereqs = current_prereq[:]
probability = 1.0
i = 0
j = 0
while prereqs:
item = prereqs.pop(0)
if item in all_items:
if self.item_pool[item][1] == 1:
probability *= float(self.item_pool[item][0]) / float((sum_items - i))
i += 1
elif self.item_pool[item][1] == 2:
probability *= float(self.item_pool[item][0]) / float((sum_abilities - j))
j += 1
if item in self.required_items:
probability *= PROGRESS_ADJ[self.difficulty]
probabilities.append([probability, idx])
sum_prob += probability
sum_edges += 1
idx += 1
prob_adj = 100.0 / sum_prob
rolling_sum = 0.0
for x in probabilities:
x[0] = x[0] * prob_adj + rolling_sum
rolling_sum = x[0]
# print probabilities
return probabilities
# Returns a list of map lists, by boss
def get_maps(self):
maps = [[], [], [], [], [], [], []]
for map in self.maps:
boss = self.maps[map][1]
maps[boss].append(map)
maps.pop(0)
return maps
    # Randomize map-clearing rewards
    def map_rewards(self):
        """Randomly assign map-clearing rewards, tiered by difficulty.

        Reward codes: 1 = HP, 2 = STR, 3 = DEF, 0 = nothing (OHKO strips
        HP).  For each boss area, the first four maps (after shuffling)
        receive one reward from tiers 1-4 respectively; tier 4 is empty in
        Z3 Mode, in which case the fourth map gets [0,4].
        """
        maps = self.get_maps()
        # print maps
        for area in maps:
            random.shuffle(area)
        boss_rewards = 4
        # Total rewards by type, by level (HP/STR/DEF)
        if "Z3 Mode" in self.variant:
            rewards_tier1 = [1] * 6  # Expert: 6 HP
            rewards_tier2 = [1] * 6  # Advanced: 12 HP
            rewards_tier3 = [1] * 6  # Intermediate: 18 HP
            rewards_tier4 = []  # Beginner: 18 HP
        else:  # Remove all HP upgrades
            rewards_tier1 = [1,1,1,1,1,1]  # Expert: 6/0/0
            rewards_tier2 = [1,1,2,2,3,3]  # Advanced: 8/2/2
            rewards_tier3 = [1,1,2,2,3,3]  # Intermediate: 10/4/4
            rewards_tier4 = [2,2,2,3,3,3]  # Beginner: 10/7/7
        # Remove HP upgrades in OHKO
        if "OHKO" in self.variant:
            for n, i in enumerate(rewards_tier1):
                if i == 1:
                    rewards_tier1[n] = 0
            for n, i in enumerate(rewards_tier2):
                if i == 1:
                    rewards_tier2[n] = 0
            for n, i in enumerate(rewards_tier3):
                if i == 1:
                    rewards_tier3[n] = 0
            for n, i in enumerate(rewards_tier4):
                if i == 1:
                    rewards_tier4[n] = 0
        random.shuffle(rewards_tier1)
        random.shuffle(rewards_tier2)
        random.shuffle(rewards_tier3)
        random.shuffle(rewards_tier4)
        # Allocate rewards to maps
        for area in maps:
            # NOTE(review): areas are shuffled a second time here; harmless
            # but likely redundant with the shuffle above.
            random.shuffle(area)
            self.maps[area[0]][2] = [rewards_tier1.pop(0),1]
            self.maps[area[1]][2] = [rewards_tier2.pop(0),2]
            self.maps[area[2]][2] = [rewards_tier3.pop(0),3]
            if rewards_tier4:
                self.maps[area[3]][2] = [rewards_tier4.pop(0),4]
            else:
                self.maps[area[3]][2] = [0,4]
    # Place Mystic Statues in World
    def fill_statues(self, locations=[148, 149, 150, 151, 152, 153]):
        """Randomly place the six Mystic Statue items (100-105), or six
        copies of item 106 in Player's Choice mode, into *locations*.
        The default list is safe: random_fill copies it, never mutates it.
        """
        if self.statue_req == StatueReq.PLAYER_CHOICE.value:
            return self.random_fill([106]*6, locations)
        return self.random_fill([100, 101, 102, 103, 104, 105], locations)
    def lock_dark_spaces(self,print_log=False):
        """Ensure every node needing Freedan-form Dark Space access keeps
        one reachable Dark Space reserved (filled but item-less).

        Gathers the origin node of each enabled, Freedan-requiring logic
        edge; for each, verifies DS access, then looks through its DS
        nodes for one already locked.  If none is, a DS is locked (its
        location marked filled, any ability removed).  Returns False when
        any node has no Dark Space access at all.
        """
        nodes = []
        for edge in self.logic:
            if self.logic[edge][0] >-1 and self.logic[edge][3]:
                nodes.append(self.logic[edge][1])
        for node in nodes:
            if not self.check_ds_access(node, True):
                if print_log:
                    print("ERROR: No Dark Space could be accessed ")
                return False
            else:
                found_locked_ds = False
                # DS nodes reachable from this node, in random order.
                nodes_to_check = self.graph[node][9][:]
                random.shuffle(nodes_to_check)
                while not found_locked_ds and nodes_to_check:
                    ds_node = nodes_to_check.pop(0)
                    ds_loc = self.ds_locations[self.ds_nodes.index(ds_node)]
                    # A "locked" DS is filled ([2]) but holds no item ([3]).
                    if self.item_locations[ds_loc][2] and not self.item_locations[ds_loc][3]:
                        found_locked_ds = True
                        #if print_log:
                        #    print("  -Found:",self.item_locations[ds_loc][9])
                # NOTE(review): if nodes_to_check started empty, ds_loc is
                # unbound here -- this relies on check_ds_access implying at
                # least one DS node in graph[node][9]; confirm.
                if not found_locked_ds:
                    self.item_locations[ds_loc][2] = True
                    if self.item_locations[ds_loc][3]:
                        self.unfill_item(ds_loc)
                    if print_log:
                        print("  -Locked:",self.item_locations[ds_loc][9])
        return True
# Determine an exit's direction (e.g. outside to inside)
def is_exit_coupled(self,exit,print_log=False):
if exit not in self.exits:
return False
if self.exits[exit][0]:
sister_exit = self.exits[exit][0]
if self.exits[sister_exit][0] == exit:
return sister_exit
else:
if print_log:
print("WARNING: Exits linked incorrectly",exit,sister_exit)
return sister_exit
return False
# Determine an exit's direction (e.g. outside to inside)
def exit_direction(self,exit):
if exit not in self.exits:
return False
origin = self.exits[exit][3]
dest = self.exits[exit][4]
if self.graph[origin][2] == 2:
o_type = 2
else:
o_type = 1
if self.graph[dest][2] == 2:
d_type = 2
else:
d_type = 1
# return (o_type,d_type)
if o_type == 2 and d_type == 2:
return (1,1)
else:
return d_type
# Get lists of unmatched origin/destination exits
# def get_remaining_exits(self):
# exits_remaining = [[],[]]
# for exit in self.exits:
# if self.exits[exit][1] == -1:
# exits_remaining[0].append(exit)
# if self.exits[exit][2] == -1:
# exits_remaining[1].append(exit)
# return exits_remaining
    # Link one exit to another
    def link_exits(self, origin_exit, dest_exit, print_log=False, check_connections=True, update_graph=True):
        """Link origin_exit to dest_exit, optionally mirroring the graph
        and the coupled return link.

        Records the link in both exits and in exit_log, adds the graph
        edge when update_graph and the exit is graph-mapped ([5]), and --
        when check_connections and both exits are coupled -- recursively
        links the corresponding return exits.  Returns False only for
        unknown exit IDs, True otherwise.
        """
        if origin_exit not in self.exits:
            if print_log:
                print("ERROR: Invalid origin (link)", origin_exit)
            return False
        if dest_exit not in self.exits:
            if print_log:
                print("ERROR: Invalid destination (link)", dest_exit)
            return False
        # Exits <= 21 are special (boss exits) and may be relinked freely.
        if print_log and self.exits[origin_exit][1] != -1 and origin_exit > 21:
            print("WARNING: Origin already linked", origin_exit)
        if print_log and self.exits[dest_exit][2] != -1 and dest_exit > 21:
            print("WARNING: Destination already linked", dest_exit)
        self.exits[origin_exit][1] = dest_exit
        self.exits[dest_exit][2] = origin_exit
        self.exit_log.append([origin_exit,dest_exit])
        if print_log:
            print("  Linked",self.exits[origin_exit][10], "-", self.exits[dest_exit][10])
        if update_graph and self.exits[origin_exit][5]:
            origin = self.exits[origin_exit][3]
            dest = self.exits[dest_exit][4]
            if dest not in self.graph[origin][1]:
                self.graph[origin][1].append(dest)
            self.new_connection(origin,dest)
        # In coupled modes, also link the return trip (recursion with
        # check_connections=False so it doesn't ping-pong).
        if (origin_exit <= 21 or self.entrance_shuffle != "Uncoupled") and check_connections and self.is_exit_coupled(origin_exit) and self.is_exit_coupled(dest_exit):
            new_origin = self.exits[dest_exit][0]
            new_dest = self.exits[origin_exit][0]
            if new_origin <= 21:  # Boss exits
                if self.exits[new_origin][5] or new_origin in self.exits_detailed:
                    self.link_exits(new_origin, new_dest, print_log, False, update_graph)
            else:
                if self.exits[new_origin][1] != -1 or self.exits[new_dest][2] != -1:
                    if print_log:
                        print("WARNING: Return exit already linked:",new_origin,new_dest)
                else:
                    self.link_exits(new_origin, new_dest, print_log, False, update_graph)
        return True
# Unlinks two previously linked exits
def unlink_exits(self, origin_exit, dest_exit, print_log=False, check_connections=True, update_graph=True):
if origin_exit not in self.exits:
if print_log:
print("ERROR: Invalid origin (unlink)", origin_exit)
return False
if dest_exit not in self.exits:
if print_log:
print("ERROR: Invalid destination (unlink)", dest_exit)
return False
if print_log and (self.exits[origin_exit][1] != dest_exit or self.exits[dest_exit][2] != origin_exit):
if print_log:
print("WARNING: Attempted to unlink exits that are not correctly linked:", origin_exit, dest_exit)
self.exits[origin_exit][1] = -1
self.exits[dest_exit][2] = -1
for x in self.exit_log:
if x[0] == origin_exit:
self.exit_log.remove(x)
if print_log:
print(" Unlinked",self.exits[origin_exit][10], "-", self.exits[dest_exit][10])
if update_graph and self.exits[origin_exit][5]:
origin = self.exits[origin_exit][3]
dest = self.exits[dest_exit][4]
if dest in self.graph[origin][1]:
self.graph[origin][1].remove(dest)
if dest in self.graph[origin][10]:
self.graph[origin][10].remove(dest)
if self.entrance_shuffle != "Uncoupled" and check_connections and self.is_exit_coupled(origin_exit) and self.is_exit_coupled(dest_exit):
new_origin = self.exits[dest_exit][0]
new_dest = self.exits[origin_exit][0]
self.unlink_exits(new_origin, new_dest, print_log, False, update_graph)
if check_connections and update_graph:
self.update_graph(True,True,True,print_log)
return True
def print_exit_log(self,exit_log=[]):
for origin,dest in exit_log:
print(self.exits[origin][10],"-",self.exits[dest][10])
# Returns lists of origin exits and destination exits that open up new nodes
def get_open_exits(self,check_progression=False):
open_exits = [[],[]]
for node in self.graph:
if not check_progression or self.is_accessible(node):
for exit in self.graph[node][14]:
if self.exits[exit][1] == -1:
open_exits[0].append(exit)
if not check_progression or not self.is_accessible(node):
for exit in self.graph[node][15]:
if self.exits[exit][2] == -1:
open_exits[1].append(exit)
return open_exits
    # Takes a list of origin and destination exits, returns a suitable match
    def find_exit(self,origin_exits_ls=[],dest_exits_ls=[],print_log=False,check_direction=False,check_progression=False,check_ds_access=False,test=False):
        """Try origin/destination exit pairs until one links successfully.

        Pops candidates off copies of the input lists, skipping exits that
        are already linked (or fail the progression filter), optionally
        requiring matching directions.  Failed destinations are quarantined
        and retried with the next origin.  On success returns
        [origin_exit, dest_exit, remaining_origins, remaining_dests]
        (already-linked leftovers pruned); on failure returns False.
        """
        if not origin_exits_ls:
            if print_log:
                print("ERROR: No accessible exits available")
            return False
        elif not dest_exits_ls:
            if print_log:
                print("ERROR: No destination exits available")
            return False
        origin_exits = origin_exits_ls[:]
        dest_exits = dest_exits_ls[:]
        done = False
        quarantine_o = []
        while not done and origin_exits:
            # Pull the next origin exit that is still unlinked (and, with
            # check_progression, accessible).
            origin_exit = 0
            while not origin_exit and origin_exits:
                origin_exit = origin_exits.pop(0)
                origin = self.exits[origin_exit][3]
                sister_exit = self.exits[origin_exit][0]
                if self.exits[origin_exit][1] != -1 or (check_progression and not self.is_accessible(origin)):
                    origin_exit = 0
            if not origin_exit:
                if print_log:
                    print("ERROR: No accessible exits available")
                return False
            direction = self.exit_direction(origin_exit)
            dest_exit = 0
            quarantine_d = []
            while not done and dest_exits:
                # Pull the next destination exit that is still unlinked
                # (and, with check_progression, NOT yet accessible).
                try_link = False
                while not dest_exit and dest_exits:
                    dest_exit = dest_exits.pop(0)
                    dest = self.exits[dest_exit][4]
                    if self.exits[dest_exit][2] != -1 or (check_progression and self.is_accessible(dest)):
                        dest_exit = 0
                if not dest_exit:
                    if print_log:
                        print("ERROR: No destination exits available")
                    return False
                direction_new = self.exit_direction(dest_exit)
                # Never link an exit to its own sister.
                if dest_exit != sister_exit and (not check_direction or direction_new == direction):
                    try_link = True
                    if self.link_exits(origin_exit,dest_exit,print_log,self.entrance_shuffle != "Uncoupled",True):
                        if True:  # or not check_ds_access or self.check_ds_access(dest):
                            done = True
                            origin_final = origin_exit
                            dest_final = dest_exit
                if not done:
                    # Roll back and try the next destination.
                    quarantine_d.append(dest_exit)
                    if try_link:
                        self.unlink_exits(origin_exit,dest_exit,print_log,True,True)
                    dest_exit = 0
            if not done:
                quarantine_o.append(origin_exit)
                dest_exits += quarantine_d
                quarantine_d.clear()
        if not done:
            if print_log:
                print("ERROR: No suitable links could be found - in quarantine:",quarantine_o)
            return False
        # Clean up O/D lists
        origin_exits += quarantine_o
        for exit in origin_exits:
            if self.exits[exit][1] != -1:
                origin_exits.remove(exit)
        for exit in dest_exits:
            if self.exits[exit][2] != -1:
                dest_exits.remove(exit)
        return [origin_final,dest_final,origin_exits,dest_exits]
# Check if you can access one node from another
def check_access(self,origin=-1,dest=-1,check_mutual=False,print_log=False):
if origin not in self.graph or dest not in self.graph:
return False
if self.graph[origin][7] or self.graph[dest][7]:
return False
success = False
if origin == dest or dest in self.graph[origin][10]:
success = True
to_visit = self.graph[origin][10][:]
visited = [origin]
while not success and to_visit:
node = to_visit.pop(0)
visited.append(node)
if not self.graph[node][7] and dest in self.graph[node][10]:
success = True
else:
for x in self.graph[node][10]:
if x not in to_visit+visited:
to_visit.append(x)
if not check_mutual or not success:
return success
return self.check_access(dest,origin,False,print_log)
    # Build islands, i.e. mutually-accessible nodes
    def build_islands(self,print_log=False):
        """Partition active graph nodes into mutually-accessible islands.

        Nodes are grouped by mutual reachability (check_access both ways).
        Each island is [nodes, unlinked origin exits, unlinked dest exits,
        closed outgoing logic edges, closed incoming logic edges]; the
        island containing the root node 0 (as a predecessor) is split out.
        Returns [start_island, other_islands].
        """
        islands = []
        visited = []
        start_island = []
        for node in self.graph:
            if node not in visited and self.graph[node][2]:
                to_visit = [node]
                new_nodes = []
                origin_exits = []
                dest_exits = []
                origin_logic = []
                dest_logic = []
                is_start = False
                is_island = False
                while to_visit:
                    x = to_visit.pop(0)
                    visited.append(x)
                    new_nodes.append(x)
                    # Root node 0 among predecessors marks the start island.
                    if 0 in self.graph[x][8]:
                        is_start = True
                    for exit in self.graph[x][14]:
                        if self.exits[exit][1] == -1:
                            origin_exits.append(exit)
                    for exit in self.graph[x][15]:
                        if self.exits[exit][2] == -1:
                            dest_exits.append(exit)
                    for edge in self.graph[x][12]:
                        if self.logic[edge][0] == 0:
                            origin_logic.append(edge)
                    for edge in self.graph[x][13]:
                        if self.logic[edge][0] == 0:
                            dest_logic.append(edge)
                    # Only mutually-accessible neighbors join this island.
                    for y in self.graph[x][10]:
                        if y not in visited+to_visit:
                            if self.check_access(x,y,True,print_log):
                                to_visit.append(y)
                island = [new_nodes,origin_exits,dest_exits,origin_logic,dest_logic]
                if is_start:
                    start_island = island
                else:
                    islands.append(island)
        return [start_island,islands]
    # Entrance randomizer
    def shuffle_exits(self,print_log=False):
        """Randomize all shuffleable exits into a solvable world.

        Phases: (1) mark shuffleable exits and register them on the graph;
        (2) pin fixed links (Mu key door, Jeweler in RJH); (3) in Coupled
        mode, pair up one-way exits; (4) assuming all items, stitch
        mutually-accessible islands together via find_exit; (5) guarantee
        Dark Space access for every accessible island; (6) map remaining
        exits with progressively relaxed constraints; (7) pair any
        leftovers.  Returns True on success, False when no valid layout
        is found.
        """
        # Map passages and internal dungeon exits to graph and list all available exits
        one_way_exits = []
        for x in self.exits:
            if self.is_exit_coupled(x) and (not self.exits[x][3] or not self.exits[x][4]):  # Map missing O/D data for coupled exits
                xprime = self.exits[x][0]
                self.exits[x][3] = self.exits[xprime][4]
                self.exits[x][4] = self.exits[xprime][3]
            if not self.exits[x][1] and (self.exits[x][5] or self.exits[x][6]) and not self.exits[x][7] and (not self.exits[x][8] or self.exits[x][9]):
                self.exits[x][1] = -1  # Mark exit for shuffling
                self.exits[x][2] = -1
                if not self.is_exit_coupled(x):
                    one_way_exits.append(x)
                self.graph[self.exits[x][3]][14].append(x)
                self.graph[self.exits[x][4]][15].append(x)
        # Preserve Mu key door link
        self.link_exits(310,310,print_log)
        # Set aside Jeweler's final exit in RJH seeds
        if self.goal == "Red Jewel Hunt":
            self.link_exits(720,720,print_log)
        # If in Coupled mode, map one_way exits first
        exit_log = []
        if self.entrance_shuffle == "Coupled":
            one_way_dest = one_way_exits[:]
            random.shuffle(one_way_dest)
            while one_way_exits:
                exit1 = one_way_exits.pop()
                exit2 = one_way_dest.pop()
                self.link_exits(exit1, exit2, print_log, False)
                exit_log.append([exit1,exit2])
            if print_log:
                print( "One-way exits mapped")
        # Assume all items and abilities
        all_items = self.list_item_pool(1) + self.list_item_pool(2)
        self.items_collected = all_items
        self.update_graph(True,True,True,print_log)
        if print_log:
            print(" Graph updated. Beginning exit shuffle...")
        # for x in self.graph:
        #    print(x,self.graph[x])
        # Build world skeleton with islands
        self.unsolve()
        island_result = self.build_islands()
        start_island = island_result[0]
        islands = island_result[1]
        islands_built = []
        traverse_result = self.traverse()
        visited = traverse_result[0]
        origin_exits = []
        for node in visited:
            origin_exits += self.graph[node][14]
        if print_log:
            # i = 0
            # for x in islands:
            #    i += 1
            #    print("Island",i,x[1],x[2])
            #    for y in x[0]:
            #        print("-",self.graph[y][5])
            print(" Assembling islands...")
        random.shuffle(islands)
        # Constraints relax in two steps: direction first, then progression.
        check_direction = True
        check_progression = True
        quarantine = []
        while islands:
            island = islands.pop(0)
            nodes_new = island[0]
            origin_exits_new = island[1]
            dest_exits_new = island[2]
            # if print_log:
            #    for y in nodes_new:
            #        print("-",self.graph[y][5])
            if not dest_exits_new or not origin_exits_new or self.is_accessible(nodes_new[0]):
                if print_log and False:
                    print(" NOT ELIGIBLE")
            else:
                if (check_progression and not origin_exits_new) or (self.entrance_shuffle == "Coupled" and (len(origin_exits_new) < 2 or len(dest_exits_new) < 2)):
                    quarantine.append(island)
                    # if print_log:
                    #    print(" REJECTED")
                else:
                    # if print_log:
                    #    print(" ATTEMPTING...")
                    random.shuffle(origin_exits)
                    random.shuffle(dest_exits_new)
                    result = self.find_exit(origin_exits,dest_exits_new,print_log,check_direction,True)
                    if not result:
                        quarantine.append(island)
                    else:
                        # Island attached: absorb it and refresh open exits.
                        traverse_result = self.traverse(island[0])
                        visited += traverse_result[0]
                        progression_result = self.get_open_exits()
                        origin_exits = progression_result[0]
                        check_direction = True
            if not islands:
                if check_direction:
                    check_direction = False
                    islands += quarantine
                    quarantine.clear()
                elif check_progression:
                    check_progression = False
                    check_direction = True
                    islands += quarantine
                    quarantine.clear()
        if print_log:
            print(" Island construction complete")
        # Check island Dark Space access, map exits accordingly
        self.reset_progress()
        #self.initialize_ds()
        self.update_graph(True,True,True)
        island_result = self.build_islands()
        islands = island_result[1]
        islands_no_ds = []
        for island in islands:
            if self.is_accessible(island[0][0]) and not self.check_ds_access(island[0][0]):
                islands_no_ds.append(island)
        if islands_no_ds:
            if print_log:
                print("Islands with no DS access:")
                i = 0
                for x in islands_no_ds:
                    i += 1
                    print("Island",x)
                    for y in x[0]:
                        print("-",self.graph[y][5])
            # Candidate destinations: unvisited nodes that do have DS access.
            dest_exits_ds = []
            for node in self.graph:
                if node not in visited and self.check_ds_access(node):
                    for exit in self.graph[node][15]:
                        if self.exits[exit][2] == -1:
                            dest_exits_ds.append(exit)
            while islands_no_ds:
                island = islands_no_ds.pop(0)
                result = self.find_exit(island[1],dest_exits_ds,print_log,check_direction)
                if not result:
                    if print_log:
                        print("ERROR: Could not find Dark Space access")
                    return False
                else:
                    dest_exits_ds = result[3]
        if print_log:
            print(" Dark Space access check successful")
        # Clean up the rest of the exits
        self.reset_progress()
        self.update_graph(True,True,True)
        self.traverse()
        check_progression = True
        check_direction = True
        while origin_exits:
            progression_result = self.get_open_exits(check_progression)
            origin_exits = progression_result[0]
            dest_exits = progression_result[1]
            random.shuffle(origin_exits)
            random.shuffle(dest_exits)
            if origin_exits:
                result = self.find_exit(origin_exits,dest_exits,print_log,check_direction,check_progression,True,False)
                if result:
                    origin_exit = result[0]
                    dest_exit = result[1]
                    dest = self.exits[dest_exit][4]
                    self.traverse([dest])
                elif check_direction:
                    check_direction = False
                elif check_progression:
                    check_progression = False
                    check_direction = True
                    if print_log:
                        print(" Finished mapping progression exits")
                else:
                    if print_log:
                        print("WARNING: This shouldn't happen")
                    origin_exits = []
        # Quality check for missing exits
        origin_exits = []
        dest_exits = []
        for exit in self.exits:
            if self.exits[exit][1] == -1:
                if print_log:
                    print("How'd we miss this one??", self.exits[exit][10])
                origin_exits.append(exit)
            if self.exits[exit][2] == -1:
                if print_log:
                    print("This one too??", self.exits[exit][10])
                dest_exits.append(exit)
        while origin_exits:
            origin_exit = origin_exits.pop(0)
            if not dest_exits:
                if print_log:
                    print("ERROR: Entrance rando failed")
                return False
            dest_exit = dest_exits.pop(0)
            self.link_exits(origin_exit,dest_exit,print_log,self.entrance_shuffle != "Uncoupled")
        # Wrap it up
        # self.reset_progress()
        # self.update_graph(True,True,True)
        if print_log:
            print("Entrance rando successful!")
        return True
def initialize_ds(self):
# Clear DS access data from graph
for x in self.graph:
self.graph[x][4] = 0
self.graph[x][9].clear()
# Find nodes that contain Dark Spaces
pyramid_ds_id = 130 # Special case for Pyramid DS
self.ds_locations = [pyramid_ds_id]
self.ds_nodes = [self.item_locations[pyramid_ds_id][0]]
self.freedan_locations = self.ds_locations[:]
self.freedan_nodes = self.ds_nodes[:]
for x in self.item_locations:
if self.item_locations[x][1] == 2:
self.ds_locations.append(x)
self.ds_nodes.append(self.item_locations[x][0])
if not self.is_sublist(self.item_locations[x][4], [64, 65, 66]) and self.item_locations[x][3] not in [61,62,63,64,65,66]:
self.freedan_locations.append(x)
self.freedan_nodes.append(self.item_locations[x][0])
return True
    # Translates logic and exits to world graph
    def update_graph(self,update_logic=True,update_ds=True,update_exits=False,print_log=False):
        """Reconcile exits, logic edges, and DS access into the world graph.

        update_exits: translate (possibly shuffled) exit links into graph
            edges, back-filling missing origin/destination data from
            coupled sister exits.
        update_logic: re-check logic edges -- item/ability edges before DS
            data is rebuilt, Freedan-dependent edges after.
        update_ds: rebuild Dark Space caches and propagate DS access.
        Also mirrors forward edges ([1] -> [10]) and keeps the [8]/[10]
        from/to lists mutually consistent.  Always returns True.
        """
        if print_log:
            print("Updating graph...")
        if update_exits:
            for exit in self.exits:
                if exit > 21 or self.exits[exit][5] or exit in self.exits_detailed:
                    # Check if exit has been shuffled
                    if self.exits[exit][1] > 0:
                        new_exit = self.exits[exit][1]
                    elif self.exits[exit][1] == 0:
                        new_exit = exit
                    else:
                        new_exit = -1
                    # Get exit origin
                    if new_exit > 0:
                        origin = self.exits[exit][3]
                        if not origin and self.is_exit_coupled(exit):
                            sister_exit = self.exits[exit][0]
                            origin = self.exits[sister_exit][4]
                            self.exits[exit][3] = origin
                        # Get (new) exit destination
                        if self.exits[new_exit][2] == 0 or self.exits[new_exit][2] == exit:
                            dest = self.exits[new_exit][4]
                            if not dest and self.is_exit_coupled(new_exit):
                                sister_exit = self.exits[new_exit][0]
                                dest = self.exits[sister_exit][3]
                                self.exits[new_exit][4] = dest
                            # Translate link into world graph
                            if origin and dest and (dest not in self.graph[origin][1]):
                                self.graph[origin][1].append(dest)
            if print_log:
                print(" Exits updated")
        # Update logic edges (except those requiring Freedan access)
        if update_logic:
            for edge in self.logic:
                if not self.logic[edge][3]:
                    self.check_edge(edge)
            if print_log:
                print(" Logic updated (item/abilities)")
        # Keep static edges ([1]), traversal edges ([10]), and reverse
        # links ([8]) mutually consistent.
        for node in self.graph:
            for x in self.graph[node][1]:
                if x not in self.graph[node][10]:
                    self.graph[node][10].append(x)
            for y in self.graph[node][10]:
                if node not in self.graph[y][8]:
                    self.graph[y][8].append(node)
            for z in self.graph[node][8]:
                if node not in self.graph[z][10]:
                    self.graph[z][10].append(node)
        if print_log:
            print(" Graph updated")
        if update_ds:
            # Map DS access to nodes
            self.initialize_ds()
            self.update_ds_access(self.ds_nodes,1)
            for node in self.freedan_nodes:
                self.update_ds_access([node],2,[node])
            if print_log:
                print(" DS access updated")
        # Update logic requiring Freedan access
        if update_logic:
            for edge in self.logic:
                if self.logic[edge][3]:
                    self.check_edge(edge)
            if print_log:
                print(" Logic updated (DS access)")
        #for x in self.graph:
        #    print(x,self.graph[x][11],self.graph[x][5])
            #print(x,self.graph[x][4],self.graph[x][9],self.graph[x][5])
        return True
# Check whether a node's DS access data needs to be updated
def consider_ds_node(self,node,access_mode=1,ds_nodes=[]):
if access_mode == 2:
if not self.graph[node][2] or self.graph[node][7]:
return False
success = False
for x in ds_nodes:
if x not in self.graph[node][9]:
success = True
return success
if not self.graph[node][4]:
return True
return False
# Check if a node has Dark Space access
def check_ds_access(self, start_node=-1, need_freedan=False, items=[]):
if start_node not in self.graph:
return False
if not self.graph[start_node][2] or self.graph[start_node][4] == 2 or (self.graph[start_node][4] == 1 and not need_freedan):
return True
elif not items:
return False
else:
to_visit = [start_node]
visited = []
ds_access = False
while not ds_access and to_visit:
node = to_visit.pop(0)
visited.append(node)
if self.check_ds_access(node,need_freedan):
return True
else:
for edge in self.graph[node][12]:
dest = self.logic[edge][2]
if dest not in visited+to_visit and not self.logic[edge][0] and self.check_edge(edge,items,False):
to_visit.append(dest)
return False
# graph_copy = copy.deepcopy(self.graph)
# self.update_graph(False,True,False)
# result = self.check_ds_access(start_node, need_freedan)
# self.graph = graph_copy
# graph_copy = None
# return result
# Update a node's DS access data - recursive for all backwards-accessible nodes
def update_ds_access(self,nodes=[],access_mode=1,ds_nodes=[]):
if not nodes:
return True
to_visit = []
for node in nodes:
if self.graph[node][4] < access_mode:
self.graph[node][4] = access_mode
for ds_node in ds_nodes:
if ds_node not in self.graph[node][9]:
self.graph[node][9].append(ds_node)
for x in self.graph[node][8]:
if self.consider_ds_node(x,access_mode,ds_nodes):
to_visit.append(x)
return self.update_ds_access(to_visit,access_mode,ds_nodes)
# Check a logic edge to see if prerequisites have been met
def check_edge(self, edge, items=[], update_graph=True, print_log=False):
success = False
if edge not in self.logic:
if print_log:
print("WARNING: Not a valid logic ID:",edge)
return False
elif self.logic[edge][0] == -1:
return False
elif self.logic[edge][0] > 0:
success = True
req_items = []
for req in self.logic[edge][4]:
i = 0
while i < req[1]:
req_items.append(req[0])
i += 1
if self.is_sublist(self.items_collected+items, req_items) and (not self.logic[edge][3] or self.check_ds_access(self.logic[edge][1],True)):
success = True
if success and update_graph:
self.open_edge(edge)
return success
# Open a logic edge and translate results to graph
def open_edge(self, edge=-1, test=False, print_log=False):
if edge not in self.logic:
return False
if self.logic[edge][0] == -1:
if print_log:
print("WARNING: Tried to open an edge that is restricted")
return False
if not self.logic[edge][0] and not test:
self.logic[edge][0] = 1
origin = self.logic[edge][1]
dest = self.logic[edge][2]
return self.new_connection(origin,dest,test)
# Map a new connection (i.e. exit, logic) to graph
def new_connection(self, origin, dest, test=False, print_log=False):
if not test:
# To/from data
if dest not in self.graph[origin][10]:
self.graph[origin][10].append(dest)
if origin not in self.graph[dest][8]:
self.graph[dest][8].append(origin)
# Dark Space access data
if self.graph[dest][4] > self.graph[origin][4]:
self.update_ds_access([origin],self.graph[dest][4],self.graph[dest][9])
# Return list of newly-accessible nodes
if self.is_accessible(origin) and not self.is_accessible(dest):
traverse_result = self.traverse([dest],test,print_log)
return traverse_result[0]
return []
# to_visit = [dest]
# while to_visit:
# node = to_visit.pop(0)
# new_nodes.append(node)
# if not test:
# self.visit_node(node,test,print_log)
# for x in self.graph[node][10]:
# if x != node and x not in to_visit+new_nodes and not self.is_accessible(x):
# to_visit.append(x)
# return new_nodes
def restrict_edge(self, edge=-1):
try:
self.logic[edge][0] = -1
return True
except:
return False
def unrestrict_edge(self, edge=-1):
try:
self.logic[edge][0] = 0 if self.logic[edge][0] != 1 else self.logic[edge][0]
return True
except:
return False
# Initialize World parameters
    def initialize(self,print_log=False):
        """Apply all seed settings to the world data structures.

        Adjusts required items, item pool, graph edges, and logic for the
        chosen dungeons, Kara location, variants, goal, and start location;
        then wires item locations and logic edges into the graph, applies
        boss/overworld/entrance shuffles, and locks Dark Spaces when the
        logic mode requires it. Returns True on success, False if a shuffle
        or Dark Space lock fails.
        """
        # Manage required items
        if 1 in self.dungeons_req:
            self.required_items += [3, 4, 7, 8]
        if 2 in self.dungeons_req:
            self.required_items += [14]
        if 3 in self.dungeons_req:
            self.required_items += [18, 19]
        if 5 in self.dungeons_req:
            self.required_items += [38, 30, 31, 32, 33, 34, 35]
        if 6 in self.dungeons_req:
            self.required_items += [39]
        if self.kara == 1:
            self.required_items += [2, 9, 23]
        elif self.kara == 2:
            self.required_items += [11, 12, 15]
        elif self.kara == 4:
            self.required_items += [26]
        elif self.kara == 5:
            self.required_items += [28, 66]
        # Update inventory space logic (item_pool[x][4] = occupies a slot)
        if 3 in self.dungeons_req:
            self.item_pool[19][4] = True
        if 5 in self.dungeons_req:
            self.item_pool[30][4] = True
            self.item_pool[31][4] = True
            self.item_pool[32][4] = True
            self.item_pool[33][4] = True
            self.item_pool[34][4] = True
            self.item_pool[35][4] = True
            self.item_pool[38][4] = True
        # Solid Arm can only be required in Extreme
        if self.difficulty < 3:
            self.exits[21][4] = self.exits[21][3]
        # Allow glitches *********************
        if "Allow Glitches" in self.variant:
            self.graph[0][1].append(601)
            self.graph[61][1].append(62)  # Moon Tribe: No ability required
            self.graph[181][1].append(182)  # Sky Garden: Ramp glitch
            self.graph[181][1].append(184)
            self.graph[182][1].append(185)
            self.graph[222][1].append(221)  # Mu: Golem skip
            self.logic[268][4][1][1] = 0  # Ankor Wat: Earthquaker not required
            self.logic[273][4][0][1] = 0  # Ankor Wat: Glasses not required
            self.logic[274][4][0][1] = 0
            self.item_locations[124][2] = False  # Ankor Wat: Dropdown DS has abilities
            self.graph[410][1].append(411)  # Pyramid: No ability required
            self.item_locations[142][2] = False  # Pyramid: Bottom DS can have abilities
            if not self.fluteless:
                self.graph[182][1].append(183)  # Sky Garden: cage glitch
                self.item_locations[94][2] = False  # Great Wall: Slider glitch
                self.graph[294][1].append(295)
        # Early Firebird
        if self.firebird:
            self.graph[0][1].append(602)
            self.unrestrict_edge(405)
        # Zelda 3 Mode
        if "Z3 Mode" in self.variant:
            # Update item pool
            self.item_pool[1][0] = 29  # Red Jewels
            self.item_pool[50][0] = 5  # HP upgrades
            self.item_pool[51][0] = 2  # DEF upgrades
            self.item_pool[52][0] = 3  # STR upgrades
            self.item_pool[55][0] = 12  # HP Pieces
        # Open Mode
        if "Open Mode" in self.variant:
            # Update graph logic
            self.logic[30][0] = 2  # Lola's Letter
            self.logic[31][0] = 2
            self.logic[32][0] = 2
            self.logic[33][0] = 2  # Memory Melody
            self.logic[36][0] = 2  # Teapot
            self.logic[38][0] = 2  # Will
            self.logic[39][0] = 2
            self.logic[40][0] = 2  # Roast
            # Remove travel items from pool
            self.item_pool[10][0] = 0  # Large Roast
            self.item_pool[13][0] = 0  # Memory Melody
            self.item_pool[24][0] = 0  # Will
            self.item_pool[25][0] = 0  # Teapot
            self.item_pool[37][0] = 0  # Lola's Letter
            self.item_pool[6][0] += 4  # Herbs
            self.item_pool[0][0] += 1  # Nothing
        # Chaos mode -- MAY NOT NEED THIS ANYMORE
        # if self.logic_mode == "Chaos":
        #    # Add "Inaccessible" node to graph
        #    self.graph[INACCESSIBLE] = [False, [], 0, [0,0,0,b"\x00"], 0, "Inaccessible", [], False, [], [], [], [], [], [], [], []]
        #
        #    # Towns can have Freedan abilities
        #    for x in self.item_locations:
        #        if self.item_locations[x][4] == [64, 65, 66]:
        #            self.item_locations[x][4].clear()
        #
        # Several locked Dark Spaces can have abilities
        #            ds_unlock = [74, 94, 124, 142]
        #
        #    if 1 not in self.dungeons_req:  # First DS in Inca
        #        ds_unlock.append(29)
        #    if self.kara != 1:  # DS in Underground Tunnel
        #        ds_unlock.append(19)
        #    if self.kara != 5:  # DS in Ankor Wat garden
        #        ds_unlock.append(122)
        #
        #    for x in ds_unlock:
        #        self.item_locations[x][2] = False
        # Red Jewel Hunts change the graph
        if self.goal == "Red Jewel Hunt":
            self.logic[24][2] = 492
            self.logic[25][2] = 492
            self.logic[26][2] = 492
            self.logic[27][2] = 492
            del self.logic[406]
            del self.logic[407]
        # Change graph logic depending on Kara's location
        if self.kara == 1:
            self.unrestrict_edge(400)
            self.graph[49][6].append(20)
        elif self.kara == 2:
            self.unrestrict_edge(401)
            self.graph[150][6].append(20)
            # Change "Sam" to "Samlet"
            self.location_text[45] = b"\x63\x80\x8c\x8b\x84\xa4"
        elif self.kara == 3:
            self.unrestrict_edge(402)
            self.graph[270][6].append(20)
        elif self.kara == 4:
            self.unrestrict_edge(403)
            self.graph[345][6].append(20)
        elif self.kara == 5:
            self.unrestrict_edge(404)
            self.graph[391][6].append(20)
        # Change logic based on which dungeons are required
        # NOTE(review): logic[406] is deleted above for Red Jewel Hunt goals;
        # this loop assumes self.statues is empty in that mode -- confirm.
        for x in self.statues:
            self.logic[406][4][x][1] = 1
        # Change item pool for "player choice" statue requirement variant
        if self.statue_req == StatueReq.PLAYER_CHOICE.value:
            self.item_pool[100][0] = 0
            self.item_pool[101][0] = 0
            self.item_pool[102][0] = 0
            self.item_pool[103][0] = 0
            self.item_pool[104][0] = 0
            self.item_pool[105][0] = 0
            self.item_pool[106][0] = 6
        # Incorporate item locations and logic edges into world graph
        for x in self.item_locations:
            self.graph[self.item_locations[x][0]][11].append(x)
        for y in self.logic:
            if self.logic[y][0] != -1:
                self.graph[self.logic[y][1]][12].append(y)
                self.graph[self.logic[y][2]][13].append(y)
        # Random start location
        if self.start_mode != "South Cape":
            self.start_loc = self.random_start()
            if print_log:
                print("Start location:",self.item_locations[self.start_loc][9])
        if self.start_loc == 19:  # Open Lily's door when starting in Underground Tunnel
            self.logic[62][0] = 2
        # elif self.start_loc == 30:  # Inca ramp can hardlock you -- NEW FIX MAKES THIS OBSELETE
        #    self.graph[83][1].append(82)
        elif self.start_loc == 47:  # Diamond Mine behind fences
            self.graph[131][1].append(130)
        if self.start_mode != "South Cape" or self.entrance_shuffle != "None":
            self.graph[0][1].remove(22)
            self.graph[0][1].append(self.item_locations[self.start_loc][0])
        # TEMP - grant Psycho Dash at start for fluteless seeds
        if self.fluteless:
            self.fill_item(61,self.start_loc,False,True,print_log)
        # Boss Shuffle
        if "Boss Shuffle" in self.variant:
            boss_entrance_idx = [1,4,7,10,13,16,19]
            boss_exit_idx = [3,6,9,12,15,18,21]
            dungeon = 0
            if print_log:
                print("Boss order: ",self.boss_order)
            while dungeon < 7:
                boss = self.boss_order[dungeon]
                entrance_old = boss_entrance_idx[dungeon]
                entrance_new = boss_entrance_idx[boss-1]
                exit_old = boss_exit_idx[boss-1]
                exit_new = boss_exit_idx[dungeon]
                self.link_exits(entrance_old,entrance_new,print_log)
                if self.exits[exit_old][5] or exit_old in self.exits_detailed:
                    self.link_exits(exit_old,exit_new,print_log)
                dungeon += 1
        # Overworld shuffle
        if "Overworld Shuffle" in self.variant:
            if not self.shuffle_overworld(print_log):
                if print_log:
                    print("ERROR: Overworld shuffle failed")
                return False
        # Shuffle exits
        if self.entrance_shuffle != "None":
            if not self.shuffle_exits(print_log):
                if print_log:
                    print("ERROR: Entrance rando failed")
                return False
        self.reset_progress(True)
        #self.initialize_ds()
        self.update_graph(True,True,True)
        # Initialize Dark Space information
        if self.logic_mode == "Completable":
            if not self.lock_dark_spaces(print_log):
                if print_log:
                    print("ERROR: Could not lock Dark Spaces")
                return False
        return True
# Update item placement logic after abilities are placed
    def check_logic(self,location=0):
        """Tighten placement logic after abilities are assigned to Dark Spaces.

        If an ability landed in a critical Dark Space, the locations and
        logic edges it walls off are restricted, and the affected item
        locations are moved to the INACCESSIBLE node. The hardcoded IDs
        (item locations, edges, nodes) refer to specific dungeon rooms --
        see the inline dungeon comments. `location` is currently unused.
        """
        abilities = [61, 62, 63, 64, 65, 66]
        inaccessible_ls = []
        # Check for abilities in critical Dark Spaces
        if self.item_locations[19][3] in abilities:  # Underground Tunnel
            inaccessible_ls += [17, 18]
            self.restrict_edge(63)
        if self.item_locations[29][3] in abilities:  # Inca Ruins
            inaccessible_ls += [26, 27, 30, 31, 32]
            self.restrict_edge(94)
        if (self.item_locations[46][3] in abilities and  # Diamond Mine
                self.item_locations[47][3] in abilities and
                self.item_locations[48][3] in abilities):
            self.restrict_edge(118)
        if (self.item_locations[58][3] in abilities and  # Sky Garden
                self.item_locations[59][3] in abilities and
                self.item_locations[60][3] in abilities):
            self.restrict_edge(131)
            self.restrict_edge(132)
            self.restrict_edge(144)
            self.restrict_edge(147)
            self.restrict_edge(148)
            self.restrict_edge(149)
            self.restrict_edge(150)
            self.restrict_edge(151)
        if self.item_locations[94][3] in abilities:  # Great Wall
            # Insert a synthetic node for the area behind the Spin Dash wall
            self.graph[700] = [False, [], 0, [3,15,0,b"\x00"], 0, "Great Wall - Behind Spin", [], False, [], [], [], [], [], [], [], []]
            self.logic[700] = [0, 296, 700, False, [[63, 1]]]
            self.item_locations[93][0] = 700
            self.logic[222][3] = True
            if self.item_locations[93][3] in abilities:
                inaccessible_ls += [95]
                self.restrict_edge(223)
                self.restrict_edge(224)
        if self.item_locations[122][3] in abilities:  # Ankor Wat
            inaccessible_ls += [117, 118, 119, 120, 121]
            self.restrict_edge(267)
            self.restrict_edge(268)
            self.restrict_edge(269)
            self.restrict_edge(270)
            self.restrict_edge(271)
            self.restrict_edge(272)
        if self.item_locations[142][3] in abilities:  # Pyramid
            inaccessible_ls += [133,134,136,139,140]
            self.restrict_edge(300)
            self.restrict_edge(301)
            self.restrict_edge(302)
            self.restrict_edge(303)
            self.restrict_edge(304)
            self.restrict_edge(306)
            self.restrict_edge(307)
            self.restrict_edge(313)
        # Change graph node for inaccessible_ls locations
        for x in inaccessible_ls:
            if x in self.graph[self.item_locations[x][0]][11]:
                self.graph[self.item_locations[x][0]][11].remove(x)
            self.item_locations[x][0] = INACCESSIBLE
# Simulate inventory
def get_inventory(self,start_items=[],item_destinations=[],new_nodes=[]):
if not start_items:
start_items = self.items_collected[:]
if not item_destinations:
item_destinations = self.item_destinations[:]
inventory_temp = []
for item in start_items:
if self.item_pool[item][4]:
inventory_temp.append(item)
# negative_inventory = []
# for node in self.graph:
# if self.is_accessible(node) or node in new_nodes:
# negative_inventory += self.graph[node][6]
inventory = []
while inventory_temp:
item = inventory_temp.pop(0)
if item in item_destinations:
item_destinations.remove(item)
else:
inventory.append(item)
return inventory
# Return list of accessible nodes
def list_accessible_nodes(self):
accessible = []
for x in self.graph:
if self.is_accessible(x):
accessible.append(x)
return accessible
def print_accessible_nodes(self):
print("Accessible nodes:")
for x in self.graph:
if self.is_accessible(x):
print("",self.graph[x][5])
def print_inaccessible_nodes(self):
print("Inccessible nodes:")
for x in self.graph:
if not self.is_accessible(x):
print("",self.graph[x][5])
# Takes a random seed and builds out a randomized world
    # Takes a random seed and builds out a randomized world
    def randomize(self, seed_adj=0, print_log=False):
        """Build a randomized world from the current seed (+ seed_adj).

        Phases: seed the RNG, initialize the world, place abilities via
        forward fill, scatter non-progression items, Monte-Carlo fill
        progression items until the goal (node 492) is reachable, fill
        junk, then verify with a final traversal. Returns True on success,
        False on any failure (caller is expected to retry).
        """
        random.seed(self.seed + seed_adj)
        # NOTE(review): this race_mode RNG burn is duplicated below -- both
        # blocks run, advancing the RNG stream twice. Removing one would
        # change every generated race seed, so it is left as-is; confirm
        # whether the duplication is intentional.
        if self.race_mode:
            for i in range(random.randint(100, 1000)):
                _ = random.randint(0, 10000)
        if self.race_mode:
            for i in range(random.randint(100, 1000)):
                _ = random.randint(0,10000)
        if not self.initialize(print_log):
            if print_log:
                print("ERROR: Could not initialize world")
            return False
        if print_log:
            print("Initialization complete")
        # Initialize and shuffle location list
        item_locations = self.list_item_locations()
        random.shuffle(item_locations)
        # Fill the Mystic Statues and room-clear rewards
        self.fill_statues()
        self.map_rewards()
        # Forward fill progression items with Monte Carlo method
        # Continue to place progression items until goal is reached
        done = False
        goal = False
        cycle = 0
        place_abilities = True
        self.items_collected = self.list_item_pool(1)  # Assume all items for ability placement
        if print_log:
            print("Beginning ability placement...")
        while not done:
            cycle += 1
            if print_log:
                print(" Cycle",cycle)
            if cycle > MAX_CYCLES:
                if print_log:
                    print("ERROR: Max cycles exceeded")
                return False
            self.traverse()
            if place_abilities:
                to_place = self.list_item_pool(2)
                if not to_place:
                    done = True
                else:
                    random.shuffle(to_place)
                    progress = False
                    while not progress and to_place:
                        ability = to_place.pop(0)
                        progress = self.forward_fill([ability],item_locations,False,self.logic_mode == "Chaos",print_log)
                    if progress:
                        self.check_logic()
                    else:
                        if print_log:
                            print("ERROR: Could not place any abilities")
                        return False
                if done:
                    # Abilities done: switch to item placement for later cycles
                    place_abilities = False
                    done = False
                    if print_log:
                        print(" Finished placing abilities")
                        print("Beginning item placement...")
                    # Randomly place non-progression items
                    self.traverse()
                    non_prog_items = self.list_item_pool(0, [], 2) + self.list_item_pool(0, [], 3)
                    for item in non_prog_items:
                        if item in self.items_collected:
                            self.items_collected.remove(item)
                    self.forward_fill(non_prog_items, item_locations, False, self.logic_mode == "Chaos", print_log)
                    # List and shuffle remaining key items
                    item_list = self.list_item_pool()
                    #random.shuffle(item_list)
                    # Reset graph, prepare for item placement
                    self.reset_progress(True)
                    self.update_graph()
            else:
                if len(self.get_inventory()) > MAX_INVENTORY:
                    goal = False
                    if print_log:
                        print("WARNING: Inventory capacity exceeded")
                else:
                    goal = self.is_accessible(492)
                # Get list of new progression options
                #if print_log:
                #    print("Open edges:",self.open_edges)
                #    print("Open locations:",self.open_locations)
                progression_result = self.progression_list()
                if print_log:
                    print("Progression options: {")
                    print(" ",progression_result[0])
                    print(" ",progression_result[1])
                    print(" ",progression_result[2],"}")
                progression_list = progression_result[0]
                is_progression = (progression_result != [[],[],[]])
                done = goal and (self.logic_mode != "Completable" or not is_progression)
                if not done:
                    if not is_progression:
                        if print_log:
                            print("ERROR: Couldn't progress any further")
                            self.print_graph()
                        return False
                    progress = False
                    key = random.uniform(0,100)
                    while not progress and progression_list:
                        progression_mc = self.monte_carlo(progression_list)
                        idx = 0
                        for x in progression_mc:
                            if key <= x[0] and not idx:
                                idx = x[1]
                        items = progression_list.pop(idx)
                        if self.forward_fill(items, item_locations, False, self.logic_mode == "Chaos", print_log):
                            progress = True
                            # if print_log:
                            #    print("  Placed progression items successfully")
                    if not progress:
                        if print_log:
                            print("  No suitable progression found, attempting to make room...")
                        if not self.make_room(progression_result,print_log):
                            if print_log:
                                print("ERROR: Could not find progression")
                                self.print_graph()
                            return False
        if print_log:
            print("Placing junk items...")
        junk_items = self.list_item_pool()
        #random.shuffle(junk_items)
        self.random_fill(junk_items, item_locations, False, print_log)
        if print_log:
            print("Item placement complete, beginning final traversal...")
        self.reset_progress(True)
        self.update_graph()
        self.traverse([],False,print_log)
        if print_log:
            locked_ds = [19,29,122]
            for x in locked_ds:
                if self.item_locations[x][3] in [61, 62, 63, 64, 65, 66]:
                    print("WARNING:",self.item_locations[x][9],"has an ability")
        if self.logic_mode == "Completable" and self.goal != "Red Jewel Hunt":
            completed = True
            for node in self.graph:
                if not self.graph[node][0] and node <600:
                    if print_log:
                        print("Can't reach ",self.graph[node][5])
                    completed = False
        else:
            completed = self.graph[492][0]
        if not completed:
            if print_log:
                self.print_graph()
                print("ERROR: Seed failed, trying again...")
                print("")
            return False
        if print_log:
            print("Writing hints...")
        placement_log = self.placement_log[:]
        random.shuffle(placement_log)
        self.in_game_spoilers(placement_log)
        if print_log:
            print("Randomization complete!")
        return True
def print_graph(self):
print("Open edges:",self.open_edges)
print("Open locations:",self.open_locations)
for node in self.graph:
print(node,self.graph[node])
# Prepares dataset to give in-game spoilers
def in_game_spoilers(self, placement_log=[]):
for x in placement_log:
item = x[0]
location = x[1]
if location not in self.free_locations and location in self.location_text:
if item in self.required_items or item in self.good_items or location in self.trolly_locations:
spoiler_str = b"\xd3" + self.location_text[location] + b"\xac\x87\x80\xa3\xcb"
spoiler_str += self.item_text_short[item] + b"\xc0"
# No in-game spoilers in Expert mode
if self.difficulty >= 3:
spoiler_str = b"\xd3\x8d\x88\x82\x84\xac\xa4\xa2\xa9\xac\x83\x8e\x83\x8e\x8d\x86\x8e\x4f\xc0"
self.spoilers.append(spoiler_str)
# print item, location
# Prints item and ability locations
    # Prints item and ability locations
    def generate_spoiler(self, version=""):
        """Assemble the spoiler dict for this seed and store it on self.spoiler.

        Includes settings (seed, goal, logic, difficulty, statues, Kara
        location, ...), the item placement list, and -- when the relevant
        variants are active -- overworld and entrance links.

        NOTE(review): kara_txt / difficulty_txt are only assigned for the
        known value ranges (kara 1-5, difficulty 0-3); other values would
        raise NameError below -- presumably unreachable, confirm.
        """
        if self.kara == 1:
            kara_txt = "Edward's Castle"
        elif self.kara == 2:
            kara_txt = "Diamond Mine"
        elif self.kara == 3:
            kara_txt = "Angel Dungeon"
        elif self.kara == 4:
            kara_txt = "Mt. Kress"
        elif self.kara == 5:
            kara_txt = "Ankor Wat"
        if self.difficulty == 0:
            difficulty_txt = "Easy"
        elif self.difficulty == 1:
            difficulty_txt = "Normal"
        elif self.difficulty == 2:
            difficulty_txt = "Hard"
        elif self.difficulty == 3:
            difficulty_txt = "Extreme"
        spoiler = dict()
        spoiler["version"] = version
        spoiler["seed"] = str(self.seed)
        spoiler["date"] = str(datetime.utcfromtimestamp(time.time()))
        spoiler["goal"] = str(self.goal)
        spoiler["entrance_shuffle"] = str(self.entrance_shuffle)
        spoiler["start_location"] = self.item_locations[self.start_loc][9].strip()
        spoiler["logic"] = str(self.logic_mode)
        spoiler["difficulty"] = str(difficulty_txt)
        if self.statue_req == StatueReq.PLAYER_CHOICE.value:
            spoiler["statues_required"] = self.statues_required
        else:
            spoiler["statues_required"] = self.statues
        spoiler["boss_order"] = self.boss_order
        spoiler["kara_location"] = kara_txt
        spoiler["jeweler_amounts"] = self.gem
        spoiler["inca_tiles"] = self.incatile
        spoiler["hieroglyph_order"] = self.hieroglyphs
        items = []
        for x in self.item_locations:
            if x < 500:
                item = self.item_locations[x][3]
                location_name = self.item_locations[x][9].strip()
                item_name = self.item_pool[item][3]
                items.append({"location": location_name, "name": item_name})
        spoiler["items"] = items
        if "Overworld Shuffle" in self.variant:
            overworld_links = []
            for continent_id, continent_data in self.overworld_menus.items():
                continent_name = continent_data[7]
                region_name = self.overworld_menus[continent_data[0]][8]
                overworld_links.append({"region": region_name, "continent": continent_name})
            spoiler["overworld_entrances"] = overworld_links
        if self.entrance_shuffle != "None":
            exit_links = []
            for exit in self.exits:
                exit_name = self.exits[exit][10]
                linked_exit = self.exits[exit][1]
                if not linked_exit:
                    exit_linked_name = exit_name
                else:
                    exit_linked_name = self.exits[linked_exit][10]
                exit_links.append({"entrance": exit_name, "exit": exit_linked_name})
            spoiler["exit_links"] = exit_links
        self.spoiler = spoiler
        #self.complete_graph_visualization()
#self.complete_graph_visualization()
    def complete_graph_visualization(self,print_log=False):
        """Render the world graph, logic edges, and item placements with graphviz.

        Builds a Digraph on self.graph_viz: one node per region, edges for
        direct region links and for logic edges whose item requirements
        could plausibly be met, plus HTML-table nodes listing each region's
        item locations. Debug/visualization aid only.

        NOTE(review): this reads logic entries as [start, dest, reqs] at
        indices 0/1/2, while check_edge/open_edge use origin=[1], dest=[2],
        reqs=[4] -- the format here looks outdated; confirm before relying
        on the rendered logic edges.
        """
        self.graph_viz = graphviz.Digraph(graph_attr=[('concentrate','true'),
                                                      ('rankdir', 'TB')], strict=True)
        graph = self.graph_viz
        areas = dict()
        area_names = ["Overworld",
                      "South Cape",
                      "Edward's Castle",
                      "Itory Village",
                      "Moon Tribe",
                      "Inca Ruins",
                      "Diamond Coast",
                      "Freejia",
                      "Diamond Mine",
                      "Neil's Cottage",
                      "Nazca Plain",
                      "Seaside Palace",
                      "Mu",
                      "Angel Village",
                      "Watermia",
                      "Great Wall",
                      "Euro",
                      "Mt. Kress",
                      "Native's Village",
                      "Ankor Wat",
                      "Dao",
                      "Pyramid",
                      "Babel",
                      "Jeweler's Mansion"]
        graph.attr('node', shape='box')
        for area_id in range(len(area_names)):
            areas[area_id] = list()
        for area_id in range(1,len(area_names)):
            node_name = f"area_{area_id}"
            node_content = area_names[area_id]
            #areas[0].append((node_name, node_content))
        # Group region nodes by their area (region_data[3][1])
        for region_id, region_data in self.graph.items():
            area = region_data[3][1]
            node_name = f"region_{region_id}"
            node_content = region_data[5]
            areas[area].append((node_name, node_content))
        for area_id, area_nodes in areas.items():
            for node_id, node_content in area_nodes:
                graph.node(node_id, node_content)
            #with graph.subgraph(name=f"cluster_{area_id}") as c:
            #    c.attr(label=area_names[area_id],
            #           color="black")
            #    for node_id, node_content in area_nodes:
            #        if area_id != 0:
            #            c.node(node_id, node_content)
            #        else:
            #            graph.node(node_id,node_content)
        # Direct region-to-region links (region_data[1])
        for region_id, region_data in self.graph.items():
            start_area = region_data[3][1]
            node_name = f"region_{region_id}"
            area_name = f"area_{start_area}"
            for accessible_region_id in region_data[1]:
                end_area = self.graph[accessible_region_id][3][1]
                end_area_name = f"area_{end_area}"
                accessible_node_name = f"region_{accessible_region_id}"
                graph.edge(node_name, accessible_node_name)
                #if start_area != 0 and end_area != 0:
                #    if start_area != end_area:
                #        graph.edge(area_name, end_area_name)
                #    else:
                #        graph.edge(node_name, accessible_node_name)
                #elif start_area != 0:
                #    graph.edge(area_name, accessible_node_name)
                #elif end_area != 0:
                #    graph.edge(node_name, end_area_name)
                #else:
                #    graph.edge(node_name, accessible_node_name)
        # Logic edges: only drawn if enough copies of each required item
        # exist in the pool plus pre-filled locations
        for _, logic_data in self.logic.items():
            needed_items = logic_data[2]
            enough_items = True
            for item_id, quantity in needed_items:
                existing_quantity = 0
                if item_id not in self.item_pool:
                    if print_log:
                        print("Missing info about item:", item_id)
                else:
                    existing_quantity = self.item_pool[item_id][0]
                for _, location_data in self.item_locations.items():
                    if location_data[2] and item_id == location_data[3]:
                        existing_quantity += 1
                if existing_quantity < quantity:
                    enough_items = False
                    break
            if not enough_items:
                continue
            start_name = f"region_{logic_data[0]}"
            dest_name = f"region_{logic_data[1]}"
            start_area = self.graph[logic_data[0]][3][1]
            end_area = self.graph[logic_data[1]][3][1]
            area_name = f"area_{start_area}"
            end_area_name = f"area_{end_area}"
            graph.edge(start_name, dest_name)
            #if start_area != 0 and end_area != 0:
            #    if start_area != end_area:
            #        graph.edge(area_name, end_area_name)
            #    else:
            #        graph.edge(start_name, dest_name)
            #elif start_area != 0:
            #    graph.edge(area_name, dest_name)
            #elif end_area != 0:
            #    graph.edge(start_name, end_area_name)
            #else:
            #    graph.edge(start_name, dest_name)
        per_region_item_node = dict()
        item_location_color_map = {
            1: "yellow",
            2: "blue",
            3: "green",
            4: "white"
        }
        graph.attr('node', shape='plaintext')
        for itemloc_id, itemloc_data in self.item_locations.items():
            # Add Item_location_nodes
            location_region = itemloc_data[0]
            region_node_name = f"region_{location_region}"
            region_item_node_name = f"region_itemnode_{location_region}"
            if (itemloc_data[1] != 2 or itemloc_data[3] != 0) and itemloc_data[1] != 4:
                if region_item_node_name not in per_region_item_node:
                    per_region_item_node[region_item_node_name] = []
                    graph.edge(region_node_name, f"{region_item_node_name}")
                per_region_item_node[region_item_node_name].append((itemloc_id))
        # One HTML-table node per region listing its item locations
        for region_item_node_name, locations_id in per_region_item_node.items():
            node_content = "<<table border='0' cellborder='1' cellspacing='0'>"
            for itemloc_id in locations_id:
                itemloc_data = self.item_locations[itemloc_id]
                item_name = self.item_pool[itemloc_data[3]][3]
                location_name = itemloc_data[9]
                if ":" in location_name:
                    location_name = ":".join(location_name.split(':')[1:])
                location_type = itemloc_data[1]
                node_content += f"""<tr>
                <td ALIGN='left' bgcolor='{item_location_color_map[location_type]}'>{location_name.strip()}</td>
                <td align='center'>{item_name}</td>
                </tr>"""
            node_content += "</table>>"
            graph.node(region_item_node_name, node_content)
def print_enemy_locations(self, filepath, offset=0):
f = open(filepath, "r+b")
rom = f.read()
for enemy in self.enemies:
print(self.enemies[enemy][3])
done = False
addr = int("c8200", 16) + offset
while not done:
addr = rom.find(self.enemies[enemy][1], addr + 1)
if addr < 0 or addr > int("ce5e4", 16) + offset:
done = True
else:
f.seek(addr)
# f.write(b"\x55\x87\x8a\x05")
print(" ", addr, hex(addr), binascii.hexlify(f.read(4)))
f.close
# Prints item and ability locations
def print_spoiler(self):
if self.kara == 1:
kara_txt = "Edward's Castle"
elif self.kara == 2:
kara_txt = "Diamond Mine"
elif self.kara == 3:
kara_txt = "Angel Dungeon"
elif self.kara == 4:
kara_txt = "Mt. Kress"
elif self.kara == 5:
kara_txt = "Ankor Wat"
print("")
print("Seed > ", self.seed)
print("Statues Required > ", self.statues)
print("Kara Location > ", kara_txt)
print("Jeweler Reward Amounts > ", self.gem)
print("Inca Tile (column, row) > ", self.incatile)
print("Hieroglyph Order > ", self.hieroglyphs)
print("")
for x in self.item_locations:
item = self.item_locations[x][3]
location_name = self.item_locations[x][9]
item_name = self.item_pool[item][3]
print(location_name, " > ", item_name)
# Modifies game ROM to reflect the current state of the World
    # Modifies game ROM to reflect the current state of the World
    def write_to_rom(self, f, rom_offset=0, print_log=False):
        """Write the randomized world state into the ROM file handle `f`.

        Patches, in order: room-clear rewards, item/ability placements and
        their text, the Dao 2-item event, in-game spoilers, enemizer data,
        the random start location, overworld and entrance shuffles, and any
        extra event switches. `rom_offset` shifts all addresses (e.g. for a
        headered ROM). All target addresses are hex-string constants.
        """
        # Room-clearing rewards
        idx_tier2 = 0
        idx_tier3 = 0
        idx_tier4 = 0
        # NOTE: `map` shadows the builtin here; kept for byte-identity
        for map in self.maps:
            reward_tier = self.maps[map][2][1]
            if reward_tier > 0:
                reward = self.maps[map][2][0]
                f.seek(int("1aade", 16) + map + rom_offset)
                f.write(binascii.unhexlify(format(reward,"02x")))
                # Populate player level logic
                if reward_tier == 2:
                    f.seek(int("f4a7", 16) + 4*idx_tier2 + rom_offset)
                    f.write(binascii.unhexlify(format(map,"02x"))+b"\x03")
                    idx_tier2 += 1
                elif reward_tier == 3:
                    f.seek(int("f4bf", 16) + 4*idx_tier3 + rom_offset)
                    f.write(binascii.unhexlify(format(map,"02x"))+b"\x03")
                    idx_tier3 += 1
                elif reward_tier == 4:
                    f.seek(int("f4d7", 16) + 4*idx_tier4 + rom_offset)
                    f.write(binascii.unhexlify(format(map,"02x"))+b"\x03")
                    idx_tier4 += 1
        #print("maps done")
        # Items and abilities
        for x in self.item_locations:
            type = self.item_locations[x][1]
            # Write items to ROM
            if type == 1:
                item = self.item_locations[x][3]
                # print "Writing item ", item
                item_addr = self.item_locations[x][5]
                item_code = self.item_pool[item][2]
                text1_addr = self.item_locations[x][6]
                text2_addr = self.item_locations[x][7]
                text3_addr = self.item_locations[x][8]
                if item in self.item_text_long:
                    text_long = self.item_text_long[item]
                else:
                    text_long = ""
                if item in self.item_text_short:
                    text_short = self.item_text_short[item]
                else:
                    text_short = ""
                # Write item code to memory
                if item_code and item_addr:
                    f.seek(int(item_addr, 16) + rom_offset)
                    f.write(item_code)
                # Write item text, if appropriate
                if text1_addr and text_long:
                    f.seek(int(text1_addr, 16) + rom_offset)
                    f.write(text_long)
                    # f.write(b"\xd3")
                    # f.write(text_short)
                    f.write(b"\xc9\x0a\xc0")
                # Write "inventory full" item text, if appropriate
                if text2_addr and text_long:
                    f.seek(int(text2_addr, 16) + rom_offset)
                    # f.write(b"\xd3")
                    # f.write(text_short)
                    f.write(text_long)
                    f.write(b"\xcb\x45\x65\x4b\x4b\x4f\xc9\x0a\xc0")  # Just says "FULL!"
                # Write jeweler inventory text, if apprpriate
                if text3_addr and text_short:
                    f.seek(int(text3_addr, 16) + rom_offset)
                    f.write(text_short)
            # Write abilities to ROM
            elif type == 2:  # Check if filled
                ability = self.item_locations[x][3]
                ability_addr = self.item_locations[x][5]
                map = self.item_locations[x][8]
                # Change Dark Space type in event table
                if ability in [61, 62, 63, 64, 65, 66]:
                    f.seek(int(ability_addr, 16) + rom_offset)
                    f.write(b"\x05")
                # Update ability text table
                if ability == 61:  # Psycho Dash
                    # f.seek(int("8eb5a",16)+2*i+rom_offset)
                    f.seek(int("8eb5a", 16) + rom_offset)
                    f.write(map)
                if ability == 62:  # Psycho Slide
                    f.seek(int("8eb5c", 16) + rom_offset)
                    f.write(map)
                if ability == 63:  # Spin Dash
                    f.seek(int("8eb5e", 16) + rom_offset)
                    f.write(map)
                if ability == 64:  # Dark Friar
                    f.seek(int("8eb60", 16) + rom_offset)
                    f.write(map)
                if ability == 65:  # Aura Barrier
                    f.seek(int("8eb62", 16) + rom_offset)
                    f.write(map)
                if ability == 66:  # Earthquaker
                    f.seek(int("8eb64", 16) + rom_offset)
                    f.write(map)
        #print("items/abilities done")
        # Special code for 2-item event in Dao
        item1 = self.item_locations[125][3]
        item2 = self.item_locations[126][3]
        f.seek(int("8fde0", 16) + rom_offset)
        f.write(b"\xd3" + self.item_text_short[item1] + b"\xcb")
        f.write(self.item_text_short[item2] + b"\xc9\x0a\xcf\xce")
        # Write in-game spoilers
        i = 0
        for addr in self.spoiler_addresses:
            f.seek(int(self.spoiler_addresses[addr], 16) + rom_offset)
            if i < len(self.spoilers):
                f.write(self.spoilers[i])
                i += 1
        #print("spoilers done")
        # Enemizer
        if self.enemizer != "None":
            # "Fix" Ankor Wat Gorgons so they don't fall from the ceiling
            f.seek(int("bb825", 16) + rom_offset)
            f.write(b"\x00\x00\x00\x02\x27\x0F\x02\xC1\x4C\xA0\xB8\x6B")
            # Run enemizer
            self.enemize(f, rom_offset)
            # self.parse_maps(f,rom_offset)
        # Random start location
        if self.start_mode != "South Cape" or self.entrance_shuffle != "None":
            # print self.start_loc
            map_str = self.item_locations[self.start_loc][8] + self.item_locations[self.start_loc][7]
            # Change game start location
            f.seek(int("be517", 16) + rom_offset)
            f.write(map_str)
            # Change Dark Space warp location
            f.seek(int("8dbea", 16) + rom_offset)
            f.write(map_str)
            # Change Dark Space warp text
            map_name = self.location_text[self.start_loc]
            f.seek(int("8de1f", 16) + rom_offset)
            f.write(map_name + b"\x0D\xCB\xAC\x4D\x8E\xCB\xAC\x69\x84\xA3\xCA")
        #print("random start done")
        # Overworld shuffle
        if "Overworld Shuffle" in self.variant:
            ow_patch_data = []
            for entry in self.overworld_menus:
                # Prepare ROM edits
                new_entry = self.overworld_menus[entry][0]
                f.seek(int(self.overworld_menus[new_entry][4], 16) + rom_offset)
                ow_patch_data.append([self.overworld_menus[entry][4], f.read(8)])
                f.seek(int(self.overworld_menus[new_entry][6], 16) + rom_offset)
                ow_patch_data.append([self.overworld_menus[entry][6], f.read(11)])
                ow_patch_data.append([self.overworld_menus[new_entry][5], self.overworld_menus[entry][1]])
            for x in ow_patch_data:
                f.seek(int(x[0], 16) + rom_offset)
                f.write(x[1])
        #print("overworld shuffle done")
        # Entrance shuffle
        er_patch_data = []
        for exit in self.exits:
            #self.exits[exit][0] = exit  #TESTING ONLY
            # Prepare ROM edits
            new_exit = self.exits[exit][1]
            if new_exit and self.exits[exit][5]:  # and exit != new_exit:
                try:
                    if self.exits[new_exit][6]:
                        new_data = self.exits[new_exit][6]
                    else:
                        f.seek(int(self.exits[new_exit][5], 16) + rom_offset)
                        new_data = f.read(8)
                    er_patch_data.append([self.exits[exit][5], new_data])
                except:
                    if print_log:
                        print("ERROR: exit data invalid",exit,new_exit)
        for exit in self.exits_detailed:
            new_exit = self.exits[exit][1]
            if new_exit:
                map_str = self.exits[new_exit][6]
                map_id = map_str[0:1]
                # Coordinates stored as pixel values; convert to tile units
                xcoord = int.to_bytes(int.from_bytes(map_str[1:3], byteorder="little") // 16, 2, byteorder='little')
                ycoord = int.to_bytes(int.from_bytes(map_str[3:5], byteorder="little") // 16, 2, byteorder='little')
                facedir = map_str[5:6]
                camera = map_str[6:8]
                # print(map_id,xcoord,ycoord,facedir,camera)
                er_patch_data.append([self.exits_detailed[exit][0], map_id])
                er_patch_data.append([self.exits_detailed[exit][1], xcoord])
                er_patch_data.append([self.exits_detailed[exit][2], ycoord])
                if self.exits_detailed[exit][3] != "":
                    er_patch_data.append([self.exits_detailed[exit][3], facedir])
                er_patch_data.append([self.exits_detailed[exit][4], camera])
        for x in er_patch_data:
            try:
                f.seek(int(x[0], 16) + rom_offset)
                f.write(x[1])
            except:
                if print_log:
                    print("ERROR: Not a valid address", x)
        #print("entrance shuffle done")
        # Check for additional switches that need to be set
        switch_str = []
        if self.start_loc == 19:  # Open Lily's door when starting in Underground Tunnel
            switch_str.append(b"\x02\xcd\x13\x01")
        # elif self.start_loc == 30:  # Inca ramp can hardlock you -- NEW FIX MAKES THIS OBSELETE
        #    switch_str.append(b"\x02\xcd\x0c\x01")
        elif self.start_loc == 47:  # Diamond Mine behind fences
            switch_str.append(b"\x02\xcd\x34\x01\x02\xcd\x35\x01\x02\xcd\x36\x01")
        if "Open Mode" in self.variant:
            switch_str.append(b"\x02\xcc\x11\x02\xcc\x14\x02\xcc\x1f\x02\xcc\x2a\x02\xcc\x41")
        if self.enemizer != "None" and self.enemizer != "Limited":
            switch_str.append(b"\x02\xcc\xa0\x02\xcc\xa1")
        f.seek(int("1ffb0", 16) + rom_offset)
        for x in switch_str:
            f.write(x)
        f.write(b"\x6b")
        #print("switches done")
        # Swapped exits
        # for exit in self.exits:
        #    if self.exits[exit][1] > 0:
        #        to_exit = self.exits[exit][1]
        #        map_str = self.exits[to_exit][9]
        #        if self.exits[exit][8] != "":
        #            f.seek(int(self.exits[exit][8], 16) + rom_offset)
        #            f.write(map_str)
        #        else:
        #            map_id = map_str[0:1]
        #            xcoord = int.to_bytes(int.from_bytes(map_str[1:3], byteorder="little") // 16, 2, byteorder='little')
        #            ycoord = int.to_bytes(int.from_bytes(map_str[3:5], byteorder="little") // 16, 2, byteorder='little')
        #            facedir = map_str[5:6]
        #            camera = map_str[6:8]
        #            # print(map_id,xcoord,ycoord,facedir,camera)
        #
        #            f.seek(int(self.exits_detailed[exit][0], 16) + rom_offset)
        #            f.write(map_id)
        #            f.seek(int(self.exits_detailed[exit][1], 16) + rom_offset)
        #            f.write(xcoord)
        #            f.seek(int(self.exits_detailed[exit][2], 16) + rom_offset)
        #            f.write(ycoord)
        #            if self.exits_detailed[exit][3] != "":
        #                f.seek(int(self.exits_detailed[exit][3], 16) + rom_offset)
        #                f.write(facedir)
        #            f.seek(int(self.exits_detailed[exit][4], 16) + rom_offset)
        #            f.write(camera)
        # print "ROM successfully created"
# Print parsed list of map headers
def parse_maps(self, f, rom_offset=0):
f.seek(int("d8000", 16) + rom_offset)
header_lengths = {
b"\x02": 1,
b"\x03": 7,
b"\x04": 6,
b"\x05": 7,
b"\x06": 4,
b"\x0e": 3,
b"\x10": 6,
b"\x11": 5,
b"\x13": 2,
b"\x14": 1,
b"\x15": 1,
b"\x17": 5
}
done = False
addr = 0
map_dataset = {}
anchor_dataset = {}
while not done:
map_id = f.read(2)
print(binascii.hexlify(map_id))
map_headers = []
anchor_headers = []
map_done = False
anchor = False
while not map_done:
map_header = f.read(1)
if map_header == b"\x14":
anchor = True
anchor_id = f.read(1)
map_header += anchor_id
map_headers.append(map_header)
print(binascii.hexlify(map_header))
elif map_header == b"\x00":
map_done = True
print(binascii.hexlify(map_header))
print("")
else:
header_len = header_lengths[map_header]
map_header += f.read(header_len)
map_headers.append(map_header)
print(binascii.hexlify(map_header))
if anchor:
anchor_headers.append(map_header)
anchor_dataset[map_id] = map_headers
if anchor_headers:
anchor_dataset[anchor_id] = anchor_headers
if f.tell() >= int("daffe", 16) + rom_offset:
done = True
# print map_headers
print(anchor_headers)
# Pick random start location
def random_start(self,print_log=False):
locations = []
for loc in self.item_locations:
if (self.start_mode == "Forced Unsafe" and self.item_locations[loc][6] == "Unsafe") or (
self.start_mode != "Forced Unsafe" and self.item_locations[loc][6] == "Safe") or (
self.item_locations[loc][6] == self.start_mode):
locations.append(loc)
if not locations:
if print_log:
print("ERROR: Something is fishy with start locations")
return -1
else:
# print locations
# return 93 # TESTING!
return locations[random.randint(0, len(locations) - 1)]
# Shuffle travel destinations
def shuffle_overworld(self,print_log=False):
new_continents = [[],[],[],[],[]]
# Ensure each continent has at least one travel location
destination_list = [1,6,12,14,16,18]
random.shuffle(destination_list)
for continent in new_continents:
continent.append(destination_list.pop(0))
# Randomly assign the rest of the locations
destination_list += [2,3,4,5,7,8,9,10,11,13,15,17,19]
random.shuffle(destination_list)
new_continents[0] += destination_list[:4]
new_continents[1] += destination_list[4:8]
new_continents[2] += destination_list[8:10]
new_continents[3] += destination_list[10:13]
new_continents[4] += destination_list[-1:]
for continent in new_continents:
random.shuffle(continent)
self.overworld_menus[1][0] = new_continents[0][0]
self.overworld_menus[2][0] = new_continents[0][1]
self.overworld_menus[3][0] = new_continents[0][2]
self.overworld_menus[4][0] = new_continents[0][3]
self.overworld_menus[5][0] = new_continents[0][4]
self.overworld_menus[6][0] = new_continents[1][0]
self.overworld_menus[7][0] = new_continents[1][1]
self.overworld_menus[8][0] = new_continents[1][2]
self.overworld_menus[9][0] = new_continents[1][3]
self.overworld_menus[10][0] = new_continents[1][4]
self.overworld_menus[11][0] = new_continents[2][0]
self.overworld_menus[12][0] = new_continents[2][1]
self.overworld_menus[13][0] = new_continents[2][2]
self.overworld_menus[14][0] = new_continents[3][0]
self.overworld_menus[15][0] = new_continents[3][1]
self.overworld_menus[16][0] = new_continents[3][2]
self.overworld_menus[17][0] = new_continents[3][3]
self.overworld_menus[18][0] = new_continents[4][0]
self.overworld_menus[19][0] = new_continents[4][1]
self.graph[10][1].clear()
self.graph[11][1].clear()
self.graph[12][1].clear()
self.graph[13][1].clear()
self.graph[14][1].clear()
self.graph[10][10].clear()
self.graph[11][10].clear()
self.graph[12][10].clear()
self.graph[13][10].clear()
self.graph[14][10].clear()
# Add new overworld to the graph
for entry in self.overworld_menus:
new_entry = self.overworld_menus[entry][0]
self.graph[self.overworld_menus[entry][2]][1].append(self.overworld_menus[new_entry][3])
self.graph[self.overworld_menus[new_entry][3]][1].remove(self.overworld_menus[new_entry][2])
self.graph[self.overworld_menus[new_entry][3]][1].append(self.overworld_menus[entry][2])
return True
# Shuffle enemies in ROM
    def enemize(self, f, rom_offset=0):
        """Randomize enemy placement in the ROM.

        For each map, picks a new enemyset (honoring the map's allowed
        sets and the enemizer mode), records a map-header patch, then
        scans the ROM for each old enemy's sprite bytes and rewrites
        them with an enemy from the new set.  "Insane" mode also
        shuffles enemy stat templates; "Limited" keeps each map's
        original set.  Non-enemy sprites are disabled except in
        "Limited" mode.

        f -- open ROM file handle (binary, read/write)
        rom_offset -- offset of the ROM data within the file
        """
        f.seek(0)
        rom = f.read()
        # test_enemy = 13 # TESTING!
        # test_set = self.enemies[test_enemy][0]
        complex_enemies = [4, 15, 53, 62, 88] # Enemies with many sprites, or are no fun
        max_complex = 5
        # Get list of enemysets
        enemysets = []
        for set in self.enemysets:
            enemysets.append(set)
        # NOTE(review): duplicate of the seek/read at the top of the
        # method; rom is re-read here with identical content.
        f.seek(0)
        rom = f.read()
        # Shuffle enemy stats in Insane
        if self.enemizer == "Insane":
            insane_enemies = []
            insane_templates = []
            for enemy in self.enemies:
                if self.enemies[enemy][5] and enemy != 102: # Special exception for Zombies
                    insane_enemies.append(enemy)
                    insane_templates.append(self.enemies[enemy][2])
            random.shuffle(insane_templates)
            # Map each shuffleable enemy id to a randomly drawn template.
            insane_dictionary = {}
            i = 0
            for enemy in insane_enemies:
                insane_dictionary[enemy] = insane_templates[i]
                i += 1
        # Randomize enemy spritesets
        for map in self.maps:
            complex_ct = 0
            oldset = self.maps[map][0]
            # Determine new enemyset for map
            if self.enemizer == "Limited":
                sets = [oldset]
            elif not self.maps[map][7]:
                sets = enemysets[:]
            else:
                sets = self.maps[map][7][:]
            random.shuffle(sets)
            newset = sets[0]
            # if 10 in sets: # TESTING!
            #    newset = 10
            # newset = test_set # TESTING!
            # Gather enemies from old and new sets
            old_enemies = []
            new_enemies = []
            for enemy in self.enemies:
                if self.enemies[enemy][0] == oldset:
                    old_enemies.append(enemy)
                if self.enemies[enemy][0] == newset and self.enemies[enemy][5]:
                    new_enemies.append(enemy)
            # Update map header to reflect new enemyset
            if self.maps[map][3]:
                self.map_patches.append([self.maps[map][3],self.enemysets[newset][0],self.maps[map][4]])
            # Randomize each enemy in map
            addr_start = self.maps[map][5]
            addr_end = self.maps[map][6]
            for enemy in old_enemies:
                # print self.enemies[enemy][3]
                done = False
                addr = int(addr_start, 16) + rom_offset
                # Scan the map's address window for each occurrence of
                # this enemy's sprite-id + template bytes.
                while not done:
                    addr = rom.find(self.enemies[enemy][1] + self.enemies[enemy][2], addr + 1)
                    if addr < 0 or addr > int(addr_end, 16) + rom_offset:
                        done = True
                    else:
                        # Pick an enemy from new set
                        enemytype = self.enemies[enemy][3]
                        walkable = self.enemies[enemy][4]
                        new_enemies_tmp = new_enemies[:]
                        # Get X/Y for special placement exceptions
                        f.seek(addr - 3)
                        xcoord = binascii.hexlify(f.read(1))
                        ycoord = binascii.hexlify(f.read(1))
                        # 4-Ways cannot be on a #$XF x-coord
                        # (hexlify yields ASCII bytes, 102 == ord('f'),
                        # i.e. the x-coordinate's low nibble is F)
                        if newset == 1 and 13 in new_enemies_tmp:
                            if xcoord[1] == 102:
                                new_enemies_tmp.remove(13)
                        # Zip Flies can't be too close to map origin
                        elif newset == 10 and 103 in new_enemies_tmp:
                            if int(xcoord, 16) <= 4 or int(ycoord, 16) <= 4:
                                new_enemies_tmp.remove(103)
                        random.shuffle(new_enemies_tmp)
                        i = 0
                        found_enemy = False
                        # if 13 in new_enemies_tmp: # TESTING!
                        #    new_enemy = 13
                        #    found_enemy = True
                        # Walk the shuffled candidates until one is
                        # compatible (walkability matches, or it's
                        # type 3, or we ran out of options).
                        while not found_enemy:
                            new_enemy = new_enemies_tmp[i]
                            new_enemytype = self.enemies[new_enemy][3]
                            new_walkable = self.enemies[new_enemy][4]
                            if walkable or new_enemytype == 3 or walkable == new_walkable or i == len(new_enemies_tmp) - 1:
                                found_enemy = True
                                # Limit number of complex enemies per map
                                # NOTE(review): new_enemies is mutated
                                # while being iterated here; appears to
                                # work because iteration stops on the
                                # first match, but worth confirming.
                                if new_enemy in complex_enemies:
                                    complex_ct += 1
                                    if complex_ct >= max_complex:
                                        for enemy_tmp in new_enemies:
                                            if enemy_tmp in complex_enemies:
                                                new_enemies.remove(enemy_tmp)
                                                i -= 1
                            i += 1
                        # Overwrite the sprite entry with the new enemy.
                        f.seek(addr - 1)
                        # f.write(b"\x00" + self.enemies[test_enemy][1] + self.enemies[test_enemy][2]) # TESTING!
                        f.write(b"\x00" + self.enemies[new_enemy][1])
                        if self.enemizer == "Balanced" and enemy == 102:
                            f.write(b"\x47")
                        elif map != 27 and self.enemizer != "Balanced": # Moon Tribe cave enemies retain same template
                            if self.enemizer == "Insane" and new_enemy != 102: # Again, zombie exception
                                f.write(insane_dictionary[new_enemy])
                            else:
                                f.write(self.enemies[new_enemy][2])
        # Disable all non-enemy sprites
        if self.enemizer != "Limited":
            for sprite in self.nonenemy_sprites:
                f.seek(int(self.nonenemy_sprites[sprite][1], 16) + rom_offset + 3)
                f.write(b"\x02\xe0")
# Build world
def __init__(self, settings: RandomizerData, statues_required=6, statues=[1,2,3,4,5,6], statue_req=StatueReq.GAME_CHOICE.value, kara=3, gem=[3,5,8,12,20,30,50], incatile=[9,5], hieroglyphs=[1,2,3,4,5,6], boss_order=[1,2,3,4,5,6,7]):
self.seed = settings.seed
self.race_mode = settings.race_mode
self.fluteless = settings.fluteless
self.statues = statues
self.statues_required = statues_required
self.statue_req = statue_req
self.boss_order = boss_order
self.dungeons_req = []
for x in self.statues:
self.dungeons_req.append(self.boss_order[x-1])
gaia_coinflip = random.randint(0, 1)
if settings.goal.value == Goal.RED_JEWEL_HUNT.value:
self.goal = "Red Jewel Hunt"
elif settings.goal.value == Goal.APO_GAIA.value or (settings.goal.value == Goal.RANDOM_GAIA.value and gaia_coinflip):
self.goal = "Apocalypse Gaia"
else:
self.goal = "Dark Gaia"
if settings.logic.value == Logic.COMPLETABLE.value:
self.logic_mode = "Completable"
elif settings.logic.value == Logic.BEATABLE.value:
self.logic_mode = "Beatable"
else:
self.logic_mode = "Chaos"
if settings.entrance_shuffle.value == EntranceShuffle.NONE.value:
self.entrance_shuffle = "None"
elif settings.entrance_shuffle.value == EntranceShuffle.COUPLED.value:
self.entrance_shuffle = "Coupled"
elif settings.entrance_shuffle.value == EntranceShuffle.UNCOUPLED.value:
self.entrance_shuffle = "Uncoupled"
if settings.start_location.value == StartLocation.SOUTH_CAPE.value:
self.start_mode = "South Cape"
elif settings.start_location.value == StartLocation.SAFE.value:
self.start_mode = "Safe"
elif settings.start_location.value == StartLocation.UNSAFE.value:
self.start_mode = "Unsafe"
else:
self.start_mode = "Forced Unsafe"
if settings.enemizer.value == Enemizer.NONE.value:
self.enemizer = "None"
elif settings.enemizer.value == Enemizer.BALANCED.value:
self.enemizer = "Balanced"
elif settings.enemizer.value == Enemizer.LIMITED.value:
self.enemizer = "Limited"
elif settings.enemizer.value == Enemizer.FULL.value:
self.enemizer = "Full"
else:
self.enemizer = "Insane"
if settings.ohko:
self.variant = ["OHKO"]
elif settings.red_jewel_madness:
self.variant = ["RJM"]
else:
self.variant = []
if settings.allow_glitches:
self.variant.append("Allow Glitches")
if settings.boss_shuffle:
self.variant.append("Boss Shuffle")
if settings.overworld_shuffle:
self.variant.append("Overworld Shuffle")
if settings.open_mode:
self.variant.append("Open Mode")
if settings.z3:
self.variant.append("Z3 Mode")
self.firebird = settings.firebird
self.start_loc = 10
# self.level = settings.level.value
self.difficulty = settings.difficulty.value
self.kara = kara
self.gem = gem
self.incatile = incatile
self.hieroglyphs = hieroglyphs
self.placement_log = []
self.exit_log = []
self.spoilers = []
self.required_items = [20, 36]
self.good_items = [10, 13, 24, 25, 37, 62, 63, 64]
self.trolly_locations = [32, 45, 64, 65, 102, 108, 121, 128, 136, 147]
self.free_locations = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 24, 33, 34, 35, 36, 37, 38, 39]
self.map_patches = []
self.visited = []
self.items_collected = []
self.item_destinations = []
self.open_locations = [[],[]]
self.open_edges = []
self.graph_viz = None
# Initialize item pool, considers special attacks as "items"
# Format = { ID: [Quantity, Type code (1=item, 2=ability, 3=statue,4=other),
# ROM Code, Name, TakesInventorySpace,
# ProgressionType (1=unlocks new locations,2=quest item,3=no progression)] }
self.item_pool = {
# Items
0: [2, 1, b"\x00", "Nothing", False, 3],
1: [45, 1, b"\x01", "Red Jewel", False, 1],
2: [1, 1, b"\x02", "Prison Key", True, 1],
3: [1, 1, b"\x03", "Inca Statue A", True, 1],
4: [1, 1, b"\x04", "Inca Statue B", True, 1],
5: [0, 1, b"\x05", "Inca Melody", True, 3],
6: [12, 1, b"\x06", "Herb", False, 3],
7: [1, 1, b"\x07", "Diamond Block", True, 1],
8: [1, 1, b"\x08", "Wind Melody", True, 1],
9: [1, 1, b"\x09", "Lola's Melody", True, 1],
10: [1, 1, b"\x0a", "Large Roast", True, 1],
11: [1, 1, b"\x0b", "Mine Key A", True, 1],
12: [1, 1, b"\x0c", "Mine Key B", True, 1],
13: [1, 1, b"\x0d", "Memory Melody", True, 1],
14: [4, 1, b"\x0e", "Crystal Ball", True, 2],
15: [1, 1, b"\x0f", "Elevator Key", True, 1],
16: [1, 1, b"\x10", "Mu Palace Key", True, 1],
17: [1, 1, b"\x11", "Purification Stone", True, 1],
18: [2, 1, b"\x12", "Statue of Hope", True, 1],
19: [2, 1, b"\x13", "Rama Statue", False, 2],
20: [1, 1, b"\x14", "Magic Dust", True, 2],
21: [0, 1, b"\x15", "Blue Journal", False, 3],
22: [1, 1, b"\x16", "Lance's Letter", False, 3],
23: [1, 1, b"\x17", "Necklace Stones", True, 1],
24: [1, 1, b"\x18", "Will", True, 1],
25: [1, 1, b"\x19", "Teapot", True, 1],
26: [3, 1, b"\x1a", "Mushroom Drops", True, 1],
27: [0, 1, b"\x1b", "Bag of Gold", False, 3],
28: [1, 1, b"\x1c", "Black Glasses", False, 1],
29: [1, 1, b"\x1d", "Gorgon Flower", True, 1],
30: [1, 1, b"\x1e", "Hieroglyph", False, 2],
31: [1, 1, b"\x1f", "Hieroglyph", False, 2],
32: [1, 1, b"\x20", "Hieroglyph", False, 2],
33: [1, 1, b"\x21", "Hieroglyph", False, 2],
34: [1, 1, b"\x22", "Hieroglyph", False, 2],
35: [1, 1, b"\x23", "Hieroglyph", False, 2],
36: [1, 1, b"\x24", "Aura", True, 1],
37: [1, 1, b"\x25", "Lola's Letter", False, 1],
38: [1, 1, b"\x26", "Father's Journal", False, 2],
39: [1, 1, b"\x27", "Crystal Ring", False, 1],
40: [1, 1, b"\x28", "Apple", True, 1],
41: [1, 1, b"\x2e", "2 Red Jewels", False, 1],
42: [1, 1, b"\x2f", "3 Red Jewels", False, 1],
# Status Upgrades
50: [3, 1, b"\x87", "HP Upgrade", False, 3],
51: [1, 1, b"\x89", "DEF Upgrade", False, 3],
52: [2, 1, b"\x88", "STR Upgrade", False, 3],
53: [1, 1, b"\x8a", "Psycho Dash Upgrade", False, 3],
54: [2, 1, b"\x8b", "Dark Friar Upgrade", False, 3],
55: [0, 1, b"\x8c", "Heart Piece", False, 3],
# Abilities
60: [0, 2, "", "Nothing", False, 3],
61: [1, 2, "", "Psycho Dash", False, 1],
62: [1, 2, "", "Psycho Slider", False, 1],
63: [1, 2, "", "Spin Dash", False, 1],
64: [1, 2, "", "Dark Friar", False, 1],
65: [1, 2, "", "Aura Barrier", False, 1],
66: [1, 2, "", "Earthquaker", False, 1],
67: [0, 2, "", "Firebird", False, 1],
# Mystic Statues
100: [1, 3, "", "Mystic Statue 1", False, 2],
101: [1, 3, "", "Mystic Statue 2", False, 2],
102: [1, 3, "", "Mystic Statue 3", False, 2],
103: [1, 3, "", "Mystic Statue 4", False, 2],
104: [1, 3, "", "Mystic Statue 5", False, 2],
105: [1, 3, "", "Mystic Statue 6", False, 2],
106: [0, 3, "", "Mystic Statue", False, 2],
# Event Switches
500: [0, 4, "", "Kara Released", False, 1],
501: [0, 4, "", "Itory: Got Lilly", False, 1],
502: [0, 4, "", "Moon Tribe: Healed Spirits", False, 1],
503: [0, 4, "", "Inca: Beat Castoth", False, 1],
504: [0, 4, "", "Freejia: Found Laborer", False, 1],
505: [0, 4, "", "Neil's: Memory Restored", False, 1],
506: [0, 4, "", "Sky Garden: Map 82 NW Switch", False, 1],
507: [0, 4, "", "Sky Garden: Map 82 NE Switch", False, 1],
508: [0, 4, "", "Sky Garden: Map 82 NW Switch", False, 1],
509: [0, 4, "", "Sky Garden: Map 84 Switch", False, 1],
510: [0, 4, "", "Seaside: Fountain Purified", False, 1],
511: [0, 4, "", "Mu: Water Lowered 1", False, 1],
512: [0, 4, "", "Mu: Water Lowered 2", False, 1],
513: [0, 4, "", "Angel: Puzzle Complete", False, 1],
514: [0, 4, "", "Mt Kress: Drops Used 1", False, 1],
515: [0, 4, "", "Mt Kress: Drops Used 2", False, 1],
516: [0, 4, "", "Mt Kress: Drops Used 3", False, 1],
517: [0, 4, "", "Pyramid: Hieroglyphs Placed", False, 1],
518: [0, 4, "", "Babel: Castoth Defeated", False, 1],
519: [0, 4, "", "Babel: Viper Defeated", False, 1],
520: [0, 4, "", "Babel: Vampires Defeated", False, 1],
521: [0, 4, "", "Babel: Sand Fanger Defeated", False, 1],
522: [0, 4, "", "Babel: Mummy Queen Defeated", False, 1],
523: [0, 4, "", "Mansion: Solid Arm Defeated", False, 1],
# Misc
600: [0, 4, "", "Freedan Access", False, 1],
601: [0, 4, "", "Glitches", False, 1],
602: [0, 4, "", "Early Firebird", False, 1]
}
# Define Item/Ability/Statue locations
# Format: { ID: [Region, Type (1=item,2=ability,3=statue,4=other), Filled Flag,
# Filled Item, Restricted Items, Item Addr, Text Addr, Text2 Addr,
# Special (map# or inventory addr), Name, Swapped Flag]}
# (For random start, [6]=Type, [7]=XY_spawn_data)
self.item_locations = {
# Jeweler
0: [2, 1, False, 0, [], "8d019", "8d19d", "", "8d260", "Jeweler Reward 1 "],
1: [3, 1, False, 0, [], "8d028", "8d1ba", "", "8d274", "Jeweler Reward 2 "],
2: [4, 1, False, 0, [], "8d037", "8d1d7", "", "8d288", "Jeweler Reward 3 "],
3: [5, 1, False, 0, [], "8d04a", "8d1f4", "", "8d29c", "Jeweler Reward 4 "],
4: [6, 1, False, 0, [], "8d059", "8d211", "", "8d2b0", "Jeweler Reward 5 "],
5: [7, 1, False, 0, [], "8d069", "8d2ea", "", "8d2c4", "Jeweler Reward 6 "],
# South Cape
6: [21, 1, False, 0, [], "F51D", "F52D", "F543", "", "South Cape: Bell Tower "],
7: [20, 1, False, 0, [], "4846e", "48479", "", "", "South Cape: Fisherman "], # text2 was 0c6a1
8: [26, 1, False, 0, [], "F59D", "F5AD", "F5C3", "", "South Cape: Lance's House "],
9: [23, 1, False, 0, [], "499e4", "49be5", "", "", "South Cape: Lola "],
10: [21, 2, False, 0, [64, 65, 66], "c830a", "Safe", b"\xE0\x00\x70\x00\x83\x00\x43", b"\x01", "South Cape: Dark Space "],
# Edward's
11: [30, 1, False, 0, [], "4c214", "4c299", "", "", "Edward's Castle: Hidden Guard "],
12: [30, 1, False, 0, [], "4d0ef", "4d141", "", "", "Edward's Castle: Basement "],
13: [32, 1, False, 0, [], "4d32f", "4d4b1", "", "", "Edward's Prison: Hamlet "], # text 4d5f4?
14: [32, 2, False, 0, [64, 65, 66], "c8637", "", "", b"\x0b", "Edward's Prison: Dark Space "],
# Underground Tunnel
15: [42, 1, False, 0, [], "1AFA9", "", "", "", "Underground Tunnel: Spike's Chest "],
16: [44, 1, False, 0, [], "1AFAE", "", "", "", "Underground Tunnel: Small Room Chest"],
17: [48, 1, False, 0, [], "1AFB3", "", "", "", "Underground Tunnel: Ribber's Chest "],
18: [49, 1, False, 0, [], "F61D", "F62D", "F643", "", "Underground Tunnel: Barrels "],
19: [47, 2, False, 0, [], "c8aa2", "Unsafe", b"\xA0\x00\xD0\x04\x83\x00\x74", b"\x12", "Underground Tunnel: Dark Space "], # Always open
# Itory
20: [51, 1, False, 0, [], "F69D", "F6AD", "F6C3", "", "Itory Village: Logs "],
21: [58, 1, False, 0, [], "4f375", "4f38d", "4f3a8", "", "Itory Village: Cave "],
22: [51, 2, False, 0, [64, 65, 66], "c8b34", "Safe", b"\x30\x04\x90\x00\x83\x00\x35", b"\x15", "Itory Village: Dark Space "],
# Moon Tribe
23: [62, 1, False, 0, [], "4fae1", "4faf9", "4fb16", "", "Moon Tribe: Cave "],
# Inca
24: [71, 1, False, 0, [], "1AFB8", "", "", "", "Inca Ruins: Diamond-Block Chest "],
25: [92, 1, False, 0, [], "1AFC2", "", "", "", "Inca Ruins: Broken Statues Chest "],
26: [83, 1, False, 0, [], "1AFBD", "", "", "", "Inca Ruins: Stone Lord Chest "],
27: [93, 1, False, 0, [], "1AFC6", "", "", "", "Inca Ruins: Slugger Chest "],
28: [76, 1, False, 0, [], "9c5bd", "9c614", "9c637", "", "Inca Ruins: Singing Statue "],
29: [96, 2, False, 0, [], "c9302", "Unsafe", b"\x10\x01\x90\x00\x83\x00\x32", b"\x28", "Inca Ruins: Dark Space 1 "], # Always open
30: [93, 2, False, 0, [], "c923b", "Unsafe", b"\xC0\x01\x50\x01\x83\x00\x32", b"\x26", "Inca Ruins: Dark Space 2 "],
31: [77, 2, False, 0, [], "c8db8", "", "", b"\x1e", "Inca Ruins: Final Dark Space "],
# Gold Ship
32: [100, 1, False, 0, [], "5965e", "5966e", "", "", "Gold Ship: Seth "],
# Diamond Coast
33: [102, 1, False, 0, [], "F71D", "F72D", "F743", "", "Diamond Coast: Jar "],
# Freejia
34: [121, 1, False, 0, [], "F79D", "F7AD", "F7C3", "", "Freejia: Hotel "],
35: [110, 1, False, 0, [], "5b6d8", "5b6e8", "", "", "Freejia: Creepy Guy "],
36: [110, 1, False, 0, [], "5cf9e", "5cfae", "5cfc4", "", "Freejia: Trash Can 1 "],
37: [110, 1, False, 0, [], "5cf3d", "5cf49", "", "", "Freejia: Trash Can 2 "], # text2 was 5cf5b
38: [115, 1, False, 0, [], "5b8b7", "5b962", "5b9ee", "", "Freejia: Snitch "], # text1 was @5b94d
39: [125, 2, False, 0, [64, 65, 66], "c96ce", "Safe", b"\x40\x00\xa0\x00\x83\x00\x11", b"\x34", "Freejia: Dark Space "],
# Diamond Mine
40: [134, 1, False, 0, [], "1AFD0", "", "", "", "Diamond Mine: Chest "],
41: [137, 1, False, 0, [], "5d7e4", "5d819", "5d830", "", "Diamond Mine: Trapped Laborer "],
42: [143, 1, False, 0, [], "aa777", "aa85c", "", "", "Diamond Mine: Laborer w/Elevator Key"], # text1 was aa811
43: [148, 1, False, 0, [], "5d4d2", "5d4eb", "5d506", "", "Diamond Mine: Morgue "],
44: [149, 1, False, 0, [], "aa757", "aa7ef", "", "", "Diamond Mine: Laborer w/Mine Key "], # text1 was aa7b4
45: [150, 1, False, 0, [], "5d2b0", "5d2da", "", "", "Diamond Mine: Sam "],
46: [136, 2, False, 0, [], "c9a87", "Unsafe", b"\xb0\x01\x70\x01\x83\x00\x32", b"\x40", "Diamond Mine: Appearing Dark Space "], # Always open
47: [131, 2, False, 0, [], "c98b0", "Unsafe", b"\xd0\x00\xc0\x00\x83\x00\x61", b"\x3d", "Diamond Mine: Dark Space at Wall "],
48: [142, 2, False, 0, [], "c9b49", "", "", b"\x42", "Diamond Mine: Dark Space behind Wall"],
# Sky Garden
49: [172, 1, False, 0, [], "1AFDD", "", "", "", "Sky Garden: (NE) Platform Chest "],
50: [173, 1, False, 0, [], "1AFD9", "", "", "", "Sky Garden: (NE) Blue Cyber Chest "],
51: [174, 1, False, 0, [], "1AFD5", "", "", "", "Sky Garden: (NE) Statue Chest "],
52: [180, 1, False, 0, [], "1AFE2", "", "", "", "Sky Garden: (SE) Dark Side Chest "],
53: [185, 1, False, 0, [], "1AFE7", "", "", "", "Sky Garden: (SW) Ramp Chest "],
54: [186, 1, False, 0, [], "1AFEC", "", "", "", "Sky Garden: (SW) Dark Side Chest "],
55: [194, 1, False, 0, [], "1AFF1", "", "", "", "Sky Garden: (NW) Top Chest "],
56: [194, 1, False, 0, [], "1AFF5", "", "", "", "Sky Garden: (NW) Bottom Chest "],
57: [170, 2, False, 0, [64, 65, 66], "c9d63", "Safe", b"\x90\x00\x70\x00\x83\x00\x22", b"\x4c", "Sky Garden: Dark Space (Foyer) "],
58: [169, 2, False, 0, [], "ca505", "Unsafe", b"\x70\x00\xa0\x00\x83\x00\x11", b"\x56", "Sky Garden: Dark Space (SE) "], # in the room
59: [183, 2, False, 0, [], "ca173", "", "", b"\x51", "Sky Garden: Dark Space (SW) "],
60: [195, 2, False, 0, [], "ca422", "Unsafe", b"\x20\x00\x70\x00\x83\x00\x44", b"\x54", "Sky Garden: Dark Space (NW) "],
# Seaside Palace
61: [202, 1, False, 0, [], "1AFFF", "", "", "", "Seaside Palace: Side Room Chest "],
62: [200, 1, False, 0, [], "1AFFA", "", "", "", "Seaside Palace: First Area Chest "],
63: [205, 1, False, 0, [], "1B004", "", "", "", "Seaside Palace: Second Area Chest "],
64: [206, 1, False, 0, [], "68af7", "68ea9", "68f02", "", "Seaside Palace: Buffy "],
65: [208, 1, False, 0, [], "6922d", "6939e", "693b7", "", "Seaside Palace: Coffin "], # text1 was 69377
66: [200, 2, False, 0, [64, 65, 66], "ca574", "Safe", b"\xf0\x02\x90\x00\x83\x00\x64", b"\x5a", "Seaside Palace: Dark Space "],
# Mu
67: [217, 1, False, 0, [], "1B012", "", "", "", "Mu: Empty Chest 1 "],
68: [220, 1, False, 0, [], "1B01B", "", "", "", "Mu: Empty Chest 2 "],
69: [225, 1, False, 0, [], "698be", "698d2", "", "", "Mu: Hope Statue 1 "],
70: [236, 1, False, 0, [], "69966", "69975", "", "", "Mu: Hope Statue 2 "],
71: [215, 1, False, 0, [], "1B00D", "", "", "", "Mu: Chest s/o Hope Room 2 "],
72: [214, 1, False, 0, [], "1B009", "", "", "", "Mu: Rama Chest N "],
73: [219, 1, False, 0, [], "1B016", "", "", "", "Mu: Rama Chest E "],
74: [218, 2, False, 0, [], "ca92d", "", "", b"\x60", "Mu: Open Dark Space "], # Always open
75: [228, 2, False, 0, [], "caa99", "", "", b"\x62", "Mu: Slider Dark Space "],
# Angel Village
76: [254, 1, False, 0, [], "F81D", "F82D", "F843", "", "Angel Village: Dance Hall "],
77: [255, 2, False, 0, [64, 65, 66], "caf67", "Safe", b"\x90\x01\xb0\x00\x83\x01\x12", b"\x6c", "Angel Village: Dark Space "],
# Angel Dungeon
78: [265, 1, False, 0, [], "1B020", "", "", "", "Angel Dungeon: Slider Chest "],
79: [271, 1, False, 0, [], "F89D", "F8AD", "F8C3", "", "Angel Dungeon: Ishtar's Room "],
80: [274, 1, False, 0, [], "1B02A", "", "", "", "Angel Dungeon: Puzzle Chest 1 "],
81: [274, 1, False, 0, [], "1B02E", "", "", "", "Angel Dungeon: Puzzle Chest 2 "],
82: [273, 1, False, 0, [], "1B025", "", "", "", "Angel Dungeon: Ishtar's Chest "],
# Watermia
83: [280, 1, False, 0, [], "F91D", "F92D", "F943", "", "Watermia: West Jar "],
85: [286, 1, False, 0, [], "7ad21", "7aede", "", "", "Watermia: Lance "], # text2 was 7afa7
86: [283, 1, False, 0, [], "F99D", "F9AD", "F9C3", "", "Watermia: Gambling House "],
87: [280, 1, False, 0, [], "79248", "79288", "792a1", "", "Watermia: Russian Glass "],
88: [282, 2, False, 0, [64, 65, 66], "cb644", "Safe", b"\x40\x00\xa0\x00\x83\x00\x11", b"\x7c", "Watermia: Dark Space "],
# Great Wall
89: [290, 1, False, 0, [], "7b5c5", "7b5d1", "", "", "Great Wall: Necklace 1 "],
90: [292, 1, False, 0, [], "7b625", "7b631", "", "", "Great Wall: Necklace 2 "],
91: [292, 1, False, 0, [], "1B033", "", "", "", "Great Wall: Chest 1 "],
92: [294, 1, False, 0, [], "1B038", "", "", "", "Great Wall: Chest 2 "],
93: [295, 2, False, 0, [], "cbb11", "Unsafe", b"\x60\x00\xc0\x02\x83\x20\x38", b"\x85", "Great Wall: Archer Dark Space "],
94: [297, 2, False, 0, [], "cbb80", "Unsafe", b"\x50\x01\x80\x04\x83\x00\x63", b"\x86", "Great Wall: Platform Dark Space "], # Always open
95: [300, 2, False, 0, [], "cbc60", "", "", b"\x88", "Great Wall: Appearing Dark Space "],
# Euro
96: [310, 1, False, 0, [], "FA1D", "FA2D", "FA43", "", "Euro: Alley "],
97: [310, 1, False, 0, [], "7c0b3", "7c0f3", "", "", "Euro: Apple Vendor "],
98: [320, 1, False, 0, [], "7e51f", "7e534", "7e54a", "", "Euro: Hidden House "],
99: [323, 1, False, 0, [], "7cd12", "7cd39", "7cd9b", "", "Euro: Store Item 1 "],
100: [323, 1, False, 0, [], "7cdf9", "7ce28", "7ce3e", "", "Euro: Store Item 2 "], # text2 was 7cedd
101: [321, 1, False, 0, [], "FA9D", "FAAD", "FAC3", "", "Euro: Shrine "],
102: [315, 1, False, 0, [], "7df58", "7e10a", "", "", "Euro: Ann "],
103: [325, 2, False, 0, [64, 65, 66], "cc0b0", "Safe", b"\xb0\x00\xb0\x00\x83\x00\x11", b"\x99", "Euro: Dark Space "],
# Mt Temple
104: [336, 1, False, 0, [], "1B03D", "", "", "", "Mt. Temple: Red Jewel Chest "],
105: [338, 1, False, 0, [], "1B042", "", "", "", "Mt. Temple: Drops Chest 1 "],
106: [342, 1, False, 0, [], "1B047", "", "", "", "Mt. Temple: Drops Chest 2 "],
107: [343, 1, False, 0, [], "1B04C", "", "", "", "Mt. Temple: Drops Chest 3 "],
108: [345, 1, False, 0, [], "1B051", "", "", "", "Mt. Temple: Final Chest "],
109: [332, 2, False, 0, [], "cc24f", "Unsafe", b"\xf0\x01\x10\x03\x83\x00\x44", b"\xa1", "Mt. Temple: Dark Space 1 "],
110: [337, 2, False, 0, [], "cc419", "Unsafe", b"\xc0\x07\xc0\x00\x83\x00\x28", b"\xa3", "Mt. Temple: Dark Space 2 "],
111: [343, 2, False, 0, [], "cc7b8", "", "", b"\xa7", "Mt. Temple: Dark Space 3 "],
# Natives'
112: [353, 1, False, 0, [], "FB1D", "FB2D", "FB43", "", "Natives' Village: Statue Room "],
113: [354, 1, False, 0, [], "893af", "8942a", "", "", "Natives' Village: Statue "],
114: [350, 2, False, 0, [64, 65, 66], "cca37", "Safe", b"\xc0\x01\x50\x00\x83\x00\x22", b"\xac", "Natives' Village: Dark Space "],
# Ankor Wat
115: [361, 1, False, 0, [], "1B056", "", "", "", "Ankor Wat: Ramp Chest "],
116: [370, 1, False, 0, [], "1B05B", "", "", "", "Ankor Wat: Flyover Chest "],
117: [378, 1, False, 0, [], "1B060", "", "", "", "Ankor Wat: U-Turn Chest "],
118: [382, 1, False, 0, [], "1B065", "", "", "", "Ankor Wat: Drop Down Chest "],
119: [389, 1, False, 0, [], "1B06A", "", "", "", "Ankor Wat: Forgotten Chest "],
120: [380, 1, False, 0, [], "89fa3", "89fbb", "", "", "Ankor Wat: Glasses Location "], # slow text @89fdc
121: [391, 1, False, 0, [], "89adc", "89af1", "89b07", "", "Ankor Wat: Spirit "], # item was 89b0d, text was 89e2e
122: [372, 2, False, 0, [], "cce92", "Unsafe", b"\x20\x04\x30\x03\x83\x00\x46", b"\xb6", "Ankor Wat: Garden Dark Space "], # Always open
123: [377, 2, False, 0, [], "cd0a2", "", "", b"\xb8", "Ankor Wat: Earthquaker Dark Space "],
124: [383, 2, False, 0, [], "cd1a7", "Unsafe", b"\xb0\x02\xc0\x01\x83\x00\x33", b"\xbb", "Ankor Wat: Drop Down Dark Space "], # Always open
# Dao
125: [400, 1, False, 0, [], "8b1b0", "", "", "", "Dao: Entrance Item 1 "],
126: [400, 1, False, 0, [], "8b1b5", "", "", "", "Dao: Entrance Item 2 "],
127: [400, 1, False, 0, [], "FB9D", "FBAD", "FBC3", "", "Dao: East Grass "],
128: [403, 1, False, 0, [], "8b016", "8b073", "8b090", "", "Dao: Snake Game "],
129: [400, 2, False, 0, [64, 65, 66], "cd3d0", "Safe", b"\x20\x00\x80\x00\x83\x00\x23", b"\xc3", "Dao: Dark Space "],
# Pyramid
130: [411, 1, False, 0, [], "8dcb7", "8e66c", "8e800", "", "Pyramid: Dark Space Top "], # text2 was 8e800
131: [412, 1, False, 0, [], "FC1D", "FC2D", "FC43", "", "Pyramid: Hidden Platform "],
132: [442, 1, False, 0, [], "8c7b2", "8c7c9", "", "", "Pyramid: Hieroglyph 1 "],
133: [422, 1, False, 0, [], "1B06F", "", "", "", "Pyramid: Room 2 Chest "],
134: [443, 1, False, 0, [], "8c879", "8c88c", "", "", "Pyramid: Hieroglyph 2 "],
135: [432, 1, False, 0, [], "1B079", "", "", "", "Pyramid: Room 3 Chest "],
136: [444, 1, False, 0, [], "8c921", "8c934", "", "", "Pyramid: Hieroglyph 3 "],
137: [439, 1, False, 0, [], "1B07E", "", "", "", "Pyramid: Room 4 Chest "],
138: [445, 1, False, 0, [], "8c9c9", "8c9dc", "", "", "Pyramid: Hieroglyph 4 "],
139: [428, 1, False, 0, [], "1B074", "", "", "", "Pyramid: Room 5 Chest "],
140: [446, 1, False, 0, [], "8ca71", "8ca84", "", "", "Pyramid: Hieroglyph 5 "],
141: [447, 1, False, 0, [], "8cb19", "8cb2c", "", "", "Pyramid: Hieroglyph 6 "],
142: [413, 2, True, 0, [], "cd570", "Unsafe", b"\xc0\x01\x90\x03\x83\x00\x44", b"\xcc", "Pyramid: Dark Space Bottom "], # Always open
# Babel
143: [461, 1, False, 0, [], "FC9D", "FCAD", "FCC3", "", "Babel: Pillow "],
144: [461, 1, False, 0, [], "99a4f", "99ae4", "99afe", "", "Babel: Force Field "], # item was 99a61
145: [461, 2, False, 0, [64, 65, 66], "ce09b", "Forced Unsafe", b"\x90\x07\xb0\x01\x83\x10\x28", b"\xdf", "Babel: Dark Space Bottom "],
146: [472, 2, False, 0, [64, 65, 66], "ce159", "Safe", b"\xb0\x02\xb0\x01\x83\x10\x23", b"\xe3", "Babel: Dark Space Top "],
# Jeweler's Mansion
147: [480, 1, False, 0, [], "1B083", "", "", "", "Jeweler's Mansion: Chest "],
# Mystic Statues
148: [101, 3, False, 0, [101, 102, 103, 104, 105], "", "", "", "", "Castoth Prize "],
149: [198, 3, False, 0, [100, 102, 103, 104, 105], "", "", "", "", "Viper Prize "],
150: [244, 3, False, 0, [100, 101, 103, 104, 105], "", "", "", "", "Vampires Prize "],
151: [302, 3, False, 0, [100, 101, 102, 104, 105], "", "", "", "", "Sand Fanger Prize "],
152: [448, 3, False, 0, [100, 101, 102, 103, 105], "", "", "", "", "Mummy Queen Prize "],
153: [479, 3, False, 0, [100, 101, 102, 103, 104], "", "", "", "", "Babel Prize "],
# Event Switches
500: [500, 4, True, 500, [], "", "", "", "", "Kara "],
501: [501, 4, True, 501, [], "", "", "", "", "Lilly "],
502: [502, 4, True, 502, [], "", "", "", "", "Moon Tribe: Spirits Healed "],
503: [503, 4, True, 503, [], "", "", "", "", "Inca: Castoth defeated "],
504: [504, 4, True, 504, [], "", "", "", "", "Freejia: Found Laborer "],
505: [505, 4, True, 505, [], "", "", "", "", "Neil's Memory Restored "],
506: [506, 4, True, 506, [], "", "", "", "", "Sky Garden: Map 82 NW Switch "],
507: [507, 4, True, 507, [], "", "", "", "", "Sky Garden: Map 82 NE Switch "],
508: [508, 4, True, 508, [], "", "", "", "", "Sky Garden: Map 82 SE Switch "],
509: [509, 4, True, 509, [], "", "", "", "", "Sky Garden: Map 84 Switch "],
510: [510, 4, True, 510, [], "", "", "", "", "Seaside: Fountain Purified "],
511: [511, 4, True, 511, [], "", "", "", "", "Mu: Water Lowered 1 "],
512: [512, 4, True, 512, [], "", "", "", "", "Mu: Water Lowered 2 "],
513: [513, 4, True, 513, [], "", "", "", "", "Angel: Puzzle Complete "],
514: [514, 4, True, 514, [], "", "", "", "", "Mt Kress: Drops used 1 "],
515: [515, 4, True, 515, [], "", "", "", "", "Mt Kress: Drops used 2 "],
516: [516, 4, True, 516, [], "", "", "", "", "Mt Kress: Drops used 3 "],
517: [517, 4, True, 517, [], "", "", "", "", "Pyramid: Hieroglyphs placed "],
518: [518, 4, True, 518, [], "", "", "", "", "Babel: Castoth defeated "],
519: [519, 4, True, 519, [], "", "", "", "", "Babel: Viper defeated "],
520: [520, 4, True, 520, [], "", "", "", "", "Babel: Vampires defeated "],
521: [521, 4, True, 521, [], "", "", "", "", "Babel: Sand Fanger defeated "],
522: [522, 4, True, 522, [], "", "", "", "", "Babel: Mummy Queen defeated "],
523: [523, 4, True, 523, [], "", "", "", "", "Mansion: Solid Arm defeated "],
# Misc
600: [600, 4, True, 600, [], "", "", "", "", "Freedan Access "],
601: [601, 4, True, 601, [], "", "", "", "", "Glitches "],
602: [602, 4, True, 602, [], "", "", "", "", "Early Firebird "],
603: [491, 4, True, 67, [], "", "", "", "", "Firebird "]
}
        # World graph
        # Format: { Region ID:
        #               0: Traversed_flag, 1: [AccessibleRegions], 2: type (0=other/misc, 1=exterior, 2=interior), 3: [continentID, areaID, layer, MapID],
        #               4: DS_access (0=no_access, 1=any_DS, 2=form_change_DS),
        #               5: RegionName,
        #               6: [ItemsToRemove],
        #               7: ForceFormChange,
        #               8: [AccessibleFromNodes],
        #               9: [Accessible_DS_nodes],
        #              10: [Accessible_Nodes_w_Logic],
        #              11: [item_locations],
        #              12: [origin_logic],
        #              13: [dest_logic],
        #              14: [origin_exits],
        #              15: [dest_exits] }
self.graph = {
# Game Start
0: [False, [22], 0, [0,0,0,b"\x00"], 0, "Game Start", [], True, [], [], [], [], [], [], [], []],
# Jeweler
1: [False, [], 0, [0,0,0,b"\x00"], 0, "Jeweler Access", [], False, [], [], [], [], [], [], [], []],
2: [False, [], 0, [0,0,0,b"\x00"], 0, "Jeweler Reward 1", [], False, [], [], [], [], [], [], [], []],
3: [False, [], 0, [0,0,0,b"\x00"], 0, "Jeweler Reward 2", [], False, [], [], [], [], [], [], [], []],
4: [False, [], 0, [0,0,0,b"\x00"], 0, "Jeweler Reward 3", [], False, [], [], [], [], [], [], [], []],
5: [False, [], 0, [0,0,0,b"\x00"], 0, "Jeweler Reward 4", [], False, [], [], [], [], [], [], [], []],
6: [False, [], 0, [0,0,0,b"\x00"], 0, "Jeweler Reward 5", [], False, [], [], [], [], [], [], [], []],
7: [False, [], 0, [0,0,0,b"\x00"], 0, "Jeweler Reward 6", [], False, [], [], [], [], [], [], [], []],
8: [False, [], 0, [0,0,0,b"\x00"], 0, "Jeweler Reward 7", [], False, [], [], [], [], [], [], [], []],
# Overworld Menus
10: [False, [20,30,50,60,63], 0, [1,0,0,b"\x00"], 0, "Overworld: SW Continent", [], True, [], [], [], [], [], [], [], []],
11: [False, [102,110,133,160,162], 0, [2,0,0,b"\x00"], 0, "Overworld: SE Continent", [], True, [], [], [], [], [], [], [], []],
12: [False, [250,280,290], 0, [3,0,0,b"\x00"], 0, "Overworld: NE Continent", [], True, [], [], [], [], [], [], [], []],
13: [False, [310,330,350,360], 0, [4,0,0,b"\x00"], 0, "Overworld: N Continent", [], True, [], [], [], [], [], [], [], []],
14: [False, [400,410], 0, [5,0,0,b"\x00"], 0, "Overworld: NW Continent", [], True, [], [], [], [], [], [], [], []],
# Passage Menus
15: [False, [], 0, [0,0,0,b"\x00"], 0, "Passage: Seth", [], True, [], [], [], [], [], [], [], []],
16: [False, [], 0, [0,0,0,b"\x00"], 0, "Passage: Moon Tribe", [], True, [], [], [], [], [], [], [], []],
17: [False, [], 0, [0,0,0,b"\x00"], 0, "Passage: Neil", [], True, [], [], [], [], [], [], [], []],
# South Cape
20: [False, [1,10], 1, [1,1,0,b"\x00"], 0, "South Cape: Main Area", [], False, [], [], [], [], [], [], [], []],
21: [False, [20], 1, [1,1,0,b"\x00"], 0, "South Cape: School Roof", [], False, [], [], [], [], [], [], [], []],
22: [False, [], 2, [1,1,0,b"\x00"], 0, "South Cape: School", [], False, [], [], [], [], [], [], [], []],
23: [False, [], 2, [1,1,0,b"\x00"], 0, "South Cape: Will's House", [], False, [], [], [], [], [], [], [], []],
24: [False, [], 2, [1,1,0,b"\x00"], 0, "South Cape: East House", [], False, [], [], [], [], [], [], [], []],
25: [False, [], 2, [1,1,0,b"\x00"], 0, "South Cape: Seth's House", [], False, [], [], [], [], [], [], [], []],
26: [False, [], 2, [1,1,0,b"\x00"], 0, "South Cape: Lance's House", [], False, [], [], [], [], [], [], [], []],
27: [False, [], 2, [1,1,0,b"\x00"], 0, "South Cape: Erik's House", [], False, [], [], [], [], [], [], [], []],
28: [False, [], 2, [1,1,0,b"\x00"], 0, "South Cape: Seaside Cave", [], False, [], [], [], [], [], [], [], []],
# Edward's / Prison
30: [False, [10], 1, [1,2,0,b"\x00"], 0, "Edward's Castle: Main Area", [], False, [], [], [], [], [], [], [], []],
31: [False, [30], 1, [1,2,0,b"\x00"], 0, "Edward's Castle: Behind Guard", [], False, [], [], [], [], [], [], [], []],
32: [False, [], 2, [1,2,0,b"\x00"], 0, "Edward's Prison: Will's Cell", [2], False, [], [], [], [], [], [], [], []],
33: [False, [], 2, [1,2,0,b"\x00"], 0, "Edward's Prison: Prison Main", [2], False, [], [], [], [], [], [], [], []],
# Underground Tunnel
40: [False, [], 2, [1,2,0,b"\x00"], 0, "Underground Tunnel: Map 12", [], False, [], [], [], [], [], [], [], []],
41: [False, [], 2, [1,2,0,b"\x00"], 0, "Underground Tunnel: Map 13", [], False, [], [], [], [], [], [], [], []],
42: [False, [], 2, [1,2,0,b"\x00"], 0, "Underground Tunnel: Map 14", [], False, [], [], [], [], [], [], [], []],
43: [False, [], 2, [1,2,0,b"\x00"], 0, "Underground Tunnel: Map 15", [], False, [], [], [], [], [], [], [], []],
44: [False, [], 2, [1,2,0,b"\x00"], 0, "Underground Tunnel: Map 16", [], False, [], [], [], [], [], [], [], []],
45: [False, [], 2, [1,2,0,b"\x00"], 0, "Underground Tunnel: Map 17 (entrance)", [], False, [], [], [], [], [], [], [], []],
46: [False, [45], 2, [1,2,0,b"\x00"], 0, "Underground Tunnel: Map 17 (exit open)", [], False, [], [], [], [], [], [], [], []],
47: [False, [], 2, [1,2,0,b"\x00"], 0, "Underground Tunnel: Map 18 (before bridge)", [], False, [], [], [], [], [], [], [], []],
48: [False, [], 2, [1,2,0,b"\x00"], 0, "Underground Tunnel: Map 18 (after bridge)", [], False, [], [], [], [], [], [], [], []],
49: [False, [], 2, [1,2,0,b"\x00"], 0, "Underground Tunnel: Exit", [], True, [], [], [], [], [], [], [], []],
# Itory
50: [False, [10], 1, [1,3,0,b"\x00"], 0, "Itory: Entrance", [9], False, [], [], [], [], [], [], [], []],
51: [False, [50], 1, [1,3,0,b"\x00"], 0, "Itory: Main Area", [], False, [], [], [], [], [], [], [], []],
52: [False, [], 1, [1,3,0,b"\x00"], 0, "Itory: Lilly's Back Porch", [], False, [], [], [], [], [], [], [], []],
53: [False, [], 2, [1,3,0,b"\x00"], 0, "Itory: West House", [], False, [], [], [], [], [], [], [], []],
54: [False, [], 2, [1,3,0,b"\x00"], 0, "Itory: North House", [], False, [], [], [], [], [], [], [], []],
55: [False, [], 2, [1,3,0,b"\x00"], 0, "Itory: Lilly's House", [23], False, [], [], [], [], [], [], [], []],
56: [False, [], 2, [1,3,0,b"\x00"], 0, "Itory: Cave", [], False, [], [], [], [], [], [], [], []],
57: [False, [56], 2, [1,3,0,b"\x00"], 0, "Itory: Cave (behind false wall)", [], False, [], [], [], [], [], [], [], []],
58: [False, [], 2, [1,3,0,b"\x00"], 0, "Itory: Cave (secret room)", [], False, [], [], [], [], [], [], [], []],
59: [False, [55,501], 0, [1,3,0,b"\x00"], 0, "Itory: Got Lilly", [], False, [], [], [], [], [], [], [], []],
# Moon Tribe / Inca Entrance
60: [False, [10], 1, [1,4,0,b"\x00"], 0, "Moon Tribe: Main Area", [25], False, [], [], [], [], [], [], [], []],
61: [False, [], 2, [1,4,0,b"\x00"], 0, "Moon Tribe: Cave", [], False, [], [], [], [], [], [], [], []],
62: [False, [61], 2, [1,4,0,b"\x00"], 0, "Moon Tribe: Cave (Pedestal)", [], False, [], [], [], [], [], [], [], []],
63: [False, [10], 1, [1,5,0,b"\x00"], 0, "Inca: Entrance", [], False, [], [], [], [], [], [], [], []],
64: [False, [60,502], 0, [1,4,0,b"\x00"], 0, "Moon Tribe: Spirits Awake", [], False, [], [], [], [], [], [], [], []],
# Inca Ruins
70: [False, [], 2, [1,5,0,b"\x00"], 0, "Inca: Map 29 (NE)", [], False, [], [], [], [], [], [], [], []],
71: [False, [], 2, [1,5,0,b"\x00"], 0, "Inca: Map 29 (NW)", [], False, [], [], [], [], [], [], [], []],
72: [False, [70,73], 2, [1,5,0,b"\x00"], 0, "Inca: Map 29 (N)", [], False, [], [], [], [], [], [], [], []],
73: [False, [], 2, [1,5,0,b"\x00"], 0, "Inca: Map 29 (center)", [], False, [], [], [], [], [], [], [], []],
74: [False, [72], 2, [1,5,0,b"\x00"], 0, "Inca: Map 29 (SW)", [], False, [], [], [], [], [], [], [], []],
75: [False, [72,99], 2, [1,5,0,b"\x00"], 0, "Inca: Map 29 (SE)", [], False, [], [], [], [], [], [], [], []],
76: [False, [], 2, [1,5,0,b"\x00"], 0, "Inca: Map 29 (statue head)", [], False, [], [], [], [], [], [], [], []],
77: [False, [], 2, [1,5,0,b"\x00"], 0, "Inca: Map 30 (first area)", [3, 4], False, [], [], [], [], [], [], [], []],
78: [False, [77], 2, [1,5,0,b"\x00"], 0, "Inca: Map 30 (second area)", [], False, [], [], [], [], [], [], [], []],
79: [False, [], 2, [1,5,0,b"\x00"], 0, "Inca: Map 31", [], False, [], [], [], [], [], [], [], []],
80: [False, [], 2, [1,5,0,b"\x00"], 0, "Inca: Map 32 (entrance)", [], False, [], [], [], [], [], [], [], []],
81: [False, [], 2, [1,5,0,b"\x00"], 0, "Inca: Map 32 (behind statue)", [], False, [], [], [], [], [], [], [], []],
82: [False, [83], 2, [1,5,0,b"\x00"], 0, "Inca: Map 33 (entrance)", [], False, [], [], [], [], [], [], [], []],
83: [False, [82], 2, [1,5,0,b"\x00"], 0, "Inca: Map 33 (over ramp)", [], False, [], [], [], [], [], [], [], []], # Need to prevent softlocks here
84: [False, [], 2, [1,5,0,b"\x00"], 0, "Inca: Map 34", [], False, [], [], [], [], [], [], [], []],
85: [False, [], 2, [1,5,0,b"\x00"], 0, "Inca: Map 35 (entrance)", [], False, [], [], [], [], [], [], [], []],
86: [False, [85], 2, [1,5,0,b"\x00"], 0, "Inca: Map 35 (over ramp)", [], False, [], [], [], [], [], [], [], []],
87: [False, [], 2, [1,5,0,b"\x00"], 0, "Inca: Map 36 (main)", [8], False, [], [], [], [], [], [], [], []],
88: [False, [87], 2, [1,5,0,b"\x00"], 0, "Inca: Map 36 (exit opened)", [], False, [], [], [], [], [], [], [], []],
89: [False, [], 2, [1,5,0,b"\x00"], 0, "Inca: Map 37 (main area)", [7], False, [], [], [], [], [], [], [], []],
90: [False, [], 2, [1,5,0,b"\x00"], 0, "Inca: Map 37 (tile bridge)", [], False, [], [], [], [], [], [], [], []], # Check for potential softlock?
91: [False, [], 2, [1,5,0,b"\x00"], 0, "Inca: Map 38 (south section)", [], False, [], [], [], [], [], [], [], []],
92: [False, [91], 2, [1,5,0,b"\x00"], 0, "Inca: Map 38 (behind statues)", [], False, [], [], [], [], [], [], [], []],
93: [False, [], 2, [1,5,0,b"\x00"], 0, "Inca: Map 38 (north section)", [], False, [], [], [], [], [], [], [], []],
94: [False, [], 2, [1,5,0,b"\x00"], 0, "Inca: Map 39", [], False, [], [], [], [], [], [], [], []],
95: [False, [96], 2, [1,5,0,b"\x00"], 0, "Inca: Map 40 (entrance)", [], False, [], [], [], [], [], [], [], []],
96: [False, [95], 2, [1,5,0,b"\x00"], 0, "Inca: Map 40 (past tiles)", [], False, [], [], [], [], [], [], [], []],
97: [False, [98,503], 2, [1,5,0,b"\x00"], 0, "Inca: Boss Room", [], True, [], [], [], [], [], [], [], []], # might need to add an exit for this
98: [False, [97], 2, [1,5,0,b"\x00"], 0, "Inca: Behind Boss Room", [], False, [], [], [], [], [], [], [], []],
99: [False, [], 2, [1,5,0,b"\x00"], 0, "Inca: Map 29 (SE door)", [], False, [], [], [], [], [], [], [], []],
# Gold Ship / Diamond Coast
100: [False, [104], 1, [1,5,0,b"\x00"], 0, "Gold Ship: Deck", [], False, [], [], [], [], [], [], [], []],
101: [False, [], 2, [1,5,0,b"\x00"], 0, "Gold Ship: Interior", [], False, [], [], [], [], [], [], [], []],
102: [False, [11], 1, [2,6,0,b"\x00"], 0, "Diamond Coast: Main Area", [], False, [], [], [], [], [], [], [], []],
103: [False, [], 2, [2,6,0,b"\x00"], 0, "Diamond Coast: House", [], False, [], [], [], [], [], [], [], []],
104: [False, [], 0, [1,5,0,b"\x00"], 0, "Gold Ship: Crow's Nest Passage", [], False, [], [], [], [], [], [], [], []],
# Freejia
110: [False, [11], 1, [2,7,0,b"\x00"], 0, "Freejia: Main Area", [], False, [], [], [], [], [], [], [], []],
111: [False, [1, 110], 1, [2,7,0,b"\x00"], 0, "Freejia: 2-story House Roof", [], False, [], [], [], [], [], [], [], []],
112: [False, [], 1, [2,7,0,b"\x00"], 0, "Freejia: Laborer House Roof", [], False, [], [], [], [], [], [], [], []],
113: [False, [110, 114], 1, [2,7,0,b"\x00"], 0, "Freejia: Labor Trade Roof", [], False, [], [], [], [], [], [], [], []],
114: [False, [110, 112], 1, [2,7,0,b"\x00"], 0, "Freejia: Back Alley", [], False, [], [], [], [], [], [], [], []],
115: [False, [110], 0, [2,7,0,b"\x00"], 0, "Freejia: Slaver", [], False, [], [], [], [], [], [], [], []],
116: [False, [], 2, [2,7,0,b"\x00"], 0, "Freejia: West House", [], False, [], [], [], [], [], [], [], []],
117: [False, [], 2, [2,7,0,b"\x00"], 0, "Freejia: 2-story House", [], False, [], [], [], [], [], [], [], []],
118: [False, [], 2, [2,7,0,b"\x00"], 0, "Freejia: Lovers' House", [], False, [], [], [], [], [], [], [], []],
119: [False, [], 2, [2,7,0,b"\x00"], 0, "Freejia: Hotel (common area)", [], False, [], [], [], [], [], [], [], []],
120: [False, [], 2, [2,7,0,b"\x00"], 0, "Freejia: Hotel (west room)", [], False, [], [], [], [], [], [], [], []],
121: [False, [], 2, [2,7,0,b"\x00"], 0, "Freejia: Hotel (east room)", [], False, [], [], [], [], [], [], [], []],
122: [False, [504], 2, [2,7,0,b"\x00"], 0, "Freejia: Laborer House", [], False, [], [], [], [], [], [], [], []],
123: [False, [], 2, [2,7,0,b"\x00"], 0, "Freejia: Messy House", [], False, [], [], [], [], [], [], [], []],
124: [False, [], 2, [2,7,0,b"\x00"], 0, "Freejia: Erik House", [], False, [], [], [], [], [], [], [], []],
125: [False, [], 2, [2,7,0,b"\x00"], 0, "Freejia: Dark Space House", [], False, [], [], [], [], [], [], [], []],
126: [False, [], 2, [2,7,0,b"\x00"], 0, "Freejia: Labor Trade House", [], False, [], [], [], [], [], [], [], []],
127: [False, [], 2, [2,7,0,b"\x00"], 0, "Freejia: Labor Market", [], False, [], [], [], [], [], [], [], []],
# Diamond Mine
130: [False, [131], 2, [2,8,0,b"\x00"], 0, "Diamond Mine: Map 61 (entrance)", [], False, [], [], [], [], [], [], [], []],
131: [False, [], 2, [2,8,0,b"\x00"], 0, "Diamond Mine: Map 61 (behind barriers)", [], False, [], [], [], [], [], [], [], []],
132: [False, [131], 2, [2,8,0,b"\x00"], 0, "Diamond Mine: Map 61 (false wall)", [], False, [], [], [], [], [], [], [], []],
133: [False, [11], 2, [2,8,0,b"\x00"], 0, "Diamond Mine: Map 62", [], False, [], [], [], [], [], [], [], []],
134: [False, [], 2, [2,8,0,b"\x00"], 0, "Diamond Mine: Map 63 (main)", [], False, [], [], [], [], [], [], [], []],
135: [False, [134], 2, [2,8,0,b"\x00"], 0, "Diamond Mine: Map 63 (elevator)", [], False, [], [], [], [], [], [], [], []],
136: [False, [], 2, [2,8,0,b"\x00"], 0, "Diamond Mine: Map 64 (main)", [], False, [], [], [], [], [], [], [], []],
137: [False, [136], 2, [2,8,0,b"\x00"], 0, "Diamond Mine: Map 64 (trapped laborer)", [], False, [], [], [], [], [], [], [], []],
138: [False, [], 2, [2,8,0,b"\x00"], 0, "Diamond Mine: Map 65 (main)", [], False, [], [], [], [], [], [], [], []],
139: [False, [138], 2, [2,8,0,b"\x00"], 0, "Diamond Mine: Map 65 (behind ramp)", [], False, [], [], [], [], [], [], [], []],
140: [False, [], 2, [2,8,0,b"\x00"], 0, "Diamond Mine: Map 66 (elevator 1)", [], False, [], [], [], [], [], [], [], []],
141: [False, [], 2, [2,8,0,b"\x00"], 0, "Diamond Mine: Map 66 (elevator 2)", [], False, [], [], [], [], [], [], [], []],
142: [False, [], 2, [2,8,0,b"\x00"], 0, "Diamond Mine: Map 66 (Dark Space)", [], False, [], [], [], [], [], [], [], []],
143: [False, [], 2, [2,8,0,b"\x00"], 0, "Diamond Mine: Map 66 (laborer)", [], False, [], [], [], [], [], [], [], []],
144: [False, [145], 2, [2,8,0,b"\x00"], 0, "Diamond Mine: Map 67 (entrance)", [], False, [], [], [], [], [], [], [], []],
145: [False, [144], 2, [2,8,0,b"\x00"], 0, "Diamond Mine: Map 67 (exit)", [], False, [], [], [], [], [], [], [], []], # potential softlock?
146: [False, [], 2, [2,8,0,b"\x00"], 0, "Diamond Mine: Map 68 (main)", [], False, [], [], [], [], [], [], [], []],
147: [False, [146], 2, [2,8,0,b"\x00"], 0, "Diamond Mine: Map 68 (door open)", [], False, [], [], [], [], [], [], [], []],
148: [False, [], 2, [2,8,0,b"\x00"], 0, "Diamond Mine: Map 69", [], False, [], [], [], [], [], [], [], []],
149: [False, [], 2, [2,8,0,b"\x00"], 0, "Diamond Mine: Map 70", [], False, [], [], [], [], [], [], [], []],
150: [False, [], 2, [2,8,0,b"\x00"], 0, "Diamond Mine: Map 71", [], False, [], [], [], [], [], [], [], []],
# Neil's Cottage / Nazca
160: [False, [11], 2, [2,9,0,b"\x00"], 0, "Neil's Cottage", [13], False, [], [], [], [], [], [], [], []],
161: [False, [17,160,505], 2, [2,9,0,b"\x00"], 0, "Neil's Cottage: Neil", [], False, [], [], [], [], [], [], [], []],
162: [False, [11], 1, [2,10,0,b"\x00"], 0, "Nazca Plain", [], False, [], [], [], [], [], [], [], []],
# Sky Garden
167: [False, [], 2, [2,10,0,b"\x00"], 0, "Sky Garden: Map 83 (SE)", [], False, [], [], [], [], [], [], [], []],
168: [False, [181], 2, [2,10,0,b"\x00"], 0, "Sky Garden: Map 81 (north)", [], False, [], [], [], [], [], [], [], []],
169: [False, [], 2, [2,10,0,b"\x00"], 0, "Sky Garden: Map 86 (DS Room)", [], False, [], [], [], [], [], [], [], []],
170: [False, [], 1, [2,10,0,b"\x00"], 0, "Sky Garden: Foyer", [14, 14, 14, 14], False, [], [], [], [], [], [], [], []],
171: [False, [], 1, [2,10,0,b"\x00"], 0, "Sky Garden: Boss Entrance", [], False, [], [], [], [], [], [], [], []],
172: [False, [], 2, [2,10,0,b"\x00"], 0, "Sky Garden: Map 77 (main)", [], False, [], [], [], [], [], [], [], []],
173: [False, [], 2, [2,10,0,b"\x00"], 0, "Sky Garden: Map 77 (SW)", [], False, [], [], [], [], [], [], [], []],
174: [False, [], 2, [2,10,0,b"\x00"], 0, "Sky Garden: Map 77 (SE)", [], False, [], [], [], [], [], [], [], []],
175: [False, [], 2, [2,10,0,b"\x00"], 0, "Sky Garden: Map 78", [], False, [], [], [], [], [], [], [], []],
176: [False, [], 2, [2,10,0,b"\x00"], 0, "Sky Garden: Map 79 (main)", [], False, [], [], [], [], [], [], [], []],
177: [False, [], 2, [2,10,0,b"\x00"], 0, "Sky Garden: Map 79 (center)", [], False, [], [], [], [], [], [], [], []],
178: [False, [], 2, [2,10,0,b"\x00"], 0, "Sky Garden: Map 79 (behind barrier)", [], False, [], [], [], [], [], [], [], []],
179: [False, [], 2, [2,10,0,b"\x00"], 0, "Sky Garden: Map 80 (north)", [], False, [], [], [], [], [], [], [], []],
180: [False, [], 2, [2,10,0,b"\x00"], 0, "Sky Garden: Map 80 (south)", [], False, [], [], [], [], [], [], [], []],
181: [False, [168], 2, [2,10,0,b"\x00"], 0, "Sky Garden: Map 81 (main)", [], False, [], [], [], [], [], [], [], []],
182: [False, [181], 2, [2,10,0,b"\x00"], 0, "Sky Garden: Map 81 (west)", [], False, [], [], [], [], [], [], [], []],
183: [False, [182], 2, [2,10,0,b"\x00"], 0, "Sky Garden: Map 81 (Dark Space cage)", [], False, [], [], [], [], [], [], [], []],
184: [False, [182], 2, [2,10,0,b"\x00"], 0, "Sky Garden: Map 81 (SE platform)", [], False, [], [], [], [], [], [], [], []],
185: [False, [182], 2, [2,10,0,b"\x00"], 0, "Sky Garden: Map 81 (SW platform)", [], False, [], [], [], [], [], [], [], []],
186: [False, [506], 2, [2,10,0,b"\x00"], 0, "Sky Garden: Map 82 (north)", [], False, [], [], [], [], [], [], [], []], # deal with switches
187: [False, [508], 2, [2,10,0,b"\x00"], 0, "Sky Garden: Map 82 (south)", [], False, [], [], [], [], [], [], [], []],
188: [False, [], 2, [2,10,0,b"\x00"], 0, "Sky Garden: Map 82 (NE)", [], False, [], [], [], [], [], [], [], []],
189: [False, [188,507], 2, [2,10,0,b"\x00"], 0, "Sky Garden: Map 82 (switch cage)", [], False, [], [], [], [], [], [], [], []],
190: [False, [191], 2, [2,10,0,b"\x00"], 0, "Sky Garden: Map 83 (NE)", [], False, [], [], [], [], [], [], [], []],
191: [False, [190, 192], 2, [2,10,0,b"\x00"], 0, "Sky Garden: Map 83 (NW)", [], False, [], [], [], [], [], [], [], []],
192: [False, [], 2, [2,10,0,b"\x00"], 0, "Sky Garden: Map 83 (center)", [], False, [], [], [], [], [], [], [], []],
193: [False, [194], 2, [2,10,0,b"\x00"], 0, "Sky Garden: Map 83 (SW)", [], False, [], [], [], [], [], [], [], []],
194: [False, [167], 2, [2,10,0,b"\x00"], 0, "Sky Garden: Map 83 (chests)", [], False, [], [], [], [], [], [], [], []],
195: [False, [196], 2, [2,10,0,b"\x00"], 0, "Sky Garden: Map 84 (main)", [], False, [], [], [], [], [], [], [], []],
196: [False, [], 2, [2,10,0,b"\x00"], 0, "Sky Garden: Map 84 (NE)", [], False, [], [], [], [], [], [], [], []],
197: [False, [], 2, [2,10,0,b"\x00"], 0, "Sky Garden: Map 84 (behind statue)", [], False, [], [], [], [], [], [], [], []],
198: [False, [], 2, [2,10,0,b"\x00"], 0, "Sky Garden: Boss Room", [], True, [], [], [], [], [], [], [], []],
199: [False, [197,509], 2, [2,10,0,b"\x00"], 0, "Sky Garden: Map 84 (statue)", [], False, [], [], [], [], [], [], [], []],
# Seaside Palace
200: [False, [], 2, [3,11,0,b"\x00"], 0, "Seaside Palace: Area 1", [16], False, [], [], [], [], [], [], [], []],
201: [False, [200], 2, [3,11,0,b"\x00"], 0, "Seaside Palace: Area 1 (door unlocked)", [], False, [], [], [], [], [], [], [], []],
202: [False, [], 2, [3,11,0,b"\x00"], 0, "Seaside Palace: Area 1 NE Room", [], False, [], [], [], [], [], [], [], []],
203: [False, [], 2, [3,11,0,b"\x00"], 0, "Seaside Palace: Area 1 NW Room", [], False, [], [], [], [], [], [], [], []],
204: [False, [], 2, [3,11,0,b"\x00"], 0, "Seaside Palace: Area 1 SE Room", [], False, [], [], [], [], [], [], [], []],
205: [False, [], 2, [3,11,0,b"\x00"], 0, "Seaside Palace: Area 2", [], False, [], [], [], [], [], [], [], []],
206: [False, [200], 2, [3,11,0,b"\x00"], 0, "Seaside Palace: Buffy", [], False, [], [], [], [], [], [], [], []],
207: [False, [], 2, [3,11,0,b"\x00"], 0, "Seaside Palace: Area 2 SW Room", [], False, [], [], [], [], [], [], [], []],
208: [False, [205], 2, [3,11,0,b"\x00"], 0, "Seaside Palace: Coffin", [], False, [], [], [], [], [], [], [], []],
209: [False, [], 2, [3,11,0,b"\x00"], 0, "Seaside Palace: Fountain", [17], False, [], [], [], [], [], [], [], []],
210: [False, [], 2, [3,11,0,b"\x00"], 0, "Seaside Palace: Mu Passage", [16], False, [], [], [], [], [], [], [], []],
211: [False, [210], 2, [3,11,0,b"\x00"], 0, "Seaside Palace: Mu Passage (door unlocked)", [], False, [], [], [], [], [], [], [], []],
# Mu
212: [False, [], 2, [3,12,0,b"\x00"], 0, "Mu: Map 95 (top)", [], False, [], [], [], [], [], [], [], []],
213: [False, [212], 2, [3,12,1,b"\x00"], 0, "Mu: Map 95 (middle E)", [], False, [], [], [], [], [], [], [], []],
214: [False, [], 2, [3,12,1,b"\x00"], 0, "Mu: Map 95 (middle W)", [], False, [], [], [], [], [], [], [], []],
215: [False, [213], 2, [3,12,2,b"\x00"], 0, "Mu: Map 95 (bottom E)", [], False, [], [], [], [], [], [], [], []],
216: [False, [214], 2, [3,12,2,b"\x00"], 0, "Mu: Map 95 (bottom W)", [], False, [], [], [], [], [], [], [], []],
217: [False, [], 2, [3,12,0,b"\x00"], 0, "Mu: Map 96 (top)", [], False, [], [], [], [], [], [], [], []],
218: [False, [217], 2, [3,12,1,b"\x00"], 0, "Mu: Map 96 (middle)", [], False, [], [], [], [], [], [], [], []],
219: [False, [], 2, [3,12,2,b"\x00"], 0, "Mu: Map 96 (bottom)", [], False, [], [], [], [], [], [], [], []],
220: [False, [], 2, [3,12,0,b"\x00"], 0, "Mu: Map 97 (top main)", [], False, [], [], [], [], [], [], [], []],
221: [False, [222, 223], 2, [3,12,0,b"\x00"], 0, "Mu: Map 97 (top island)", [], False, [], [], [], [], [], [], [], []],
222: [False, [], 2, [3,12,1,b"\x00"], 0, "Mu: Map 97 (middle NE)", [], False, [], [], [], [], [], [], [], []],
223: [False, [221], 2, [3,12,1,b"\x00"], 0, "Mu: Map 97 (middle SW)", [], False, [], [], [], [], [], [], [], []],
224: [False, [], 2, [3,12,2,b"\x00"], 0, "Mu: Map 97 (bottom)", [], False, [], [], [], [], [], [], [], []],
225: [False, [], 2, [3,12,0,b"\x00"], 0, "Mu: Map 98 (top S)", [], False, [], [], [], [], [], [], [], []],
226: [False, [], 2, [3,12,0,b"\x00"], 0, "Mu: Map 98 (top N)", [], False, [], [], [], [], [], [], [], []],
227: [False, [226], 2, [3,12,1,b"\x00"], 0, "Mu: Map 98 (middle E)", [], False, [], [], [], [], [], [], [], []],
228: [False, [], 2, [3,12,1,b"\x00"], 0, "Mu: Map 98 (middle W)", [], False, [], [], [], [], [], [], [], []],
229: [False, [227], 2, [3,12,2,b"\x00"], 0, "Mu: Map 98 (bottom E)", [], False, [], [], [], [], [], [], [], []],
230: [False, [228], 2, [3,12,2,b"\x00"], 0, "Mu: Map 98 (bottom W)", [], False, [], [], [], [], [], [], [], []],
231: [False, [], 2, [3,12,0,b"\x00"], 0, "Mu: Map 99 (Room of Hope 1)", [18], False, [], [], [], [], [], [], [], []],
232: [False, [], 2, [3,12,0,b"\x00"], 0, "Mu: Map 99 (Room of Hope 2)", [18], False, [], [], [], [], [], [], [], []],
233: [False, [], 2, [3,12,1,b"\x00"], 0, "Mu: Map 100 (middle E)", [], False, [], [], [], [], [], [], [], []],
234: [False, [], 2, [3,12,1,b"\x00"], 0, "Mu: Map 100 (middle W)", [], False, [], [], [], [], [], [], [], []],
235: [False, [], 2, [3,12,2,b"\x00"], 0, "Mu: Map 100 (bottom)", [], False, [], [], [], [], [], [], [], []],
236: [False, [], 2, [3,12,0,b"\x00"], 0, "Mu: Map 101 (top)", [], False, [], [], [], [], [], [], [], []],
237: [False, [], 2, [3,12,1,b"\x00"], 0, "Mu: Map 101 (middle W)", [], False, [], [], [], [], [], [], [], []],
238: [False, [236], 2, [3,12,1,b"\x00"], 0, "Mu: Map 101 (middle E)", [], False, [], [], [], [], [], [], [], []],
239: [False, [], 2, [3,12,2,b"\x00"], 0, "Mu: Map 101 (bottom)", [], False, [], [], [], [], [], [], [], []],
240: [False, [], 2, [3,12,0,b"\x00"], 0, "Mu: Map 102 (pedestals)", [19, 19], False, [], [], [], [], [], [], [], []],
241: [False, [], 2, [3,12,0,b"\x00"], 0, "Mu: Map 102 (statues placed)", [], False, [], [], [], [], [], [], [], []], # might need an exit for this
242: [False, [], 2, [3,12,0,b"\x00"], 0, "Mu: Map 102 (statue get)", [], False, [], [], [], [], [], [], [], []],
243: [False, [244], 2, [3,12,0,b"\x00"], 0, "Mu: Boss Room (entryway)", [], False, [], [], [], [], [], [], [], []], # Might need to add an exit for this?
244: [False, [242,243], 2, [3,12,0,b"\x00"], 0, "Mu: Boss Room (main)", [], True, [], [], [], [], [], [], [], []],
245: [False, [212], 2, [3,12,0,b"\x00"], 0, "Mu: Map 95 (top, Slider exit)", [], False, [], [], [], [], [], [], [], []],
246: [False, [226], 2, [3,12,0,b"\x00"], 0, "Mu: Map 98 (top, Slider exit)", [], False, [], [], [], [], [], [], [], []],
247: [False, [231,511], 0, [3,12,0,b"\x00"], 0, "Mu: Water lowered 1", [], False, [], [], [], [], [], [], [], []],
248: [False, [232,512], 0, [3,12,0,b"\x00"], 0, "Mu: Water lowered 2", [], False, [], [], [], [], [], [], [], []],
# Angel Village
250: [False, [12], 1, [3,13,0,b"\x00"], 0, "Angel Village: Outside", [], True, [], [], [], [], [], [], [], []],
251: [False, [1], 2, [3,13,0,b"\x00"], 0, "Angel Village: Underground", [], False, [], [], [], [], [], [], [], []],
252: [False, [], 2, [3,13,0,b"\x00"], 0, "Angel Village: Room 1", [], False, [], [], [], [], [], [], [], []],
253: [False, [], 2, [3,13,0,b"\x00"], 0, "Angel Village: Room 2", [], False, [], [], [], [], [], [], [], []],
254: [False, [], 2, [3,13,0,b"\x00"], 0, "Angel Village: Dance Hall", [], False, [], [], [], [], [], [], [], []],
255: [False, [], 2, [3,13,0,b"\x00"], 0, "Angel Village: DS Room", [], False, [], [], [], [], [], [], [], []],
#256: [False, [], 2, [3,13,0,b"\x00"], 0, "Angel Village: Room 3", [], False, [], [], [], [], [], [], [], []],
# Angel Dungeon
260: [False, [], 2, [3,13,0,b"\x00"], 0, "Angel Dungeon: Map 109", [], False, [], [], [], [], [], [], [], []],
261: [False, [278], 2, [3,13,0,b"\x00"], 0, "Angel Dungeon: Map 110 (main)", [], False, [], [], [], [], [], [], [], []],
262: [False, [], 2, [3,13,0,b"\x00"], 0, "Angel Dungeon: Map 111", [], False, [], [], [], [], [], [], [], []],
263: [False, [279], 2, [3,13,0,b"\x00"], 0, "Angel Dungeon: Map 112 (main)", [], False, [], [], [], [], [], [], [], []],
264: [False, [263], 2, [3,13,0,b"\x00"], 0, "Angel Dungeon: Map 112 (slider)", [], False, [], [], [], [], [], [], [], []],
265: [False, [], 2, [3,13,0,b"\x00"], 0, "Angel Dungeon: Map 112 (alcove)", [], False, [], [], [], [], [], [], [], []],
266: [False, [], 2, [3,13,0,b"\x00"], 0, "Angel Dungeon: Map 113", [], False, [], [], [], [], [], [], [], []],
267: [False, [], 2, [3,13,0,b"\x00"], 0, "Angel Dungeon: Map 114 (main)", [], False, [], [], [], [], [], [], [], []],
268: [False, [267], 2, [3,13,0,b"\x00"], 0, "Angel Dungeon: Map 114 (slider exit)", [], False, [], [], [], [], [], [], [], []],
269: [False, [], 2, [3,13,0,b"\x00"], 0, "Angel Dungeon: Map 115 (main)", [], False, [], [], [], [], [], [], [], []],
270: [False, [], 2, [3,13,0,b"\x00"], 0, "Angel Dungeon: Map 116 (portrait room)", [], False, [], [], [], [], [], [], [], []],
271: [False, [], 2, [3,13,0,b"\x00"], 0, "Angel Dungeon: Map 116 (side room)", [], False, [], [], [], [], [], [], [], []],
272: [False, [], 2, [3,13,0,b"\x00"], 0, "Angel Dungeon: Map 116 (Ishtar's room)", [], False, [], [], [], [], [], [], [], []],
273: [False, [272], 2, [3,13,0,b"\x00"], 0, "Angel Dungeon: Map 116 (Ishtar's chest)", [], False, [], [], [], [], [], [], [], []],
274: [False, [513], 2, [3,13,0,b"\x00"], 0, "Angel Dungeon: Puzzle Room", [], False, [], [], [], [], [], [], [], []],
275: [False, [265], 2, [3,13,0,b"\x00"], 0, "Angel Dungeon: Map 112 (alcove slider)", [], False, [], [], [], [], [], [], [], []],
276: [False, [277], 2, [3,13,0,b"\x00"], 0, "Angel Dungeon: Map 115 (slider exit)", [], False, [], [], [], [], [], [], [], []],
277: [False, [], 2, [3,13,0,b"\x00"], 0, "Angel Dungeon: Map 115 (foyer)", [], False, [], [], [], [], [], [], [], []],
278: [False, [], 2, [3,13,0,b"\x00"], 0, "Angel Dungeon: Map 110 (past Draco)", [], False, [], [], [], [], [], [], [], []],
279: [False, [], 2, [3,13,0,b"\x00"], 0, "Angel Dungeon: Map 112 (past Draco)", [], False, [], [], [], [], [], [], [], []],
# Watermia
280: [False, [12], 1, [3,14,0,b"\x00"], 0, "Watermia: Main Area", [24], False, [], [], [], [], [], [], [], []],
#281: [False, [15,280], 0, [3,14,0,b"\x00"], 0, "Watermia: Bridge Man", [], False, [], [], [], [], [], [], [], []],
282: [False, [], 2, [3,14,0,b"\x00"], 0, "Watermia: DS House", [], False, [], [], [], [], [], [], [], []],
283: [False, [1], 2, [3,14,0,b"\x00"], 0, "Watermia: Gambling House", [], False, [], [], [], [], [], [], [], []],
284: [False, [], 2, [3,14,0,b"\x00"], 0, "Watermia: West House", [], False, [], [], [], [], [], [], [], []],
285: [False, [], 2, [3,14,0,b"\x00"], 0, "Watermia: East House", [], False, [], [], [], [], [], [], [], []],
286: [False, [], 2, [3,14,0,b"\x00"], 0, "Watermia: Lance's House", [], False, [], [], [], [], [], [], [], []],
287: [False, [], 2, [3,14,0,b"\x00"], 0, "Watermia: NW House", [], False, [], [], [], [], [], [], [], []],
288: [False, [280], 0, [3,14,0,b"\x00"], 0, "Watermia: Stablemaster", [], True, [], [], [], [], [], [], [], []],
# Great Wall
290: [False, [12], 2, [3,15,0,b"\x00"], 0, "Great Wall: Map 130", [], False, [], [], [], [], [], [], [], []],
291: [False, [292], 2, [3,15,0,b"\x00"], 0, "Great Wall: Map 131 (NW)", [], False, [], [], [], [], [], [], [], []],
292: [False, [293], 2, [3,15,0,b"\x00"], 0, "Great Wall: Map 131 (S)", [], False, [], [], [], [], [], [], [], []],
293: [False, [], 2, [3,15,0,b"\x00"], 0, "Great Wall: Map 131 (NE)", [], False, [], [], [], [], [], [], [], []],
294: [False, [296], 2, [3,15,0,b"\x00"], 0, "Great Wall: Map 133 (W)", [], False, [], [], [], [], [], [], [], []],
295: [False, [296], 2, [3,15,0,b"\x00"], 0, "Great Wall: Map 133 (center)", [], False, [], [], [], [], [], [], [], []],
296: [False, [], 2, [3,15,0,b"\x00"], 0, "Great Wall: Map 133 (E)", [], False, [], [], [], [], [], [], [], []],
297: [False, [], 2, [3,15,0,b"\x00"], 0, "Great Wall: Map 134", [], False, [], [], [], [], [], [], [], []],
298: [False, [], 2, [3,15,0,b"\x00"], 0, "Great Wall: Map 135 (W)", [], False, [], [], [], [], [], [], [], []],
299: [False, [], 2, [3,15,0,b"\x00"], 0, "Great Wall: Map 135 (E)", [], False, [], [], [], [], [], [], [], []],
300: [False, [], 2, [3,15,0,b"\x00"], 0, "Great Wall: Map 136 (W)", [], False, [], [], [], [], [], [], [], []],
301: [False, [], 2, [3,15,0,b"\x00"], 0, "Great Wall: Map 136 (E)", [], False, [], [], [], [], [], [], [], []],
302: [False, [303], 2, [3,15,0,b"\x00"], 0, "Great Wall: Boss Room (entrance)", [], False, [], [], [], [], [], [], [], []],
303: [False, [], 2, [3,15,0,b"\x00"], 0, "Great Wall: Boss Room (exit)", [], False, [], [], [], [], [], [], [], []],
# Euro
310: [False, [13], 1, [4,16,0,b"\x00"], 0, "Euro: Main Area", [24], False, [], [], [], [], [], [], [], []],
311: [False, [310], 0, [4,16,0,b"\x00"], 0, "Euro: Stablemaster", [], True, [], [], [], [], [], [], [], []],
312: [False, [], 2, [4,16,0,b"\x00"], 0, "Euro: Rolek Company", [], False, [], [], [], [], [], [], [], []],
313: [False, [], 2, [4,16,0,b"\x00"], 0, "Euro: West House", [], False, [], [], [], [], [], [], [], []],
314: [False, [], 2, [4,16,0,b"\x00"], 0, "Euro: Rolek Mansion", [40], False, [], [], [], [], [], [], [], []],
315: [False, [314], 0, [4,16,0,b"\x00"], 0, "Euro: Ann", [], False, [], [], [], [], [], [], [], []],
316: [False, [], 2, [4,16,0,b"\x00"], 0, "Euro: Guest Room", [], False, [], [], [], [], [], [], [], []],
317: [False, [], 2, [4,16,0,b"\x00"], 0, "Euro: Central House", [], False, [], [], [], [], [], [], [], []],
318: [False, [1], 2, [4,16,0,b"\x00"], 0, "Euro: Jeweler House", [], False, [], [], [], [], [], [], [], []],
319: [False, [], 2, [4,16,0,b"\x00"], 0, "Euro: Twins House", [], False, [], [], [], [], [], [], [], []],
320: [False, [], 2, [4,16,0,b"\x00"], 0, "Euro: Hidden House", [], False, [], [], [], [], [], [], [], []],
321: [False, [], 2, [4,16,0,b"\x00"], 0, "Euro: Shrine", [], False, [], [], [], [], [], [], [], []],
322: [False, [], 2, [4,16,0,b"\x00"], 0, "Euro: Explorer's House", [], False, [], [], [], [], [], [], [], []],
323: [False, [324], 2, [4,16,0,b"\x00"], 0, "Euro: Store Entrance", [], False, [], [], [], [], [], [], [], []],
324: [False, [], 2, [4,16,0,b"\x00"], 0, "Euro: Store Exit", [], False, [], [], [], [], [], [], [], []],
325: [False, [], 2, [4,16,0,b"\x00"], 0, "Euro: Dark Space House", [], False, [], [], [], [], [], [], [], []],
# Mt. Kress
330: [False, [13], 2, [4,17,0,b"\x00"], 0, "Mt. Kress: Map 160", [], False, [], [], [], [], [], [], [], []],
331: [False, [], 2, [4,17,0,b"\x00"], 0, "Mt. Kress: Map 161 (E)", [], False, [], [], [], [], [], [], [], []],
332: [False, [], 2, [4,17,0,b"\x00"], 0, "Mt. Kress: Map 161 (W)", [], False, [], [], [], [], [], [], [], []],
333: [False, [], 2, [4,17,0,b"\x00"], 0, "Mt. Kress: Map 162 (main)", [26], False, [], [], [], [], [], [], [], []],
334: [False, [333], 2, [4,17,0,b"\x00"], 0, "Mt. Kress: Map 162 (S)", [], False, [], [], [], [], [], [], [], []],
335: [False, [], 2, [4,17,0,b"\x00"], 0, "Mt. Kress: Map 162 (NW)", [], False, [], [], [], [], [], [], [], []],
336: [False, [], 2, [4,17,0,b"\x00"], 0, "Mt. Kress: Map 162 (SE)", [], False, [], [], [], [], [], [], [], []],
337: [False, [], 2, [4,17,0,b"\x00"], 0, "Mt. Kress: Map 163", [], False, [], [], [], [], [], [], [], []],
338: [False, [], 2, [4,17,0,b"\x00"], 0, "Mt. Kress: Map 164", [], False, [], [], [], [], [], [], [], []],
339: [False, [], 2, [4,17,0,b"\x00"], 0, "Mt. Kress: Map 165 (S)", [26], False, [], [], [], [], [], [], [], []],
340: [False, [], 2, [4,17,0,b"\x00"], 0, "Mt. Kress: Map 165 (NE)", [26], False, [], [], [], [], [], [], [], []],
341: [False, [338], 2, [4,17,0,b"\x00"], 0, "Mt. Kress: Map 165 (NW)", [], False, [], [], [], [], [], [], [], []],
342: [False, [], 2, [4,17,0,b"\x00"], 0, "Mt. Kress: Map 166", [], False, [], [], [], [], [], [], [], []],
343: [False, [], 2, [4,17,0,b"\x00"], 0, "Mt. Kress: Map 167", [], False, [], [], [], [], [], [], [], []],
344: [False, [], 2, [4,17,0,b"\x00"], 0, "Mt. Kress: Map 168", [], False, [], [], [], [], [], [], [], []],
345: [False, [], 2, [4,17,0,b"\x00"], 0, "Mt. Kress: Map 169", [], False, [], [], [], [], [], [], [], []],
# Natives' Village
350: [False, [13], 1, [4,18,0,b"\x00"], 0, "Natives' Village: Main Area", [10], False, [], [], [], [], [], [], [], []],
351: [False, [350], 0, [4,18,0,b"\x00"], 0, "Natives' Village: Child Guide", [], True, [], [], [], [], [], [], [], []],
352: [False, [], 2, [4,18,0,b"\x00"], 0, "Natives' Village: West House", [], False, [], [], [], [], [], [], [], []],
353: [False, [], 2, [4,18,0,b"\x00"], 0, "Natives' Village: House w/Statues", [29], False, [], [], [], [], [], [], [], []],
354: [False, [353], 0, [4,18,0,b"\x00"], 0, "Natives' Village: Statues Awake", [], False, [], [], [], [], [], [], [], []],
# Ankor Wat
360: [False, [13], 2, [4,19,0,b"\x00"], 0, "Ankor Wat: Map 176", [], False, [], [], [], [], [], [], [], []],
361: [False, [], 2, [4,19,0,b"\x00"], 0, "Ankor Wat: Map 177 (E)", [], False, [], [], [], [], [], [], [], []],
362: [False, [361], 2, [4,19,0,b"\x00"], 0, "Ankor Wat: Map 177 (W)", [], False, [], [], [], [], [], [], [], []],
363: [False, [], 2, [4,19,0,b"\x00"], 0, "Ankor Wat: Map 178 (S)", [], False, [], [], [], [], [], [], [], []],
364: [False, [363], 2, [4,19,0,b"\x00"], 0, "Ankor Wat: Map 178 (center)", [], False, [], [], [], [], [], [], [], []],
365: [False, [], 2, [4,19,0,b"\x00"], 0, "Ankor Wat: Map 178 (N)", [], False, [], [], [], [], [], [], [], []],
366: [False, [], 2, [4,19,0,b"\x00"], 0, "Ankor Wat: Map 179 (E)", [], False, [], [], [], [], [], [], [], []],
367: [False, [], 2, [4,19,0,b"\x00"], 0, "Ankor Wat: Map 179 (W)", [], False, [], [], [], [], [], [], [], []],
368: [False, [], 2, [4,19,0,b"\x00"], 0, "Ankor Wat: Map 180", [], False, [], [], [], [], [], [], [], []],
369: [False, [], 2, [4,19,0,b"\x00"], 0, "Ankor Wat: Map 181 (N)", [], False, [], [], [], [], [], [], [], []],
370: [False, [], 2, [4,19,0,b"\x00"], 0, "Ankor Wat: Map 181 (center)", [], False, [], [], [], [], [], [], [], []],
371: [False, [], 2, [4,19,0,b"\x00"], 0, "Ankor Wat: Map 181 (S)", [], False, [], [], [], [], [], [], [], []],
372: [False, [], 2, [4,19,0,b"\x00"], 0, "Ankor Wat: Map 182", [], False, [], [], [], [], [], [], [], []],
373: [False, [], 2, [4,19,0,b"\x00"], 0, "Ankor Wat: Map 183 (S)", [], False, [], [], [], [], [], [], [], []],
374: [False, [373], 2, [4,19,0,b"\x00"], 0, "Ankor Wat: Map 183 (NW)", [], False, [], [], [], [], [], [], [], []],
375: [False, [], 2, [4,19,0,b"\x00"], 0, "Ankor Wat: Map 183 (NE)", [], False, [], [], [], [], [], [], [], []],
376: [False, [], 2, [4,19,0,b"\x00"], 0, "Ankor Wat: Map 184 (S)", [], False, [], [], [], [], [], [], [], []],
377: [False, [376], 2, [4,19,0,b"\x00"], 0, "Ankor Wat: Map 184 (N)", [], False, [], [], [], [], [], [], [], []],
378: [False, [], 2, [4,19,0,b"\x00"], 0, "Ankor Wat: Map 185", [], False, [], [], [], [], [], [], [], []],
379: [False, [], 2, [4,19,0,b"\x00"], 0, "Ankor Wat: Map 186 (main)", [], False, [], [], [], [], [], [], [], []],
380: [False, [], 2, [4,19,0,b"\x00"], 0, "Ankor Wat: Map 186 (NE)", [], False, [], [], [], [], [], [], [], []],
381: [False, [], 2, [4,19,0,b"\x00"], 0, "Ankor Wat: Map 187 (main)", [], False, [], [], [], [], [], [], [], []],
382: [False, [381], 2, [4,19,0,b"\x00"], 0, "Ankor Wat: Map 187 (chest)", [], False, [], [], [], [], [], [], [], []],
383: [False, [381], 2, [4,19,0,b"\x00"], 0, "Ankor Wat: Map 187 (Dark Space)", [], False, [], [], [], [], [], [], [], []],
384: [False, [], 2, [4,19,0,b"\x00"], 0, "Ankor Wat: Map 188 (N bright)", [], False, [], [], [], [], [], [], [], []],
385: [False, [], 2, [4,19,0,b"\x00"], 0, "Ankor Wat: Map 188 (S bright)", [], False, [], [], [], [], [], [], [], []],
386: [False, [], 2, [4,19,0,b"\x00"], 0, "Ankor Wat: Map 189 (floor S)", [], False, [], [], [], [], [], [], [], []],
387: [False, [], 2, [4,19,0,b"\x00"], 0, "Ankor Wat: Map 189 (floor N)", [], False, [], [], [], [], [], [], [], []],
388: [False, [386], 2, [4,19,0,b"\x00"], 0, "Ankor Wat: Map 189 (platform)", [], False, [], [], [], [], [], [], [], []],
389: [False, [], 2, [4,19,0,b"\x00"], 0, "Ankor Wat: Map 190 (E)", [], False, [], [], [], [], [], [], [], []],
390: [False, [], 2, [4,19,0,b"\x00"], 0, "Ankor Wat: Map 190 (W)", [], False, [], [], [], [], [], [], [], []],
391: [False, [], 2, [4,19,0,b"\x00"], 0, "Ankor Wat: Map 191", [], False, [], [], [], [], [], [], [], []],
392: [False, [384], 2, [4,19,0,b"\x00"], 0, "Ankor Wat: Map 188 (N)", [], False, [], [], [], [], [], [], [], []],
393: [False, [385], 2, [4,19,0,b"\x00"], 0, "Ankor Wat: Map 188 (S)", [], False, [], [], [], [], [], [], [], []],
# Dao
400: [False, [1,14], 1, [5,20,0,b"\x00"], 0, "Dao: Main Area", [], False, [], [], [], [], [], [], [], []],
401: [False, [], 2, [5,20,0,b"\x00"], 0, "Dao: NW House", [], False, [], [], [], [], [], [], [], []],
402: [False, [], 2, [5,20,0,b"\x00"], 0, "Dao: Neil's House", [], False, [], [], [], [], [], [], [], []],
403: [False, [], 2, [5,20,0,b"\x00"], 0, "Dao: Snake Game", [], False, [], [], [], [], [], [], [], []],
404: [False, [], 2, [5,20,0,b"\x00"], 0, "Dao: SW House", [], False, [], [], [], [], [], [], [], []],
405: [False, [], 2, [5,20,0,b"\x00"], 0, "Dao: S House", [], False, [], [], [], [], [], [], [], []],
406: [False, [], 2, [5,20,0,b"\x00"], 0, "Dao: SE House", [], False, [], [], [], [], [], [], [], []],
# Pyramid
410: [False, [14], 2, [5,21,0,b"\x00"], 0, "Pyramid: Entrance (main)", [], False, [], [], [], [], [], [], [], []],
411: [False, [410], 2, [5,21,0,b"\x00"], 0, "Pyramid: Entrance (behind orbs)", [], False, [], [], [], [], [], [], [], []],
412: [False, [413], 2, [5,21,0,b"\x00"], 0, "Pyramid: Entrance (hidden platform)", [], False, [], [], [], [], [], [], [], []],
413: [False, [411], 2, [5,21,0,b"\x00"], 0, "Pyramid: Entrance (bottom)", [], False, [], [], [], [], [], [], [], []],
414: [False, [411], 2, [5,21,0,b"\x00"], 0, "Pyramid: Entrance (boss entrance)", [], False, [], [], [], [], [], [], [], []],
415: [False, [], 2, [5,21,0,b"\x00"], 0, "Pyramid: Hieroglyph room", [30, 31, 32, 33, 34, 35, 38], False, [], [], [], [], [], [], [], []],
416: [False, [], 2, [5,21,0,b"\x00"], 0, "Pyramid: Map 206 (E)", [], False, [], [], [], [], [], [], [], []],
417: [False, [416], 2, [5,21,0,b"\x00"], 0, "Pyramid: Map 206 (W)", [], False, [], [], [], [], [], [], [], []],
418: [False, [], 2, [5,21,0,b"\x00"], 0, "Pyramid: Map 207 (NE)", [], False, [], [], [], [], [], [], [], []],
419: [False, [411], 2, [5,21,0,b"\x00"], 0, "Pyramid: Map 207 (SW)", [], False, [], [], [], [], [], [], [], []],
420: [False, [421], 2, [5,21,0,b"\x00"], 0, "Pyramid: Map 208 (N)", [], False, [], [], [], [], [], [], [], []],
421: [False, [420], 2, [5,21,0,b"\x00"], 0, "Pyramid: Map 208 (S)", [], False, [], [], [], [], [], [], [], []],
422: [False, [423], 2, [5,21,0,b"\x00"], 0, "Pyramid: Map 209 (W)", [], False, [], [], [], [], [], [], [], []],
423: [False, [422,411], 2, [5,21,0,b"\x00"], 0, "Pyramid: Map 209 (E)", [], False, [], [], [], [], [], [], [], []],
424: [False, [], 2, [5,21,0,b"\x00"], 0, "Pyramid: Map 210", [], False, [], [], [], [], [], [], [], []],
425: [False, [411], 2, [5,21,0,b"\x00"], 0, "Pyramid: Map 211", [], False, [], [], [], [], [], [], [], []],
426: [False, [], 2, [5,21,0,b"\x00"], 0, "Pyramid: Map 212 (N)", [], False, [], [], [], [], [], [], [], []],
427: [False, [426], 2, [5,21,0,b"\x00"], 0, "Pyramid: Map 212 (center)", [], False, [], [], [], [], [], [], [], []],
428: [False, [411], 2, [5,21,0,b"\x00"], 0, "Pyramid: Map 212 (SE)", [], False, [], [], [], [], [], [], [], []],
429: [False, [], 2, [5,21,0,b"\x00"], 0, "Pyramid: Map 212 (SW)", [], False, [], [], [], [], [], [], [], []],
430: [False, [411], 2, [5,21,0,b"\x00"], 0, "Pyramid: Map 213", [], False, [], [], [], [], [], [], [], []],
431: [False, [], 2, [5,21,0,b"\x00"], 0, "Pyramid: Map 214 (NW)", [], False, [], [], [], [], [], [], [], []],
432: [False, [431], 2, [5,21,0,b"\x00"], 0, "Pyramid: Map 214 (NE)", [], False, [], [], [], [], [], [], [], []],
433: [False, [431,434], 2, [5,21,0,b"\x00"], 0, "Pyramid: Map 214 (SE)", [], False, [], [], [], [], [], [], [], []],
434: [False, [433], 2, [5,21,0,b"\x00"], 0, "Pyramid: Map 214 (SW)", [], False, [], [], [], [], [], [], [], []],
435: [False, [411], 2, [5,21,0,b"\x00"], 0, "Pyramid: Map 215 (main)", [], False, [], [], [], [], [], [], [], []],
436: [False, [437], 2, [5,21,0,b"\x00"], 0, "Pyramid: Map 216 (N)", [], False, [], [], [], [], [], [], [], []],
437: [False, [411], 2, [5,21,0,b"\x00"], 0, "Pyramid: Map 216 (S)", [], False, [], [], [], [], [], [], [], []],
438: [False, [], 2, [5,21,0,b"\x00"], 0, "Pyramid: Map 217 (W)", [], False, [], [], [], [], [], [], [], []],
439: [False, [], 2, [5,21,0,b"\x00"], 0, "Pyramid: Map 217 (E)", [], False, [], [], [], [], [], [], [], []],
440: [False, [], 2, [5,21,0,b"\x00"], 0, "Pyramid: Map 219 (W)", [], False, [], [], [], [], [], [], [], []],
441: [False, [411], 2, [5,21,0,b"\x00"], 0, "Pyramid: Map 219 (E)", [], False, [], [], [], [], [], [], [], []],
442: [False, [], 2, [5,21,0,b"\x00"], 0, "Pyramid: Hieroglyph 1", [], False, [], [], [], [], [], [], [], []],
443: [False, [], 2, [5,21,0,b"\x00"], 0, "Pyramid: Hieroglyph 2", [], False, [], [], [], [], [], [], [], []],
444: [False, [], 2, [5,21,0,b"\x00"], 0, "Pyramid: Hieroglyph 3", [], False, [], [], [], [], [], [], [], []],
445: [False, [], 2, [5,21,0,b"\x00"], 0, "Pyramid: Hieroglyph 4", [], False, [], [], [], [], [], [], [], []],
446: [False, [], 2, [5,21,0,b"\x00"], 0, "Pyramid: Hieroglyph 5", [], False, [], [], [], [], [], [], [], []],
447: [False, [], 2, [5,21,0,b"\x00"], 0, "Pyramid: Hieroglyph 6", [], False, [], [], [], [], [], [], [], []],
448: [False, [], 2, [5,21,0,b"\x00"], 0, "Pyramid: Boss Room", [], True, [], [], [], [], [], [], [], []],
449: [False, [415,517], 0, [5,21,0,b"\x00"], 0, "Pyramid: Hieroglyphs Placed", [], False, [], [], [], [], [], [], [], []],
450: [False, [411], 2, [5,21,0,b"\x00"], 0, "Pyramid: Map 215 (past Killer 6)", [], False, [], [], [], [], [], [], [], []],
# Babel
460: [False, [], 2, [6,22,0,b"\x00"], 0, "Babel: Foyer", [], False, [], [], [], [], [], [], [], []],
461: [False, [], 2, [6,22,0,b"\x00"], 0, "Babel: Map 223 (bottom)", [], False, [], [], [], [], [], [], [], []],
462: [False, [461], 2, [6,22,0,b"\x00"], 0, "Babel: Map 223 (top)", [], False, [], [], [], [], [], [], [], []],
463: [False, [518,519],2, [6,22,0,b"\x00"], 0, "Babel: Map 224 (bottom)", [], False, [], [], [], [], [], [], [], []],
464: [False, [520,521],2, [6,22,0,b"\x00"], 0, "Babel: Map 224 (top)", [], False, [], [], [], [], [], [], [], []],
465: [False, [466], 2, [6,22,0,b"\x00"], 0, "Babel: Map 225 (SW)", [], False, [], [], [], [], [], [], [], []],
466: [False, [], 2, [6,22,0,b"\x00"], 0, "Babel: Map 225 (NW)", [], False, [], [], [], [], [], [], [], []],
467: [False, [468], 2, [6,22,0,b"\x00"], 0, "Babel: Map 225 (SE)", [], False, [], [], [], [], [], [], [], []],
468: [False, [], 2, [6,22,0,b"\x00"], 0, "Babel: Map 225 (NE)", [], False, [], [], [], [], [], [], [], []],
469: [False, [470], 2, [6,22,0,b"\x00"], 0, "Babel: Map 226 (bottom)", [], False, [], [], [], [], [], [], [], []],
470: [False, [], 2, [6,22,0,b"\x00"], 0, "Babel: Map 226 (top)", [], False, [], [], [], [], [], [], [], []],
471: [False, [522], 2, [6,22,0,b"\x00"], 0, "Babel: Map 227 (bottom)", [], False, [], [], [], [], [], [], [], []],
472: [False, [], 2, [6,22,0,b"\x00"], 0, "Babel: Map 227 (top)", [], False, [], [], [], [], [], [], [], []],
473: [False, [], 2, [6,22,0,b"\x00"], 0, "Babel: Olman's Room", [], False, [], [], [], [], [], [], [], []],
474: [False, [], 0, [6,22,0,b"\x00"], 0, "Babel: Castoth", [], False, [], [], [], [], [], [], [], []],
475: [False, [], 0, [6,22,0,b"\x00"], 0, "Babel: Viper", [], False, [], [], [], [], [], [], [], []],
476: [False, [], 0, [6,22,0,b"\x00"], 0, "Babel: Vampires", [], False, [], [], [], [], [], [], [], []],
477: [False, [], 0, [6,22,0,b"\x00"], 0, "Babel: Sand Fanger", [], False, [], [], [], [], [], [], [], []],
478: [False, [], 0, [6,22,0,b"\x00"], 0, "Babel: Mummy Queen", [], False, [], [], [], [], [], [], [], []],
479: [False, [473], 0, [6,22,0,b"\x00"], 0, "Babel: Statue Get", [], False, [], [], [], [], [], [], [], []],
# Jeweler's Mansion
480: [False, [], 2, [6,23,0,b"\x00"], 0, "Jeweler's Mansion: Main", [], False, [], [], [], [], [], [], [], []],
481: [False, [], 2, [6,23,0,b"\x00"], 0, "Jeweler's Mansion: Behind Psycho Slider", [], False, [], [], [], [], [], [], [], []],
482: [False, [523], 2, [6,23,0,b"\x00"], 0, "Jeweler's Mansion: Solid Arm", [], False, [], [], [], [], [], [], [], []],
# Game End
490: [False, [500], 0, [0,0,0,b"\x00"], 0, "<NAME>", [], False, [], [], [], [], [], [], [], []],
491: [False, [], 0, [0,0,0,b"\x00"], 0, "Firebird", [], False, [], [], [], [], [], [], [], []],
492: [False, [491], 0, [0,0,0,b"\x00"], 0, "Dark Gaia/End Game", [], False, [], [], [], [], [], [], [], []],
# Event Switches
500: [False, [], 0, [0,0,0,b"\x00"], 0, "Kara ", [], False, [], [], [], [], [], [], [], []],
501: [False, [], 0, [0,0,0,b"\x00"], 0, "Lilly ", [], False, [], [], [], [], [], [], [], []],
502: [False, [], 0, [0,0,0,b"\x00"], 0, "Moon Tribe: Spirits Healed ", [], False, [], [], [], [], [], [], [], []],
503: [False, [], 0, [0,0,0,b"\x00"], 0, "Inca: Castoth defeated ", [], False, [], [], [], [], [], [], [], []],
504: [False, [], 0, [0,0,0,b"\x00"], 0, "Freejia: Found Laborer ", [], False, [], [], [], [], [], [], [], []],
505: [False, [], 0, [0,0,0,b"\x00"], 0, "Neil's Memory Restored ", [], False, [], [], [], [], [], [], [], []],
506: [False, [], 0, [0,0,0,b"\x00"], 0, "Sky Garden: Map 82 NW Switch ", [], False, [], [], [], [], [], [], [], []],
507: [False, [], 0, [0,0,0,b"\x00"], 0, "Sky Garden: Map 82 NE Switch ", [], False, [], [], [], [], [], [], [], []],
508: [False, [], 0, [0,0,0,b"\x00"], 0, "Sky Garden: Map 82 SE Switch ", [], False, [], [], [], [], [], [], [], []],
509: [False, [], 0, [0,0,0,b"\x00"], 0, "Sky Garden: Map 84 Switch ", [], False, [], [], [], [], [], [], [], []],
510: [False, [], 0, [0,0,0,b"\x00"], 0, "Seaside: Fountain Purified ", [], False, [], [], [], [], [], [], [], []],
511: [False, [], 0, [0,0,0,b"\x00"], 0, "Mu: Water Lowered 1 ", [], False, [], [], [], [], [], [], [], []],
512: [False, [], 0, [0,0,0,b"\x00"], 0, "Mu: Water Lowered 2 ", [], False, [], [], [], [], [], [], [], []],
513: [False, [], 0, [0,0,0,b"\x00"], 0, "Angel: Puzzle Complete ", [], False, [], [], [], [], [], [], [], []],
514: [False, [333,335], 0, [0,0,0,b"\x00"], 0, "Mt Kress: Drops used 1 ", [], False, [], [], [], [], [], [], [], []],
515: [False, [339,340], 0, [0,0,0,b"\x00"], 0, "Mt Kress: Drops used 2 ", [], False, [], [], [], [], [], [], [], []],
516: [False, [340,341], 0, [0,0,0,b"\x00"], 0, "Mt Kress: Drops used 3 ", [], False, [], [], [], [], [], [], [], []],
517: [False, [], 0, [0,0,0,b"\x00"], 0, "Pyramid: Hieroglyphs placed ", [], False, [], [], [], [], [], [], [], []],
518: [False, [], 0, [0,0,0,b"\x00"], 0, "Babel: Castoth defeated ", [], False, [], [], [], [], [], [], [], []],
519: [False, [], 0, [0,0,0,b"\x00"], 0, "Babel: Viper defeated ", [], False, [], [], [], [], [], [], [], []],
520: [False, [], 0, [0,0,0,b"\x00"], 0, "Babel: Vampires defeated ", [], False, [], [], [], [], [], [], [], []],
521: [False, [], 0, [0,0,0,b"\x00"], 0, "Babel: Sand Fanger defeated ", [], False, [], [], [], [], [], [], [], []],
522: [False, [], 0, [0,0,0,b"\x00"], 0, "Babel: Mummy Queen defeated ", [], False, [], [], [], [], [], [], [], []],
523: [False, [], 0, [0,0,0,b"\x00"], 0, "Mansion: Solid Arm defeated ", [], False, [], [], [], [], [], [], [], []],
# Misc
600: [False, [], 0, [0,0,0,b"\x00"], 0, "Freedan Access ", [], False, [], [], [], [], [], [], [], []],
601: [False, [], 0, [0,0,0,b"\x00"], 0, "Glitches ", [], False, [], [], [], [], [], [], [], []],
602: [False, [], 0, [0,0,0,b"\x00"], 0, "Early Firebird ", [], False, [], [], [], [], [], [], [], []],
INACCESSIBLE: [False, [], 0, [0,0,0,b"\x00"], 0, "Inaccessible", [], False, [], [], [], [], [], [], [], []]
}
# Define logical paths in dynamic graph
# Format: { ID: [Status(-1=restricted,0=locked,1=unlocked,2=forced_open), StartRegion, DestRegion, NeedFreedan, [[item1, qty1],[item2,qty2]...]]}
self.logic = {
# Jeweler Rewards
0: [0, 1, 2, False, [[1, gem[0]]]], # Jeweler Reward 1
1: [0, 1, 2, False, [[1, gem[0] - 2], [41, 1]]],
2: [0, 1, 2, False, [[1, gem[0] - 3], [42, 1]]],
3: [0, 1, 2, False, [[1, gem[0] - 5], [41, 1], [42, 1]]],
4: [0, 2, 3, False, [[1, gem[1]]]], # Jeweler Reward 2
5: [0, 2, 3, False, [[1, gem[1] - 2], [41, 1]]],
6: [0, 2, 3, False, [[1, gem[1] - 3], [42, 1]]],
7: [0, 2, 3, False, [[1, gem[1] - 5], [41, 1], [42, 1]]],
8: [0, 3, 4, False, [[1, gem[2]]]], # Jeweler Reward 3
9: [0, 3, 4, False, [[1, gem[2] - 2], [41, 1]]],
10: [0, 3, 4, False, [[1, gem[2] - 3], [42, 1]]],
11: [0, 3, 4, False, [[1, gem[2] - 5], [41, 1], [42, 1]]],
12: [0, 4, 5, False, [[1, gem[3]]]], # Jeweler Reward 4
13: [0, 4, 5, False, [[1, gem[3] - 2], [41, 1]]],
14: [0, 4, 5, False, [[1, gem[3] - 3], [42, 1]]],
15: [0, 4, 5, False, [[1, gem[3] - 5], [41, 1], [42, 1]]],
16: [0, 5, 6, False, [[1, gem[4]]]], # Jeweler Reward 5
17: [0, 5, 6, False, [[1, gem[4] - 2], [41, 1]]],
18: [0, 5, 6, False, [[1, gem[4] - 3], [42, 1]]],
19: [0, 5, 6, False, [[1, gem[4] - 5], [41, 1], [42, 1]]],
20: [0, 6, 7, False, [[1, gem[5]]]], # Jeweler Reward 6
21: [0, 6, 7, False, [[1, gem[5] - 2], [41, 1]]],
22: [0, 6, 7, False, [[1, gem[5] - 3], [42, 1]]],
23: [0, 6, 7, False, [[1, gem[5] - 5], [41, 1], [42, 1]]],
24: [0, 7, 8, False, [[1, gem[6]]]], # Jeweler Reward 7 (Mansion)
25: [0, 7, 8, False, [[1, gem[6] - 2], [41, 1]]],
26: [0, 7, 8, False, [[1, gem[6] - 3], [42, 1]]],
27: [0, 7, 8, False, [[1, gem[6] - 5], [41, 1], [42, 1]]],
# Inter-Continental Travel
30: [0, 28, 15, False, [[37, 1]]], # South Cape: Erik w/ Lola's Letter
31: [0, 102, 15, False, [[37, 1]]], # Coast: Turbo w/ Lola's Letter
32: [0, 280, 15, False, [[37, 1]]], # Watermia: Bridgeman w/ Lola's Letter
33: [0, 160, 161, False, [[13, 1]]], # Neil's: Neil w/ Memory Melody
34: [0, 314, 17, False, [[505, 1]]], # Euro: Neil w/ Memory restored
35: [0, 402, 17, False, [[505, 1]]], # Dao: Neil w/ Memory restored
36: [0, 60, 64, False, [[25, 1]]], # Moon Tribe healed w/ Teapot
37: [0, 170, 16, False, [[502, 1]]], # Sky Garden: Spirits w/ spirits healed
38: [0, 280, 288, False, [[24, 1]]], # Watermia: Stablemaster w/ Will
39: [0, 310, 311, False, [[24, 1]]], # Euro: Stablemaster w/ Will
40: [0, 350, 351, False, [[10, 1]]], # Natives': Child Guide w/ Large Roast
# Edward's / Tunnel
60: [0, 32, 33, False, [[2, 1]]], # Escape cell w/Prison Key
61: [0, 33, 32, False, [[2, 1]]], # Enter cell w/Prison Key
62: [0, 45, 46, False, [[501, 1]]], # Progression w/ Lilly
63: [0, 47, 48, True, []], # Activate Bridge w/ Freedan
# Itory
70: [0, 50, 51, False, [[9, 1]]], # Town appears w/ Lola's Melody
71: [0, 55, 59, False, [[23, 1]]], # Get Lilly w/ Necklace
72: [0, 56, 57, False, [[61, 1]]], # Cave w/ Psycho Dash
73: [0, 56, 57, False, [[62, 1]]], # Cave w/ Psycho Slide
74: [0, 56, 57, False, [[63, 1]]], # Cave w/ Spin Dash
# Moon Tribe
80: [0, 61, 62, False, [[61, 1]]], # Cave challenge w/ Psycho Dash
81: [0, 61, 62, False, [[62, 1]]], # Cave challenge w/ Psycho Slide
82: [0, 61, 62, False, [[63, 1]]], # Cave challenge w/ Spin Dash
# Inca / Gold Ship / Freejia
89: [0, 72, 99, False, [[601, 1]]], # Map 29 progression w/ glitches
90: [0, 77, 78, False, [[3, 1], [4, 1]]], # Map 30 progression w/ Inca Statues
91: [0, 80, 81, False, [[61, 1]]], # Map 32 progression w/ Psycho Dash
92: [0, 80, 81, False, [[62, 1]]], # Map 32 progression w/ Psycho Slider
93: [0, 80, 81, False, [[63, 1]]], # Map 32 progression w/ Spin Dash
94: [0, 85, 86, True, []], # Map 35 progression w/ Freedan
95: [0, 87, 88, False, [[8, 1]]], # Map 36 progression w/ Wind Melody
96: [0, 89, 90, False, [[7, 1]]], # Map 37 progression w/ Diamond Block
97: [0, 91, 92, False, [[61, 1]]], # Map 38 progression w/ Psycho Dash
98: [0, 91, 92, False, [[62, 1]]], # Map 38 progression w/ Psycho Slider
99: [0, 91, 92, False, [[63, 1]]], # Map 38 progression w/ Spin Dash
#100: [0, 100, 104, False, [[100, 1]]], # Gold Ship progression w/ Statue 1
101: [0, 110, 115, False, [[504, 1]]], # Freejia: Slaver item w/ Laborer Found
# Diamond Mine
110: [0, 131, 132, False, [[61, 1]]], # Map 61 false wall w/ Psycho Dash
111: [0, 131, 132, False, [[62, 1]]], # Map 61 false wall w/ Psycho Slider
112: [0, 131, 132, False, [[63, 1]]], # Map 61 false wall w/ Spin Dash
113: [0, 134, 135, False, [[15, 1]]], # Map 63 progression w/ Elevator Key
114: [0, 136, 137, False, [[61, 1]]], # Map 64 trapped laborer w/ Psycho Dash
115: [0, 136, 137, False, [[62, 1]]], # Map 64 trapped laborer w/ Psycho Slider
116: [0, 136, 137, False, [[63, 1]]], # Map 64 trapped laborer w/ Spin Dash
117: [0, 138, 139, False, [[63, 1]]], # Map 65 progression w/ Spin Dash
118: [0, 138, 139, True, [[64, 1]]], # Map 65 progression w/ Dark Friar
119: [0, 146, 147, False, [[11, 1], [12, 1]]], # Map 68 progression w/ mine keys
# Sky Garden
130: [0, 170, 171, False, [[14, 4]]], # Boss access w/ Crystal Balls
131: [0, 177, 178, True, [[64, 1]]], # Map 79 progression w/ Dark Friar
132: [0, 177, 178, True, [[67, 1]]], # Map 79 progression w/ Firebird
133: [0, 168, 182, False, [[506, 1]]], # Map 81 progression w/ switch 1
134: [0, 182, 183, False, [[507, 1]]], # Map 81 progression w/ switch 2
135: [0, 182, 184, False, [[61, 1]]], # Map 81 progression w/ Psycho Dash
136: [0, 182, 184, False, [[62, 1]]], # Map 81 progression w/ Psycho Dash
137: [0, 182, 184, False, [[63, 1]]], # Map 81 progression w/ Psycho Dash
138: [0, 184, 185, False, [[508, 1], [61, 1]]], # Map 81 progression w/ switch 3 & Psycho Dash
139: [0, 184, 185, False, [[508, 1], [62, 1]]], # Map 81 progression w/ switch 3 & Psycho Slider
140: [0, 184, 185, False, [[508, 1], [63, 1]]], # Map 81 progression w/ switch 3 & Spin Dash
141: [0, 181, 182, False, [[63, 1]]], # Map 81 progression w/ Spin Dash
142: [0, 181, 184, False, [[63, 1]]], # Map 81 progression w/ Spin Dash
143: [0, 182, 185, False, [[63, 1]]], # Map 81 progression w/ Spin Dash
144: [0, 188, 189, True, []], # Map 82 progression w/ Freedan
145: [0, 188, 189, False, [[601, 1]]], # Map 82 progression w/ Glitches
146: [0, 192, 190, False, [[63, 1]]], # Map 83 progression w/ Spin Dash
147: [0, 195, 199, True, [[64, 1]]], # Map 84 progression w/ Dark Friar
148: [0, 195, 199, True, [[67, 1]]], # Map 84 progression w/ Firebird
149: [0, 195, 199, True, [[65, 1]]], # Map 84 progression w/ Aura Barrier
150: [0, 197, 199, True, [[64, 1]]], # Map 84 progression w/ Dark Friar
151: [0, 197, 199, True, [[67, 1]]], # Map 84 progression w/ Firebird
152: [0, 170, 16, False, [[502, 1]]], # Moon Tribe passage w/ spirits healed
# Seaside Palace
160: [0, 205, 208, False, [[501, 1]]], # Coffin access w/ Lilly
161: [0, 209, 510, False, [[17, 1]]], # Purify fountain w/stone
162: [0, 200, 206, False, [[510, 1]]], # Buffy access w/ purified fountain
163: [0, 200, 201, False, [[16, 1]]], # Seaside to Mu w/ Mu key
164: [0, 210, 211, False, [[16, 1]]], # Mu to Seaside w/ Mu key
# Mu
170: [0, 212, 245, False, [[62, 1]]], # Map 95 progression w/ Psycho Slider
171: [0, 212, 213, False, [[511, 1]]], # Map 95 progression w/ water lowered 1
172: [0, 213, 215, False, [[512, 1]]], # Map 95 progression w/ water lowered 2
173: [0, 214, 216, False, [[512, 1]]], # Map 95 progression w/ water lowered 2
174: [0, 217, 218, False, [[511, 1]]], # Map 96 progression w/ water lowered 1
175: [0, 222, 221, True, [[511, 1], [64, 1]]], # Map 97 progression w/ water lowered 1 & Friar
176: [0, 222, 221, True, [[511, 1], [67, 1]]], # Map 97 progression w/ water lowered 1 & Firebird
177: [0, 222, 221, False, [[511, 1], [601, 1]]], # Map 97 progression w/ water lowered 1 & glitches
178: [0, 226, 227, False, [[511, 1]]], # Map 98 progression w/ water lowered 1
179: [0, 227, 229, False, [[512, 1]]], # Map 98 progression w/ water lowered 2
180: [0, 228, 230, False, [[512, 1]]], # Map 98 progression w/ water lowered 2
181: [0, 229, 230, False, [[62, 1]]], # Map 98 progression w/ Psycho Slider
182: [0, 230, 229, False, [[62, 1]]], # Map 98 progression w/ Psycho Slider
183: [0, 226, 246, False, [[62, 1]]], # Map 98 progression w/ Psycho Slider
184: [0, 237, 238, False, [[62, 1]]], # Map 101 progression w/ Psycho Slider
185: [0, 240, 241, False, [[19, 2]]], # Map 102 progression w/ Rama Statues
186: [0, 231, 247, False, [[18, 1]]], # Water lowered 1 w/ Hope Statue
187: [0, 232, 248, False, [[18, 2]]], # Water lowered 2 w/ Hope Statues
# Angel Dungeon
210: [0, 263, 264, False, [[62, 1]]], # Map 112 progression w/ Psycho Slider
211: [0, 265, 275, False, [[62, 1]]], # Map 112 backwards progression w/ Psycho Slider
212: [0, 267, 268, False, [[62, 1]]], # Map 114 progression w/ Psycho Slider
213: [0, 277, 276, False, [[62, 1]]], # Map 114 backwards progression w/ Psycho Slider
214: [0, 272, 273, False, [[513, 1]]], # Ishtar's chest w/ puzzle complete
# Great Wall
220: [0, 294, 295, False, [[601, 1]]], # Map 133 progression w/ glitches
221: [0, 296, 295, False, [[63, 1]]], # Map 133 progression w/ Spin Dash
222: [0, 296, 295, True, []], # Map 133 progression w/ Freedan
223: [0, 298, 299, True, [[64, 1]]], # Map 135 progression w/ Friar
224: [0, 298, 299, True, [[67, 1]]], # Map 135 progression w/ Firebird
225: [0, 299, 298, False, [[64, 1], [54, 2]]], # Map 135 progression w/ Friar III
227: [0, 300, 301, False, [[63, 1]]], # Map 136 progression w/ Spin Dash
228: [0, 295, 294, False, [[63, 1]]], # Map 133 progression w/ Spin Dash
# Euro
230: [0, 314, 315, False, [[40, 1]]], # Ann item w/ Apple
# Mt. Temple
240: [0, 331, 332, False, [[63, 1]]], # Map 161 progression w/ Spin Dash
241: [0, 332, 331, False, [[63, 1]]], # Map 161 backwards progression w/ Spin Dash
242: [0, 333, 514, False, [[26, 1]]], # Map 162 progression w/ Mushroom drops 1
243: [0, 335, 514, False, [[26, 1]]], # Map 162 progression w/ Mushroom drops 1 -- IS THIS TRUE?
244: [0, 339, 515, False, [[26, 2]]], # Map 162 progression w/ Mushroom drops 2
245: [0, 340, 515, False, [[26, 2]]], # Map 162 progression w/ Mushroom drops 2 -- IS THIS TRUE?
246: [0, 340, 516, False, [[26, 3]]], # Map 162 progression w/ Mushroom drops 3
247: [0, 341, 516, False, [[26, 3]]], # Map 162 progression w/ Mushroom drops 3 -- IS THIS TRUE?
# Natives'
250: [0, 353, 354, False, [[29, 1]]], # Statues awake w/ Gorgon Flower
# Ankor Wat
260: [-1, 361, 362, True, [[64, 1]]], # Map 177 progression w/ Friar
261: [0, 363, 364, False, [[63, 1]]], # Map 178 progression w/ Spin Dash
262: [0, 364, 365, False, [[62, 1]]], # Map 178 progression w/ Psycho Slider
263: [0, 365, 364, False, [[62, 1]]], # Map 178 progression w/ Psycho Slider
264: [0, 367, 366, False, [[63, 1]]], # Map 179 progression w/ Spin Dash
265: [0, 369, 370, False, [[62, 1]]], # Map 181 progression w/ Psycho Slider
266: [0, 370, 371, False, [[63, 1]]], # Map 181 progression w/ Spin Dash
267: [0, 373, 374, True, [[66, 1]]], # Map 183 progression w/ Earthquaker
268: [0, 373, 374, True, [[64, 1], [54, 2]]], # Map 183 progression w/ upgraded Friar
269: [0, 373, 374, True, [[64, 1], [601, 1]]], # Map 183 progression w/ Friar and glitches
270: [0, 373, 374, True, [[67, 1]]], # Map 183 progression w/ Firebird -- IS THIS TRUE?
271: [0, 376, 377, True, [[64, 1]]], # Map 184 progression w/ Friar
272: [0, 376, 377, True, [[36, 1]]], # Map 184 progression w/ Shadow
273: [0, 384, 392, False, [[28, 1]]], # Map 188 progression w/ Black Glasses
274: [0, 385, 393, False, [[28, 1]]], # Map 188 progression w/ Black Glasses
275: [0, 384, 392, False, [[601, 1]]], # Map 188 progression w/ glitches
276: [0, 385, 393, False, [[601, 1]]], # Map 188 progression w/ glitches
277: [0, 392, 393, False, [[62, 1]]], # Map 188 progression w/ Slider
278: [0, 393, 392, False, [[62, 1]]], # Map 188 progression w/ Slider
279: [0, 386, 387, False, [[62, 1]]], # Map 188 progression w/ Psycho Slider
280: [0, 387, 386, False, [[62, 1]]], # Map 188 progression w/ Psycho Slider
# Pyramid
290: [0, 410, 411, False, [[62, 1]]], # Map 204 progression w/ Slider
291: [0, 410, 411, False, [[63, 1]]], # Map 204 progression w/ Spin
292: [0, 410, 411, False, [[601, 1]]], # Map 204 progression w/ glitches
293: [0, 411, 412, False, [[36, 1]]], # Map 204 progression w/ Aura
294: [0, 411, 413, False, [[36, 1]]], # Map 204 progression w/ Aura
295: [0, 415, 449, False, [[30, 1], [31, 1], [32, 1], [33, 1], [34, 1], [35, 1], [38, 1]]],
# Boss door open w/ Hieroglyphs
296: [0, 416, 417, False, [[63, 1]]], # Map 206 progression w/ Spin Dash
297: [0, 417, 416, False, [[63, 1]]], # Map 206 progression w/ Spin Dash
298: [0, 418, 419, False, [[63, 1]]], # Map 206 progression w/ Spin Dash
299: [0, 419, 418, False, [[63, 1]]], # Map 206 progression w/ Spin Dash
300: [0, 426, 427, True, [[36, 1]]], # Map 212 progression w/ Aura
301: [0, 426, 427, True, [[66, 1]]], # Map 212 progression w/ Earthquaker
302: [0, 427, 428, True, [[36, 1]]], # Map 212 progression w/ Aura
303: [0, 427, 429, True, [[36, 1]]], # Map 212 progression w/ Aura
304: [0, 427, 429, True, [[66, 1]]], # Map 212 progression w/ Earthquaker
305: [0, 431, 432, False, [[63, 1]]], # Map 214 progression w/ Spin Dash
306: [0, 431, 434, True, [[36, 1]]], # Map 214 progression w/ Aura
307: [0, 431, 433, True, [[64, 1]]], # Map 214 progression w/ Friar
308: [0, 438, 439, False, [[63, 1]]], # Map 217 progression w/ Spin Dash
309: [0, 439, 438, False, [[63, 1]]], # Map 217 progression w/ Spin Dash
310: [0, 440, 441, False, [[63, 1]]], # Map 219 progression w/ Spin Dash
311: [0, 441, 440, False, [[63, 1]]], # Map 219 progression w/ Spin Dash
312: [0, 435, 450, False, [[6, 6], [50, 2], [51, 1], [52, 1]]],
# Killer 6 w/ herbs and upgrades
313: [0, 435, 450, True, [[64, 1], [54, 1]]],
# Killer 6 w/ Friar II
314: [0, 411, 414, False, [[517, 1]]], # Pyramid to boss w/hieroglyphs placed
# Babel / Mansion
320: [0, 461, 462, False, [[36, 1], [39, 1]]], # Map 219 progression w/ Aura and Ring
321: [0, 473, 479, False, [[522, 1]]], # Olman statue w/ Mummy Queen 2
322: [0, 473, 479, False, [[523, 1]]], # Olman statue w/ Solid Arm
323: [0, 480, 481, False, [[62, 1]]], # Mansion progression w/ Slider
# Endgame / Misc
400: [-1, 49, 490, False, [[20, 1]]], # Rescue Kara from Edward's w/ Magic Dust
401: [-1, 150, 490, False, [[20, 1]]], # Rescue Kara from Mine w/ Magic Dust
402: [-1, 270, 490, False, [[20, 1]]], # Rescue Kara from Angel w/ Magic Dust
403: [-1, 345, 490, False, [[20, 1]]], # Rescue Kara from Mt. Temple w/ Magic Dust
404: [-1, 391, 490, False, [[20, 1]]], # Rescue Kara from Ankor Wat w/ Magic Dust
405: [0, 490, 491, False, [[36, 1], [39, 1], [602, 1]]], # Early Firebird w/ Kara, Aura and Ring
406: [0, 490, 492, False, [[36, 1], [100, 0], [101, 0], [102, 0], [103, 0], [104, 0], [105, 0]]],
# Beat Game w/Mystic Statues and Aura
407: [0, 490, 492, False, [[36, 1], [106, self.statues_required]]] # Beat Game w/Mystic Statues and Aura (player choice variant)
}
# Define addresses for in-game spoiler text
self.spoiler_addresses = {
0: "4caf5", # Edward's Castle guard, top floor (4c947)
1: "4e9ff", # Itory elder (4e929)
2: "58ac0", # Gold Ship queen (589ff)
3: "5ad6b", # Man at Diamond Coast (5ab5c)
# 4: "5bfde", # Freejia laborer (5bfaa)
5: "69167", # Seaside Palace empty coffin (68feb)
6: "6dc97", # Ishtar's apprentice (6dc50)
7: "79c81", # Watermia, Kara's journal (79bf5)
8: "7d892", # Euro: Erasquez (7d79e)
9: "89b2a", # Ankor Wat, spirit (89abf)
10: "8ad0c", # Dao: girl with note (8acc5)
11: "99b8f" # Babel: spirit (99b2e)
}
# Define location text for in-game format
self.location_text = {
0: b"\x64\x87\x84\xac\x49\x84\xa7\x84\x8b\x84\xa2", # "the Jeweler"
1: b"\x64\x87\x84\xac\x49\x84\xa7\x84\x8b\x84\xa2", # "the Jeweler"
2: b"\x64\x87\x84\xac\x49\x84\xa7\x84\x8b\x84\xa2", # "the Jeweler"
3: b"\x64\x87\x84\xac\x49\x84\xa7\x84\x8b\x84\xa2", # "the Jeweler"
4: b"\x64\x87\x84\xac\x49\x84\xa7\x84\x8b\x84\xa2", # "the Jeweler"
5: b"\x64\x87\x84\xac\x49\x84\xa7\x84\x8b\x84\xa2", # "the Jeweler"
6: b"\x63\x8e\xa5\xa4\x87\xac\x42\x80\xa0\x84", # "South Cape"
7: b"\x63\x8e\xa5\xa4\x87\xac\x42\x80\xa0\x84", # "South Cape"
8: b"\x63\x8e\xa5\xa4\x87\xac\x42\x80\xa0\x84", # "South Cape"
9: b"\x63\x8e\xa5\xa4\x87\xac\x42\x80\xa0\x84", # "South Cape"
10: b"\x63\x8e\xa5\xa4\x87\xac\x42\x80\xa0\x84", # "South Cape"
11: b"\x44\x83\xa7\x80\xa2\x83\x0e\xa3\xac\x42\x80\xa3\xa4\x8b\x84", # "Edward's Castle"
12: b"\x44\x83\xa7\x80\xa2\x83\x0e\xa3\xac\x42\x80\xa3\xa4\x8b\x84", # "Edward's Castle"
13: b"\x44\x83\xa7\x80\xa2\x83\x0e\xa3\xac\x60\xa2\x88\xa3\x8e\x8d", # "Edward's Prison"
14: b"\x44\x83\xa7\x80\xa2\x83\x0e\xa3\xac\x60\xa2\x88\xa3\x8e\x8d", # "Edward's Prison"
15: b"\x44\x83\xa7\x80\xa2\x83\x0e\xa3\xac\x64\xa5\x8d\x8d\x84\x8b", # "Edward's Tunnel"
16: b"\x44\x83\xa7\x80\xa2\x83\x0e\xa3\xac\x64\xa5\x8d\x8d\x84\x8b", # "Edward's Tunnel"
17: b"\x44\x83\xa7\x80\xa2\x83\x0e\xa3\xac\x64\xa5\x8d\x8d\x84\x8b", # "Edward's Tunnel"
18: b"\x44\x83\xa7\x80\xa2\x83\x0e\xa3\xac\x64\xa5\x8d\x8d\x84\x8b", # "Edward's Tunnel"
19: b"\x44\x83\xa7\x80\xa2\x83\x0e\xa3\xac\x64\xa5\x8d\x8d\x84\x8b", # "Edward's Tunnel"
20: b"\x48\xa4\x8e\xa2\xa9", # "Itory"
21: b"\x48\xa4\x8e\xa2\xa9", # "Itory"
22: b"\x48\xa4\x8e\xa2\xa9", # "Itory"
23: b"\x4c\x8e\x8e\x8d\xac\x64\xa2\x88\x81\x84", # "Moon Tribe"
24: b"\x48\x8d\x82\x80", # "Inca"
25: b"\x48\x8d\x82\x80", # "Inca"
26: b"\x48\x8d\x82\x80", # "Inca"
27: b"\x48\x8d\x82\x80", # "Inca"
28: b"\x63\x88\x8d\x86\x88\x8d\x86\xac\xa3\xa4\x80\xa4\xa5\x84", # "Singing Statue"
29: b"\x48\x8d\x82\x80", # "Inca"
30: b"\x48\x8d\x82\x80", # "Inca"
31: b"\x48\x8d\x82\x80", # "Inca"
32: b"\x46\x8e\x8b\x83\xac\x63\x87\x88\xa0", # "Gold Ship"
33: b"\xd6\x0e\x42\x8e\x80\xa3\xa4", # "Diamond Coast"
34: b"\x45\xa2\x84\x84\x89\x88\x80", # "Freejia"
35: b"\x45\xa2\x84\x84\x89\x88\x80", # "Freejia"
36: b"\x45\xa2\x84\x84\x89\x88\x80", # "Freejia"
37: b"\x45\xa2\x84\x84\x89\x88\x80", # "Freejia"
38: b"\x45\xa2\x84\x84\x89\x88\x80", # "Freejia"
39: b"\x45\xa2\x84\x84\x89\x88\x80", # "Freejia"
40: b"\xd6\x0e\x4c\x88\x8d\x84", # "Diamond Mine"
41: b"\x4b\x80\x81\x8e\xa2\x84\xa2", # "Laborer"
42: b"\x4b\x80\x81\x8e\xa2\x84\xa2", # "Laborer"
43: b"\xd6\x0e\x4c\x88\x8d\x84", # "Diamond Mine"
44: b"\x4b\x80\x81\x8e\xa2\x84\xa2", # "Laborer"
45: b"\x63\x80\x8c", # "Sam"
46: b"\xd6\x0e\x4c\x88\x8d\x84", # "Diamond Mine"
47: b"\xd6\x0e\x4c\x88\x8d\x84", # "Diamond Mine"
48: b"\xd6\x0e\x4c\x88\x8d\x84", # "Diamond Mine"
49: b"\x63\x8a\xa9\xac\x46\x80\xa2\x83\x84\x8d", # "Sky Garden"
50: b"\x63\x8a\xa9\xac\x46\x80\xa2\x83\x84\x8d", # "Sky Garden"
51: b"\x63\x8a\xa9\xac\x46\x80\xa2\x83\x84\x8d", # "Sky Garden"
52: b"\x63\x8a\xa9\xac\x46\x80\xa2\x83\x84\x8d", # "Sky Garden"
53: b"\x63\x8a\xa9\xac\x46\x80\xa2\x83\x84\x8d", # "Sky Garden"
54: b"\x63\x8a\xa9\xac\x46\x80\xa2\x83\x84\x8d", # "Sky Garden"
55: b"\x63\x8a\xa9\xac\x46\x80\xa2\x83\x84\x8d", # "Sky Garden"
56: b"\x63\x8a\xa9\xac\x46\x80\xa2\x83\x84\x8d", # "Sky Garden"
57: b"\x63\x8a\xa9\xac\x46\x80\xa2\x83\x84\x8d", # "Sky Garden"
58: b"\x63\x8a\xa9\xac\x46\x80\xa2\x83\x84\x8d", # "Sky Garden"
59: b"\x63\x8a\xa9\xac\x46\x80\xa2\x83\x84\x8d", # "Sky Garden"
60: b"\x63\x8a\xa9\xac\x46\x80\xa2\x83\x84\x8d", # "Sky Garden"
61: b"\xd7\x32\xd7\x93", # "Seaside Palace"
62: b"\xd7\x32\xd7\x93", # "Seaside Palace"
63: b"\xd7\x32\xd7\x93", # "Seaside Palace"
64: b"\x41\xa5\x85\x85\xa9", # "Buffy"
65: b"\x42\x8e\x85\x85\x88\x8d", # "Coffin"
66: b"\xd7\x32\xd7\x93", # "Seaside Palace"
67: b"\x4c\xa5", # "Mu"
68: b"\x4c\xa5", # "Mu"
69: b"\x4c\xa5", # "Mu"
70: b"\x4c\xa5", # "Mu"
71: b"\x4c\xa5", # "Mu"
72: b"\x4c\xa5", # "Mu"
73: b"\x4c\xa5", # "Mu"
74: b"\x4c\xa5", # "Mu"
75: b"\x4c\xa5", # "Mu"
76: b"\xd6\x01\x66\x88\x8b\x8b\x80\x86\x84", # "Angel Village"
77: b"\xd6\x01\x66\x88\x8b\x8b\x80\x86\x84", # "Angel Village"
78: b"\xd6\x01\x66\x88\x8b\x8b\x80\x86\x84", # "Angel Village"
79: b"\xd6\x01\x66\x88\x8b\x8b\x80\x86\x84", # "Angel Village"
80: b"\xd6\x01\x66\x88\x8b\x8b\x80\x86\x84", # "Angel Village"
81: b"\xd6\x01\x66\x88\x8b\x8b\x80\x86\x84", # "Angel Village"
82: b"\xd6\x01\x66\x88\x8b\x8b\x80\x86\x84", # "Angel Village"
83: b"\x67\x80\xa4\x84\xa2\x8c\x88\x80", # "Watermia"
84: b"\x67\x80\xa4\x84\xa2\x8c\x88\x80", # "Watermia"
85: b"\x4b\x80\x8d\x82\x84", # "Lance"
86: b"\x67\x80\xa4\x84\xa2\x8c\x88\x80", # "Watermia"
87: b"\x67\x80\xa4\x84\xa2\x8c\x88\x80", # "Watermia"
88: b"\x67\x80\xa4\x84\xa2\x8c\x88\x80", # "Watermia"
89: b"\xd6\x16\x67\x80\x8b\x8b", # "Great Wall"
90: b"\xd6\x16\x67\x80\x8b\x8b", # "Great Wall"
91: b"\xd6\x16\x67\x80\x8b\x8b", # "Great Wall"
92: b"\xd6\x16\x67\x80\x8b\x8b", # "Great Wall"
93: b"\xd6\x16\x67\x80\x8b\x8b", # "Great Wall"
94: b"\xd6\x16\x67\x80\x8b\x8b", # "Great Wall"
95: b"\xd6\x16\x67\x80\x8b\x8b", # "Great Wall"
96: b"\x44\xa5\xa2\x8e", # "Euro"
97: b"\x44\xa5\xa2\x8e", # "Euro"
98: b"\x44\xa5\xa2\x8e", # "Euro"
99: b"\x44\xa5\xa2\x8e", # "Euro"
100: b"\x44\xa5\xa2\x8e", # "Euro"
101: b"\x44\xa5\xa2\x8e", # "Euro"
102: b"\x40\x8d\x8d", # "Ann"
103: b"\x44\xa5\xa2\x8e", # "Euro"
104: b"\x4c\xa4\x2a\xac\x4a\xa2\x84\xa3\xa3", # "Mt. Kress"
105: b"\x4c\xa4\x2a\xac\x4a\xa2\x84\xa3\xa3", # "Mt. Kress"
106: b"\x4c\xa4\x2a\xac\x4a\xa2\x84\xa3\xa3", # "Mt. Kress"
107: b"\x4c\xa4\x2a\xac\x4a\xa2\x84\xa3\xa3", # "Mt. Kress"
108: b"\x4c\xa4\x2a\xac\x4a\xa2\x84\xa3\xa3\xac\x6e\x84\x8d\x83\x6f", # "Mt. Kress (end)"
109: b"\x4c\xa4\x2a\xac\x4a\xa2\x84\xa3\xa3", # "Mt. Kress"
110: b"\x4c\xa4\x2a\xac\x4a\xa2\x84\xa3\xa3", # "Mt. Kress"
111: b"\x4c\xa4\x2a\xac\x4a\xa2\x84\xa3\xa3", # "Mt. Kress"
112: b"\xd7\x21\x66\x88\x8b\x8b\x80\x86\x84", # "Native Village"
113: b"\x63\xa4\x80\xa4\xa5\x84", # "Statue"
114: b"\xd7\x21\x66\x88\x8b\x8b\x80\x86\x84", # "Native Village"
115: b"\x40\x8d\x8a\x8e\xa2\xac\x67\x80\xa4", # "Ankor Wat"
116: b"\x40\x8d\x8a\x8e\xa2\xac\x67\x80\xa4", # "Ankor Wat"
117: b"\x40\x8d\x8a\x8e\xa2\xac\x67\x80\xa4", # "Ankor Wat"
118: b"\x40\x8d\x8a\x8e\xa2\xac\x67\x80\xa4", # "Ankor Wat"
119: b"\x40\x8d\x8a\x8e\xa2\xac\x67\x80\xa4", # "Ankor Wat"
120: b"\x63\x87\xa2\xa5\x81\x81\x84\xa2", # "Shrubber"
121: b"\x63\xa0\x88\xa2\x88\xa4", # "Spirit"
122: b"\x40\x8d\x8a\x8e\xa2\xac\x67\x80\xa4", # "Ankor Wat"
123: b"\x40\x8d\x8a\x8e\xa2\xac\x67\x80\xa4", # "Ankor Wat"
124: b"\x40\x8d\x8a\x8e\xa2\xac\x67\x80\xa4", # "Ankor Wat"
125: b"\x43\x80\x8e", # "Dao"
126: b"\x43\x80\x8e", # "Dao"
127: b"\x43\x80\x8e", # "Dao"
128: b"\x63\x8d\x80\x8a\x84\xac\x86\x80\x8c\x84", # "Snake Game"
129: b"\x43\x80\x8e", # "Dao"
130: b"\x46\x80\x88\x80", # "Gaia"
131: b"\xd6\x3f", # "Pyramid"
132: b"\xd6\x3f", # "Pyramid"
133: b"\xd6\x3f", # "Pyramid"
134: b"\xd6\x3f", # "Pyramid"
135: b"\xd6\x3f", # "Pyramid"
136: b"\x4a\x88\x8b\x8b\x84\xa2\xac\x26", # "Killer 6"
137: b"\xd6\x3f", # "Pyramid"
138: b"\xd6\x3f", # "Pyramid"
139: b"\xd6\x3f", # "Pyramid"
140: b"\xd6\x3f", # "Pyramid"
141: b"\xd6\x3f", # "Pyramid"
142: b"\xd6\x3f", # "Pyramid"
143: b"\x41\x80\x81\x84\x8b", # "Babel"
144: b"\x41\x80\x81\x84\x8b", # "Babel"
145: b"\x41\x80\x81\x84\x8b", # "Babel"
146: b"\x41\x80\x81\x84\x8b", # "Babel"
147: b"\x49\x84\xa7\x84\x8b\x84\xa2\x0e\xa3\xac\x4c\x80\x8d\xa3\x88\x8e\x8d", # "Jeweler's Mansion"
148: "", # "Castoth"
149: "", # "Viper"
150: "", # "Vampires"
151: "", # "<NAME>"
152: "", # "Mummy Queen"
153: "" # "Olman"
}
# Define long item text for in-game format
self.item_text_long = {
0: b"\xd3\xd6\x1d\x8d\x8e\xa4\x87\x88\x8d\x86\x4f\xac\xac\xac\xac\xac\xac\xac\xac",
1: b"\xd3\xd6\x1d\x80\xac\x62\x84\x83\xac\x49\x84\xa7\x84\x8b\x4f\xac\xac\xac\xac",
2: b"\xd3\xd6\x1d\xa4\x87\x84\xac\x60\xa2\x88\xa3\x8e\x8d\xac\x4a\x84\xa9\x4f\xac",
3: b"\xd3\xd6\x1d\x48\x8d\x82\x80\xac\x63\xa4\x80\xa4\xa5\x84\xac\x40\x4f\xac\xac",
4: b"\xd3\xd6\x1d\x48\x8d\x82\x80\xac\x63\xa4\x80\xa4\xa5\x84\xac\x41\x4f\xac\xac",
5: "",
6: b"\xd3\xd6\x1d\x80\x8d\xac\x87\x84\xa2\x81\x4f\xac\xac\xac\xac\xac\xac\xac\xac",
7: b"\xd3\x64\x87\x84\xac\x43\x88\x80\x8c\x8e\x8d\x83\xac\x41\x8b\x8e\x82\x8a\x4f",
8: b"\xd3\xd6\x1d\xa4\x87\x84\xac\x67\x88\x8d\x83\xac\x4c\x84\x8b\x8e\x83\xa9\x4f",
9: b"\xd3\xd6\x1d\x4b\x8e\x8b\x80\x0e\xa3\xac\x4c\x84\x8b\x8e\x83\xa9\x4f\xac\xac",
10: b"\xd3\xd6\x1d\xa4\x87\x84\xac\x4b\x80\xa2\x86\x84\xac\x62\x8e\x80\xa3\xa4\x4f",
11: b"\xd3\xd6\x1d\x4c\x88\x8d\x84\xac\x4a\x84\xa9\xac\x40\x4f\xac\xac\xac\xac\xac",
12: b"\xd3\xd6\x1d\x4c\x88\x8d\x84\xac\x4a\x84\xa9\xac\x41\x4f\xac\xac\xac\xac\xac",
13: b"\xd3\x64\x87\x84\xac\x4c\x84\x8c\x8e\xa2\xa9\xac\x4c\x84\x8b\x8e\x83\xa9\x4f",
14: b"\xd3\xd6\x1d\x80\xac\x42\xa2\xa9\xa3\xa4\x80\x8b\xac\x41\x80\x8b\x8b\x4f\xac",
15: b"\xd3\x64\x87\x84\xac\x44\x8b\x84\xa6\x80\xa4\x8e\xa2\xac\x4a\x84\xa9\x4f\xac",
16: b"\xd3\x64\x87\x84\xac\x4c\xa5\xac\x60\x80\x8b\x80\x82\x84\xac\x4a\x84\xa9\x4f",
17: b"\xd3\x64\x87\x84\xac\x60\xa5\xa2\x88\xa4\xa9\xac\x63\xa4\x8e\x8d\x84\x4f\xac",
18: b"\xd3\x40\xac\x63\xa4\x80\xa4\xa5\x84\xac\x8e\x85\xac\x47\x8e\xa0\x84\x4f\xac",
19: b"\xd3\xd6\x1d\x80\xac\x62\x80\x8c\x80\xac\x63\xa4\x80\xa4\xa5\x84\x4f\xac\xac",
20: b"\xd3\xd6\x1d\xa4\x87\x84\xac\x4c\x80\x86\x88\x82\xac\x43\xa5\xa3\xa4\x4f\xac",
21: "",
22: b"\xd3\xd6\x1d\x4b\x80\x8d\x82\x84\x0e\xa3\xac\x4b\x84\xa4\xa4\x84\xa2\x4f\xac",
23: b"\xd3\xd6\x1d\xa4\x87\x84\xac\x4d\x84\x82\x8a\x8b\x80\x82\x84\x4f\xac\xac\xac",
24: b"\xd3\xd6\x1d\xa4\x87\x84\xac\x67\x88\x8b\x8b\x4f\xac\xac\xac\xac\xac\xac\xac",
25: b"\xd3\xd6\x1d\xa4\x87\x84\xac\x64\x84\x80\xa0\x8e\xa4\x4f\xac\xac\xac\xac\xac",
26: b"\xd3\xd6\x1d\x4c\xa5\xa3\x87\xa2\x8e\x8e\x8c\xac\x43\xa2\x8e\xa0\xa3\x4f\xac",
27: "",
28: b"\xd3\x64\x87\x84\xac\x41\x8b\x80\x82\x8a\xac\x46\x8b\x80\xa3\xa3\x84\xa3\x4f",
29: b"\xd3\x64\x87\x84\xac\x46\x8e\xa2\x86\x8e\x8d\xac\x45\x8b\x8e\xa7\x84\xa2\x4f",
30: b"\xd3\x40\xac\x47\x88\x84\xa2\x8e\x86\x8b\xa9\xa0\x87\xac\x64\x88\x8b\x84\x4f",
31: b"\xd3\x40\xac\x47\x88\x84\xa2\x8e\x86\x8b\xa9\xa0\x87\xac\x64\x88\x8b\x84\x4f",
32: b"\xd3\x40\xac\x47\x88\x84\xa2\x8e\x86\x8b\xa9\xa0\x87\xac\x64\x88\x8b\x84\x4f",
33: b"\xd3\x40\xac\x47\x88\x84\xa2\x8e\x86\x8b\xa9\xa0\x87\xac\x64\x88\x8b\x84\x4f",
34: b"\xd3\x40\xac\x47\x88\x84\xa2\x8e\x86\x8b\xa9\xa0\x87\xac\x64\x88\x8b\x84\x4f",
35: b"\xd3\x40\xac\x47\x88\x84\xa2\x8e\x86\x8b\xa9\xa0\x87\xac\x64\x88\x8b\x84\x4f",
36: b"\xd3\xd6\x1d\xa4\x87\x84\xac\x40\xa5\xa2\x80\x4f\xac\xac\xac\xac\xac\xac\xac",
37: b"\xd3\xd6\x1d\x4b\x8e\x8b\x80\x0e\xa3\xac\x4b\x84\xa4\xa4\x84\xa2\x4f\xac\xac",
38: b"\xd3\xd6\x1d\x45\x80\xa4\x87\x84\xa2\x0e\xa3\xac\x49\x8e\xa5\xa2\x8d\x80\x8b",
39: b"\xd3\x64\x87\x84\xac\x42\xa2\xa9\xa3\xa4\x80\x8b\xac\x62\x88\x8d\x86\x4f\xac",
40: b"\xd3\xd6\x1d\x80\x8d\xac\x40\xa0\xa0\x8b\x84\x4f\xac\xac\xac\xac\xac\xac\xac",
41: b"\xd3\xd6\x1d\x22\xac\x62\x84\x83\xac\x49\x84\xa7\x84\x8b\xa3\x4f\xac\xac\xac",
42: b"\xd3\xd6\x1d\x23\xac\x62\x84\x83\xac\x49\x84\xa7\x84\x8b\xa3\x4f\xac\xac\xac",
50: b"\xd3\xd6\x1d\x80\x8d\xac\x47\x60\xac\xa5\xa0\x86\xa2\x80\x83\x84\x4f\xac\xac",
51: b"\xd3\xd6\x1d\x80\xac\x43\x44\x45\xac\xa5\xa0\x86\xa2\x80\x83\x84\x4f\xac\xac",
52: b"\xd3\xd6\x1d\x80\xac\x63\x64\x62\xac\xa5\xa0\x86\xa2\x80\x83\x84\x4f\xac\xac",
53: b"\xd3\xd6\x3c\x43\x80\xa3\x87\xac\x88\xa3\xac\x88\x8c\xa0\xa2\x8e\xa6\x84\x83",
54: b"\xd3\x45\xa2\x88\x80\xa2\xac\x88\xa3\xac\x88\x8c\xa0\xa2\x8e\xa6\x84\x83\x4f",
55: b"\xd3\xd6\x1d\x80\xac\x47\x84\x80\xa2\xa4\xac\x60\x88\x84\x82\x84\x4f\xac\xac"
}
# Define short item text for in-game format
# Currently only used in Jeweler's inventory
self.item_text_short = {
0: b"\x4d\x8e\xa4\x87\x88\x8d\x86\xac\xac\xac\xac\xac\xac",
1: b"\x62\x84\x83\xac\x49\x84\xa7\x84\x8b\xac\xac\xac\xac",
2: b"\x60\xa2\x88\xa3\x8e\x8d\xac\x4a\x84\xa9\xac\xac\xac",
3: b"\x48\x8d\x82\x80\xac\x63\xa4\x80\xa4\xa5\x84\xac\x40",
4: b"\x48\x8d\x82\x80\xac\x63\xa4\x80\xa4\xa5\x84\xac\x41",
5: "",
6: b"\x47\x84\xa2\x81\xac\xac\xac\xac\xac\xac\xac\xac\xac",
7: b"\x43\x88\x80\x8c\x8e\x8d\x83\xac\x41\x8b\x8e\x82\x8a",
8: b"\x67\x88\x8d\x83\xac\x4c\x84\x8b\x8e\x83\xa9\xac\xac",
9: b"\x4b\x8e\x8b\x80\x0e\xa3\xac\x4c\x84\x8b\x8e\x83\xa9",
10: b"\x4b\x80\xa2\x86\x84\xac\x62\x8e\x80\xa3\xa4\xac\xac",
11: b"\x4c\x88\x8d\x84\xac\x4a\x84\xa9\xac\x40\xac\xac\xac",
12: b"\x4c\x88\x8d\x84\xac\x4a\x84\xa9\xac\x41\xac\xac\xac",
13: b"\x4c\x84\x8c\x8e\xa2\xa9\xac\x4c\x84\x8b\x8e\x83\xa9",
14: b"\x42\xa2\xa9\xa3\xa4\x80\x8b\xac\x41\x80\x8b\x8b\xac",
15: b"\x44\x8b\x84\xa6\x80\xa4\x8e\xa2\xac\x4a\x84\xa9\xac",
16: b"\x4c\xa5\xac\x60\x80\x8b\x80\x82\x84\xac\x4a\x84\xa9",
17: b"\x60\xa5\xa2\x88\xa4\xa9\xac\x63\xa4\x8e\x8d\x84\xac",
18: b"\x47\x8e\xa0\x84\xac\x63\xa4\x80\xa4\xa5\x84\xac\xac",
19: b"\x62\x80\x8c\x80\xac\x63\xa4\x80\xa4\xa5\x84\xac\xac",
20: b"\x4c\x80\x86\x88\x82\xac\x43\xa5\xa3\xa4\xac\xac\xac",
21: "",
22: b"\x4b\x80\x8d\x82\x84\xac\x4b\x84\xa4\xa4\x84\xa2\xac",
23: b"\x4d\x84\x82\x8a\x8b\x80\x82\x84\xac\xac\xac\xac\xac",
24: b"\x67\x88\x8b\x8b\xac\xac\xac\xac\xac\xac\xac\xac\xac",
25: b"\x64\x84\x80\xa0\x8e\xa4\xac\xac\xac\xac\xac\xac\xac",
26: b"\x63\x87\xa2\x8e\x8e\x8c\xac\x43\xa2\x8e\xa0\xa3\xac",
27: "",
28: b"\x41\x8b\x80\x82\x8a\xac\x46\x8b\x80\xa3\xa3\x84\xa3",
29: b"\x46\x8e\xa2\x86\x8e\x8d\xac\x45\x8b\x8e\xa7\x84\xa2",
30: b"\x47\x88\x84\xa2\x8e\x86\x8b\xa9\xa0\x87\xac\xac\xac",
31: b"\x47\x88\x84\xa2\x8e\x86\x8b\xa9\xa0\x87\xac\xac\xac",
32: b"\x47\x88\x84\xa2\x8e\x86\x8b\xa9\xa0\x87\xac\xac\xac",
33: b"\x47\x88\x84\xa2\x8e\x86\x8b\xa9\xa0\x87\xac\xac\xac",
34: b"\x47\x88\x84\xa2\x8e\x86\x8b\xa9\xa0\x87\xac\xac\xac",
35: b"\x47\x88\x84\xa2\x8e\x86\x8b\xa9\xa0\x87\xac\xac\xac",
36: b"\x40\xa5\xa2\x80\xac\xac\xac\xac\xac\xac\xac\xac\xac",
37: b"\x4b\x8e\x8b\x80\x0e\xa3\xac\x4b\x84\xa4\xa4\x84\xa2",
38: b"\x49\x8e\xa5\xa2\x8d\x80\x8b\xac\xac\xac\xac\xac\xac",
39: b"\x42\xa2\xa9\xa3\xa4\x80\x8b\xac\x62\x88\x8d\x86\xac",
40: b"\x40\xa0\xa0\x8b\x84\xac\xac\xac\xac\xac\xac\xac\xac",
41: b"\x22\xac\x62\x84\x83\xac\x49\x84\xa7\x84\x8b\xa3\xac",
42: b"\x23\xac\x62\x84\x83\xac\x49\x84\xa7\x84\x8b\xa3\xac",
50: b"\x47\x60\xac\x65\xa0\x86\xa2\x80\x83\x84\xac\xac\xac",
51: b"\x43\x44\x45\xac\x65\xa0\x86\xa2\x80\x83\x84\xac\xac",
52: b"\x63\x64\x62\xac\x65\xa0\x86\xa2\x80\x83\x84\xac\xac",
53: b"\x43\x80\xa3\x87\xac\x65\xa0\x86\xa2\x80\x83\x84\xac",
54: b"\x45\xa2\x88\x80\xa2\xac\x65\xa0\x86\xa2\x80\x83\x84",
55: b"\x47\x84\x80\xa2\xa4\xac\x60\x88\x84\x82\x84\xac\xac",
61: b"\xd6\x3c\x43\x80\xa3\x87",
62: b"\xd6\x3c\x63\x8b\x88\x83\x84\xa2",
63: b"\xd7\x31\x43\x80\xa3\x87",
64: b"\xd6\x0c\x45\xa2\x88\x80\xa2",
65: b"\xd6\x03\x41\x80\xa2\xa2\x88\x84\xa2",
66: b"\x44\x80\xa2\xa4\x87\xa1\xa5\x80\x8a\x84\xa2"
}
# Database of enemy groups and spritesets
# FORMAT: { ID: [ROM_Loction, HeaderCode, HeaderData, Name]}
self.enemysets = {
0: [b"\x03\x00\x10\x10\xEC\x59\xCD\x01\x04\x00\x60\xA0\x8C\x75\xDE\x10\xD0\x21\x00\x47\xED\x9F", "Underground Tunnel"],
1: [b"\x03\x00\x10\x10\xBC\x33\xC2\x01\x04\x00\x60\xA0\x0C\x77\xDE\x10\x2A\x0F\x00\xE6\x08\xD5", "Inca Ruins (Mud Monster and Larva)"],
2: [b"\x03\x00\x10\x10\x23\x4D\xC2\x01\x04\x00\x60\xA0\xCC\x77\xDE\x10\x36\x23\x00\x24\x45\xCC", "Inca Ruins (Statues)"],
3: [b"\x03\x00\x10\x10\x16\x5C\xCC\x01\x04\x00\x60\xA0\xCC\x7A\xDE\x10\x30\x29\x00\xBE\x2F\xCB", "Diamond Mine"],
4: [b"\x03\x00\x10\x10\x62\x3D\xCF\x01\x04\x00\x60\xA0\x4C\x7C\xDE\x10\x54\x1D\x00\xEF\xEE\x9E", "Sky Garden (top)"],
5: [b"\x03\x00\x10\x10\x62\x3D\xCF\x01\x04\x00\x60\xA0\x0C\x7D\xDE\x10\x54\x1D\x00\xEF\xEE\x9E", "Sky Garden (bottom)"],
6: [b"\x03\x00\x10\x10\x2D\x2E\xCC\x01\x04\x00\x60\xA0\x00\x00\xDF\x10\x16\x1C\x00\x41\x36\xD1", "Mu"],
7: [b"\x03\x00\x10\x10\xD1\x14\xCF\x01\x04\x00\x60\xA0\x40\x02\xDF\x10\x7F\x0F\x00\x2C\x2B\xD5", "Angel Dungeon"],
8: [b"\x03\x00\x10\x10\x6D\x13\xD0\x01\x04\x00\x60\xA0\x40\x05\xDF\x10\xFF\x16\x00\xF7\xF3\x99", "Great Wall"],
9: [b"\x03\x00\x10\x10\x00\x00\xD0\x01\x04\x00\x60\xA0\x40\x08\xDF\x10\x70\x0E\x00\x5C\x4D\xD8", "Mt. Kress"],
10: [b"\x03\x00\x10\x10\xEA\x15\xCE\x01\x04\x00\x70\x90\x53\x55\xDE\x10\xD5\x14\x00\x08\x73\xCC", "Ankor Wat (outside)"],
11: [b"\x03\x00\x10\x10\x81\x6A\xC1\x01\x04\x00\x70\x90\x13\x57\xDE\x10\x57\x10\x00\x5F\x39\xD4", "Ankor Wat (inside)"],
12: [b"\x03\x00\x10\x10\x0d\x18\xcb\x01\x04\x00\x60\x90\x80\x0a\xdf\x10\xfb\x13\x00\x0e\x67\xd1", "Pyramid"],
13: [b"\x03\x00\x10\x10\x16\x5C\xCC\x01\x04\x00\x60\xA0\xC0\x0C\xDF\x10\x30\x29\x00\xBE\x2F\xCB", "Jeweler's Mansion"]
}
# Enemy map database
# FORMAT: { ID: [EnemySet, RewardBoss(0 for no reward), Reward[type, tier], SearchHeader,
# SpritesetOffset,EventAddrLow,EventAddrHigh,RestrictedEnemysets]}
# ROM address for room reward table is mapID + $1aade
self.maps = {
# For now, no one can have enemyset 10 (Ankor Wat outside)
# Underground Tunnel
12: [0, 1, [0,0], b"\x0C\x00\x02\x05\x03", 4, "c867a", "c86ac", []],
13: [0, 1, [0,0], b"\x0D\x00\x02\x03\x03", 4, "c86ac", "c875c", [0, 1, 2, 3, 4, 5, 7, 8, 9, 11, 12, 13]],
14: [0, 1, [0,0], b"\x0E\x00\x02\x03\x03", 4, "c875c", "c8847", [0, 1, 2, 3, 4, 5, 7, 8, 9, 11, 12, 13]], # Weird 4way issues
15: [0, 1, [0,0], b"\x0F\x00\x02\x03\x03", 4, "c8847", "c8935", [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 12, 13]],
18: [0, 1, [0,0], b"\x12\x00\x02\x03\x03", 4, "c8986", "c8aa9", [0, 1, 2, 3, 4, 5, 7, 8, 9, 11, 12, 13]], # Spike balls
# Inca Ruins
27: [1, 0, [0,0], b"\x1B\x00\x02\x05\x03", 4, "c8c33", "c8c87", [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 12, 13]], # Moon Tribe cave
29: [1, 1, [0,0], b"\x1D\x00\x02\x0F\x03", 4, "c8cc4", "c8d85", [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 12, 13]],
32: [1, 1, [0,0], b"\x20\x00\x02\x08\x03", 4, "c8e16", "c8e75", []], # Broken statue
33: [2, 1, [0,0], b"\x21\x00\x02\x08\x03", 4, "c8e75", "c8f57", [0, 1, 2, 3, 4, 5, 7, 8, 9, 11, 12, 13]], # Floor switch
34: [2, 1, [0,0], b"\x22\x00\x02\x08\x03", 4, "c8f57", "c9029", []], # Floor switch
35: [2, 1, [0,0], b"\x23\x00\x02\x0A\x03", 4, "c9029", "c90d5", [0, 1, 2, 3, 4, 5, 7, 8, 9, 11, 12, 13]],
37: [1, 1, [0,0], b"\x25\x00\x02\x08\x03", 4, "c90f3", "c91a0", [1]], # Diamond block
38: [1, 1, [0,0], b"\x26\x00\x02\x08\x03", 4, "c91a0", "c9242", []], # Broken statues
39: [1, 1, [0,0], b"\x27\x00\x02\x0A\x03", 4, "c9242", "c92f2", []],
40: [1, 1, [0,0], b"\x28\x00\x02\x08\x03", 4, "c92f2", "c935f", [1]], # Falling blocks
# Diamond Mine
61: [3, 2, [0,0], b"\x3D\x00\x02\x08\x03", 4, "c9836", "c98b7", [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 12, 13]],
62: [3, 2, [0,0], b"\x3E\x00\x02\x08\x03", 4, "c98b7", "c991a", []],
63: [3, 2, [0,0], b"\x3F\x00\x02\x05\x03", 4, "c991a", "c9a41", []],
64: [3, 2, [0,0], b"\x40\x00\x02\x08\x03", 4, "c9a41", "c9a95", [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 12, 13]], # Trapped laborer (??)
65: [3, 2, [0,0], b"\x41\x00\x02\x00\x03", 4, "c9a95", "c9b39", [0, 2, 3, 4, 5, 11]], # Stationary Grundit
69: [3, 2, [0,0], b"\x45\x00\x02\x08\x03", 4, "c9ba1", "c9bf4", []],
70: [3, 2, [0,0], b"\x46\x00\x02\x08\x03", 4, "c9bf4", "c9c5c", [3, 13]],
# Sky Garden
77: [4, 2, [0,0], b"\x4D\x00\x02\x12\x03", 4, "c9db3", "c9e92", []],
78: [5, 2, [0,0], b"\x4E\x00\x02\x10\x03", 4, "c9e92", "c9f53", []],
79: [4, 2, [0,0], b"\x4F\x00\x02\x12\x03", 4, "c9f53", "ca01a", [4, 5]],
80: [5, 2, [0,0], b"\x50\x00\x02\x10\x03", 4, "ca01a", "ca0cb", [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 12, 13]],
81: [4, 2, [0,0], b"\x51\x00\x02\x12\x03", 4, "ca0cb", "ca192", [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 12, 13]],
82: [5, 2, [0,0], b"\x52\x00\x02\x10\x03", 4, "ca192", "ca247", [4, 5]],
83: [4, 2, [0,0], b"\x53\x00\x02\x12\x03", 4, "ca247", "ca335", [4, 5]],
84: [5, 2, [0,0], b"\x54\x00\x02\x12\x03", 4, "ca335", "ca43b", [4, 5]],
# Mu
# 92: [6,0,0,b"\x5C\x00\x02\x15\x03",4,[]], # Seaside Palace
95: [6, 3, [0,0], b"\x5F\x00\x02\x14\x03", 4, "ca71b", "ca7ed", [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 12, 13]],
96: [6, 3, [0,0], b"\x60\x00\x02\x14\x03", 4, "ca7ed", "ca934", [6]],
97: [6, 3, [0,0], b"\x61\x00\x02\x14\x03", 4, "ca934", "caa7b", [6]],
98: [6, 3, [0,0], b"\x62\x00\x02\x14\x03", 4, "caa7b", "cab28", []],
100: [6, 3, [0,0], b"\x64\x00\x02\x14\x03", 4, "cab4b", "cabd4", []],
101: [6, 3, [0,0], b"\x65\x00\x02\x14\x03", 4, "cabd4", "cacc3", [6]],
# Angel Dungeon
109: [7, 3, [0,0], b"\x6D\x00\x02\x16\x03", 4, "caf6e", "cb04b", [7, 8, 9, 10]], # Add 10's back in once flies are fixed
110: [7, 3, [0,0], b"\x6E\x00\x02\x18\x03", 4, "cb04b", "cb13e", [7, 8, 9, 10]],
111: [7, 3, [0,0], b"\x6F\x00\x02\x1B\x03", 4, "cb13e", "cb1ae", [7, 8, 9, 10]],
112: [7, 3, [0,0], b"\x70\x00\x02\x16\x03", 4, "cb1ae", "cb258", [7, 8, 9, 10]],
113: [7, 3, [0,0], b"\x71\x00\x02\x18\x03", 4, "cb258", "cb29e", [7, 8, 9, 10]],
114: [7, 3, [0,0], b"\x72\x00\x02\x18\x03", 4, "cb29e", "cb355", [7, 8, 9, 10]],
# Great Wall
130: [8, 4, [0,0], b"\x82\x00\x02\x1D\x03", 4, "cb6c1", "cb845", [8, 9, 10]], # Add 10's back in once flies are fixed
131: [8, 4, [0,0], b"\x83\x00\x02\x1D\x03", 4, "cb845", "cb966", [7, 8, 9, 10]],
133: [8, 4, [0,0], b"\x85\x00\x02\x1D\x03", 4, "cb97d", "cbb18", [8, 9, 10]],
134: [8, 4, [0,0], b"\x86\x00\x02\x1D\x03", 4, "cbb18", "cbb87", [7, 8, 9, 10]],
135: [8, 4, [0,0], b"\x87\x00\x02\x1D\x03", 4, "cbb87", "cbc3b", [8]],
136: [8, 4, [0,0], b"\x88\x00\x02\x1D\x03", 4, "cbc3b", "cbd0a", [7, 8, 9]],
# Mt Temple
160: [9, 4, [0,0], b"\xA0\x00\x02\x20\x03", 4, "cc18c", "cc21c", []],
161: [9, 4, [0,0], b"\xA1\x00\x02\x20\x03", 4, "cc21c", "cc335", [7, 8, 9, 10]],
162: [9, 4, [0,0], b"\xA2\x00\x02\x20\x03", 4, "cc335", "cc3df", [0, 1, 2, 3, 4, 5, 6, 8, 9, 10, 12, 13]], # Drops
163: [9, 4, [0,0], b"\xA3\x00\x02\x20\x03", 4, "cc3df", "cc4f7", []],
164: [9, 4, [0,0], b"\xA4\x00\x02\x20\x03", 4, "cc4f7", "cc5f8", [0, 1, 2, 3, 4, 5, 8, 9, 10, 11, 12, 13]],
165: [9, 4, [0,0], b"\xA5\x00\x02\x20\x03", 4, "cc5f8", "cc703", [0, 1, 2, 3, 4, 5, 6, 8, 9, 10, 12, 13]], # Drops
166: [9, 4, [0,0], b"\xA6\x00\x02\x20\x03", 4, "cc703", "cc7a1", []],
167: [9, 4, [0,0], b"\xA7\x00\x02\x20\x03", 4, "cc7a1", "cc9a3", [0, 1, 2, 3, 4, 5, 6, 8, 9, 10, 11, 12, 13]],
168: [9, 4, [0,0], b"\xA8\x00\x02\x20\x03", 4, "cc9a3", "cca02", [7, 8, 9, 10]],
        # Ankor Wat
176: [10, 6, [0,0], b"\xB0\x00\x02\x2C\x03", 4, "ccb1b", "ccbd8", []],
177: [11, 6, [0,0], b"\xB1\x00\x02\x08\x03", 4, "ccbd8", "ccca5", [0, 1, 2, 3, 4, 5, 7, 8, 9, 11, 12, 13]],
178: [11, 6, [0,0], b"\xB2\x00\x02\x08\x03", 4, "ccca5", "ccd26", [0, 1, 2, 3, 4, 5, 7, 8, 9, 11, 13]],
179: [11, 6, [0,0], b"\xB3\x00\x02\x08\x03", 4, "ccd26", "ccd83", []],
180: [11, 6, [0,0], b"\xB4\x00\x02\x08\x03", 4, "ccd83", "ccdd7", [0, 1, 2, 3, 4, 5, 7, 8, 9, 11, 13]],
181: [11, 6, [0,0], b"\xB5\x00\x02\x08\x03", 4, "ccdd7", "cce7b", []],
182: [10, 6, [0,0], b"\xB6\x00\x02\x2C\x03", 4, "cce7b", "cd005", [0, 1, 2, 3, 4, 5, 6, 8, 9, 10, 11, 12, 13]],
183: [11, 6, [0,0], b"\xB7\x00\x02\x08\x03", 4, "cd005", "cd092", []], # Earthquaker Golem
184: [11, 6, [0,0], b"\xB8\x00\x02\x08\x03", 4, "cd092", "cd0df", [0, 1, 3, 4, 5, 7, 8, 9, 11, 13]],
185: [11, 6, [0,0], b"\xB9\x00\x02\x08\x03", 4, "cd0df", "cd137", []],
186: [10, 6, [0,0], b"\xBA\x00\x02\x2C\x03", 4, "cd137", "cd197", []],
187: [11, 6, [0,0], b"\xBB\x00\x02\x08\x03", 4, "cd197", "cd1f4", []],
188: [11, 6, [0,0], b"\xBC\x00\x02\x24\x03", 4, "cd1f4", "cd29a", []],
189: [11, 6, [0,0], b"\xBD\x00\x02\x08\x03", 4, "cd29a", "cd339", []],
190: [11, 6, [0,0], b"\xBE\x00\x02\x08\x03", 4, "cd339", "cd392", []],
# Pyramid
204: [12, 5, [0,0], b"\xCC\x00\x02\x08\x03", 4, "cd539", "cd58c", []],
206: [12, 5, [0,0], b"\xCE\x00\x02\x08\x03", 4, "cd5c6", "cd650", []],
207: [12, 5, [0,0], b"\xCF\x00\x02\x08\x03", 4, "cd650", "cd6f3", []],
208: [12, 5, [0,0], b"\xD0\x00\x02\x08\x03", 4, "cd6f3", "cd752", []],
209: [12, 5, [0,0], b"\xD1\x00\x02\x08\x03", 4, "cd752", "cd81b", []],
210: [12, 5, [0,0], b"\xD2\x00\x02\x08\x03", 4, "cd81b", "cd8f1", []],
211: [12, 5, [0,0], b"\xD3\x00\x02\x08\x03", 4, "cd8f1", "cd9a1", []],
212: [12, 5, [0,0], b"\xD4\x00\x02\x08\x03", 4, "cd9a1", "cda80", []],
213: [12, 5, [0,0], b"\xD5\x00\x02\x08\x03", 4, "cda80", "cdb4b", []],
214: [12, 5, [0,0], b"\xD6\x00\x02\x26\x03", 4, "cdb4b", "cdc1e", []],
215: [12, 5, [0,0], b"\xD7\x00\x02\x28\x03", 4, "cdc1e", "cdcfd", [0, 2, 3, 4, 5, 6, 8, 9, 11, 12, 13]],
216: [12, 5, [0,0], b"\xD8\x00\x02\x08\x03", 4, "cdcfd", "cde4f", [0, 1, 2, 3, 4, 5, 6, 8, 9, 10, 11, 12, 13]],
217: [12, 5, [0,0], b"\xD9\x00\x02\x26\x03", 4, "cde4f", "cdf3c", []],
219: [12, 5, [0,0], b"\xDB\x00\x02\x26\x03", 4, "cdf76", "ce010", [0, 4, 5, 8, 9, 11, 12]], #Spike elevators
# Jeweler's Mansion
233: [13, 0, [0,0], b"\xE9\x00\x02\x22\x03", 4, "ce224", "ce3a6", [0, 1, 2, 3, 4, 5, 6, 8, 9, 11, 12, 13]]
}
# Database of enemy types
# FORMAT: { ID: [Enemyset, Event addr, VanillaTemplate,
# Type(1=stationary,2=walking,3=flying),OnWalkableTile,CanBeRandom,Name]}
self.enemies = {
# Underground Tunnel
0: [0, b"\x55\x87\x8a", b"\x05", 2, True, True, "Bat"], # a8755
1: [0, b"\x6c\x82\x8a", b"\x01", 2, True, True, "Ribber"],
2: [0, b"\x00\x80\x8a", b"\x02", 1, False, True, "Canal Worm"],
3: [0, b"\xf7\x85\x8a", b"\x03", 2, True, False, "King Bat"],
4: [0, b"\x76\x84\x8a", b"\x10", 2, True, True, "Skull Chaser"],
5: [0, b"\xff\x86\x8a", b"\x04", 2, True, False, "Bat Minion 1"],
6: [0, b"\x9a\x86\x8a", b"\x04", 2, True, False, "Bat Minion 2"],
7: [0, b"\x69\x86\x8a", b"\x04", 2, True, False, "Bat Minion 3"],
8: [0, b"\xcb\x86\x8a", b"\x04", 2, True, False, "Bat Minion 4"],
# Inca Ruins
10: [1, b"\xb7\x8d\x8a", b"\x0b", 2, True, True, "Slugger"],
11: [1, b"\xb6\x8e\x8a", b"\x0b", 2, True, False, "Scuttlebug"],
12: [1, b"\x1b\x8b\x8a", b"\x0a", 2, True, True, "Mudpit"],
13: [1, b"\x70\x8c\x8a", b"\x0c", 1, True, True, "Four Way"],
14: [2, b"\xee\x97\x8a", b"\x0f", 2, True, True, "Splop"],
15: [2, b"\xbc\x98\x8a", b"\x0e", 3, False, True, "Whirligig"],
16: [2, b"\xc2\x95\x8a", b"\x0d", 2, True, False, "Stone Lord R"], # shoots fire
17: [2, b"\xb3\x95\x8a", b"\x0d", 2, True, True, "Stone Lord D"], # shoots fire
18: [2, b"\xb8\x95\x8a", b"\x0d", 2, True, False, "Stone Lord U"], # shoots fire
19: [2, b"\xbd\x95\x8a", b"\x0d", 2, True, False, "Stone Lord L"], # shoots fire
20: [2, b"\x70\x90\x8a", b"\x0d", 2, True, False, "Stone Guard R"], # throws spears
21: [2, b"\x6b\x90\x8a", b"\x0d", 2, True, False, "Stone Guard L"], # throws spears
22: [2, b"\x61\x90\x8a", b"\x0d", 2, True, True, "Stone Guard D"], # throws spears
23: [2, b"\xc3\x99\x8a", b"\x0e", 1, False, False, "Whirligig (stationary)"],
# Diamond Mine
30: [3, b"\xca\xaa\x8a", b"\x18", 2, True, True, "Flayzer 1"],
31: [3, b"\x54\xaa\x8a", b"\x18", 2, True, False, "Flayzer 2"],
32: [3, b"\x8a\xaa\x8a", b"\x18", 2, True, False, "Flayzer 3"],
33: [3, b"\x03\xb1\x8a", b"\x19", 2, True, True, "Eye Stalker"],
34: [3, b"\xb3\xb0\x8a", b"\x19", 2, True, False, "Eye Stalker (stone)"],
35: [3, b"\xf5\xaf\x8a", b"\x1a", 1, True, True, "Grundit"],
# 36: [3,b"\xf5\xa4\x8a",b"\x1a","Grundit (stationary)"], # Can't randomize this guy
# Sky Garden
40: [4, b"\xb0\xb4\x8a", b"\x1d", 2, True, True, "Blue Cyber"],
41: [4, b"\x20\xc5\x8a", b"\x1b", 2, True, True, "Dynapede 1"],
42: [4, b"\x33\xc5\x8a", b"\x1b", 2, True, False, "Dynapede 2"],
43: [5, b"\xb0\xb8\x8a", b"\x1e", 2, True, True, "Red Cyber"],
44: [5, b"\x16\xc8\x8a", b"\x1c", 2, True, True, "Nitropede"],
# Mu
50: [6, b"\xcc\xe6\x8a", b"\x2b", 2, True, True, "Slipper"],
51: [6, b"\x5c\xe4\x8a", b"\x2a", 2, True, True, "Skuddle"],
52: [6, b"\x9e\xdd\x8a", b"\x28", 2, True, True, "Cyclops"],
53: [6, b"\x6e\xe2\x8a", b"\x29", 3, True, True, "Flasher"],
54: [6, b"\x07\xde\x8a", b"\x28", 2, True, False, "Cyclops (asleep)"],
55: [6, b"\xf4\xe6\x8a", b"\x2b", 2, True, True, "Slipper (falling)"],
# Angel Dungeon
60: [7, b"\x9f\xee\x8a", b"\x2d", 3, False, True, "Dive Bat"],
61: [7, b"\x51\xea\x8a", b"\x2c", 2, True, True, "Steelbones"],
62: [7, b"\x33\xef\x8a", b"\x2e", 1, True, True, "Draco"], # False for now...
63: [7, b"\xc7\xf0\x8a", b"\x2e", 1, True, True, "Ramskull"],
# Great Wall
70: [8, b"\x55\x91\x8b", b"\x33", 2, True, True, "Archer 1"],
71: [8, b"\xfe\x8e\x8b", b"\x33", 2, True, False, "Archer Statue"],
72: [8, b"\xbe\x8d\x8b", b"\x34", 2, True, True, "Eyesore"],
73: [8, b"\x70\x8c\x8b", b"\x35", 3, False, True, "Fire Bug 1"],
74: [8, b"\x70\x8c\x8b", b"\x33", 3, False, False, "Fire Bug 2"],
75: [8, b"\x23\x94\x8b", b"\x32", 2, True, True, "Asp"],
76: [8, b"\x65\x91\x8b", b"\x33", 2, True, False, "Archer 2"],
77: [8, b"\x77\x91\x8b", b"\x33", 2, True, False, "Archer 3"],
78: [8, b"\x72\x8f\x8b", b"\x46", 2, True, False, "Archer Statue (switch) 1"],
79: [8, b"\x4f\x8f\x8b", b"\x33", 2, True, False, "Archer Statue (switch) 2"],
# Mt. Kress
80: [9, b"\xac\x9b\x8b", b"\x3e", 3, True, True, "Skulker (N/S)"],
81: [9, b"\x4e\x9c\x8b", b"\x3e", 3, True, True, "Skulker (E/W)"],
82: [9, b"\x44\x9c\x8b", b"\x3e", 3, True, False, "Skulker (E/W)"],
83: [9, b"\xa2\x9b\x8b", b"\x3e", 3, True, False, "Skulker (E/W)"],
84: [9, b"\x8b\x9e\x8b", b"\x3d", 3, False, True, "Yorrick (E/W)"],
85: [9, b"\x53\x9f\x8b", b"\x3d", 3, False, False, "Yorrick (E/W)"],
86: [9, b"\x0f\x9d\x8b", b"\x3d", 3, False, True, "Yorrick (N/S)"],
87: [9, b"\xcd\x9d\x8b", b"\x3d", 3, False, False, "Yorrick (N/S)"],
88: [9, b"\x3b\x98\x8b", b"\x3f", 3, False, True, "Fire Sprite"],
89: [9, b"\xcf\xa0\x8b", b"\x3c", 2, True, True, "Acid Splasher"],
90: [9, b"\xa1\xa0\x8b", b"\x3c", 2, True, False, "Acid Splasher (stationary E)"],
91: [9, b"\x75\xa0\x8b", b"\x3c", 2, True, False, "Acid Splasher (stationary W)"],
92: [9, b"\x49\xa0\x8b", b"\x3c", 2, True, False, "Acid Splasher (stationary S)"],
93: [9, b"\x1d\xa0\x8b", b"\x3c", 2, True, False, "Acid Splasher (stationary N)"],
# Ankor Wat
100: [10, b"\xd7\xb1\x8b", b"\x49", 2, True, True, "Shrubber"],
101: [10, b"\xb4\xb1\x8b", b"\x49", 2, True, False, "Shrubber 2"],
102: [10, b"\x75\xb2\x8b", b"\x46", 2, True, True, "Zombie"],
103: [10, b"\x4f\xaf\x8b", b"\x4a", 3, True, True, "Zip Fly"], # False for now...
104: [11, b"\x8d\xbd\x8b", b"\x42", 3, True, True, "Goldcap"],
105: [11, b"\x25\xb8\x8b", b"\x45", 2, True, True, "Gorgon"],
106: [11, b"\x17\xb8\x8b", b"\x45", 2, True, False, "Gorgon (jump down)"],
107: [11, b"\xbb\xbf\x8b", b"\x43", 2, True, False, "Frenzie"],
108: [11, b"\xd0\xbf\x8b", b"\x43", 2, True, True, "Frenzie 2"],
109: [11, b"\x66\xbb\x8b", b"\x44", 1, False, True, "Wall Walker"],
110: [11, b"\x66\xbb\x8b", b"\x3a", 1, False, False, "Wall Walker 2"],
111: [11, b"\x5c\xbb\x8b", b"\x44", 1, False, False, "Wall Walker 3"],
112: [11, b"\x5c\xbb\x8b", b"\x3a", 1, False, False, "Wall Walker 4"],
113: [11, b"\xaf\x99\x88", b"\x45", 2, True, False, "Gorgon (block)"],
# Pyramid
120: [12, b"\x5f\xc6\x8b", b"\x4f", 1, True, True, "Mystic Ball (stationary)"],
121: [12, b"\xfc\xc5\x8b", b"\x4f", 2, True, True, "Mystic Ball"],
122: [12, b"\xa3\xc5\x8b", b"\x4f", 2, True, True, "Mystic Ball"],
123: [12, b"\x9d\xc3\x8b", b"\x4e", 2, True, True, "Tuts"],
124: [12, b"\x98\xc7\x8b", b"\x51", 1, True, True, "Blaster"],
125: [12, b"\x84\xc1\x8b", b"\x4c", 2, True, False, "Haunt (stationary)"],
126: [12, b"\xa7\xc1\x8b", b"\x4c", 2, True, True, "Haunt"],
# Babel Tower
# 130: [14,b"\xd7\x99\x8a",b"\x5a","Castoth (boss)"],
# 131: [14,b"\xd5\xd0\x8a",b"\x5b","Viper (boss)"],
# 132: [14,b"\x50\xf1\x8a",b"\x5c","Vampire (boss)"],
# 133: [14,b"\x9c\xf1\x8a",b"\x5c","Vampire (boss)"],
# 134: [14,b"\x00\x80\x8b",b"\x5d","Sand Fanger (boss)"],
# 135: [14,b"\x1a\xa6\x8b",b"\x5e","Mummy Queen (boss)"],
# Jeweler's Mansion
140: [13, b"\xca\xaa\x8a", b"\x61", 2, True, True, "Flayzer"],
141: [13, b"\xf5\xaf\x8a", b"\x63", 1, True, True, "Grundit"],
142: [13, b"\xd8\xb0\x8a", b"\x62", 2, True, False, "Eye Stalker 1"],
143: [13, b"\x03\xb1\x8a", b"\x62", 2, True, True, "Eye Stalker 2"]
# Bosses
# 24: [15,b"\x03\x9b\x8a",b"\x14","Castoth (boss)"],
# 45: [15,b"\x6f\xd1\x8a",b"\x27","Viper (boss)"],
# 55: [15,b"\xf7\xf1\x8a",b"\x2f","Vampire (boss)"],
# 56: [15,b"\xc8\xf3\x8a",b"\x30","Vampire (boss)"],
# 79: [15,b"\x5c\x81\x8b",b"\x36","Sand Fanger (boss)"],
# 128: [15,b"\xb6\xa6\x8b",b"\x50","Mummy Queen (boss)"],
# 143: [15,b"\x09\xf7\x88",b"\x5f","Solid Arm (boss)"],
# 140: [15,b"\xaa\xee\x8c",b"\x54","Dark Gaia"]
}
# Database of non-enemy sprites to disable in enemizer
# FORMAT: { ID: [Enemyset, Event addr, Name]}
self.nonenemy_sprites = {
# Underground Tunnel
0: [0, "a8835", "Movable statue"],
1: [0, "a87ce", "Falling spear 1"],
2: [0, "a87c3", "Falling spear 2"],
3: [0, "a8aae", "Spike ball 1"],
4: [0, "a8a0f", "Spike ball 2"],
5: [0, "a8a7d", "Spike ball 3"],
6: [0, "a8a46", "Spike ball 4"],
7: [0, "a89de", "Spike ball 5"],
# Inca Ruins
10: [1, "9c26f", "Skeleton 1"],
11: [1, "9c798", "Skeleton 2"],
# 12: [1,"9c89d","Skeleton 3"], # Spriteset already restricted for this room
13: [1, "9c8f7", "Skeleton 4"],
14: [1, "a8896", "Broken statue (chest)"],
15: [1, "a88de", "Broken statue (blockade)"],
# Diamond Mine
20: [3, "5d6a8", "Elevator sign"],
21: [3, "aa4f5", "Elevator platform 1"],
22: [3, "aa50c", "Elevator platform 2"],
23: [3, "aa4e2", "Elevator platform 3"],
# Sky Garden
30: [4, "5f8c0", "Broken statue"],
31: [4, "ac0fe", "Sword statue 1"],
# 32: [4,"ac150","Sword statue 2"],
33: [4, "ac3b3", "Sword statue 3"],
# 34: [4,"ac409","Sword statue 4"],
35: [4, "accd4", "Fire snake (top)"],
36: [5, "accf1", "Fire snake (bottom)"],
# Mu
40: [6, "69ce9", "Floor spikes 1"],
41: [6, "69d1f", "Floor spikes 2"],
42: [6, "ae943", "Fire snake"],
# 43: [6,"69d4d","Donut"],
# Angel
50: [7, "6d56f", "Flame 1"],
51: [7, "6d57e", "Flame 2"],
52: [7, "6d58f", "Flame 3"],
# Great Wall
60: [8, "b8c30", "Wall spike 1"],
61: [8, "b8bf8", "Wall spike 2"],
62: [8, "7bd17", "Wall spike 3"],
63: [8, "7bd46", "Wall spike 4"],
64: [8, "7bd75", "Wall spike 5"],
65: [8, "7bce8", "Wall spike 5"],
# Mt Kress (nothing)
# Ankor Wat
80: [11, "89f2c", "Floating crystal"],
81: [11, "89ffc", "Skeleton 1"],
82: [11, "8a25e", "Skeleton 2"]
# Pyramid
# 90: [12,"8b6a2","Warp point"],
# 91: [12,"8cd6c","Warp point"],
# Jeweler's Mansion (nothing)
}
# Database of overworld menus
# FORMAT: { ID: [ShuffleID (0=no shuffle), Menu_ID, FromRegion, ToRegion, ROM_EntranceData, ROM_TextLoc, MenuText, ContinentName, AreaName]}
self.overworld_menus = {
# SW Continent "\x01"
1: [0, b"\x01", 10, 20, "3b95b", "0cafd", "3b590", "SW Continent", "South Cape"],
2: [0, b"\x01", 10, 30, "3b96b", "0cb26", "3b5a9", "SW Continent", "Edward's"],
3: [0, b"\x01", 10, 50, "3b97b", "0cb5b", "3b5b5", "SW Continent", "Itory"],
4: [0, b"\x01", 10, 60, "3b98b", "4f453", "3b5c2", "SW Continent", "Moon Tribe"],
5: [0, b"\x01", 10, 63, "3b99b", "0cb74", "3b59c", "SW Continent", "Inca"],
# SE Continent "\x07"
6: [0, b"\x07", 11, 102, "3b9ab", "5aab7", "3b5ef", "SE Continent", "Diamond Coast"],
7: [0, b"\x07", 11, 110, "3b9bb", "0cba3", "3b5e3", "SE Continent", "Freejia"],
8: [0, b"\x07", 11, 133, "3b9cb", "0cbbc", "3b608", "SE Continent", "Diamond Mine"],
9: [0, b"\x07", 11, 160, "3b9db", "5e31e", "3b615", "SE Continent", "Neil's"],
10: [0, b"\x07", 11, 162, "3b9eb", "5e812", "3b5fc", "SE Continent", "Nazca"],
# NE Continent "\x0a"
11: [0, b"\x0a", 12, 250, "3ba1b", "0cbeb", "3b642", "NE Continent", "Angel Village"],
12: [0, b"\x0a", 12, 280, "3ba2b", "0cc30", "3b636", "NE Continent", "Watermia"],
13: [0, b"\x0a", 12, 290, "3ba3b", "0cc49", "3b64f", "NE Continent", "Great Wall"],
# N Continent "\x0f"
14: [0, b"\x0f", 13, 310, "3ba4b", "0cc8e", "3b660", "N Continent", "Euro"],
15: [0, b"\x0f", 13, 330, "3ba5b", "0cca7", "3b66c", "N Continent", "Mt. Temple"],
16: [0, b"\x0f", 13, 350, "3ba6b", "0ccec", "3b679", "N Continent", "Native's Village"],
17: [0, b"\x0f", 13, 360, "3ba7b", "0cd05", "3b685", "N Continent", "Ankor Wat"],
# NW Continent Overworld "\x16"
18: [0, b"\x16", 14, 400, "3ba8b", "0cd24", "3b696", "NW Continent", "Dao"],
19: [0, b"\x16", 14, 410, "3ba9b", "0cd55", "3b6a3", "NW Continent", "Pyramid"]
}
# Database of special map exits that don't conform to the typical "02 26" format, IDs correspond to self.exits
# FORMAT: { ID: [MapAddr, Xaddr, Yaddr, FaceDirAddr, CameraAddr]}
self.exits_detailed = {
15: ["8ce31", "8ce37", "8ce40", "", "8ce49"] # Mummy Queen exit
}
# Database of map exits
# FORMAT: { ID: [CoupleID (0 if one-way), ShuffleTo (0 if no shuffle), ShuffleFrom (0 if no shuffle), FromRegion, ToRegion,
# ROM_Location, DestString,BossFlag, DungeonFlag, DungeonEntranceFlag, Name]}
self.exits = {
# Bosses
1: [ 2, 0, 0, 78, 97, "18872", b"\x29\x78\x00\xC0\x00\x00\x00\x11", True, True, False, "Castoth entrance (in)"],
2: [ 1, 0, 0, 0, 0, "189e4", b"\x1E\x68\x00\x00\x01\x03\x00\x24", True, True, False, "Castoth entrance (out)"],
3: [ 0, 0, 0, 104, 102, "584cc", b"\x30\x48\x00\x10\x01\x83\x00\x21", True, True, False, "Diamond Coast passage (Gold Ship)"],
4: [ 5, 0, 0, 171, 198, "18e20", b"\x55\x70\x00\xE0\x01\x00\x00\x22", True, True, False, "Viper entrance (in)"],
5: [ 4, 0, 0, 0, 0, "19006", b"\x4C\xF8\x00\x30\x00\x03\x00\x22", True, True, False, "Viper entrance (out)"],
6: [ 0, 0, 0, 198, 200, "acece", b"\x5A\x90\x00\x70\x00\x83\x00\x14", True, True, False, "Seaside Palace passage (Viper)"],
7: [ 8, 0, 0, 241, 243, "69c62", b"\x67\x78\x01\xd0\x01\x80\x01\x22", True, True, False, "Vampires entrance (in)"],
8: [ 7, 0, 0, 0, 0, "193f8", b"\x65\xb8\x00\x80\x02\x03\x00\x44", True, True, False, "Vampires entrance (out)"],
9: [ 0, 0, 0, 242, 212, "193ea", b"\x5f\x80\x00\x50\x00\x83\x00\x44", True, True, False, "Vampires exit"],
10: [11, 0, 0, 301, 302, "19c2a", b"\x8A\x50\x00\x90\x00\x87\x00\x33", True, True, False, "Sand Fanger entrance (in)"],
11: [10, 0, 0, 0, 0, "19c78", b"\x88\xE0\x03\x90\x00\x06\x00\x14", True, True, False, "Sand Fanger entrance (out)"],
12: [ 0, 0, 0, 303, 290, "19c84", b"\x82\x10\x00\x90\x00\x87\x00\x18", True, True, False, "Sand Fanger exit"],
13: [14, 0, 0, 414, 448, "8cdcf", b"\xDD\xF8\x00\xB0\x01\x00\x00\x22", True, True, False, "Mummy Queen entrance (in)"],
14: [13, 0, 0, 0, 0, "", b"\xCC\xF8\x01\x20\x01\x03\x00\x44", True, True, False, "Mummy Queen entrance (out)"], # fake
15: [ 0, 0, 0, 448, 415, "", b"\xCD\x70\x00\x90\x00\x83\x00\x11", True, True, False, "Mummy Queen exit"], # This one's dumb, see exits_detailed
16: [17, 0, 0, 470, 471, "1a8c2", b"\xE3\xD8\x00\x90\x03\x83\x30\x44", True, True, False, "Babel entrance (in)"],
17: [16, 0, 0, 0, 0, "1a8d0", b"\xE2\xD0\x00\xE0\x00\x03\x00\x84", True, True, False, "Babel entrance (out)"],
18: [ 0, 0, 0, 472, 400, "9804a", b"\xC3\x10\x02\x90\x00\x83\x00\x23", True, True, False, "Dao passage (Babel)"],
19: [20, 0, 0, 481, 482, "1a94e", b"\xEA\x78\x00\xC0\x00\x00\x00\x11", True, True, False, "Solid Arm entrance (in)"],
20: [19, 0, 0, 0, 0, "", b"\xE9\x78\x03\x90\x00\x03\x00\x44", True, True, False, "Solid Arm entrance (out)"], # fake
21: [ 0, 0, 0, 472, 400, "", b"\xC3\x10\x02\x90\x00\x83\x00\x23", True, True, False, "Dao passage (Solid Arm)"], # fake
# 21: [ 0, 0, 0, 482, 472, "", b"\xE3\x80\x02\xB0\x01\x80\x10\x23", True, True, False, "Babel passage (Solid Arm)"], # This one stays, @98115
# Passage Menus
22: [0, 0, 0, 15, 28, "", b"", False, False, False, "Seth: Passage 1 (South Cape)"],
23: [0, 0, 0, 15, 102, "", b"", False, False, False, "Seth: Passage 2 (Diamond Coast)"],
24: [0, 0, 0, 15, 280, "", b"", False, False, False, "Seth: Passage 3 (Watermia)"],
25: [0, 0, 0, 16, 60, "", b"", False, False, False, "Moon Tribe: Passage 1 (Moon Tribe)"],
26: [0, 0, 0, 16, 200, "", b"", False, False, False, "Moon Tribe: Passage 2 (Seaside Palace)"],
27: [0, 0, 0, 17, 161, "", b"", False, False, False, "Neil: Passage 1 (Neil's)"],
28: [0, 0, 0, 17, 314, "", b"", False, False, False, "Neil: Passage 2 (Euro)"],
29: [0, 0, 0, 17, 402, "", b"", False, False, False, "Neil: Passage 3 (Dao)"],
30: [0, 0, 0, 17, 460, "", b"", False, False, False, "Neil: Passage 4 (Babel)"],
# South Cape
31: [32, 0, 0, 20, 22, "18444", b"", False, False, False, "South Cape: School main (in)"], # Duplicate exit at 18438?
32: [31, 0, 0, 0, 0, "1856c", b"", False, False, False, "South Cape: School main (out)"],
33: [34, 0, 0, 21, 22, "18498", b"", False, False, False, "South Cape: School roof (in)"],
34: [33, 0, 0, 0, 0, "18560", b"", False, False, False, "South Cape: School roof (out)"],
35: [36, 0, 0, 20, 23, "18474", b"", False, False, False, "South Cape: Will's House (in)"],
36: [35, 0, 0, 0, 0, "1852a", b"", False, False, False, "South Cape: Will's House (out)"],
37: [38, 0, 0, 20, 24, "18480", b"", False, False, False, "South Cape: East House (in)"],
38: [37, 0, 0, 0, 0, "18552", b"", False, False, False, "South Cape: East House (out)"],
39: [40, 0, 0, 20, 27, "1845c", b"", False, False, False, "South Cape: Erik's House main (in)"],
40: [39, 0, 0, 0, 0, "184e8", b"", False, False, False, "South Cape: Erik's House main (out)"],
41: [42, 0, 0, 20, 27, "184a4", b"", False, False, False, "South Cape: Erik's House roof (in)"],
42: [41, 0, 0, 0, 0, "184f4", b"", False, False, False, "South Cape: Erik's House roof (out)"],
43: [44, 0, 0, 20, 26, "18450", b"", False, False, False, "South Cape: Lance's House (in)"],
44: [43, 0, 0, 0, 0, "184c0", b"", False, False, False, "South Cape: Lance's House (out)"],
45: [46, 0, 0, 20, 25, "18468", b"", False, False, False, "South Cape: Seth's House (in)"],
46: [45, 0, 0, 0, 0, "1851c", b"", False, False, False, "South Cape: Seth's House (out)"],
47: [48, 0, 0, 20, 28, "1848c", b"", False, False, False, "South Cape: Seaside Cave (in)"],
48: [47, 0, 0, 0, 0, "4be6a", b"", False, False, False, "South Cape: Seaside Cave (out)"],
# Edward's / Prison
50: [51, 0, 0, 31, 49, "1857c", b"", False, True, True, "Tunnel back entrance (in)"],
51: [50, 0, 0, 0, 0, "186f4", b"", False, True, True, "Tunnel back entrance (out)"],
52: [53, 0, 0, 33, 40, "1860c", b"\x0C\x58\x00\x50\x00\x83\x00\x12", False, True, True, "Tunnel entrance (in)"], # set checkpoint
53: [52, 0, 0, 0, 0, "18626", b"", False, True, True, "Tunnel entrance (out)"],
54: [ 0, 0, 0, 30, 32, "4c755", b"", False, False, False, "Prison entrance (king)"],
#55: [54, 0, 0, 0, 2, "", b"\x0a\xe0\x01\x60\x01\x03\x20\x34", False, False, False, "Prison exit (king), fake"],
# Tunnel
60: [61, 0, 0, 40, 41, "18632", b"", False, True, False, "Tunnel: Map 12 to Map 13"],
61: [60, 0, 0, 0, 0, "18640", b"", False, True, False, "Tunnel: Map 13 to Map 12"],
62: [63, 0, 0, 41, 42, "1864c", b"", False, True, False, "Tunnel: Map 13 to Map 14"],
63: [62, 0, 0, 0, 0, "1865a", b"", False, True, False, "Tunnel: Map 14 to Map 13"],
64: [65, 0, 0, 42, 43, "18666", b"", False, True, False, "Tunnel: Map 14 to Map 15"],
65: [64, 0, 0, 0, 0, "18680", b"", False, True, False, "Tunnel: Map 15 to Map 14"],
66: [67, 0, 0, 43, 44, "1868c", b"", False, True, False, "Tunnel: Map 15 to Map 16"],
67: [66, 0, 0, 0, 0, "1869a", b"", False, True, False, "Tunnel: Map 16 to Map 15"],
68: [69, 0, 0, 43, 45, "18674", b"", False, True, False, "Tunnel: Map 15 to Map 17"],
69: [68, 0, 0, 0, 0, "186a8", b"", False, True, False, "Tunnel: Map 17 to Map 15"],
70: [71, 0, 0, 46, 47, "186b4", b"", False, True, False, "Tunnel: Map 17 to Map 18"],
71: [70, 0, 0, 0, 0, "186c2", b"", False, True, False, "Tunnel: Map 18 to Map 17"],
72: [73, 0, 0, 48, 49, "186ce", b"", False, True, False, "Tunnel: Map 18 to Map 19"],
73: [72, 0, 0, 0, 0, "186e8", b"", False, True, False, "Tunnel: Map 19 to Map 18"],
# Itory
80: [81, 0, 0, 51, 53, "18704", b"", False, False, False, "Itory: West House (in)"],
81: [80, 0, 0, 0, 0, "1874e", b"", False, False, False, "Itory: West House (out)"],
82: [83, 0, 0, 51, 54, "18728", b"", False, False, False, "Itory: North House (in)"],
83: [82, 0, 0, 0, 0, "18776", b"", False, False, False, "Itory: North House (out)"],
84: [85, 0, 0, 51, 55, "18710", b"", False, False, False, "Itory: Lilly Front Door (in)"],
85: [84, 0, 0, 0, 0, "1875c", b"", False, False, False, "Itory: Lilly Front Door (out)"],
86: [87, 0, 0, 52, 55, "1871c", b"", False, False, False, "Itory: Lilly Back Door (in)"],
87: [86, 0, 0, 0, 0, "18768", b"", False, False, False, "Itory: Lilly Back Door (out)"],
88: [89, 0, 0, 51, 56, "18734", b"", False, False, False, "Itory Cave (in)"],
89: [88, 0, 0, 0, 0, "18784", b"", False, False, False, "Itory Cave (out)"],
90: [91, 0, 0, 57, 58, "18790", b"", False, False, False, "Itory Cave Hidden Room (in)"], # always linked?
91: [90, 0, 0, 0, 0, "1879c", b"", False, False, False, "Itory Cave Hidden Room (out)"],
# Moon Tribe
100: [101, 0, 0, 60, 61, "187b6", b"", False, False, False, "Moon Tribe Cave (in)"],
101: [100, 0, 0, 0, 0, "187c4", b"", False, False, False, "Moon Tribe Cave (out)"],
102: [ 0, 0, 0, 64, 170, "9d1ea", b"", False, True, True, "Moon Tribe: Sky Garden passage"],
# Inca
110: [111, 0, 0, 63, 70, "187d2", b"", False, True, True, "Inca Ruins entrance (in)"],
111: [110, 0, 0, 0, 0, "187e0", b"", False, True, True, "Inca Ruins entrance (out)"],
#114: [ 0, 0, 0, 65, 102, "", b"", False, False, True, "Inca: Diamond Coast passage"],
# Inca Ruins
120: [121, 0, 0, 70, 89, "", b"", False, True, False, "Inca: Map 29 to Map 37 (E)"],
121: [120, 0, 0, 0, 0, "", b"", False, True, False, "Inca: Map 37 to Map 29 (E)"],
122: [123, 0, 0, 89, 94, "", b"", False, True, False, "Inca: Map 37 to Map 39"],
123: [122, 0, 0, 0, 0, "", b"", False, True, False, "Inca: Map 39 to Map 37"],
124: [125, 0, 0, 94, 71, "", b"", False, True, False, "Inca: Map 39 to Map 29"],
125: [124, 0, 0, 0, 0, "", b"", False, True, False, "Inca: Map 29 to Map 39"],
126: [127, 0, 0, 90, 72, "", b"", False, True, False, "Inca: Map 37 to Map 29 (W)"],
127: [126, 0, 0, 0, 0, "", b"", False, True, False, "Inca: Map 29 to Map 37 (W)"],
128: [129, 0, 0, 72, 91, "", b"", False, True, False, "Inca: Map 29 to Map 38"],
129: [128, 0, 0, 0, 0, "", b"", False, True, False, "Inca: Map 38 to Map 29"],
130: [131, 0, 0, 73, 80, "", b"", False, True, False, "Inca: Map 29 to Map 32"],
131: [130, 0, 0, 0, 0, "", b"", False, True, False, "Inca: Map 32 to Map 29"],
132: [133, 0, 0, 81, 85, "", b"", False, True, False, "Inca: Map 32 to Map 35"],
133: [132, 0, 0, 0, 0, "", b"", False, True, False, "Inca: Map 35 to Map 32"],
134: [135, 0, 0, 85, 74, "", b"", False, True, False, "Inca: Map 35 to Map 29"],
135: [134, 0, 0, 0, 0, "", b"", False, True, False, "Inca: Map 35 to Map 29"],
136: [137, 0, 0, 74, 79, "", b"", False, True, False, "Inca: Map 29 to Map 31"],
137: [136, 0, 0, 0, 0, "", b"", False, True, False, "Inca: Map 31 to Map 29"],
138: [139, 0, 0, 79, 95, "", b"", False, True, False, "Inca: Map 31 to Map 40"],
139: [138, 0, 0, 0, 0, "", b"", False, True, False, "Inca: Map 40 to Map 31"],
140: [141, 0, 0, 96, 76, "", b"", False, True, False, "Inca: Map 40 to Map 29"],
141: [140, 0, 0, 0, 0, "", b"", False, True, False, "Inca: Map 29 to Map 40"],
142: [143, 0, 0, 86, 82, "", b"", False, True, False, "Inca: Map 35 to Map 33"],
143: [142, 0, 0, 0, 0, "", b"", False, True, False, "Inca: Map 33 to Map 35"],
144: [145, 0, 0, 83, 75, "", b"", False, True, False, "Inca: Map 33 to Map 29"],
145: [144, 0, 0, 0, 0, "", b"", False, True, False, "Inca: Map 29 to Map 33"],
146: [147, 0, 0, 99, 84, "", b"", False, True, False, "Inca: Map 29 to Map 34"], # Special case to allow for Z-ladder glitch
147: [146, 0, 0, 84, 75, "", b"", False, True, False, "Inca: Map 34 to Map 29"],
148: [149, 0, 0, 84, 93, "", b"", False, True, False, "Inca: Map 34 to Map 38"],
149: [148, 0, 0, 0, 0, "", b"", False, True, False, "Inca: Map 38 to Map 34"],
150: [151, 0, 0, 84, 87, "", b"", False, True, False, "Inca: Map 34 to Map 36"],
151: [150, 0, 0, 0, 0, "", b"", False, True, False, "Inca: Map 36 to Map 34"],
152: [153, 0, 0, 88, 77, "", b"", False, True, False, "Inca: Map 36 to Map 30"],
153: [152, 0, 0, 0, 0, "", b"", False, True, False, "Inca: Map 30 to Map 36"],
154: [ 0, 0, 0, 98, 100, "", b"", False, True, False, "Gold Ship entrance"],
# Gold Ship
160: [161, 0, 0, 100, 101, "", b"", False, False, False, "Gold Ship Interior (in)"],
161: [160, 0, 0, 0, 0, "", b"", False, False, False, "Gold Ship Interior (out)"],
# Diamond Coast
172: [173, 0, 0, 102, 103, "18aa0", b"", False, False, False, "Coast House (in)"],
173: [172, 0, 0, 0, 0, "18aae", b"", False, False, False, "Coast House (out)"],
# Freejia
182: [183, 0, 0, 110, 116, "18aec", b"", False, False, False, "Freejia: West House (in)"],
183: [182, 0, 0, 0, 0, "18b9c", b"", False, False, False, "Freejia: West House (out)"],
184: [185, 0, 0, 110, 117, "18af8", b"", False, False, False, "Freejia: 2-story House (in)"],
185: [184, 0, 0, 0, 0, "18bc4", b"", False, False, False, "Freejia: 2-story House (out)"],
186: [187, 0, 0, 111, 117, "18b04", b"", False, False, False, "Freejia: 2-story Roof (in)"],
187: [186, 0, 0, 0, 0, "18bd0", b"", False, False, False, "Freejia: 2-story Roof (out)"],
188: [189, 0, 0, 110, 118, "18b10", b"", False, False, False, "Freejia: Lovers' House (in)"],
189: [188, 0, 0, 0, 0, "18bf8", b"", False, False, False, "Freejia: Lovers' House (out)"],
190: [191, 0, 0, 110, 119, "18b1c", b"", False, False, False, "Freejia: Hotel (in)"],
191: [190, 0, 0, 0, 0, "18c20", b"", False, False, False, "Freejia: Hotel (out)"],
192: [193, 0, 0, 119, 120, "18c2c", b"", False, False, False, "Freejia: Hotel West Room (in)"],
193: [192, 0, 0, 0, 0, "18c44", b"", False, False, False, "Freejia: Hotel West Room (out)"],
194: [195, 0, 0, 119, 121, "18c38", b"", False, False, False, "Freejia: Hotel East Room (in)"],
195: [194, 0, 0, 0, 0, "18c50", b"", False, False, False, "Freejia: Hotel East Room (out)"],
196: [197, 0, 0, 110, 122, "18b28", b"", False, False, False, "Freejia: Laborer House (in)"], # might take this out?
197: [196, 0, 0, 0, 0, "18c84", b"", False, False, False, "Freejia: Laborer House (out)"],
198: [199, 0, 0, 112, 122, "18b34", b"", False, False, False, "Freejia: Laborer Roof (in)"],
199: [198, 0, 0, 0, 0, "18c78", b"", False, False, False, "Freejia: Laborer Roof (out)"],
200: [201, 0, 0, 110, 123, "18b40", b"", False, False, False, "Freejia: Messy House (in)"],
201: [200, 0, 0, 0, 0, "18c92", b"", False, False, False, "Freejia: Messy House (out)"],
202: [203, 0, 0, 110, 124, "18abc", b"", False, False, False, "Freejia: Erik House (in)"],
203: [202, 0, 0, 0, 0, "18b5a", b"", False, False, False, "Freejia: Erik House (out)"],
204: [205, 0, 0, 110, 125, "18ac8", b"", False, False, False, "Freejia: Dark Space House (in)"],
205: [204, 0, 0, 0, 0, "18b68", b"", False, False, False, "Freejia: Dark Space House (out)"],
206: [207, 0, 0, 110, 126, "18ad4", b"", False, False, False, "Freejia: Labor Trade House (in)"],
207: [206, 0, 0, 0, 0, "18b82", b"", False, False, False, "Freejia: Labor Trade House (out)"],
208: [209, 0, 0, 113, 126, "18ae0", b"", False, False, False, "Freejia: Labor Trade Roof (in)"],
209: [208, 0, 0, 0, 0, "18b8e", b"", False, False, False, "Freejia: Labor Trade Roof (out)"],
210: [211, 0, 0, 114, 127, "18b4c", b"", False, False, False, "Freejia: Labor Market (in)"],
211: [210, 0, 0, 0, 0, "18ca0", b"", False, False, False, "Freejia: Labor Market (out)"],
# Diamond Mine
222: [223, 0, 0, 133, 134, "", b"", False, True, False, "Diamond Mine: Map 62 to Map 63"],
223: [222, 0, 0, 0, 0, "", b"", False, True, False, "Diamond Mine: Map 63 to Map 62"],
224: [225, 0, 0, 135, 140, "", b"", False, True, False, "Diamond Mine: Map 63 to Map 66"],
225: [224, 0, 0, 0, 0, "", b"", False, True, False, "Diamond Mine: Map 66 to Map 63"],
226: [227, 0, 0, 134, 136, "", b"", False, True, False, "Diamond Mine: Map 63 to Map 64"],
227: [226, 0, 0, 0, 0, "", b"", False, True, False, "Diamond Mine: Map 64 to Map 63"],
228: [229, 0, 0, 136, 138, "", b"", False, True, False, "Diamond Mine: Map 64 to Map 65"],
229: [228, 0, 0, 0, 0, "", b"", False, True, False, "Diamond Mine: Map 65 to Map 64"],
230: [231, 0, 0, 139, 143, "", b"", False, True, False, "Diamond Mine: Map 65 to Map 66"],
231: [230, 0, 0, 0, 0, "", b"", False, True, False, "Diamond Mine: Map 66 to Map 65"],
232: [233, 0, 0, 138, 130, "", b"", False, True, False, "Diamond Mine: Map 65 to Map 61"],
233: [232, 0, 0, 0, 0, "", b"", False, True, False, "Diamond Mine: Map 61 to Map 65"],
234: [235, 0, 0, 132, 142, "", b"", False, True, False, "Diamond Mine: Map 61 to Map 66"],
235: [234, 0, 0, 0, 0, "", b"", False, True, False, "Diamond Mine: Map 66 to Map 61"],
236: [237, 0, 0, 140, 144, "", b"", False, True, False, "Diamond Mine: Map 66 to Map 67 (1)"],
237: [236, 0, 0, 0, 0, "", b"", False, True, False, "Diamond Mine: Map 67 to Map 66 (1)"],
238: [239, 0, 0, 145, 141, "", b"", False, True, False, "Diamond Mine: Map 67 to Map 66 (2)"],
239: [238, 0, 0, 0, 0, "", b"", False, True, False, "Diamond Mine: Map 66 to Map 67 (2)"],
240: [241, 0, 0, 141, 146, "", b"", False, True, False, "Diamond Mine: Map 66 to Map 68"],
241: [240, 0, 0, 0, 0, "", b"", False, True, False, "Diamond Mine: Map 68 to Map 66"],
242: [243, 0, 0, 146, 148, "", b"", False, True, False, "Diamond Mine: Map 68 to Map 69"],
243: [242, 0, 0, 0, 0, "", b"", False, True, False, "Diamond Mine: Map 69 to Map 68"],
244: [245, 0, 0, 146, 149, "", b"", False, True, False, "Diamond Mine: Map 68 to Map 70"],
245: [244, 0, 0, 0, 0, "", b"", False, True, False, "Diamond Mine: Map 70 to Map 68"],
246: [247, 0, 0, 147, 150, "", b"", False, True, False, "Diamond Mine: Map 68 to Map 71"],
247: [246, 0, 0, 0, 0, "", b"", False, True, False, "Diamond Mine: Map 71 to Map 68"],
# Nazca
260: [261, 0, 0, 162, 170, "5e6a2", b"\x4C\x68\x01\x40\x00\x83\x00\x22", False, True, True, "Nazca: Sky Garden entrance"],
261: [260, 0, 0, 0, 0, "5f429", b"\x4B\xe0\x01\xc0\x02\x03\x00\x44", False, True, True, "Nazca: Sky Garden exit"],
# Sky Garden
#270: [ 0, 0, 0, 171, 16, "", b"", False, True, True, "Moon Tribe: Sky Garden passage"],
273: [274, 0, 0, 170, 172, "", b"", False, True, False, "Sky Garden: Map 76 to Map 77"],
274: [273, 0, 0, 0, 0, "", b"", False, True, False, "Sky Garden: Map 77 to Map 76"],
275: [276, 0, 0, 170, 176, "", b"", False, True, False, "Sky Garden: Map 76 to Map 79"],
276: [275, 0, 0, 0, 0, "", b"", False, True, False, "Sky Garden: Map 79 to Map 76"],
277: [278, 0, 0, 170, 181, "", b"", False, True, False, "Sky Garden: Map 76 to Map 81"],
278: [277, 0, 0, 0, 0, "", b"", False, True, False, "Sky Garden: Map 81 to Map 76"],
279: [280, 0, 0, 170, 190, "", b"", False, True, False, "Sky Garden: Map 76 to Map 83"],
280: [279, 0, 0, 0, 0, "", b"", False, True, False, "Sky Garden: Map 83 to Map 76"],
281: [282, 0, 0, 172, 175, "", b"", False, True, False, "Sky Garden: Map 77 to Map 78 (E)"], # Room 1
282: [281, 0, 0, 0, 0, "", b"", False, True, False, "Sky Garden: Map 78 to Map 77 (W)"],
283: [284, 0, 0, 175, 173, "", b"", False, True, False, "Sky Garden: Map 78 to Map 77 (SE)"],
284: [283, 0, 0, 0, 0, "", b"", False, True, False, "Sky Garden: Map 77 to Map 78 (SW)"],
285: [286, 0, 0, 175, 174, "", b"", False, True, False, "Sky Garden: Map 78 to Map 77 (SW)"],
286: [285, 0, 0, 0, 0, "", b"", False, True, False, "Sky Garden: Map 77 to Map 78 (SE)"],
287: [288, 0, 0, 176, 169, "", b"", False, True, False, "Sky Garden: Map 79 to Map 86"], # Room 2
288: [287, 0, 0, 0, 0, "", b"", False, True, False, "Sky Garden: Map 86 to Map 79"],
289: [290, 0, 0, 176, 179, "", b"", False, True, False, "Sky Garden: Map 79 to Map 80 (NE)"],
290: [289, 0, 0, 0, 0, "", b"", False, True, False, "Sky Garden: Map 80 to Map 79 (NW)"],
291: [292, 0, 0, 179, 177, "", b"", False, True, False, "Sky Garden: Map 80 to Map 79 (N)"],
292: [291, 0, 0, 0, 0, "", b"", False, True, False, "Sky Garden: Map 79 to Map 80 (N)"],
293: [294, 0, 0, 178, 180, "", b"", False, True, False, "Sky Garden: Map 79 to Map 80 (S)"],
294: [293, 0, 0, 0, 0, "", b"", False, True, False, "Sky Garden: Map 80 to Map 79 (S)"],
295: [296, 0, 0, 168, 186, "", b"", False, True, False, "Sky Garden: Map 81 to Map 82 (NE)"], # Room 3
296: [295, 0, 0, 0, 0, "", b"", False, True, False, "Sky Garden: Map 82 to Map 81 (NW)"],
297: [298, 0, 0, 182, 188, "", b"", False, True, False, "Sky Garden: Map 81 to Map 82 (NW)"],
298: [297, 0, 0, 0, 0, "", b"", False, True, False, "Sky Garden: Map 82 to Map 81 (NE)"],
299: [300, 0, 0, 184, 187, "", b"", False, True, False, "Sky Garden: Map 81 to Map 82 (SE)"],
300: [299, 0, 0, 0, 0, "", b"", False, True, False, "Sky Garden: Map 82 to Map 81 (SW)"],
301: [302, 0, 0, 191, 196, "", b"", False, True, False, "Sky Garden: Map 83 to Map 84 (NW)"], # Room 4
302: [301, 0, 0, 0, 0, "", b"", False, True, False, "Sky Garden: Map 84 to Map 83 (NE)"],
303: [304, 0, 0, 192, 195, "", b"", False, True, False, "Sky Garden: Map 83 to Map 84 (C)"],
304: [303, 0, 0, 0, 0, "", b"", False, True, False, "Sky Garden: Map 84 to Map 83 (C)"],
305: [306, 0, 0, 197, 193, "", b"", False, True, False, "Sky Garden: Map 84 to Map 83 (SE)"],
306: [305, 0, 0, 0, 0, "", b"", False, True, False, "Sky Garden: Map 83 to Map 84 (SW)"],
307: [308, 0, 0, 167, 195, "", b"", False, True, False, "Sky Garden: Map 83 to Map 84 (E)"],
308: [307, 0, 0, 0, 0, "", b"", False, True, False, "Sky Garden: Map 84 to Map 83 (W)"],
# Seaside Palace
310: [311, 0, 0, 211, 201, "69759", b"", False, False, False, "Seaside entrance"], # ALWAYS LINKED
311: [310, 0, 0, 0, 0, "1906a", b"", False, False, False, "Seaside exit"],
312: [313, 0, 0, 200, 202, "19046", b"", False, False, False, "Seaside: Area 1 NE Room (in)"],
313: [312, 0, 0, 0, 0, "19114", b"", False, False, False, "Seaside: Area 1 NE Room (out)"],
314: [315, 0, 0, 200, 203, "19052", b"", False, False, False, "Seaside: Area 1 NW Room (in)"],
315: [314, 0, 0, 0, 0, "19120", b"", False, False, False, "Seaside: Area 1 NW Room (out)"],
316: [317, 0, 0, 200, 204, "1905e", b"", False, False, False, "Seaside: Area 1 SE Room (in)"],
317: [316, 0, 0, 0, 0, "1912c", b"", False, False, False, "Seaside: Area 1 SE Room (out)"],
318: [319, 0, 0, 200, 205, "1903a", b"", False, False, False, "Seaside: Area 2 entrance"],
319: [318, 0, 0, 0, 0, "19146", b"", False, False, False, "Seaside: Area 2 exit"],
320: [321, 0, 0, 205, 207, "1915e", b"", False, False, False, "Seaside: Area 2 SW Room (in)"],
321: [320, 0, 0, 0, 0, "19138", b"", False, False, False, "Seaside: Area 2 SW Room (out)"],
322: [323, 0, 0, 205, 209, "19152", b"", False, False, False, "Seaside: Fountain (in)"],
323: [322, 0, 0, 0, 0, "191d4", b"", False, False, False, "Seaside: Fountain (out)"],
# Mu
330: [331, 0, 0, 210, 212, "191ee", b"", False, True, True, "Mu entrance"],
331: [330, 0, 0, 0, 0, "191fc", b"", False, True, True, "Mu exit"],
332: [333, 0, 0, 212, 217, "", b"", False, True, False, "Mu: Map 95 to Map 96"],
333: [332, 0, 0, 0, 0, "", b"", False, True, False, "Mu: Map 96 to Map 95"],
334: [335, 0, 0, 217, 220, "", b"", False, True, False, "Mu: Map 96 to Map 97 (top)"],
335: [334, 0, 0, 0, 0, "", b"", False, True, False, "Mu: Map 97 to Map 96 (top)"],
336: [337, 0, 0, 220, 231, "", b"", False, True, False, "Mu: Map 97 to Map 99"],
337: [336, 0, 0, 0, 0, "", b"", False, True, False, "Mu: Map 99 to Map 97"],
338: [339, 0, 0, 220, 225, "", b"", False, True, False, "Mu: Map 97 to Map 98 (top)"],
339: [338, 0, 0, 0, 0, "", b"", False, True, False, "Mu: Map 98 to Map 97 (top)"],
340: [341, 0, 0, 218, 222, "", b"", False, True, False, "Mu: Map 96 to Map 97 (middle)"],
341: [340, 0, 0, 0, 0, "", b"", False, True, False, "Mu: Map 97 to Map 96 (middle)"],
342: [343, 0, 0, 223, 227, "", b"", False, True, False, "Mu: Map 97 to Map 98 (middle)"],
343: [342, 0, 0, 0, 0, "", b"", False, True, False, "Mu: Map 98 to Map 97 (middle)"],
# 344: [345, 0, 0, 000, 000, "", b"", False, True, False, "Mu: Map 95 to Map 98 (middle)"],
# 345: [344, 0, 0, 0, 0, "", b"", False, True, False, "Mu: Map 98 to Map 95 (middle)"],
346: [347, 0, 0, 227, 233, "", b"", False, True, False, "Mu: Map 98 to Map 100 (middle E)"],
347: [346, 0, 0, 0, 0, "", b"", False, True, False, "Mu: Map 100 to Map 98 (middle E)"],
348: [349, 0, 0, 233, 237, "", b"", False, True, False, "Mu: Map 100 to Map 101 (middle N)"],
349: [348, 0, 0, 0, 0, "", b"", False, True, False, "Mu: Map 101 to Map 100 (middle N)"],
350: [351, 0, 0, 237, 234, "", b"", False, True, False, "Mu: Map 101 to Map 100 (middle S)"],
351: [350, 0, 0, 0, 0, "", b"", False, True, False, "Mu: Map 100 to Map 101 (middle S)"],
352: [353, 0, 0, 234, 228, "", b"", False, True, False, "Mu: Map 100 to Map 98 (middle W)"],
353: [352, 0, 0, 0, 0, "", b"", False, True, False, "Mu: Map 98 to Map 100 (middle W)"],
354: [355, 0, 0, 213, 232, "", b"", False, True, False, "Mu: Map 95 to Map 99"],
355: [354, 0, 0, 0, 0, "", b"", False, True, False, "Mu: Map 99 to Map 95"],
356: [357, 0, 0, 245, 246, "", b"", False, True, False, "Mu: Map 95 to Map 98 (top)"],
357: [356, 0, 0, 0, 0, "", b"", False, True, False, "Mu: Map 98 to Map 95 (top)"],
358: [359, 0, 0, 229, 224, "", b"", False, True, False, "Mu: Map 98 to Map 97 (bottom)"],
359: [358, 0, 0, 0, 0, "", b"", False, True, False, "Mu: Map 97 to Map 98 (bottom)"],
360: [361, 0, 0, 224, 219, "", b"", False, True, False, "Mu: Map 97 to Map 96 (bottom)"],
361: [360, 0, 0, 0, 0, "", b"", False, True, False, "Mu: Map 96 to Map 97 (bottom)"],
362: [363, 0, 0, 230, 216, "", b"", False, True, False, "Mu: Map 98 to Map 95 (bottom)"],
363: [362, 0, 0, 0, 0, "", b"", False, True, False, "Mu: Map 95 to Map 98 (bottom)"],
364: [365, 0, 0, 230, 235, "", b"", False, True, False, "Mu: Map 98 to Map 100 (bottom)"],
365: [364, 0, 0, 0, 0, "", b"", False, True, False, "Mu: Map 100 to Map 98 (bottom)"],
366: [367, 0, 0, 235, 239, "", b"", False, True, False, "Mu: Map 100 to Map 101 (bottom)"],
367: [366, 0, 0, 0, 0, "", b"", False, True, False, "Mu: Map 101 to Map 100 (bottom)"],
368: [369, 0, 0, 239, 240, "", b"", False, True, False, "Mu: Map 101 to Map 102"],
369: [368, 0, 0, 0, 0, "", b"", False, True, False, "Mu: Map 102 to Map 101"],
# Angel Village
382: [383, 0, 0, 250, 210, "1941e", b"", False, False, False, "Angel: Mu Passage (in)"],
383: [382, 0, 0, 0, 0, "191e2", b"", False, False, False, "Angel: Mu Passage (out)"], #custom
384: [385, 0, 0, 250, 251, "1942a", b"", False, False, False, "Angel: Underground entrance (in)"],
385: [384, 0, 0, 0, 0, "19446", b"", False, False, False, "Angel: Underground entrance (out)"],
386: [387, 0, 0, 251, 252, "19452", b"", False, False, False, "Angel: Room 1 (in)"],
387: [386, 0, 0, 0, 0, "194de", b"", False, False, False, "Angel: Room 1 (out)"],
388: [389, 0, 0, 251, 253, "19476", b"", False, False, False, "Angel: Room 2 (in)"],
389: [388, 0, 0, 0, 0, "19502", b"", False, False, False, "Angel: Room 2 (out)"],
390: [391, 0, 0, 251, 254, "1945e", b"", False, False, False, "Angel: Dance Hall (in)"],
391: [390, 0, 0, 0, 0, "1950e", b"", False, False, False, "Angel: Dance Hall (out)"],
392: [393, 0, 0, 251, 255, "1946a", b"", False, False, False, "Angel: DS Room (in)"],
393: [392, 0, 0, 0, 0, "194f6", b"", False, False, False, "Angel: DS Room (out)"],
# Angel Dungeon
400: [401, 0, 0, 251, 260, "19482", b"", False, True, True, "Angel Dungeon entrance"],
401: [400, 0, 0, 0, 0, "19534", b"", False, True, True, "Angel Dungeon exit"],
402: [403, 0, 0, 260, 261, "19528", b"", False, True, False, "Angel Dungeon: Map 109 to Map 110"],
403: [402, 0, 0, 0, 0, "", b"", False, True, False, "Angel Dungeon: Map 110 to Map 109"],
404: [405, 0, 0, 278, 262, "", b"", False, True, False, "Angel Dungeon: Map 110 to Map 111"],
405: [404, 0, 0, 0, 0, "", b"", False, True, False, "Angel Dungeon: Map 111 to Map 110"],
406: [407, 0, 0, 262, 263, "", b"", False, True, False, "Angel Dungeon: Map 111 to Map 112"],
407: [406, 0, 0, 0, 0, "", b"", False, True, False, "Angel Dungeon: Map 112 to Map 111"],
408: [409, 0, 0, 264, 265, "", b"", False, True, False, "Angel Dungeon: Map 112 to Chest"],
409: [408, 0, 0, 0, 0, "", b"", False, True, False, "Angel Dungeon: Chest to Map 112"],
410: [411, 0, 0, 279, 266, "", b"", False, True, False, "Angel Dungeon: Map 112 to Map 113"],
411: [410, 0, 0, 0, 0, "", b"", False, True, False, "Angel Dungeon: Map 113 to Map 112"],
412: [413, 0, 0, 266, 267, "", b"", False, True, False, "Angel Dungeon: Map 113 to Map 114"],
413: [412, 0, 0, 0, 0, "", b"", False, True, False, "Angel Dungeon: Map 114 to Map 113"],
414: [415, 0, 0, 268, 276, "", b"", False, True, False, "Angel Dungeon: Map 114 to Ishtar Foyer"],
415: [414, 0, 0, 0, 0, "", b"", False, True, False, "Angel Dungeon: Ishtar Foyer to Map 114"],
# Ishtar's Studio
420: [421, 0, 0, 277, 269, "196b6", b"", False, False, False, "Ishtar entrance"],
421: [420, 0, 0, 0, 0, "196c2", b"", False, False, False, "Ishtar exit"],
422: [423, 0, 0, 269, 270, "196ce", b"", False, False, False, "Ishtar: Portrait room (in)"],
423: [422, 0, 0, 0, 0, "196f4", b"", False, False, False, "Ishtar: Portrait room (out)"],
424: [425, 0, 0, 269, 271, "196da", b"", False, False, False, "Ishtar: Side room (in)"],
425: [424, 0, 0, 0, 0, "19700", b"", False, False, False, "Ishtar: Side room (out)"],
426: [427, 0, 0, 269, 272, "196e6", b"", False, False, False, "Ishtar: Ishtar's room (in)"],
427: [426, 0, 0, 0, 0, "1970c", b"", False, False, False, "Ishtar: Ishtar's room (out)"],
428: [429, 0, 0, 272, 274, "19718", b"", False, False, False, "Ishtar: Puzzle room (in)"],
429: [428, 0, 0, 0, 0, "197e6", b"", False, False, False, "Ishtar: Puzzle room (out)"],
# Watermia
440: [441, 0, 0, 280, 286, "197f4", b"", False, False, False, "Watermia: Lance House (in)"],
441: [440, 0, 0, 0, 0, "1983e", b"", False, False, False, "Watermia: Lance House (out)"],
442: [443, 0, 0, 280, 282, "19818", b"", False, False, False, "Watermia: DS House (in)"],
443: [442, 0, 0, 0, 0, "19868", b"", False, False, False, "Watermia: DS House (out)"],
444: [445, 0, 0, 280, 283, "1980c", b"", False, False, False, "Watermia: Gambling House (in)"],
445: [444, 0, 0, 0, 0, "1985a", b"", False, False, False, "Watermia: Gambling House (out)"],
446: [447, 0, 0, 280, 284, "19824", b"", False, False, False, "Watermia: West House (in)"],
447: [446, 0, 0, 0, 0, "19882", b"", False, False, False, "Watermia: West House (out)"],
448: [449, 0, 0, 280, 285, "19830", b"", False, False, False, "Watermia: East House (in)"],
449: [448, 0, 0, 0, 0, "19890", b"", False, False, False, "Watermia: East House (out)"],
450: [451, 0, 0, 280, 287, "19800", b"", False, False, False, "Watermia: NW House (in)"],
451: [450, 0, 0, 0, 0, "1984c", b"", False, False, False, "Watermia: NW House (out)"],
452: [453, 0, 0, 288, 311, "", b"", False, False, True, "Watermia: Euro passage"],
453: [452, 0, 0, 0, 0, "", b"", False, False, True, "Euro: Watermia passage"],
# Great Wall
462: [463, 0, 0, 290, 291, "", b"", False, True, False, "Great Wall: Map 130 to Map 131"],
463: [462, 0, 0, 0, 0, "", b"", False, True, False, "Great Wall: Map 131 to Map 130"],
464: [465, 0, 0, 293, 294, "", b"", False, True, False, "Great Wall: Map 131 to Map 133"],
465: [464, 0, 0, 0, 0, "", b"", False, True, False, "Great Wall: Map 133 to Map 131"],
466: [467, 0, 0, 296, 297, "", b"", False, True, False, "Great Wall: Map 133 to Map 134"],
467: [466, 0, 0, 0, 0, "", b"", False, True, False, "Great Wall: Map 134 to Map 133"],
468: [469, 0, 0, 297, 298, "", b"", False, True, False, "Great Wall: Map 134 to Map 135"],
469: [468, 0, 0, 0, 0, "", b"", False, True, False, "Great Wall: Map 135 to Map 134"],
470: [471, 0, 0, 299, 300, "", b"", False, True, False, "Great Wall: Map 135 to Map 136"],
471: [470, 0, 0, 0, 0, "", b"", False, True, False, "Great Wall: Map 136 to Map 135"],
# Euro
482: [483, 0, 0, 310, 312, "19cd2", b"", False, False, False, "Euro: Rolek Company (in)"],
483: [482, 0, 0, 0, 0, "19d74", b"", False, False, False, "Euro: Rolek Company (out)"],
484: [485, 0, 0, 310, 313, "19d0e", b"", False, False, False, "Euro: West House (in)"],
485: [484, 0, 0, 0, 0, "19e12", b"", False, False, False, "Euro: West House (out)"],
486: [487, 0, 0, 310, 314, "19cde", b"", False, False, False, "Euro: Rolek Mansion West (in)"],
487: [486, 0, 0, 0, 0, "19d9c", b"", False, False, False, "Euro: Rolek Mansion West (out)"],
488: [489, 0, 0, 310, 314, "19cea", b"", False, False, False, "Euro: Rolek Mansion East (in)"],
489: [488, 0, 0, 0, 0, "19da8", b"", False, False, False, "Euro: Rolek Mansion East (out)"],
490: [491, 0, 0, 310, 317, "19d26", b"", False, False, False, "Euro: Central House (in)"],
491: [490, 0, 0, 0, 0, "19e54", b"", False, False, False, "Euro: Central House (out)"],
492: [493, 0, 0, 310, 318, "19d32", b"", False, False, False, "Euro: Jeweler House (in)"],
493: [492, 0, 0, 0, 0, "19e62", b"", False, False, False, "Euro: Jeweler House (out)"],
494: [495, 0, 0, 310, 319, "19d3e", b"", False, False, False, "Euro: Twins House (in)"],
495: [494, 0, 0, 0, 0, "19e70", b"", False, False, False, "Euro: Twins House (out)"],
496: [497, 0, 0, 310, 320, "19cc6", b"", False, False, False, "Euro: Hidden House (in)"],
497: [496, 0, 0, 0, 0, "19d66", b"", False, False, False, "Euro: Hidden House (out)"],
498: [499, 0, 0, 310, 321, "19d4a", b"", False, False, False, "Euro: Shrine (in)"],
499: [498, 0, 0, 0, 0, "19e7e", b"", False, False, False, "Euro: Shrine (out)"],
500: [501, 0, 0, 310, 322, "19cba", b"", False, False, False, "Euro: Explorer's House (in)"],
501: [500, 0, 0, 0, 0, "19d58", b"", False, False, False, "Euro: Explorer's House (out)"],
502: [ 0, 0, 0, 310, 323, "19cf6", b"", False, False, False, "Euro: Store Entrance (in)"],
#503: [502, 0, 0, 0, 0, "", b"", False, False, False, "Euro: Store Entrance (out)"], #this doesn't exist!
504: [505, 0, 0, 310, 324, "19d02", b"", False, False, False, "Euro: Store Exit (in)"],
505: [504, 0, 0, 0, 0, "19e04", b"", False, False, False, "Euro: Store Exit (out)"],
506: [507, 0, 0, 314, 316, "19db4", b"", False, False, False, "Euro: Guest Room (in)"],
507: [506, 0, 0, 0, 0, "19df6", b"", False, False, False, "Euro: Guest Room (out)"],
508: [509, 0, 0, 310, 325, "19d1a", b"", False, False, False, "Euro: Dark Space House (in)"],
509: [508, 0, 0, 0, 0, "19e20", b"", False, False, False, "Euro: Dark Space House (out)"],
# Mt. Kress
522: [523, 0, 0, 330, 331, "", b"", False, True, False, "Mt. Kress: Map 160 to Map 161"],
523: [522, 0, 0, 0, 0, "", b"", False, True, False, "Mt. Kress: Map 161 to Map 160"],
524: [525, 0, 0, 332, 333, "", b"", False, True, False, "Mt. Kress: Map 161 to Map 162 (W)"],
525: [524, 0, 0, 0, 0, "", b"", False, True, False, "Mt. Kress: Map 162 to Map 161 (W)"],
526: [527, 0, 0, 332, 334, "", b"", False, True, False, "Mt. Kress: Map 161 to Map 162 (E)"],
527: [526, 0, 0, 0, 0, "", b"", False, True, False, "Mt. Kress: Map 162 to Map 161 (E)"],
528: [529, 0, 0, 333, 337, "", b"", False, True, False, "Mt. Kress: Map 162 to Map 163 (N)"],
529: [528, 0, 0, 0, 0, "", b"", False, True, False, "Mt. Kress: Map 163 to Map 162 (N)"],
530: [531, 0, 0, 337, 336, "", b"", False, True, False, "Mt. Kress: Map 163 to Map 162 (S)"],
531: [530, 0, 0, 0, 0, "", b"", False, True, False, "Mt. Kress: Map 162 to Map 163 (S)"],
532: [533, 0, 0, 333, 338, "", b"", False, True, False, "Mt. Kress: Map 162 to Map 164"],
533: [532, 0, 0, 0, 0, "", b"", False, True, False, "Mt. Kress: Map 164 to Map 162"],
534: [535, 0, 0, 335, 339, "", b"", False, True, False, "Mt. Kress: Map 162 to Map 165"],
535: [534, 0, 0, 0, 0, "", b"", False, True, False, "Mt. Kress: Map 165 to Map 162"],
536: [537, 0, 0, 339, 342, "", b"", False, True, False, "Mt. Kress: Map 165 to Map 166"],
537: [536, 0, 0, 0, 0, "", b"", False, True, False, "Mt. Kress: Map 166 to Map 165"],
538: [539, 0, 0, 340, 343, "", b"", False, True, False, "Mt. Kress: Map 165 to Map 167"],
539: [538, 0, 0, 0, 0, "", b"", False, True, False, "Mt. Kress: Map 167 to Map 165"],
540: [541, 0, 0, 341, 344, "", b"", False, True, False, "Mt. Kress: Map 165 to Map 168"],
541: [540, 0, 0, 0, 0, "", b"", False, True, False, "Mt. Kress: Map 168 to Map 165"],
542: [543, 0, 0, 344, 345, "", b"", False, True, False, "Mt. Kress: Map 168 to Map 169"],
543: [542, 0, 0, 0, 0, "", b"", False, True, False, "Mt. Kress: Map 169 to Map 168"],
# Native's Village
552: [553, 0, 0, 350, 352, "19fe6", b"", False, False, False, "Native's Village: West House (in)"],
553: [552, 0, 0, 0, 0, "1a00c", b"", False, False, False, "Native's Village: West House (out)"],
554: [555, 0, 0, 350, 353, "19ff2", b"", False, False, False, "Native's Village: House w/Statues (in)"],
555: [554, 0, 0, 0, 0, "1a01a", b"", False, False, False, "Native's Village: House w/Statues (out)"],
556: [557, 0, 0, 351, 400, "", b"", False, False, True, "Native's Village: Dao Passage"],
557: [556, 0, 0, 0, 0, "", b"", False, False, True, "Dao: Natives' Passage"],
# Ankor Wat
562: [563, 0, 0, 360, 361, "1a028", b"", False, True, False, "Ankor Wat: Map 176 to Map 177"],
563: [562, 0, 0, 0, 0, "1a036", b"", False, True, False, "Ankor Wat: Map 177 to Map 176"],
564: [565, 0, 0, 361, 363, "", b"", False, True, False, "Ankor Wat: Map 177 to Map 178"],
565: [564, 0, 0, 0, 0, "", b"", False, True, False, "Ankor Wat: Map 178 to Map 177"],
566: [567, 0, 0, 365, 366, "", b"", False, True, False, "Ankor Wat: Map 178 to Map 179"],
567: [566, 0, 0, 0, 0, "", b"", False, True, False, "Ankor Wat: Map 179 to Map 178"],
568: [569, 0, 0, 368, 367, "", b"", False, True, False, "Ankor Wat: Map 180 to Map 179"],
569: [568, 0, 0, 0, 0, "", b"", False, True, False, "Ankor Wat: Map 179 to Map 180"],
570: [571, 0, 0, 367, 369, "", b"", False, True, False, "Ankor Wat: Map 179 to Map 181"],
571: [570, 0, 0, 0, 0, "", b"", False, True, False, "Ankor Wat: Map 181 to Map 179"],
572: [573, 0, 0, 371, 362, "", b"", False, True, False, "Ankor Wat: Map 181 to Map 177"],
573: [572, 0, 0, 0, 0, "", b"", False, True, False, "Ankor Wat: Map 177 to Map 181"],
574: [575, 0, 0, 362, 372, "", b"", False, True, False, "Ankor Wat: Map 177 to Map 182"], # Garden
575: [574, 0, 0, 0, 0, "", b"", False, True, False, "Ankor Wat: Map 182 to Map 177"],
576: [577, 0, 0, 372, 373, "", b"", False, True, False, "Ankor Wat: Map 182 to Map 183"],
577: [576, 0, 0, 0, 0, "", b"", False, True, False, "Ankor Wat: Map 183 to Map 182"],
578: [579, 0, 0, 373, 376, "", b"", False, True, False, "Ankor Wat: Map 183 to Map 184"],
579: [578, 0, 0, 0, 0, "", b"", False, True, False, "Ankor Wat: Map 184 to Map 183"],
580: [581, 0, 0, 374, 378, "", b"", False, True, False, "Ankor Wat: Map 183 to Map 185 (W)"],
581: [580, 0, 0, 0, 0, "", b"", False, True, False, "Ankor Wat: Map 185 to Map 183 (W)"],
582: [583, 0, 0, 378, 375, "", b"", False, True, False, "Ankor Wat: Map 185 to Map 183 (E)"],
583: [582, 0, 0, 0, 0, "", b"", False, True, False, "Ankor Wat: Map 183 to Map 185 (E)"],
584: [585, 0, 0, 375, 379, "", b"", False, True, False, "Ankor Wat: Map 183 to Map 186"],
585: [584, 0, 0, 0, 0, "", b"", False, True, False, "Ankor Wat: Map 186 to Map 183"],
586: [587, 0, 0, 379, 381, "", b"", False, True, False, "Ankor Wat: Map 186 to Map 187 (W)"],
587: [586, 0, 0, 0, 0, "", b"", False, True, False, "Ankor Wat: Map 187 to Map 186 (W)"],
588: [589, 0, 0, 381, 380, "", b"", False, True, False, "Ankor Wat: Map 187 to Map 186 (E)"],
589: [588, 0, 0, 0, 0, "", b"", False, True, False, "Ankor Wat: Map 186 to Map 187 (E)"],
590: [591, 0, 0, 381, 384, "", b"", False, True, False, "Ankor Wat: Map 187 to Map 188"],
591: [590, 0, 0, 0, 0, "", b"", False, True, False, "Ankor Wat: Map 188 to Map 187"],
592: [593, 0, 0, 393, 386, "", b"", False, True, False, "Ankor Wat: Map 188 to Map 189"],
593: [592, 0, 0, 0, 0, "", b"", False, True, False, "Ankor Wat: Map 189 to Map 188"],
594: [595, 0, 0, 387, 389, "", b"", False, True, False, "Ankor Wat: Map 189 to Map 190 (E)"],
595: [594, 0, 0, 0, 0, "", b"", False, True, False, "Ankor Wat: Map 190 to Map 189 (E)"],
596: [596, 0, 0, 388, 390, "", b"", False, True, False, "Ankor Wat: Map 189 to Map 190 (W)"],
597: [597, 0, 0, 0, 0, "", b"", False, True, False, "Ankor Wat: Map 190 to Map 189 (W)"],
598: [599, 0, 0, 390, 391, "", b"", False, True, False, "Ankor Wat: Map 190 to Map 191"],
599: [598, 0, 0, 0, 0, "", b"", False, True, False, "Ankor Wat: Map 191 to Map 190"],
600: [ 0, 0, 0, 366, 368, "", b"", False, True, False, "Ankor Wat: Map 179 to Map 180 (drop)"],
601: [ 0, 0, 0, 392, 381, "", b"", False, True, False, "Ankor Wat: Map 188 to Map 187 NW-L (drop)"],
602: [ 0, 0, 0, 392, 381, "", b"", False, True, False, "Ankor Wat: Map 188 to Map 187 NW-R (drop)"],
603: [ 0, 0, 0, 392, 383, "", b"", False, True, False, "Ankor Wat: Map 188 to Map 187 NE (drop)"],
604: [ 0, 0, 0, 393, 382, "", b"", False, True, False, "Ankor Wat: Map 188 to Map 187 SW (drop)"],
605: [ 0, 0, 0, 389, 388, "", b"", False, True, False, "Ankor Wat: Map 190 to Map 189 (drop)"],
# Dao
612: [613, 0, 0, 400, 401, "1a27c", b"", False, False, False, "Dao: NW House (in)"],
613: [612, 0, 0, 0, 0, "1a2d2", b"", False, False, False, "Dao: NW House (out)"],
614: [615, 0, 0, 400, 402, "1a288", b"", False, False, False, "Dao: Neil's House (in)"],
615: [614, 0, 0, 0, 0, "1a30a", b"", False, False, False, "Dao: Neil's House (out)"],
616: [617, 0, 0, 400, 403, "1a294", b"", False, False, False, "Dao: Snake Game House (in)"],
617: [616, 0, 0, 0, 0, "1a2ee", b"", False, False, False, "Dao: Snake Game House (out)"],
618: [619, 0, 0, 400, 404, "1a2a0", b"", False, False, False, "Dao: SW House (in)"],
619: [618, 0, 0, 0, 0, "1a2fc", b"", False, False, False, "Dao: SW House (out)"],
620: [621, 0, 0, 400, 405, "1a2ac", b"", False, False, False, "Dao: S House (in)"],
621: [620, 0, 0, 0, 0, "1a2e0", b"", False, False, False, "Dao: S House (out)"],
622: [623, 0, 0, 400, 406, "1a2b8", b"", False, False, False, "Dao: SE House (in)"],
623: [622, 0, 0, 0, 0, "1a318", b"", False, False, False, "Dao: SE House (out)"],
# Pyramid
634: [635, 0, 0, 411, 415, "", b"", False, True, False, "Pyramid: Map 204 to Map 205"],
635: [634, 0, 0, 0, 0, "", b"", False, True, False, "Pyramid: Map 205 to Map 204"],
636: [637, 0, 0, 413, 416, "", b"", False, True, False, "Pyramid: Map 204 to Map 206"], # Room 1
637: [636, 0, 0, 0, 0, "", b"", False, True, False, "Pyramid: Map 206 to Map 204"],
638: [639, 0, 0, 417, 418, "", b"", False, True, False, "Pyramid: Map 206 to Map 207"],
639: [638, 0, 0, 0, 0, "", b"", False, True, False, "Pyramid: Map 207 to Map 206"],
640: [641, 0, 0, 419, 442, "", b"", False, True, False, "Pyramid: Map 207 to Map 218"],
641: [640, 0, 0, 0, 0, "", b"", False, True, False, "Pyramid: Map 218 to Map 207"],
642: [643, 0, 0, 413, 420, "", b"", False, True, False, "Pyramid: Map 204 to Map 208"], # Room 2
643: [642, 0, 0, 0, 0, "", b"", False, True, False, "Pyramid: Map 208 to Map 204"],
644: [645, 0, 0, 421, 422, "", b"", False, True, False, "Pyramid: Map 208 to Map 209"],
645: [644, 0, 0, 0, 0, "", b"", False, True, False, "Pyramid: Map 209 to Map 208"],
646: [647, 0, 0, 423, 443, "", b"", False, True, False, "Pyramid: Map 209 to Map 218"],
647: [646, 0, 0, 0, 0, "", b"", False, True, False, "Pyramid: Map 218 to Map 209"],
648: [649, 0, 0, 413, 431, "", b"", False, True, False, "Pyramid: Map 204 to Map 214"], # Room 3
649: [648, 0, 0, 0, 0, "", b"", False, True, False, "Pyramid: Map 214 to Map 204"],
650: [651, 0, 0, 434, 435, "", b"", False, True, False, "Pyramid: Map 214 to Map 215"],
651: [650, 0, 0, 0, 0, "", b"", False, True, False, "Pyramid: Map 214 to Map 215"],
652: [653, 0, 0, 435, 444, "", b"", False, True, False, "Pyramid: Map 215 to Map 218"],
653: [652, 0, 0, 0, 0, "", b"", False, True, False, "Pyramid: Map 218 to Map 215"],
654: [655, 0, 0, 413, 436, "", b"", False, True, False, "Pyramid: Map 204 to Map 216"], # Room 4
655: [654, 0, 0, 0, 0, "", b"", False, True, False, "Pyramid: Map 216 to Map 204"],
656: [657, 0, 0, 437, 438, "", b"", False, True, False, "Pyramid: Map 216 to Map 217"],
657: [656, 0, 0, 0, 0, "", b"", False, True, False, "Pyramid: Map 217 to Map 216"],
658: [659, 0, 0, 439, 440, "", b"", False, True, False, "Pyramid: Map 217 to Map 219"],
659: [658, 0, 0, 0, 0, "", b"", False, True, False, "Pyramid: Map 219 to Map 217"],
660: [661, 0, 0, 441, 445, "", b"", False, True, False, "Pyramid: Map 219 to Map 218"],
661: [660, 0, 0, 0, 0, "", b"", False, True, False, "Pyramid: Map 218 to Map 219"],
662: [663, 0, 0, 413, 426, "", b"", False, True, False, "Pyramid: Map 204 to Map 212"], # Room 5
663: [662, 0, 0, 0, 0, "", b"", False, True, False, "Pyramid: Map 212 to Map 204"],
664: [665, 0, 0, 429, 430, "", b"", False, True, False, "Pyramid: Map 212 to Map 213"],
665: [664, 0, 0, 0, 0, "", b"", False, True, False, "Pyramid: Map 213 to Map 212"],
666: [667, 0, 0, 430, 446, "", b"", False, True, False, "Pyramid: Map 213 to Map 218"],
667: [666, 0, 0, 0, 0, "", b"", False, True, False, "Pyramid: Map 218 to Map 213"],
668: [669, 0, 0, 413, 424, "", b"", False, True, False, "Pyramid: Map 204 to Map 210"], # Room 6
669: [668, 0, 0, 0, 0, "", b"", False, True, False, "Pyramid: Map 210 to Map 204"],
670: [671, 0, 0, 424, 425, "", b"", False, True, False, "Pyramid: Map 210 to Map 211"],
671: [670, 0, 0, 0, 0, "", b"", False, True, False, "Pyramid: Map 211 to Map 210"],
672: [673, 0, 0, 425, 447, "", b"", False, True, False, "Pyramid: Map 211 to Map 218"],
673: [672, 0, 0, 0, 0, "", b"", False, True, False, "Pyramid: Map 218 to Map 211"],
# Babel
682: [683, 0, 0, 460, 461, "", b"", False, True, False, "Babel: Map 222 to Map 223"],
683: [682, 0, 0, 0, 0, "", b"", False, True, False, "Babel: Map 223 to Map 222"],
684: [685, 0, 0, 462, 463, "", b"", False, True, False, "Babel: Map 223 to Map 224"],
685: [684, 0, 0, 0, 0, "", b"", False, True, False, "Babel: Map 224 to Map 223"],
686: [687, 0, 0, 463, 474, "", b"", False, True, False, "Babel: Map 224 to Map 242"], # Castoth
687: [686, 0, 0, 0, 0, "", b"", False, True, False, "Babel: Map 242 to Map 224"],
688: [689, 0, 0, 463, 475, "", b"", False, True, False, "Babel: Map 224 to Map 243"], # Viper
689: [688, 0, 0, 0, 0, "", b"", False, True, False, "Babel: Map 243 to Map 224"],
690: [691, 0, 0, 463, 465, "", b"", False, True, False, "Babel: Map 224 to Map 225 (bottom)"],
691: [690, 0, 0, 0, 0, "", b"", False, True, False, "Babel: Map 225 to Map 224 (bottom)"],
692: [693, 0, 0, 466, 464, "", b"", False, True, False, "Babel: Map 225 to Map 224 (top)"],
693: [692, 0, 0, 0, 0, "", b"", False, True, False, "Babel: Map 224 to Map 225 (top)"],
694: [695, 0, 0, 464, 476, "", b"", False, True, False, "Babel: Map 224 to Map 244"], # Vampires
695: [694, 0, 0, 0, 0, "", b"", False, True, False, "Babel: Map 244 to Map 224"],
696: [697, 0, 0, 464, 477, "", b"", False, True, False, "Babel: Map 224 to Map 245"], # Sand Fanger
697: [696, 0, 0, 0, 0, "", b"", False, True, False, "Babel: Map 245 to Map 224"],
698: [699, 0, 0, 464, 469, "", b"", False, True, False, "Babel: Map 224 to Map 226"],
699: [698, 0, 0, 0, 0, "", b"", False, True, False, "Babel: Map 226 to Map 224"],
#700: [701, 0, 0, 470, 471, "", b"", False, True, False, "Babel: Map 226 to Map 227"], #DUPLICATE W/BOSS EXITS
#701: [700, 0, 0, 0, 0, "", b"", False, True, False, "Babel: Map 227 to Map 226"],
702: [703, 0, 0, 471, 478, "", b"", False, True, False, "Babel: Map 227 to Map 246"], # Mummy Queen
703: [702, 0, 0, 0, 0, "", b"", False, True, False, "Babel: Map 246 to Map 227"],
704: [705, 0, 0, 471, 467, "", b"", False, True, False, "Babel: Map 227 to Map 225 (bottom)"],
705: [704, 0, 0, 0, 0, "", b"", False, True, False, "Babel: Map 225 to Map 227 (bottom)"],
706: [707, 0, 0, 468, 472, "", b"", False, True, False, "Babel: Map 225 to Map 227 (top)"],
707: [706, 0, 0, 0, 0, "", b"", False, True, False, "Babel: Map 227 to Map 225 (top)"],
708: [709, 0, 0, 472, 473, "", b"", False, True, False, "Babel: Map 227 to Map 222"],
709: [708, 0, 0, 0, 0, "", b"", False, True, False, "Babel: Map 222 to Map 227"],
# Jeweler's Mansion
720: [721, 0, 0, 8, 480, "8d32a", b"", False, True, True, "Mansion entrance"],
721: [720, 0, 0, 480, 400, "8fcb4", b"", False, True, True, "Mansion exit"]
}
| [
"random.uniform",
"random.shuffle",
"binascii.hexlify",
"random.seed",
"graphviz.Digraph",
"time.time",
"random.randint"
] | [((19836, 19869), 'random.shuffle', 'random.shuffle', (['junk_locations[0]'], {}), '(junk_locations[0])\n', (19850, 19869), False, 'import random\n'), ((19878, 19911), 'random.shuffle', 'random.shuffle', (['junk_locations[1]'], {}), '(junk_locations[1])\n', (19892, 19911), False, 'import random\n'), ((19920, 19954), 'random.shuffle', 'random.shuffle', (['quest_locations[0]'], {}), '(quest_locations[0])\n', (19934, 19954), False, 'import random\n'), ((19963, 19997), 'random.shuffle', 'random.shuffle', (['quest_locations[1]'], {}), '(quest_locations[1])\n', (19977, 19997), False, 'import random\n'), ((24313, 24342), 'random.shuffle', 'random.shuffle', (['rewards_tier1'], {}), '(rewards_tier1)\n', (24327, 24342), False, 'import random\n'), ((24351, 24380), 'random.shuffle', 'random.shuffle', (['rewards_tier2'], {}), '(rewards_tier2)\n', (24365, 24380), False, 'import random\n'), ((24389, 24418), 'random.shuffle', 'random.shuffle', (['rewards_tier3'], {}), '(rewards_tier3)\n', (24403, 24418), False, 'import random\n'), ((24427, 24456), 'random.shuffle', 'random.shuffle', (['rewards_tier4'], {}), '(rewards_tier4)\n', (24441, 24456), False, 'import random\n'), ((41803, 41826), 'random.shuffle', 'random.shuffle', (['islands'], {}), '(islands)\n', (41817, 41826), False, 'import random\n'), ((71550, 71583), 'random.seed', 'random.seed', (['(self.seed + seed_adj)'], {}), '(self.seed + seed_adj)\n', (71561, 71583), False, 'import random\n'), ((72167, 72197), 'random.shuffle', 'random.shuffle', (['item_locations'], {}), '(item_locations)\n', (72181, 72197), False, 'import random\n'), ((78645, 78674), 'random.shuffle', 'random.shuffle', (['placement_log'], {}), '(placement_log)\n', (78659, 78674), False, 'import random\n'), ((82955, 83045), 'graphviz.Digraph', 'graphviz.Digraph', ([], {'graph_attr': "[('concentrate', 'true'), ('rankdir', 'TB')]", 'strict': '(True)'}), "(graph_attr=[('concentrate', 'true'), ('rankdir', 'TB')],\n strict=True)\n", (82971, 83045), False, 
'import graphviz\n'), ((105946, 105978), 'random.shuffle', 'random.shuffle', (['destination_list'], {}), '(destination_list)\n', (105960, 105978), False, 'import random\n'), ((106194, 106226), 'random.shuffle', 'random.shuffle', (['destination_list'], {}), '(destination_list)\n', (106208, 106226), False, 'import random\n'), ((115243, 115263), 'random.randint', 'random.randint', (['(0)', '(1)'], {}), '(0, 1)\n', (115257, 115263), False, 'import random\n'), ((23093, 23113), 'random.shuffle', 'random.shuffle', (['area'], {}), '(area)\n', (23107, 23113), False, 'import random\n'), ((24531, 24551), 'random.shuffle', 'random.shuffle', (['area'], {}), '(area)\n', (24545, 24551), False, 'import random\n'), ((40440, 40468), 'random.shuffle', 'random.shuffle', (['one_way_dest'], {}), '(one_way_dest)\n', (40454, 40468), False, 'import random\n'), ((45849, 45877), 'random.shuffle', 'random.shuffle', (['origin_exits'], {}), '(origin_exits)\n', (45863, 45877), False, 'import random\n'), ((45890, 45916), 'random.shuffle', 'random.shuffle', (['dest_exits'], {}), '(dest_exits)\n', (45904, 45916), False, 'import random\n'), ((106534, 106559), 'random.shuffle', 'random.shuffle', (['continent'], {}), '(continent)\n', (106548, 106559), False, 'import random\n'), ((109400, 109432), 'random.shuffle', 'random.shuffle', (['insane_templates'], {}), '(insane_templates)\n', (109414, 109432), False, 'import random\n'), ((110019, 110039), 'random.shuffle', 'random.shuffle', (['sets'], {}), '(sets)\n', (110033, 110039), False, 'import random\n'), ((25748, 25778), 'random.shuffle', 'random.shuffle', (['nodes_to_check'], {}), '(nodes_to_check)\n', (25762, 25778), False, 'import random\n'), ((71638, 71663), 'random.randint', 'random.randint', (['(100)', '(1000)'], {}), '(100, 1000)\n', (71652, 71663), False, 'import random\n'), ((71686, 71710), 'random.randint', 'random.randint', (['(0)', '(10000)'], {}), '(0, 10000)\n', (71700, 71710), False, 'import random\n'), ((71766, 71791), 'random.randint', 
'random.randint', (['(100)', '(1000)'], {}), '(100, 1000)\n', (71780, 71791), False, 'import random\n'), ((71814, 71838), 'random.randint', 'random.randint', (['(0)', '(10000)'], {}), '(0, 10000)\n', (71828, 71838), False, 'import random\n'), ((80745, 80756), 'time.time', 'time.time', ([], {}), '()\n', (80754, 80756), False, 'import time\n'), ((103679, 103703), 'binascii.hexlify', 'binascii.hexlify', (['map_id'], {}), '(map_id)\n', (103695, 103703), False, 'import binascii\n'), ((42817, 42845), 'random.shuffle', 'random.shuffle', (['origin_exits'], {}), '(origin_exits)\n', (42831, 42845), False, 'import random\n'), ((42866, 42896), 'random.shuffle', 'random.shuffle', (['dest_exits_new'], {}), '(dest_exits_new)\n', (42880, 42896), False, 'import random\n'), ((73187, 73211), 'random.shuffle', 'random.shuffle', (['to_place'], {}), '(to_place)\n', (73201, 73211), False, 'import random\n'), ((76126, 76148), 'random.uniform', 'random.uniform', (['(0)', '(100)'], {}), '(0, 100)\n', (76140, 76148), False, 'import random\n'), ((104132, 104160), 'binascii.hexlify', 'binascii.hexlify', (['map_header'], {}), '(map_header)\n', (104148, 104160), False, 'import binascii\n'), ((112313, 112344), 'random.shuffle', 'random.shuffle', (['new_enemies_tmp'], {}), '(new_enemies_tmp)\n', (112327, 112344), False, 'import random\n'), ((104268, 104296), 'binascii.hexlify', 'binascii.hexlify', (['map_header'], {}), '(map_header)\n', (104284, 104296), False, 'import binascii\n'), ((104540, 104568), 'binascii.hexlify', 'binascii.hexlify', (['map_header'], {}), '(map_header)\n', (104556, 104568), False, 'import binascii\n')] |
import datetime
import os
import time
from enum import Enum
import sys
from MediaPlayer.Player import vlc
from MediaPlayer.Player.vlc import libvlc_get_version, Media, MediaList
from Shared.Events import EventManager, EventType
from Shared.Logger import Logger, LogVerbosity
from Shared.Observable import Observable
from Shared.Settings import Settings
from Shared.Threading import CustomThread
from Shared.Util import Singleton
class VLCPlayer(metaclass=Singleton):
    """Singleton wrapper around libVLC.

    Owns the VLC instance plus a media player and a media-list player
    (the list player handles media that resolves through sub-items, e.g.
    youtube URLs), mirrors playback state into an observable PlayerData
    object, and reacts to SetSubtitleFiles/StopPlayer application events.
    """
    def __init__(self):
        self.__vlc_instance = None
        # Observable state snapshot that other components subscribe to.
        self.player_state = PlayerData()
        self.instantiate_vlc()
        self.media = None
        self.__player = self.__vlc_instance.media_player_new()
        # List player is only used for media with sub-items (see play()).
        self.__list_player = self.__vlc_instance.media_list_player_new()
        self.__list_player.set_media_player(self.__player)
        self.__event_manager = self.__player.event_manager()
        self.set_volume(75)
        EventManager.register_event(EventType.SetSubtitleFiles, self.set_subtitle_files)
        EventManager.register_event(EventType.StopPlayer, self.stop)
        # Background thread that polls VLC state twice per second.
        self.player_observer = CustomThread(self.observe_player, "Player observer")
        self.player_observer.start()
        self.stop_player_thread = None
    def instantiate_vlc(self):
        """Create the libVLC instance with logging/caching parameters."""
        parameters = self.get_instance_parameters()
        Logger().write(LogVerbosity.Debug, "VLC parameters: " + str(parameters))
        self.__vlc_instance = vlc.Instance("cvlc", *parameters)
        Logger().write(LogVerbosity.Info, "VLC version " + libvlc_get_version().decode('utf8'))
    def play(self, url, time=0):
        """Start playback of `url`, optionally starting at `time` ms.

        NOTE(review): the `time` parameter shadows the stdlib `time`
        module inside this method; kept for interface compatibility.
        """
        parameters = self.get_play_parameters(url, time)
        Logger().write(LogVerbosity.Info, "VLC Play | Url: " + url)
        Logger().write(LogVerbosity.Info, "VLC Play | Time: " + str(time))
        Logger().write(LogVerbosity.Info, "VLC Play | Parameters: " + str(parameters))
        self.player_state.start_update()
        self.player_state.path = url
        self.player_state.stop_update()
        self.media = Media(url, *parameters)
        if 'youtube' in url:
            # Youtube URLs resolve through sub-items; only the list
            # player follows those automatically.
            media_list = MediaList()
            media_list.add_media(self.media)
            self.__list_player.set_media_list(media_list)
            self.__list_player.play()
        else:
            self.__player.set_media(self.media)
            self.__player.play()
    @staticmethod
    def get_instance_parameters():
        """Build the command-line parameter list for the libVLC instance."""
        params = ["--verbose=" + str(Settings.get_int("vlc_log_level")),
                  "--network-caching=" + str(Settings.get_int("network_caching")),
                  "--ipv4-timeout=500",
                  "--image-duration=-1"]
        if sys.platform == "linux" or sys.platform == "linux2":
            # On linux, additionally write VLC's own log to a timestamped file.
            log_path = Settings.get_string("base_folder") + "/Logs/" + datetime.datetime.now().strftime('%Y-%m-%d %H-%M-%S')
            params.append("--logfile=" + log_path + '/vlc_' + datetime.datetime.now().strftime('%Y-%m-%d %H-%M-%S') + ".txt")
            params.append("--file-logging")
            params.append("--file-caching=5000")
        return params
    def get_play_parameters(self, url, time):
        """Media options for play(); adds a start offset when `time` (ms) != 0."""
        params = []
        if time != 0:
            # VLC's start-time option takes seconds; `time` arrives in ms.
            params.append("start-time=" + str(time // 1000))
        return params
    def set_window(self, handle):
        """Attach VLC's video output to a native window handle."""
        if sys.platform == "linux" or sys.platform == "linux2":
            self.__player.set_xwindow(handle)
        else:
            self.__player.set_hwnd(handle)
    def pause_resume(self):
        """Toggle between paused and playing."""
        Logger().write(LogVerbosity.All, "Player pause resume")
        self.__player.pause()
    def stop(self):
        """Stop playback on a worker thread (stop() can block)."""
        Logger().write(LogVerbosity.All, "Player stop")
        thread = CustomThread(lambda: self.__player.stop(), "Stopping VLC player")
        thread.start()
    def set_volume(self, vol):
        """Set the audio volume and publish it on player_state."""
        Logger().write(LogVerbosity.Debug, "Player set volume " + str(vol))
        self.__player.audio_set_volume(vol)
        self.player_state.start_update()
        self.player_state.volume = vol
        self.player_state.stop_update()
    def get_volume(self):
        return self.__player.audio_get_volume()
    def get_position(self):
        # Current playback time as reported by VLC.
        return self.__player.get_time()
    def get_length(self):
        # Total media length as reported by VLC.
        return int(self.__player.get_length())
    def set_time(self, pos):
        """Seek to an absolute time and publish the new position."""
        Logger().write(LogVerbosity.Debug, "Player set time " + str(pos))
        self.__player.set_time(pos)
        self.player_state.start_update()
        self.player_state.playing_for = pos
        self.player_state.stop_update()
    def set_position(self, pos):
        """Seek to a relative position (fraction of the media)."""
        Logger().write(LogVerbosity.Debug, "Player set position " + str(pos))
        self.__player.set_position(pos)
    def set_subtitle_delay(self, delay):
        """Set the subtitle delay and publish it on player_state."""
        Logger().write(LogVerbosity.Debug, "Player set subtitle delay " + str(delay))
        self.__player.video_set_spu_delay(delay)
        self.player_state.start_update()
        self.player_state.sub_delay = delay
        self.player_state.stop_update()
    def get_state(self):
        return self.__player.get_state()
    def get_audio_track(self):
        return self.__player.audio_get_track()
    def set_audio_track(self, track_id):
        """Select an audio track and publish the choice."""
        Logger().write(LogVerbosity.Debug, "Player set audio track " + str(track_id))
        self.__player.audio_set_track(track_id)
        self.player_state.start_update()
        self.player_state.audio_track = track_id
        self.player_state.stop_update()
    def get_audio_tracks(self):
        """Return available audio tracks as (id, decoded name) tuples."""
        tracks = self.__player.audio_get_track_description()
        result = []
        for trackid, trackname in tracks:
            result.append((trackid, trackname.decode('utf8')))
        return result
    def set_subtitle_files(self, files):
        """Load external subtitle files into the player.

        Paths are normalized to the host OS separator; on non-linux hosts
        a missing drive letter is defaulted to C:.
        """
        Logger().write(LogVerbosity.Debug, "Adding " + str(len(files)) + " subtitle files")
        # `pi` is True on linux — presumably "running on the Raspberry Pi".
        pi = sys.platform == "linux" or sys.platform == "linux2"
        for file in reversed(files):
            if not pi and file[1] != ":":
                file = "C:" + file
            file = file.replace("/", os.sep).replace("\\", os.sep)
            # NOTE this must be called after Play()
            self.__player.video_set_subtitle_file(file)
    def set_subtitle_track(self, id):
        """Select a subtitle (spu) track and publish the choice."""
        Logger().write(LogVerbosity.Debug, "Player set subtitle track " + str(id))
        self.__player.video_set_spu(id)
        self.player_state.start_update()
        self.player_state.sub_track = id
        self.player_state.stop_update()
    def get_subtitle_count(self):
        return self.__player.video_get_spu_count()
    def get_subtitle_tracks(self):
        """Return available subtitle tracks as (id, decoded name) tuples."""
        tracks = self.__player.video_get_spu_description()
        result = []
        for trackid, trackname in tracks:
            result.append((trackid, trackname.decode('utf-8')))
        return result
    def get_subtitle_delay(self):
        return self.__player.video_get_spu_delay()
    def get_selected_sub(self):
        return self.__player.video_get_spu()
    def try_play_subitem(self):
        """Play the media's single resolved sub-item, or stop when there is none."""
        media = self.__player.get_media()
        if media is None:
            self.stop()
            return
        subs = media.subitems()
        if subs is None:
            self.stop()
            return
        if len(subs) == 1:
            subs[0].add_options("demux=avformat")
            self.__player.set_media(subs[0])
            self.__player.play()
    def observe_player(self):
        """Background loop: mirror VLC state into player_state every 0.5 s."""
        while True:
            state = self.get_state().value
            if state in [5, 6, 7]:
                # Terminal VLC states (presumably stopped/ended/error) are
                # collapsed to Nothing, which PlayerState can represent.
                state = 0
            new_state = PlayerState(state)
            if new_state == PlayerState.Nothing and self.player_state.state != PlayerState.Nothing:
                # Playback just ended: make sure the player is fully stopped.
                self.stop_player_thread = CustomThread(self.stop, "Stopping player")
                self.stop_player_thread.start()
            self.player_state.start_update()
            self.player_state.state = new_state
            self.player_state.playing_for = self.get_position()
            self.player_state.length = self.get_length()
            self.player_state.audio_tracks = self.get_audio_tracks()
            self.player_state.audio_track = self.get_audio_track()
            self.player_state.sub_delay = self.get_subtitle_delay()
            self.player_state.sub_track = self.get_selected_sub()
            self.player_state.sub_tracks = self.get_subtitle_tracks()
            self.player_state.volume = self.get_volume()
            self.player_state.stop_update()
            time.sleep(0.5)
# Coarse playback states mirrored from libVLC; values 0-4 match the
# corresponding libVLC state codes (terminal states are mapped to Nothing
# by the observer loop before construction).
PlayerState = Enum(
    "PlayerState",
    [("Nothing", 0), ("Opening", 1), ("Buffering", 2),
     ("Playing", 3), ("Paused", 4)],
)
class PlayerData(Observable):
    """Observable snapshot of the VLC player's state, published every 0.5 s."""
    def __init__(self):
        super().__init__("PlayerData", 0.5)
        # Media source and playback progress.
        self.path = None
        self.state = PlayerState.Nothing
        self.playing_for = 0
        self.length = 0
        # Audio configuration.
        self.volume = 0
        self.audio_track = 0
        self.audio_tracks = []
        # Subtitle configuration.
        self.sub_delay = 0
        self.sub_track = 0
        self.sub_tracks = []
| [
"Shared.Settings.Settings.get_int",
"MediaPlayer.Player.vlc.Media",
"Shared.Threading.CustomThread",
"MediaPlayer.Player.vlc.MediaList",
"time.sleep",
"datetime.datetime.now",
"MediaPlayer.Player.vlc.Instance",
"Shared.Logger.Logger",
"Shared.Events.EventManager.register_event",
"Shared.Settings.S... | [((926, 1011), 'Shared.Events.EventManager.register_event', 'EventManager.register_event', (['EventType.SetSubtitleFiles', 'self.set_subtitle_files'], {}), '(EventType.SetSubtitleFiles, self.set_subtitle_files\n )\n', (953, 1011), False, 'from Shared.Events import EventManager, EventType\n'), ((1015, 1075), 'Shared.Events.EventManager.register_event', 'EventManager.register_event', (['EventType.StopPlayer', 'self.stop'], {}), '(EventType.StopPlayer, self.stop)\n', (1042, 1075), False, 'from Shared.Events import EventManager, EventType\n'), ((1108, 1160), 'Shared.Threading.CustomThread', 'CustomThread', (['self.observe_player', '"""Player observer"""'], {}), "(self.observe_player, 'Player observer')\n", (1120, 1160), False, 'from Shared.Threading import CustomThread\n'), ((1432, 1465), 'MediaPlayer.Player.vlc.Instance', 'vlc.Instance', (['"""cvlc"""', '*parameters'], {}), "('cvlc', *parameters)\n", (1444, 1465), False, 'from MediaPlayer.Player import vlc\n'), ((2025, 2048), 'MediaPlayer.Player.vlc.Media', 'Media', (['url', '*parameters'], {}), '(url, *parameters)\n', (2030, 2048), False, 'from MediaPlayer.Player.vlc import libvlc_get_version, Media, MediaList\n'), ((2103, 2114), 'MediaPlayer.Player.vlc.MediaList', 'MediaList', ([], {}), '()\n', (2112, 2114), False, 'from MediaPlayer.Player.vlc import libvlc_get_version, Media, MediaList\n'), ((8425, 8440), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (8435, 8440), False, 'import time\n'), ((1329, 1337), 'Shared.Logger.Logger', 'Logger', ([], {}), '()\n', (1335, 1337), False, 'from Shared.Logger import Logger, LogVerbosity\n'), ((1474, 1482), 'Shared.Logger.Logger', 'Logger', ([], {}), '()\n', (1480, 1482), False, 'from Shared.Logger import Logger, LogVerbosity\n'), ((1662, 1670), 'Shared.Logger.Logger', 'Logger', ([], {}), '()\n', (1668, 1670), False, 'from Shared.Logger import Logger, LogVerbosity\n'), ((1730, 1738), 'Shared.Logger.Logger', 'Logger', ([], {}), '()\n', (1736, 
1738), False, 'from Shared.Logger import Logger, LogVerbosity\n'), ((1805, 1813), 'Shared.Logger.Logger', 'Logger', ([], {}), '()\n', (1811, 1813), False, 'from Shared.Logger import Logger, LogVerbosity\n'), ((3486, 3494), 'Shared.Logger.Logger', 'Logger', ([], {}), '()\n', (3492, 3494), False, 'from Shared.Logger import Logger, LogVerbosity\n'), ((3601, 3609), 'Shared.Logger.Logger', 'Logger', ([], {}), '()\n', (3607, 3609), False, 'from Shared.Logger import Logger, LogVerbosity\n'), ((3795, 3803), 'Shared.Logger.Logger', 'Logger', ([], {}), '()\n', (3801, 3803), False, 'from Shared.Logger import Logger, LogVerbosity\n'), ((4283, 4291), 'Shared.Logger.Logger', 'Logger', ([], {}), '()\n', (4289, 4291), False, 'from Shared.Logger import Logger, LogVerbosity\n'), ((4552, 4560), 'Shared.Logger.Logger', 'Logger', ([], {}), '()\n', (4558, 4560), False, 'from Shared.Logger import Logger, LogVerbosity\n'), ((4712, 4720), 'Shared.Logger.Logger', 'Logger', ([], {}), '()\n', (4718, 4720), False, 'from Shared.Logger import Logger, LogVerbosity\n'), ((5160, 5168), 'Shared.Logger.Logger', 'Logger', ([], {}), '()\n', (5166, 5168), False, 'from Shared.Logger import Logger, LogVerbosity\n'), ((5707, 5715), 'Shared.Logger.Logger', 'Logger', ([], {}), '()\n', (5713, 5715), False, 'from Shared.Logger import Logger, LogVerbosity\n'), ((6192, 6200), 'Shared.Logger.Logger', 'Logger', ([], {}), '()\n', (6198, 6200), False, 'from Shared.Logger import Logger, LogVerbosity\n'), ((7665, 7707), 'Shared.Threading.CustomThread', 'CustomThread', (['self.stop', '"""Stopping player"""'], {}), "(self.stop, 'Stopping player')\n", (7677, 7707), False, 'from Shared.Threading import CustomThread\n'), ((2442, 2475), 'Shared.Settings.Settings.get_int', 'Settings.get_int', (['"""vlc_log_level"""'], {}), "('vlc_log_level')\n", (2458, 2475), False, 'from Shared.Settings import Settings\n'), ((2523, 2558), 'Shared.Settings.Settings.get_int', 'Settings.get_int', (['"""network_caching"""'], {}), 
"('network_caching')\n", (2539, 2558), False, 'from Shared.Settings import Settings\n'), ((2730, 2764), 'Shared.Settings.Settings.get_string', 'Settings.get_string', (['"""base_folder"""'], {}), "('base_folder')\n", (2749, 2764), False, 'from Shared.Settings import Settings\n'), ((1525, 1545), 'MediaPlayer.Player.vlc.libvlc_get_version', 'libvlc_get_version', ([], {}), '()\n', (1543, 1545), False, 'from MediaPlayer.Player.vlc import libvlc_get_version, Media, MediaList\n'), ((2778, 2801), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (2799, 2801), False, 'import datetime\n'), ((2894, 2917), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (2915, 2917), False, 'import datetime\n')] |
from discord.ext import commands
import discord
from datetime import datetime
from src import util
from tools.checker import Checker,Embed
class Meta(commands.Cog):
    """Commands relating to the bot itself."""

    def __init__(self, bot):
        self.bot = bot
        # Record startup time so `uptime` can compute elapsed runtime.
        self.start_time = datetime.now()
        # Replace discord.py's default help command with this bot's own.
        bot.remove_command("help")

    @commands.command(name="업타임")
    async def uptime(self, ctx):
        """Tells how long the bot has been running."""
        ch = Checker(ctx=ctx)
        em = Embed(ctx=ctx)
        # Fix: licence() was awaited twice (two checks per invocation) and the
        # result's docstring sat mid-function as a dead statement. Check once;
        # 400 means the caller is not licensed.
        if await ch.licence() == 400:
            return await ctx.send(embed=em.no_())
        uptime_seconds = round(
            (datetime.now() - self.start_time).total_seconds())
        await ctx.send(f"> 봇이 작동한시간: {util.format_seconds(uptime_seconds)}")
def setup(bot):
bot.add_cog(Meta(bot)) | [
"tools.checker.Checker",
"tools.checker.Embed",
"datetime.datetime.now",
"src.util.format_seconds",
"discord.ext.commands.command"
] | [((350, 378), 'discord.ext.commands.command', 'commands.command', ([], {'name': '"""업타임"""'}), "(name='업타임')\n", (366, 378), False, 'from discord.ext import commands\n'), ((292, 306), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (304, 306), False, 'from datetime import datetime\n'), ((425, 441), 'tools.checker.Checker', 'Checker', ([], {'ctx': 'ctx'}), '(ctx=ctx)\n', (432, 441), False, 'from tools.checker import Checker, Embed\n'), ((455, 469), 'tools.checker.Embed', 'Embed', ([], {'ctx': 'ctx'}), '(ctx=ctx)\n', (460, 469), False, 'from tools.checker import Checker, Embed\n'), ((715, 729), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (727, 729), False, 'from datetime import datetime\n'), ((818, 853), 'src.util.format_seconds', 'util.format_seconds', (['uptime_seconds'], {}), '(uptime_seconds)\n', (837, 853), False, 'from src import util\n')] |
from logging import warning
from api import gitlab
from utilities import validate, types
# NOTE(review): this rebinds the name `gitlab` from the imported `api.gitlab`
# module to a GitLab client instance; everything below operates on the
# instance, so do not re-import the module under this name.
gitlab = gitlab.GitLab(types.Arguments().url)
def get_all(projects):
    """Collect snippets across projects as a {snippet_id: web_url} mapping.

    `projects` is an iterable of {project_id: project_name} dicts.
    """
    found = {}
    for project in projects:
        for project_id, project_name in project.items():
            details = gitlab.get_project_snippets(project_id)
            if not validate.api_result(details):
                continue
            warning("[*] Found %s snippets for project %s", len(details), project_name)
            for snippet in details:
                found[snippet['id']] = snippet['web_url']
    return found
def sniff_secrets(snippets):
    """Scan the raw content of each snippet for leaked secrets.

    `snippets` maps snippet id -> web URL; returns a list of found secrets.
    """
    if not snippets:
        return []
    raw_data = {}
    for snippet_id, snippet_url in snippets.items():
        raw_data[snippet_url] = gitlab.get_snippet_raw(snippet_id)
    secrets = []
    if raw_data:
        monitor = types.SecretsMonitor()
        secrets.extend(monitor.sniff_secrets(raw_data))
    return secrets
return secrets
| [
"api.gitlab.get_project_snippets",
"api.gitlab.get_snippet_raw",
"utilities.types.Arguments",
"utilities.validate.api_result",
"utilities.types.SecretsMonitor"
] | [((114, 131), 'utilities.types.Arguments', 'types.Arguments', ([], {}), '()\n', (129, 131), False, 'from utilities import validate, types\n'), ((747, 781), 'api.gitlab.get_snippet_raw', 'gitlab.get_snippet_raw', (['snippet_id'], {}), '(snippet_id)\n', (769, 781), False, 'from api import gitlab\n'), ((878, 900), 'utilities.types.SecretsMonitor', 'types.SecretsMonitor', ([], {}), '()\n', (898, 900), False, 'from utilities import validate, types\n'), ((274, 306), 'api.gitlab.get_project_snippets', 'gitlab.get_project_snippets', (['key'], {}), '(key)\n', (301, 306), False, 'from api import gitlab\n'), ((322, 350), 'utilities.validate.api_result', 'validate.api_result', (['details'], {}), '(details)\n', (341, 350), False, 'from utilities import validate, types\n')] |
import requests
if __name__ == "__main__":
    # Browser-like request headers; the cookie placeholder must be replaced
    # with a real logged-in weibo.com cookie before the page returns data.
    request_headers = {
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
        'Accept-Language': 'zh-CN,zh;q=0.8,en-US;q=0.5,en;q=0.3',
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.14; rv:73.0) Gecko/20100101 Firefox/73.0',
        'Connection': 'keep-alive',
        'cookie': 'replace your cookie here'  # fill in before running
    }
    fans_url = 'https://weibo.com/2671109275/fans?rightmod=1&wvr=6'
    session = requests.Session()
    response = session.get(fans_url, headers=request_headers)
    print(response.text)
print(response.text)
print(response.status_code) | [
"requests.Session"
] | [((497, 515), 'requests.Session', 'requests.Session', ([], {}), '()\n', (513, 515), False, 'import requests\n')] |
import matplotlib.pyplot as plt
import requests
import pandas as pd
import json
# Fetch a live ETH/BTC order-book snapshot from Bitstamp's public API.
data = requests.get(r'https://www.bitstamp.net/api/v2/order_book/ethbtc')
data = data.json()
# NOTE: the two frames are deliberately built with different column orders
# (bids: quantity,price / asks: price,quantity); the positional itertuples()
# unpacking below depends on exactly this order.
bids = pd.DataFrame()
bids['quantity'] = [i[1] for i in data['bids']]
bids['price'] = [i[0] for i in data['bids']]
asks = pd.DataFrame()
asks['price'] = [i[0] for i in data['asks']]
asks['quantity'] = [i[1] for i in data['asks']]
# The API returns prices/sizes as strings; convert to floats for plotting.
asks.price = asks.price.apply(float)
asks.quantity = asks.quantity.apply(float)
bids.price = bids.price.apply(float)
bids.quantity = bids.quantity.apply(float)
# Both sides end up keyed the same way: {price: size}.
bids_dict = {x[1]:x[0] for x in bids.itertuples(index=False)}
asks_dict = {x[0]:x[1] for x in asks.itertuples(index=False)}
bidask = dict()
bidask['asks'] = asks_dict
bidask['bids'] = bids_dict
# Rewrite the raw string pairs as typed records and persist the snapshot.
data['asks'] = [{'price':float(i[0]), 'amount':float(i[1])} for i in data['asks']]
data['bids'] = [{'price':float(i[0]), 'amount':float(i[1])} for i in data['bids']]
with open('order_book2.json', 'w') as fp:
    json.dump(data, fp)
def _depth_segments(levels):
    """Build step-plot segments for one book side.

    levels: (price, size) pairs ordered best-to-worst price.
    Returns (hys, hxmins, hxmaxs, vxs, vymins, vymaxs): horizontal segments
    at each cumulative size and vertical risers between consecutive levels.
    """
    size_acc = 0
    hys, hxmins, hxmaxs = [], [], []
    vxs, vymins, vymaxs = [], [], []
    for (p1, s1), (p2, s2) in zip(levels, levels[1:]):
        vymins.append(size_acc)
        if size_acc == 0:
            # Seed the accumulator with the best level's size once.
            size_acc += s1
        hys.append(size_acc)
        # min/max works for both sides: bids descend, asks ascend in price.
        hxmins.append(min(p1, p2))
        hxmaxs.append(max(p1, p2))
        vxs.append(p2)
        size_acc += s2
        vymaxs.append(size_acc)
    return hys, hxmins, hxmaxs, vxs, vymins, vymaxs
def plot_ob(bidask, bps=.25):
    """Plot cumulative order-book depth near the touch.

    bidask: {"bids": {price: size}, "asks": {price: size}}
    bps: fractional price band around the best quotes (0.25 == 25%;
         despite the name this is a fraction, not basis points).
    """
    best_bid = max(bidask["bids"].keys())
    best_ask = min(bidask["asks"].keys())
    worst_bid = best_bid * (1 - bps)
    # BUG FIX: the ask-side cutoff was anchored to best_bid (best_ask was
    # computed but never used); anchor it to best_ask instead.
    worst_ask = best_ask * (1 + bps)
    filtered_bids = sorted(filter(lambda k: k[0] >= worst_bid, bidask['bids'].items()), key=lambda x:-x[0])
    filtered_asks = sorted(filter(lambda k: k[0] <= worst_ask, bidask['asks'].items()), key=lambda x:+x[0])
    bhys, bhxmins, bhxmaxs, bvxs, bvymins, bvymaxs = _depth_segments(filtered_bids)
    ahys, ahxmins, ahxmaxs, avxs, avymins, avymaxs = _depth_segments(filtered_asks)
    plt.hlines(bhys, bhxmins, bhxmaxs, color="green")
    plt.vlines(bvxs, bvymins, bvymaxs, color="green")
    plt.hlines(ahys, ahxmins, ahxmaxs, color="red")
    plt.vlines(avxs, avymins, avymaxs, color="red")
# Plot a zoomed-in (±5%) view of the cumulative depth around the touch.
plt.figure(figsize=(5,4))
plot_ob(bidask, bps=.05)
# Clip the cumulative-size axis so near-touch liquidity stays readable.
plt.ylim([0, 4000])
plt.show() | [
"matplotlib.pyplot.vlines",
"matplotlib.pyplot.hlines",
"requests.get",
"matplotlib.pyplot.figure",
"pandas.DataFrame",
"matplotlib.pyplot.ylim",
"json.dump",
"matplotlib.pyplot.show"
] | [((88, 153), 'requests.get', 'requests.get', (['"""https://www.bitstamp.net/api/v2/order_book/ethbtc"""'], {}), "('https://www.bitstamp.net/api/v2/order_book/ethbtc')\n", (100, 153), False, 'import requests\n'), ((182, 196), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (194, 196), True, 'import pandas as pd\n'), ((297, 311), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (309, 311), True, 'import pandas as pd\n'), ((2628, 2654), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(5, 4)'}), '(figsize=(5, 4))\n', (2638, 2654), True, 'import matplotlib.pyplot as plt\n'), ((2679, 2698), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0, 4000]'], {}), '([0, 4000])\n', (2687, 2698), True, 'import matplotlib.pyplot as plt\n'), ((2699, 2709), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2707, 2709), True, 'import matplotlib.pyplot as plt\n'), ((975, 994), 'json.dump', 'json.dump', (['data', 'fp'], {}), '(data, fp)\n', (984, 994), False, 'import json\n'), ((2373, 2422), 'matplotlib.pyplot.hlines', 'plt.hlines', (['bhys', 'bhxmins', 'bhxmaxs'], {'color': '"""green"""'}), "(bhys, bhxmins, bhxmaxs, color='green')\n", (2383, 2422), True, 'import matplotlib.pyplot as plt\n'), ((2427, 2476), 'matplotlib.pyplot.vlines', 'plt.vlines', (['bvxs', 'bvymins', 'bvymaxs'], {'color': '"""green"""'}), "(bvxs, bvymins, bvymaxs, color='green')\n", (2437, 2476), True, 'import matplotlib.pyplot as plt\n'), ((2481, 2528), 'matplotlib.pyplot.hlines', 'plt.hlines', (['ahys', 'ahxmins', 'ahxmaxs'], {'color': '"""red"""'}), "(ahys, ahxmins, ahxmaxs, color='red')\n", (2491, 2528), True, 'import matplotlib.pyplot as plt\n'), ((2533, 2580), 'matplotlib.pyplot.vlines', 'plt.vlines', (['avxs', 'avymins', 'avymaxs'], {'color': '"""red"""'}), "(avxs, avymins, avymaxs, color='red')\n", (2543, 2580), True, 'import matplotlib.pyplot as plt\n')] |
import os
import aiohttp
import asyncio
import json
import time
import datetime
import logging
import gidgethub
import requests
from gidgethub import aiohttp as gh_aiohttp
import sys
import pandas as pd
# Make the sibling ../utils package importable when running this script
# from its own directory.
sys.path.append("..")
from utils.auth import get_jwt, get_installation, get_installation_access_token
from utils.test_auth_ipipe import xlyOpenApiRequest
from utils.readConfig import ReadConfig
# Log every run to a file; INFO level keeps mark decisions auditable.
logging.basicConfig(
    level=logging.INFO,
    filename='../logs/regularMark.log',
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
localConfig = ReadConfig(path='../conf/config.ini')
class MarkTimeoutCI(object):
    """Marks stale-but-green CI runs on old PRs as failed.

    For open PRs created between 7 and 30 days ago whose latest CI results
    are older than 7 days, every successful/pending required CI is re-marked
    as FAIL through the ipipe open API, and the PR author is asked (once)
    to re-run CI so the PR is validated against a fresh base.
    """

    def __init__(self, user, repo, gh):
        # GitHub REST endpoint listing open PRs, 100 per page.
        self.pr_url = 'https://api.github.com/repos/%s/%s/pulls?per_page=100&page=1&q=addClass' % (
            user, repo)
        self.gh = gh
        self.user = user
        self.repo = repo
        # ipipe endpoint used to overwrite a job build's result.
        self.mark_url = 'https://xly.bce.baidu.com/open-api/ipipe/rest/v1/job-builds/{}/mark'
        self.rerun_url = 'http://www.cipaddlepaddle.cn:8081/%s/%s/{}/{}' % (
            user, repo)
        self.comment_url = 'https://api.github.com/repos/%s/%s/issues/{}/comments' % (
            user, repo)

    def getNextUrl(self, link):
        """Extract the rel="next" page URL from a GitHub Link header, or None."""
        next_str = None
        for i in link.split(','):
            if 'rel="next"' in i:
                next_str = i
                break
        if next_str is not None:
            start_index = next_str.index('<')
            end_index = next_str.index('>')
            url = next_str[start_index + 1:end_index]
        else:
            url = None
        return url

    async def getBeforeSevenDaysPRList(self):
        """Collect open PRs created between 7 and 30 days ago.

        PRs older than 30 days are deliberately skipped: GitHub is assumed
        to have flagged them as code conflicts already.

        Returns:
            list of dicts: [{PR, commit, status_url}]
        """
        today = datetime.date.today()
        seven_Days_ago = str(today - datetime.timedelta(days=7))
        month_Days_ago = str(today - datetime.timedelta(days=30))
        overduelist = []
        while self.pr_url is not None:
            (code, header, body) = await self.gh._request(
                "GET", self.pr_url,
                {'accept': 'application/vnd.github.antiope-preview+json'})
            res = json.loads(body.decode('utf8'))
            for item in res:
                # ISO-8601 timestamps compare correctly as plain strings.
                if item['created_at'] < seven_Days_ago and item[
                        'created_at'] > month_Days_ago:
                    item_dic = {}
                    item_dic['PR'] = item['number']
                    item_dic['commit'] = item['head']['sha']
                    item_dic['status_url'] = item['statuses_url']
                    overduelist.append(item_dic)
            self.pr_url = self.getNextUrl(header['link'])
        print("before %s's PRs: %s" % (seven_Days_ago, overduelist))
        logger.info("before %s's PRs: %s" % (seven_Days_ago, overduelist))
        return overduelist

    async def getCIstatus(self):
        """Build the list of PRs whose CI runs need marking.

        Per candidate PR: take the newest status per CI context (the GitHub
        API can return stale duplicates), then keep the PR when any of its
        newest CI results is more than 7 days old. Only such PRs are marked.
        """
        PRList = await self.getBeforeSevenDaysPRList()
        today = datetime.date.today()
        seven_Days_ago = str(today - datetime.timedelta(days=7))
        CI_STATUS_LIST = []
        for item in PRList:
            commit_ci_status = {}
            commit_ci_status['PR'] = item['PR']
            commit_ci_status['commit'] = item['commit']
            status_url = item['status_url']
            res = requests.get(status_url,
                               headers={'authorization': "token xxx"},
                               timeout=15).json()
            commit_ci_status['CI'] = []
            # One flag per CI: True means that CI's newest run is >7 days old.
            if_before_seven_day = []
            for ci in res:
                already_exit = False
                if ci['context'] != 'license/cla':
                    for i in commit_ci_status['CI']:
                        if ci['context'] == i['ciName'] and i['time'] > ci[
                                'created_at']:  # drop older duplicates returned by the API
                            already_exit = True
                            break
                    if not already_exit:
                        item_dic = {}
                        item_dic['time'] = ci['created_at']
                        item_dic['ciName'] = ci['context']
                        item_dic['status'] = ci['state']
                        item_dic['markId'] = ci['target_url'].split('/')[-1]
                        commit_ci_status['CI'].append(item_dic)
                        if item_dic['time'] > seven_Days_ago:
                            if_before_seven_day.append(False)
                        else:
                            if_before_seven_day.append(True)
            if True in if_before_seven_day:
                print('%s is 7 ago..........' % item['PR'])
                CI_STATUS_LIST.append(commit_ci_status)
            else:
                print('%s not 7 ago' % item['PR'])
        logger.info("need to mark ci list: %s" % CI_STATUS_LIST)
        return CI_STATUS_LIST

    async def markCIFailed(self):
        """Mark success/pending required CIs as FAIL and notify each PR once."""
        CIStatusList = await self.getCIstatus()
        REQUIRED_CI = localConfig.cf.get('%s/%s' % (self.user, self.repo),
                                           'REQUIRED_CI')
        DATA = {"data": "FAIL", "message": "Paddle-bot", "type": "MARK"}
        json_str = json.dumps(DATA)
        headers = {
            "Content-Type": "application/json",
            "IPIPE-UID": "Paddle-bot"
        }
        for item in CIStatusList:
            PR = item['PR']
            commit = item['commit']
            ci_list = item['CI']
            mark_ci_list = []
            for ci in ci_list:
                if ci['ciName'] in REQUIRED_CI and ci[
                        'status'] in ['success', 'pending']:
                    markId = ci['markId']
                    mark_url = self.mark_url.format(markId)
                    res = xlyOpenApiRequest().post_method(
                        mark_url, json_str, headers=headers)
                    if res.status_code in (200, 201):
                        mark_ci_list.append(ci['ciName'])
                        print('%s_%s_%s mark success!' %
                              (PR, commit, ci['ciName']))
                        logger.info('%s_%s_%s mark success!' %
                                    (PR, commit, ci['ciName']))
                    else:
                        print('%s_%s_%s mark failed!' %
                              (PR, commit, ci['ciName']))
                        logger.error('%s_%s_%s mark failed!' %
                                     (PR, commit, ci['ciName']))
            if len(mark_ci_list) > 0:
                marked = self.queryIfHasMark(PR, commit)
                if not marked:
                    # BUG FIX: inform() is a coroutine; without await it was
                    # never executed, so the PR comment was never posted.
                    await self.inform(item)
                else:
                    print('%s_%s has marked!!!!' % (PR, commit))
                    logger.info('%s_%s has marked!!!!' % (PR, commit))
                data = {
                    'TIME': time.strftime("%Y%m%d %H:%M:%S", time.localtime()),
                    'PR': PR,
                    'COMMITID': commit,
                    'CINAME': mark_ci_list
                }
                self.save_markci_job(data)

    def queryIfHasMark(self, PR, commitid):
        """Return True when this PR/commit pair was already marked (per CSV log)."""
        df = pd.read_csv('../buildLog/mark_timeout_ci.csv')
        queryKey = df[(df['PR'] == PR) & (df['COMMITID'] == commitid)]
        return not queryKey.empty

    def create_markci_csv(self, filename):
        """Create the CSV file that records which CI runs were marked."""
        df = pd.DataFrame(columns=['TIME', 'PR', 'COMMITID', 'CINAME'])
        df.to_csv(filename)

    def save_markci_job(self, data):
        """Append one marked-CI record to the CSV log, creating it on first use."""
        filename = '../buildLog/mark_timeout_ci.csv'
        if not os.path.exists(filename):
            self.create_markci_csv(filename)
        write_data = pd.DataFrame(data)
        write_data.to_csv(filename, mode='a', header=False)

    async def inform(self, item):
        """Post a Paddle-bot comment on the PR asking the author to re-run CI."""
        # POST /repos/:owner/:repo/issues/:issue_number/comments
        # NOTE(review): computed but currently unused in the message body.
        rerun_ci_link = self.rerun_url.format(item['PR'], item['commit'])
        comment_url = self.comment_url.format(item['PR'])
        shortId = item['commit'][0:7]
        message = "Sorry to inform you that %s's CIs have passed for more than 7 days. To prevent PR conflicts, you need to re-run all CIs manually. " % shortId
        await self.gh.post(comment_url, data={"body": message})
async def main(user, repo):
    """Authenticate as the GitHub App installation for `user` and run the marker."""
    async with aiohttp.ClientSession() as http_session:
        app_id = os.getenv("GH_APP_ID")
        app_jwt = get_jwt(app_id)
        gh = gh_aiohttp.GitHubAPI(http_session, user)
        try:
            installation = await get_installation(gh, app_jwt, user)
        except ValueError as err:
            # No installation found for this user/org: report and bail out.
            print(err)
        else:
            access_token = await get_installation_access_token(
                gh, jwt=app_jwt, installation_id=installation["id"])
            # From here on the token behaves like a personal access token.
            gh = gh_aiohttp.GitHubAPI(
                http_session, user, oauth_token=access_token["token"])
            marker = MarkTimeoutCI(user, repo, gh)
            await marker.markCIFailed()
# One-shot run against the PaddlePaddle/Paddle repository.
loop = asyncio.get_event_loop()
loop.run_until_complete(main('PaddlePaddle', 'Paddle'))
| [
"logging.getLogger",
"pandas.read_csv",
"utils.readConfig.ReadConfig",
"utils.auth.get_jwt",
"datetime.timedelta",
"sys.path.append",
"os.path.exists",
"json.dumps",
"pandas.DataFrame",
"time.localtime",
"asyncio.get_event_loop",
"utils.auth.get_installation_access_token",
"requests.get",
... | [((203, 224), 'sys.path.append', 'sys.path.append', (['""".."""'], {}), "('..')\n", (218, 224), False, 'import sys\n'), ((398, 540), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO', 'filename': '"""../logs/regularMark.log"""', 'format': '"""%(asctime)s - %(name)s - %(levelname)s - %(message)s"""'}), "(level=logging.INFO, filename='../logs/regularMark.log',\n format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n", (417, 540), False, 'import logging\n'), ((559, 586), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (576, 586), False, 'import logging\n'), ((602, 639), 'utils.readConfig.ReadConfig', 'ReadConfig', ([], {'path': '"""../conf/config.ini"""'}), "(path='../conf/config.ini')\n", (612, 639), False, 'from utils.readConfig import ReadConfig\n'), ((9571, 9595), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (9593, 9595), False, 'import asyncio\n'), ((1877, 1898), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (1896, 1898), False, 'import datetime\n'), ((3264, 3285), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (3283, 3285), False, 'import datetime\n'), ((5607, 5623), 'json.dumps', 'json.dumps', (['DATA'], {}), '(DATA)\n', (5617, 5623), False, 'import json\n'), ((7626, 7672), 'pandas.read_csv', 'pd.read_csv', (['"""../buildLog/mark_timeout_ci.csv"""'], {}), "('../buildLog/mark_timeout_ci.csv')\n", (7637, 7672), True, 'import pandas as pd\n'), ((7898, 7956), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['TIME', 'PR', 'COMMITID', 'CINAME']"}), "(columns=['TIME', 'PR', 'COMMITID', 'CINAME'])\n", (7910, 7956), True, 'import pandas as pd\n'), ((8213, 8231), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {}), '(data)\n', (8225, 8231), True, 'import pandas as pd\n'), ((8860, 8883), 'aiohttp.ClientSession', 'aiohttp.ClientSession', ([], {}), '()\n', (8881, 8883), False, 'import aiohttp\n'), ((8913, 8935), 'os.getenv', 'os.getenv', 
(['"""GH_APP_ID"""'], {}), "('GH_APP_ID')\n", (8922, 8935), False, 'import os\n'), ((8950, 8965), 'utils.auth.get_jwt', 'get_jwt', (['app_id'], {}), '(app_id)\n', (8957, 8965), False, 'from utils.auth import get_jwt, get_installation, get_installation_access_token\n'), ((8979, 9014), 'gidgethub.aiohttp.GitHubAPI', 'gh_aiohttp.GitHubAPI', (['session', 'user'], {}), '(session, user)\n', (8999, 9014), True, 'from gidgethub import aiohttp as gh_aiohttp\n'), ((8112, 8136), 'os.path.exists', 'os.path.exists', (['filename'], {}), '(filename)\n', (8126, 8136), False, 'import os\n'), ((9371, 9441), 'gidgethub.aiohttp.GitHubAPI', 'gh_aiohttp.GitHubAPI', (['session', 'user'], {'oauth_token': "access_token['token']"}), "(session, user, oauth_token=access_token['token'])\n", (9391, 9441), True, 'from gidgethub import aiohttp as gh_aiohttp\n'), ((1936, 1962), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(7)'}), '(days=7)\n', (1954, 1962), False, 'import datetime\n'), ((2001, 2028), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(30)'}), '(days=30)\n', (2019, 2028), False, 'import datetime\n'), ((3323, 3349), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(7)'}), '(days=7)\n', (3341, 3349), False, 'import datetime\n'), ((9061, 9092), 'utils.auth.get_installation', 'get_installation', (['gh', 'jwt', 'user'], {}), '(gh, jwt, user)\n', (9077, 9092), False, 'from utils.auth import get_jwt, get_installation, get_installation_access_token\n'), ((9195, 9273), 'utils.auth.get_installation_access_token', 'get_installation_access_token', (['gh'], {'jwt': 'jwt', 'installation_id': "installation['id']"}), "(gh, jwt=jwt, installation_id=installation['id'])\n", (9224, 9273), False, 'from utils.auth import get_jwt, get_installation, get_installation_access_token\n'), ((3607, 3683), 'requests.get', 'requests.get', (['status_url'], {'headers': "{'authorization': 'token xxx'}", 'timeout': '(15)'}), "(status_url, headers={'authorization': 'token xxx'}, 
timeout=15)\n", (3619, 3683), False, 'import requests\n'), ((7324, 7340), 'time.localtime', 'time.localtime', ([], {}), '()\n', (7338, 7340), False, 'import time\n'), ((6176, 6195), 'utils.test_auth_ipipe.xlyOpenApiRequest', 'xlyOpenApiRequest', ([], {}), '()\n', (6193, 6195), False, 'from utils.test_auth_ipipe import xlyOpenApiRequest\n')] |
from .abstract import AbstractAgentBasedModel
import keras.backend as K
import numpy as np
from tensorflow import TensorShape
from keras.layers import Dense, Reshape
class TrajectorySamplerNetwork(AbstractAgentBasedModel):
    '''
    Supervised model. Takes in a set of trajectories from the current state;
    learns a distribution that will regenerate these given some source of
    noise.
    Essentially, our goal is to minimize the average error between the whole
    set of trajectories and our samples.
    '''
    def __init__(self):
        # Placeholder constructor: no model is built yet.
        # NOTE(review): does not call AbstractAgentBasedModel.__init__ —
        # confirm the base class tolerates being skipped before extending this.
        pass
def AddSamplerLayer(x, num_samples, traj_length, feature_size, activation=None):
    '''
    Append a dense projection that emits a whole batch of sampled
    trajectories, reshaped to [num_samples, traj_length, feature_size].

    Parameters:
        x: input tensor
        num_samples: number of trajectories to generate
        traj_length: how many points we want to sample in each trajectory
        feature_size: dimensionality of each trajectory point
        activation: optional activation function applied after the dense layer
    '''
    flat_size = num_samples * traj_length * feature_size
    out = Dense(flat_size)(x)
    if activation is not None:
        out = activation(out)
    return Reshape((num_samples, traj_length, feature_size))(out)
class TrajectorySamplerLoss(object):
    """Loss comparing sampled trajectories against a single target trajectory.

    Pred must be of size [batch, num_samples, traj_length, feature_size];
    target is [batch, traj_length, feature_size] and is broadcast across the
    sample dimension. The loss is the per-point L2 distance, summed along
    each trajectory and averaged over samples.
    """

    def __init__(self, num_samples, traj_length, feature_size, acc_cost=None):
        self.num_samples = num_samples
        self.traj_length = traj_length
        self.feature_size = feature_size
        # Optional scalar weight for an acceleration (smoothness) penalty.
        self.acc_cost = acc_cost
        self.__name__ = "trajectory_sampler_loss"

    def __call__(self, target, pred):
        '''
        Pred must be of size:
            [batch_size=None, num_samples, traj_length, feature_size]
        Targets must be of size:
            [batch_size=None, traj_length, feature_size]
        '''
        # NOTE: cannot tile here, because target and pred have to be the same
        # size in Keras; broadcasting handles the extra sample dimension.
        x = K.square(target - pred)
        # sum along each output dimension for each point
        x = K.sum(x, axis=-1, keepdims=False)
        # square root and sum along each trajectory
        x = K.sum(K.sqrt(x), axis=2, keepdims=False)
        # mean across each sample
        x = K.mean(x, axis=1, keepdims=False)
        if self.acc_cost is not None:
            # FIX: the previous code returned `x + cost` with `cost` never
            # computed (NameError). Penalize large accelerations — second
            # differences along the trajectory axis — to encourage smooth
            # samples. NOTE(review): assumes acc_cost is a scalar weight and
            # traj_length >= 3 — confirm against callers.
            acc = (pred[:, :, 2:, :] - 2.0 * pred[:, :, 1:-1, :]
                   + pred[:, :, :-2, :])
            cost = K.sum(K.sum(K.square(acc), axis=-1), axis=-1)
            cost = self.acc_cost * K.mean(cost, axis=1, keepdims=False)
            return x + cost
        else:
            return x
| [
"keras.backend.sum",
"keras.backend.sqrt",
"keras.backend.mean",
"keras.backend.square",
"keras.layers.Dense",
"keras.layers.Reshape"
] | [((1029, 1076), 'keras.layers.Dense', 'Dense', (['(num_samples * traj_length * feature_size)'], {}), '(num_samples * traj_length * feature_size)\n', (1034, 1076), False, 'from keras.layers import Dense, Reshape\n'), ((1145, 1194), 'keras.layers.Reshape', 'Reshape', (['(num_samples, traj_length, feature_size)'], {}), '((num_samples, traj_length, feature_size))\n', (1152, 1194), False, 'from keras.layers import Dense, Reshape\n'), ((2247, 2270), 'keras.backend.square', 'K.square', (['(target - pred)'], {}), '(target - pred)\n', (2255, 2270), True, 'import keras.backend as K\n'), ((2340, 2373), 'keras.backend.sum', 'K.sum', (['x'], {'axis': '(-1)', 'keepdims': '(False)'}), '(x, axis=-1, keepdims=False)\n', (2345, 2373), True, 'import keras.backend as K\n'), ((2565, 2598), 'keras.backend.mean', 'K.mean', (['x'], {'axis': '(1)', 'keepdims': '(False)'}), '(x, axis=1, keepdims=False)\n', (2571, 2598), True, 'import keras.backend as K\n'), ((2442, 2451), 'keras.backend.sqrt', 'K.sqrt', (['x'], {}), '(x)\n', (2448, 2451), True, 'import keras.backend as K\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" Start Pymol and Bokeh server """
from __future__ import print_function
import time
import shlex
import subprocess
__author__ = "<NAME>"
__copyright__ = "Copyright 2016, <NAME>"
__lience__ = "MIT"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
def execute_command(cmd_line):
    """Run *cmd_line* as a subprocess and return its (stdout, stderr) bytes."""
    argv = shlex.split(cmd_line)
    proc = subprocess.Popen(argv,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    stdout_data, stderr_data = proc.communicate()
    return stdout_data, stderr_data
def start_screen_command(cmd, session_name):
    """Launch *cmd* inside a new detached GNU screen session."""
    return execute_command("screen -d -m -S %s %s" % (session_name, cmd))
def stop_screen_command(session_name):
    """Terminate the GNU screen session called *session_name*."""
    return execute_command("screen -S %s -X quit" % session_name)
def main():
    """Start Bokeh and PyMOL in detached screen sessions; clean up on exit."""
    try:
        start_screen_command("bokeh serve", "visu_bokeh")
        start_screen_command("pymol -R", "visu_pymol")
        # Dirty hack: keep this supervisor alive while both sessions run.
        while True:
            time.sleep(3600)
    except KeyboardInterrupt:
        pass
    finally:
        # Kill all screen sessions on the way out.
        for session in ("visu_bokeh", "visu_pymol"):
            stop_screen_command(session)
if __name__ == "__main__":
main()
| [
"shlex.split",
"subprocess.Popen",
"time.sleep"
] | [((343, 364), 'shlex.split', 'shlex.split', (['cmd_line'], {}), '(cmd_line)\n', (354, 364), False, 'import shlex\n'), ((373, 443), 'subprocess.Popen', 'subprocess.Popen', (['args'], {'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE'}), '(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n', (389, 443), False, 'import subprocess\n'), ((1062, 1078), 'time.sleep', 'time.sleep', (['(3600)'], {}), '(3600)\n', (1072, 1078), False, 'import time\n')] |
import numpy as np
from rampwf.utils import BaseGenerativeRegressor
class GenerativeRegressor(BaseGenerativeRegressor):
    """Baseline generative regressor: one degenerate Gaussian N(10, 0) per sample."""

    def __init__(self, max_dists, target_dim):
        # One target dimension at a time, conditioned on the previous ones.
        self.decomposition = 'autoregressive'

    def fit(self, X_array, y_array):
        """Nothing to learn for a constant predictor."""
        pass

    def predict(self, X_array):
        """Return a single-component Gaussian with mean 10 and sigma 0."""
        n_samples = X_array.shape[0]
        types = ['norm']
        means = np.full((n_samples, 1), 10)
        sigmas = np.zeros((n_samples, 1))
        params = np.concatenate([means, sigmas], axis=1)
        weights = np.ones((n_samples, 1))
        return weights, types, params
| [
"numpy.full",
"numpy.zeros",
"numpy.ones",
"numpy.concatenate"
] | [((431, 475), 'numpy.full', 'np.full', ([], {'shape': '(n_samples, 1)', 'fill_value': '(10)'}), '(shape=(n_samples, 1), fill_value=10)\n', (438, 475), True, 'import numpy as np\n'), ((493, 517), 'numpy.zeros', 'np.zeros', (['(n_samples, 1)'], {}), '((n_samples, 1))\n', (501, 517), True, 'import numpy as np\n'), ((536, 575), 'numpy.concatenate', 'np.concatenate', (['(means, sigmas)'], {'axis': '(1)'}), '((means, sigmas), axis=1)\n', (550, 575), True, 'import numpy as np\n'), ((594, 617), 'numpy.ones', 'np.ones', (['(n_samples, 1)'], {}), '((n_samples, 1))\n', (601, 617), True, 'import numpy as np\n')] |
#!/usr/bin/python
import argparse
from src.SCXML_Parser.Scxml_parsor import Scxml_parsor
from src.arduino_helper.generate_fsm import generate_fsm
# Build the CLI: -f selects the SCXML state-machine description (default fsm.xml).
parser = argparse.ArgumentParser()
parser.add_argument('-f', action='store', dest='file', type=str, required=False, default="fsm.xml")
inargs = parser.parse_args()
print ("Beginning of the arduino fsm generator")
# NOTE(review): 'parser' is rebound from the argparse parser to the SCXML
# parser here — consider a distinct name if this module is ever imported.
parser = Scxml_parsor(inargs.file)
# Emit the Arduino FSM sources from the parsed state machine.
generate_fsm(parser)
print("End")
"src.arduino_helper.generate_fsm.generate_fsm",
"argparse.ArgumentParser",
"src.SCXML_Parser.Scxml_parsor.Scxml_parsor"
] | [((157, 182), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (180, 182), False, 'import argparse\n'), ((372, 397), 'src.SCXML_Parser.Scxml_parsor.Scxml_parsor', 'Scxml_parsor', (['inargs.file'], {}), '(inargs.file)\n', (384, 397), False, 'from src.SCXML_Parser.Scxml_parsor import Scxml_parsor\n'), ((399, 419), 'src.arduino_helper.generate_fsm.generate_fsm', 'generate_fsm', (['parser'], {}), '(parser)\n', (411, 419), False, 'from src.arduino_helper.generate_fsm import generate_fsm\n')] |
"""
Generate Validation Certificate bases on Azure IoT Hub Verification Code
Based on sample code from the cryptography library docs:
https://cryptography.io/en/latest/x509/tutorial/#creating-a-self-signed-certificate
"""
import datetime
from pathlib import Path
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives import serialization
from cryptography import x509
from cryptography.x509.oid import NameOID
from config import AZURE_IOT_VERIFICATION_CODE, COMPANY_INFO, PASSPHRASE, PATH_TO_CERTS, VALID_DAYS
# For each configured key, self-sign a certificate whose Common Name is the
# Azure IoT Hub verification code, proving ownership of the private key.
for key_name in PASSPHRASE.keys():
    # FIX: read the key inside a context manager — the file handle was
    # previously opened via open(...).read() and never closed.
    with open(f"{PATH_TO_CERTS}{key_name}/key.pem", "rb") as key_file:
        pem_data = key_file.read()
    key = serialization.load_pem_private_key(pem_data, PASSPHRASE[key_name])
    # Issuer is our own company info (self-signed certificate).
    issuer = x509.Name(
        [
            x509.NameAttribute(NameOID.COUNTRY_NAME, COMPANY_INFO['COUNTRY']),
            x509.NameAttribute(NameOID.STATE_OR_PROVINCE_NAME, COMPANY_INFO['STATE']),
            x509.NameAttribute(NameOID.LOCALITY_NAME, COMPANY_INFO['CITY']),
            x509.NameAttribute(NameOID.ORGANIZATION_NAME, COMPANY_INFO['NAME']),
        ]
    )
    # Azure requires the verification code as the subject Common Name.
    subject = x509.Name(
        [x509.NameAttribute(NameOID.COMMON_NAME, AZURE_IOT_VERIFICATION_CODE[key_name]),]
    )
    cert = (
        x509.CertificateBuilder()
        .subject_name(subject)
        .issuer_name(issuer)
        .public_key(key.public_key())
        .serial_number(x509.random_serial_number())
        .not_valid_before(datetime.datetime.utcnow())
        .not_valid_after(
            datetime.datetime.utcnow()
            + datetime.timedelta(days=VALID_DAYS)
        )
        .sign(key, hashes.SHA256())
    )
    path = Path(f"{PATH_TO_CERTS}{key_name}")
    path.mkdir(parents=True, exist_ok=True)
    filename = f"{path}/validation_certificate.pem"
    with open(filename, "wb") as f:
        f.write(cert.public_bytes(serialization.Encoding.PEM))
    print(f"{key_name.capitalize()} Validation Cert: (unknown)")
| [
"config.PASSPHRASE.keys",
"cryptography.x509.random_serial_number",
"pathlib.Path",
"datetime.datetime.utcnow",
"cryptography.x509.CertificateBuilder",
"cryptography.hazmat.primitives.hashes.SHA256",
"cryptography.hazmat.primitives.serialization.load_pem_private_key",
"datetime.timedelta",
"cryptogr... | [((571, 588), 'config.PASSPHRASE.keys', 'PASSPHRASE.keys', ([], {}), '()\n', (586, 588), False, 'from config import AZURE_IOT_VERIFICATION_CODE, COMPANY_INFO, PASSPHRASE, PATH_TO_CERTS, VALID_DAYS\n'), ((671, 737), 'cryptography.hazmat.primitives.serialization.load_pem_private_key', 'serialization.load_pem_private_key', (['pem_data', 'PASSPHRASE[key_name]'], {}), '(pem_data, PASSPHRASE[key_name])\n', (705, 737), False, 'from cryptography.hazmat.primitives import serialization\n'), ((1665, 1699), 'pathlib.Path', 'Path', (['f"""{PATH_TO_CERTS}{key_name}"""'], {}), "(f'{PATH_TO_CERTS}{key_name}')\n", (1669, 1699), False, 'from pathlib import Path\n'), ((1631, 1646), 'cryptography.hazmat.primitives.hashes.SHA256', 'hashes.SHA256', ([], {}), '()\n', (1644, 1646), False, 'from cryptography.hazmat.primitives import hashes\n'), ((785, 850), 'cryptography.x509.NameAttribute', 'x509.NameAttribute', (['NameOID.COUNTRY_NAME', "COMPANY_INFO['COUNTRY']"], {}), "(NameOID.COUNTRY_NAME, COMPANY_INFO['COUNTRY'])\n", (803, 850), False, 'from cryptography import x509\n'), ((864, 937), 'cryptography.x509.NameAttribute', 'x509.NameAttribute', (['NameOID.STATE_OR_PROVINCE_NAME', "COMPANY_INFO['STATE']"], {}), "(NameOID.STATE_OR_PROVINCE_NAME, COMPANY_INFO['STATE'])\n", (882, 937), False, 'from cryptography import x509\n'), ((951, 1014), 'cryptography.x509.NameAttribute', 'x509.NameAttribute', (['NameOID.LOCALITY_NAME', "COMPANY_INFO['CITY']"], {}), "(NameOID.LOCALITY_NAME, COMPANY_INFO['CITY'])\n", (969, 1014), False, 'from cryptography import x509\n'), ((1028, 1095), 'cryptography.x509.NameAttribute', 'x509.NameAttribute', (['NameOID.ORGANIZATION_NAME', "COMPANY_INFO['NAME']"], {}), "(NameOID.ORGANIZATION_NAME, COMPANY_INFO['NAME'])\n", (1046, 1095), False, 'from cryptography import x509\n'), ((1148, 1226), 'cryptography.x509.NameAttribute', 'x509.NameAttribute', (['NameOID.COMMON_NAME', 'AZURE_IOT_VERIFICATION_CODE[key_name]'], {}), '(NameOID.COMMON_NAME, 
AZURE_IOT_VERIFICATION_CODE[key_name])\n', (1166, 1226), False, 'from cryptography import x509\n'), ((1525, 1551), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (1549, 1551), False, 'import datetime\n'), ((1566, 1601), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': 'VALID_DAYS'}), '(days=VALID_DAYS)\n', (1584, 1601), False, 'import datetime\n'), ((1459, 1485), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (1483, 1485), False, 'import datetime\n'), ((1404, 1431), 'cryptography.x509.random_serial_number', 'x509.random_serial_number', ([], {}), '()\n', (1429, 1431), False, 'from cryptography import x509\n'), ((1257, 1282), 'cryptography.x509.CertificateBuilder', 'x509.CertificateBuilder', ([], {}), '()\n', (1280, 1282), False, 'from cryptography import x509\n')] |
import numpy as np
import tensorflow as tf
def deconv_layer(output_shape, filter_shape, activation, strides, name):
    """Build a transposed-convolution layer and return it as a closure.

    output_shape excludes the batch dimension; filter_shape is
    (height, width, out_channels, in_channels) as conv2d_transpose expects.
    """
    scale = 1.0 / np.prod(filter_shape[:3])
    seed = int(np.random.randint(0, 1000))  # 123
    with tf.name_scope('conv_mnist/conv'):
        init = tf.random_uniform(filter_shape, minval=-scale, maxval=scale,
                                 dtype=tf.float32, seed=seed)
        W = tf.Variable(init, name=name + '_W')
        # one bias per output channel (index -2 in a transpose filter)
        b = tf.Variable(tf.zeros([filter_shape[-2]]), name=name + '_b')

    def apply(x):
        batch = x.get_shape().as_list()[0]
        full_output_shape = (batch,) + output_shape
        pre_act = tf.nn.conv2d_transpose(x, W, full_output_shape, strides, 'SAME') + b
        if activation == 'relu':
            return tf.nn.relu(pre_act)
        if activation == 'sigmoid':
            return tf.nn.sigmoid(pre_act)
        if activation == 'linear':
            return pre_act

    return apply
def generator(dimH=500, dimZ=32, name='generator'):
    """Build the MNIST decoder: MLP from z, then upsampling deconvolutions.

    dimH: hidden width of the MLP; dimZ: latent dimension.
    Returns a closure apply(z) mapping a latent batch to 28x28x1 images.
    """
    # now construct a decoder
    input_shape = (28, 28, 1)
    filter_width = 5
    # Spatial shapes the deconv stack passes through, ending at the image.
    decoder_input_shape = [(4, 4, 32), (7, 7, 32), (14, 14, 16)]
    decoder_input_shape.append(input_shape)
    fc_layers = [dimZ, dimH, int(np.prod(decoder_input_shape[0]))]
    l = 0
    # first include the MLP
    mlp_layers = []
    N_layers = len(fc_layers) - 1
    for i in np.arange(0, N_layers):
        name_layer = name + '_mlp_l%d' % l
        mlp_layers.append(mlp_layer(fc_layers[i], fc_layers[i + 1], 'relu', name_layer))
        l += 1
    conv_layers = []
    N_layers = len(decoder_input_shape) - 1
    for i in np.arange(0, N_layers):
        # linear output on the last layer, relu elsewhere
        if i < N_layers - 1:
            activation = 'relu'
        else:
            activation = 'linear'
        name_layer = name + '_conv_l%d' % l
        output_shape = decoder_input_shape[i + 1]
        input_shape = decoder_input_shape[i]
        # stride = upsampling factor between consecutive spatial shapes
        up_height = int(np.ceil(output_shape[0] / float(input_shape[0])))
        up_width = int(np.ceil(output_shape[1] / float(input_shape[1])))
        strides = (1, up_height, up_width, 1)
        filter_shape = (filter_width, filter_width, output_shape[-1], input_shape[-1])
        conv_layers.append(deconv_layer(output_shape, filter_shape, activation, \
                                        strides, name_layer))
        l += 1
    print('decoder architecture', fc_layers, 'reshape', decoder_input_shape)
    def apply(z):
        # MLP first, then reshape the flat vector to the first feature map.
        x = z
        for layer in mlp_layers:
            x = layer(x)
        x = tf.reshape(x, (x.get_shape().as_list()[0],) + decoder_input_shape[0])
        for layer in conv_layers:
            x = layer(x)
        return x
    return apply
def init_weights(input_size, output_size, constant=1.0, seed=123):
    """ Glorot and Bengio, 2010's initialization of network weights.

    output_size > 0 yields a 2-D weight matrix; output_size == 0 yields a
    1-D tensor of length input_size.
    """
    limit = constant * np.sqrt(6.0 / (input_size + output_size))
    if output_size > 0:
        shape = (input_size, output_size)
    else:
        shape = [input_size]
    return tf.random_uniform(shape, minval=-limit, maxval=limit,
                             dtype=tf.float32, seed=seed)
def mlp_layer(d_in, d_out, activation, name):
    """Build a fully connected layer and return it as a closure."""
    with tf.name_scope('conv_mnist/mlp'):
        weights = tf.Variable(init_weights(d_in, d_out), name=name + '_W')
        bias = tf.Variable(tf.zeros([d_out]), name=name + '_b')

    def apply_layer(x):
        pre_act = tf.matmul(x, weights) + bias
        if activation == 'relu':
            return tf.nn.relu(pre_act)
        if activation == 'sigmoid':
            return tf.nn.sigmoid(pre_act)
        if activation == 'linear':
            return pre_act

    return apply_layer
def get_parameters():
    """Return every trainable variable created under the 'conv_mnist' scope."""
    scope = 'conv_mnist'
    return tf.trainable_variables(scope)
################################## Conv Encoder ##############################
def conv_layer(filter_shape, activation, strides, name):
    """Build a strided 2-D convolution layer and return it as a closure.

    filter_shape is (height, width, in_channels, out_channels).
    """
    scale = 1.0 / np.prod(filter_shape[:3])
    seed = int(np.random.randint(0, 1000))  # 123
    init = tf.random_uniform(filter_shape, minval=-scale, maxval=scale,
                             dtype=tf.float32, seed=seed)
    W = tf.Variable(init, name=name + '_W')
    # one bias per output channel
    b = tf.Variable(tf.zeros([filter_shape[-1]]), name=name + '_b')

    def apply(x):
        pre_act = tf.nn.conv2d(x, W, strides, 'SAME') + b
        if activation == 'relu':
            return tf.nn.relu(pre_act)
        if activation == 'sigmoid':
            return tf.nn.sigmoid(pre_act)
        if activation == 'linear':
            return pre_act

    return apply
def construct_filter_shapes(layer_channels, filter_width=5):
    """Return one (channels, width, width) shape tuple per channel count."""
    return [(n_channel, filter_width, filter_width)
            for n_channel in layer_channels]
def encoder_convnet(input_shape, dimH=500, dimZ=32, name='conv_encoder'):
    """Build the convolutional encoder; apply(x) returns (mu, log_sig).

    Three stride-2 convolutions, then an MLP whose final width is 2*dimZ so
    the output can be split into the Gaussian mean and log-sigma.
    """
    # encoder for z (low res)
    layer_channels = [input_shape[-1], 16, 32, 32]
    filter_width = 5
    fc_layer_sizes = [dimH]
    conv_layers = []
    N_layers = len(layer_channels) - 1
    strides = (1, 2, 2, 1)
    activation = 'relu'
    l = 0
    print_shapes = []
    for i in range(N_layers):
        name_layer = name + '_conv_l%d' % l
        filter_shape = (filter_width, filter_width, layer_channels[i], layer_channels[i + 1])
        print_shapes.append(filter_shape)
        conv_layers.append(conv_layer(filter_shape, activation, strides, name_layer))
        l += 1
    # fc_layer = [int(np.prod(filter_shape)), dimH, dimZ * 2]
    # NOTE(review): 512 is hard-coded in place of the commented computation —
    # presumably 4*4*32 for a 28x28 input; confirm before changing input_shape.
    fc_layer = [512, dimH, dimZ*2]
    print(fc_layer)
    enc_mlp = []
    for i in range(len(fc_layer) - 1):
        # linear on the last MLP layer so mu/log_sig are unconstrained
        if i + 2 < len(fc_layer):
            activation = 'relu'
        else:
            activation = 'linear'
        name_layer = name + '_mlp_l%d' % l
        enc_mlp.append(mlp_layer2(fc_layer[i], fc_layer[i + 1], activation, name_layer))
        print(fc_layer[i], fc_layer[i + 1])
        l += 1
    print('encoder architecture', print_shapes, 'reshape', fc_layer)
    def apply(x):
        out = x
        for layer in conv_layers:
            out = layer(out)
            print(out)
        # flatten before the MLP
        out = tf.reshape(out, (out.get_shape().as_list()[0], -1))
        print(out)
        for layer in enc_mlp:
            out = layer(out)
        # first half is the mean, second half the log standard deviation
        mu, log_sig = tf.split(out, 2, axis=1)
        return mu, log_sig
    return apply
def mlp_layer2(d_in, d_out, activation, name):
    """Fully connected layer under the 'conv_mnist/mlp2' name scope.

    NOTE(review): duplicates mlp_layer except for the scope name.
    """
    with tf.name_scope('conv_mnist/mlp2'):
        weights = tf.Variable(init_weights(d_in, d_out), name=name + '_W')
        bias = tf.Variable(tf.zeros([d_out]), name=name + '_b')

    def apply_layer(x):
        pre_act = tf.matmul(x, weights) + bias
        if activation == 'relu':
            return tf.nn.relu(pre_act)
        if activation == 'sigmoid':
            return tf.nn.sigmoid(pre_act)
        if activation == 'linear':
            return pre_act

    return apply_layer
def sample_gaussian(mu, log_sig):
    """Reparameterized draw from N(mu, exp(log_sig)**2)."""
    noise = tf.random_normal(mu.get_shape())
    return mu + tf.exp(log_sig) * noise
| [
"tensorflow.nn.conv2d",
"numpy.prod",
"numpy.sqrt",
"tensorflow.nn.relu",
"tensorflow.split",
"tensorflow.random_uniform",
"numpy.random.randint",
"tensorflow.nn.sigmoid",
"tensorflow.name_scope",
"tensorflow.matmul",
"tensorflow.nn.conv2d_transpose",
"tensorflow.trainable_variables",
"tenso... | [((1354, 1376), 'numpy.arange', 'np.arange', (['(0)', 'N_layers'], {}), '(0, N_layers)\n', (1363, 1376), True, 'import numpy as np\n'), ((1604, 1626), 'numpy.arange', 'np.arange', (['(0)', 'N_layers'], {}), '(0, N_layers)\n', (1613, 1626), True, 'import numpy as np\n'), ((3784, 3820), 'tensorflow.trainable_variables', 'tf.trainable_variables', (['"""conv_mnist"""'], {}), "('conv_mnist')\n", (3806, 3820), True, 'import tensorflow as tf\n'), ((136, 161), 'numpy.prod', 'np.prod', (['filter_shape[:3]'], {}), '(filter_shape[:3])\n', (143, 161), True, 'import numpy as np\n'), ((177, 203), 'numpy.random.randint', 'np.random.randint', (['(0)', '(1000)'], {}), '(0, 1000)\n', (194, 203), True, 'import numpy as np\n'), ((221, 253), 'tensorflow.name_scope', 'tf.name_scope', (['"""conv_mnist/conv"""'], {}), "('conv_mnist/conv')\n", (234, 253), True, 'import tensorflow as tf\n'), ((2824, 2865), 'numpy.sqrt', 'np.sqrt', (['(6.0 / (input_size + output_size))'], {}), '(6.0 / (input_size + output_size))\n', (2831, 2865), True, 'import numpy as np\n'), ((2905, 3011), 'tensorflow.random_uniform', 'tf.random_uniform', (['(input_size, output_size)'], {'minval': '(-scale)', 'maxval': 'scale', 'dtype': 'tf.float32', 'seed': 'seed'}), '((input_size, output_size), minval=-scale, maxval=scale,\n dtype=tf.float32, seed=seed)\n', (2922, 3011), True, 'import tensorflow as tf\n'), ((3099, 3193), 'tensorflow.random_uniform', 'tf.random_uniform', (['[input_size]'], {'minval': '(-scale)', 'maxval': 'scale', 'dtype': 'tf.float32', 'seed': 'seed'}), '([input_size], minval=-scale, maxval=scale, dtype=tf.\n float32, seed=seed)\n', (3116, 3193), True, 'import tensorflow as tf\n'), ((3312, 3343), 'tensorflow.name_scope', 'tf.name_scope', (['"""conv_mnist/mlp"""'], {}), "('conv_mnist/mlp')\n", (3325, 3343), True, 'import tensorflow as tf\n'), ((3977, 4002), 'numpy.prod', 'np.prod', (['filter_shape[:3]'], {}), '(filter_shape[:3])\n', (3984, 4002), True, 'import numpy as np\n'), ((4018, 4044), 
'numpy.random.randint', 'np.random.randint', (['(0)', '(1000)'], {}), '(0, 1000)\n', (4035, 4044), True, 'import numpy as np\n'), ((4073, 4167), 'tensorflow.random_uniform', 'tf.random_uniform', (['filter_shape'], {'minval': '(-scale)', 'maxval': 'scale', 'dtype': 'tf.float32', 'seed': 'seed'}), '(filter_shape, minval=-scale, maxval=scale, dtype=tf.\n float32, seed=seed)\n', (4090, 4167), True, 'import tensorflow as tf\n'), ((4278, 4306), 'tensorflow.zeros', 'tf.zeros', (['[filter_shape[-1]]'], {}), '([filter_shape[-1]])\n', (4286, 4306), True, 'import tensorflow as tf\n'), ((6346, 6370), 'tensorflow.split', 'tf.split', (['out', '(2)'], {'axis': '(1)'}), '(out, 2, axis=1)\n', (6354, 6370), True, 'import tensorflow as tf\n'), ((6473, 6505), 'tensorflow.name_scope', 'tf.name_scope', (['"""conv_mnist/mlp2"""'], {}), "('conv_mnist/mlp2')\n", (6486, 6505), True, 'import tensorflow as tf\n'), ((279, 373), 'tensorflow.random_uniform', 'tf.random_uniform', (['filter_shape'], {'minval': '(-scale)', 'maxval': 'scale', 'dtype': 'tf.float32', 'seed': 'seed'}), '(filter_shape, minval=-scale, maxval=scale, dtype=tf.\n float32, seed=seed)\n', (296, 373), True, 'import tensorflow as tf\n'), ((489, 517), 'tensorflow.zeros', 'tf.zeros', (['[filter_shape[-2]]'], {}), '([filter_shape[-2]])\n', (497, 517), True, 'import tensorflow as tf\n'), ((660, 721), 'tensorflow.nn.conv2d_transpose', 'tf.nn.conv2d_transpose', (['x', 'W', 'output_shape_x', 'strides', '"""SAME"""'], {}), "(x, W, output_shape_x, strides, 'SAME')\n", (682, 721), True, 'import tensorflow as tf\n'), ((778, 791), 'tensorflow.nn.relu', 'tf.nn.relu', (['a'], {}), '(a)\n', (788, 791), True, 'import tensorflow as tf\n'), ((847, 863), 'tensorflow.nn.sigmoid', 'tf.nn.sigmoid', (['a'], {}), '(a)\n', (860, 863), True, 'import tensorflow as tf\n'), ((1215, 1246), 'numpy.prod', 'np.prod', (['decoder_input_shape[0]'], {}), '(decoder_input_shape[0])\n', (1222, 1246), True, 'import numpy as np\n'), ((3438, 3455), 'tensorflow.zeros', 
'tf.zeros', (['[d_out]'], {}), '([d_out])\n', (3446, 3455), True, 'import tensorflow as tf\n'), ((3512, 3527), 'tensorflow.matmul', 'tf.matmul', (['x', 'W'], {}), '(x, W)\n', (3521, 3527), True, 'import tensorflow as tf\n'), ((3584, 3597), 'tensorflow.nn.relu', 'tf.nn.relu', (['a'], {}), '(a)\n', (3594, 3597), True, 'import tensorflow as tf\n'), ((3653, 3669), 'tensorflow.nn.sigmoid', 'tf.nn.sigmoid', (['a'], {}), '(a)\n', (3666, 3669), True, 'import tensorflow as tf\n'), ((4357, 4392), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', (['x', 'W', 'strides', '"""SAME"""'], {}), "(x, W, strides, 'SAME')\n", (4369, 4392), True, 'import tensorflow as tf\n'), ((4449, 4462), 'tensorflow.nn.relu', 'tf.nn.relu', (['a'], {}), '(a)\n', (4459, 4462), True, 'import tensorflow as tf\n'), ((4518, 4534), 'tensorflow.nn.sigmoid', 'tf.nn.sigmoid', (['a'], {}), '(a)\n', (4531, 4534), True, 'import tensorflow as tf\n'), ((6600, 6617), 'tensorflow.zeros', 'tf.zeros', (['[d_out]'], {}), '([d_out])\n', (6608, 6617), True, 'import tensorflow as tf\n'), ((6674, 6689), 'tensorflow.matmul', 'tf.matmul', (['x', 'W'], {}), '(x, W)\n', (6683, 6689), True, 'import tensorflow as tf\n'), ((6746, 6759), 'tensorflow.nn.relu', 'tf.nn.relu', (['a'], {}), '(a)\n', (6756, 6759), True, 'import tensorflow as tf\n'), ((6815, 6831), 'tensorflow.nn.sigmoid', 'tf.nn.sigmoid', (['a'], {}), '(a)\n', (6828, 6831), True, 'import tensorflow as tf\n'), ((6964, 6979), 'tensorflow.exp', 'tf.exp', (['log_sig'], {}), '(log_sig)\n', (6970, 6979), True, 'import tensorflow as tf\n')] |
import random
# Collect the four student names from the console.
# FIX: input() already returns str in Python 3 — dropped the redundant str().
a1 = input(' diga o nome do aluno 1 ')
a2 = input(' diga o nome do aluno 2 ')
a3 = input(' diga o nome do aluno 3 ')
a4 = input(' diga o nome do aluno 4 ')
lista = [a1, a2, a3, a4]
# Draw one name uniformly at random.
escolhido = random.choice(lista)
# FIX: typo in the output message ('soteado' -> 'sorteado').
print('O aluno sorteado é o aluno {}'.format(escolhido))
| [
"random.choice"
] | [((230, 250), 'random.choice', 'random.choice', (['lista'], {}), '(lista)\n', (243, 250), False, 'import random\n')] |
import typing
from collections import Counter
import numpy as np
from pytest import approx
from zero_play.connect4.game import Connect4State
from zero_play.game_state import GameState
from zero_play.heuristic import Heuristic
from zero_play.mcts_player import SearchNode, MctsPlayer, SearchManager
from zero_play.playout import Playout
from zero_play.tictactoe.state import TicTacToeState
class FirstChoiceHeuristic(Heuristic):
    """Test heuristic whose policy puts all its weight on the first valid move."""

    def get_summary(self) -> typing.Sequence[str]:
        return 'first choice',

    def analyse(self, board: GameState) -> typing.Tuple[float, np.ndarray]:
        # Value is +1 / -1 / 0 for a win, loss, or ongoing/tied position.
        policy = self.get_policy(board)
        active = board.get_active_player()
        if board.is_win(active):
            return 1.0, policy
        if board.is_win(-active):
            return -1.0, policy
        return 0.0, policy

    def get_policy(self, board: GameState):
        valid_moves = board.get_valid_moves()
        if valid_moves.any():
            chosen = np.flatnonzero(valid_moves)[0]
        else:
            chosen = 0
        policy = np.zeros_like(valid_moves)
        policy[chosen] = 1.0
        return policy
class EarlyChoiceHeuristic(FirstChoiceHeuristic):
    """ Thinks each move is 90% as good as the previous option. """

    def get_summary(self) -> typing.Sequence[str]:
        return 'early choice',

    def get_policy(self, board: GameState):
        valid_moves = board.get_valid_moves()
        if not valid_moves.any():
            # No legal moves left: spread the weight over every position.
            valid_moves = valid_moves == 0
        decay = 0.9 ** np.arange(len(valid_moves))
        raw_policy = valid_moves * decay
        return raw_policy / raw_policy.sum()
def test_repr():
    """SearchNode's repr embeds the repr of the wrapped game state."""
    board_text = """\
.O.
.X.
...
"""
    board = TicTacToeState(board_text)
    expected_repr = "SearchNode(TicTacToeState(spaces=array([[0, -1, 0], [0, 1, 0], [0, 0, 0]])))"
    node = SearchNode(board)
    node_repr = repr(node)
    assert node_repr == expected_repr


def test_eq():
    """Nodes compare by board position, not identity; other types are unequal."""
    board1 = TicTacToeState()
    board2 = TicTacToeState()
    board3 = TicTacToeState("""\
...
.X.
...
""")
    node1 = SearchNode(board1)
    node2 = SearchNode(board2)
    node3 = SearchNode(board3)
    assert node1 == node2
    assert node1 != node3
    assert node1 != 42


def test_default_board():
    """A node built from a fresh state equals another node over the same state."""
    expected_board = TicTacToeState()
    expected_node = SearchNode(expected_board)
    node = SearchNode(expected_board)
    assert expected_node == node


def test_select_leaf_self():
    """An unexpanded root is its own leaf."""
    game = TicTacToeState()
    node = SearchNode(game)
    expected_leaf = node
    leaf = node.select_leaf()
    assert expected_leaf == leaf


def test_select_first_child():
    """After one recorded value, selection descends to the first child."""
    start_state = TicTacToeState()
    expected_leaf_board = start_state.make_move(0)
    expected_leaf = SearchNode(expected_leaf_board)
    node = SearchNode(start_state)
    node.record_value(1)
    leaf = node.select_leaf()
    assert leaf == expected_leaf
    # Value is recorded from the child's perspective, so the root sees -1.
    assert node.average_value == -1.0


def test_select_second_child():
    """With the first child tried, selection explores the second child."""
    start_state = TicTacToeState()
    expected_leaf_board = start_state.make_move(1)
    expected_leaf = SearchNode(expected_leaf_board)
    node = SearchNode(start_state)
    node.select_leaf().record_value(0)
    node.select_leaf().record_value(0)
    leaf = node.select_leaf()
    assert leaf == expected_leaf
    assert node.average_value == 0


def test_select_grandchild():
    """After every child is visited once, selection reaches a grandchild."""
    start_state = TicTacToeState()
    expected_leaf_board = TicTacToeState("""\
XO.
...
...
""")
    expected_leaf = SearchNode(expected_leaf_board)
    node = SearchNode(start_state)
    for _ in range(10):
        node.select_leaf().record_value(0)
    leaf = node.select_leaf()
    assert leaf == expected_leaf
def test_select_good_grandchild():
    """Selection exploits a child that reported a winning value."""
    start_state = TicTacToeState()
    node = SearchNode(start_state)
    node.select_leaf().record_value(0)  # Root node returns itself.
    node.select_leaf().record_value(0)  # Move 0 AT 1A, value is a tie.
    node.select_leaf().record_value(-1)  # Move 1 AT 1B, value is a win.
    # Expect it to exploit the win at 1B, and try the first grandchild at 1A.
    expected_leaf_board = TicTacToeState("""\
  ABC
1 OX.
2 ...
3 ...
""")
    expected_leaf = SearchNode(expected_leaf_board)
    leaf = node.select_leaf()
    assert leaf == expected_leaf


def test_select_no_children():
    """On a nearly full board the single remaining move is selected repeatedly."""
    start_board = TicTacToeState("""\
XOX
OOX
.XO
""")
    expected_leaf_board = TicTacToeState("""\
XOX
OOX
XXO
""")
    expected_leaf = SearchNode(expected_leaf_board)
    start_node = SearchNode(start_board)
    leaf1 = start_node.select_leaf()
    leaf1.record_value(1)
    leaf2 = start_node.select_leaf()
    leaf2.record_value(1)
    leaf3 = start_node.select_leaf()
    assert leaf1 == start_node
    assert leaf2 == expected_leaf
    assert leaf3 == expected_leaf
def test_choose_move():
    """With enough iterations the player completes the winning row of X's."""
    np.random.seed(0)
    start_state = Connect4State()
    state1 = Connect4State("""\
.......
.......
.......
...XX..
OXOXO..
XOXOXOO
""")
    expected_display = """\
.......
.......
.......
..XXX..
OXOXO..
XOXOXOO
"""
    player = MctsPlayer(start_state, iteration_count=200)
    move = player.choose_move(state1)
    state2 = state1.make_move(move)
    display = state2.display()
    assert display == expected_display


def test_choose_move_in_pool():
    """A multi-process player still returns some valid move."""
    start_state = Connect4State()
    state1 = Connect4State("""\
.......
.......
.......
...XX..
OXOXO..
XOXOXOO
""")
    player = MctsPlayer(start_state, iteration_count=200, process_count=2)
    valid_moves = start_state.get_valid_moves()
    move = player.choose_move(state1)
    # Can't rely on which move, because other process has separate random seed.
    assert valid_moves[move]


def test_choose_moves_at_random():
    """ Early moves are chosen from a weighted random population. """
    np.random.seed(0)
    start_state = TicTacToeState()
    state1 = TicTacToeState("""\
...
...
X..
""")
    player = MctsPlayer(start_state,
                        iteration_count=80,
                        heuristic=EarlyChoiceHeuristic())
    moves = set()
    for _ in range(10):
        move = player.choose_move(state1)
        moves.add(move)
        player.search_manager.reset()
    # More than one distinct move shows the choice is stochastic.
    assert 1 < len(moves)


def test_choose_move_no_iterations():
    """With zero iterations the move distribution is roughly uniform."""
    np.random.seed(0)
    start_state = Connect4State()
    state1 = Connect4State("""\
.......
.......
.......
...XX..
OXOXO..
XOXOXOO
""")
    test_count = 400
    expected_count = test_count/7
    # Allow +/-10% sampling noise around the uniform expectation.
    expected_low = expected_count * 0.9
    expected_high = expected_count * 1.1
    move_counts = Counter()
    for _ in range(test_count):
        player = MctsPlayer(start_state, iteration_count=0)
        move = player.choose_move(state1)
        move_counts[move] += 1
    assert expected_low < move_counts[2] < expected_high
def test_analyse_finished_game():
    """Playout heuristic scores a tied final board as 0 with a uniform policy."""
    board = TicTacToeState("""\
OXO
XXO
XOX
""")
    heuristic = Playout()
    expected_value = 0 # A tie
    expected_policy = [1/9] * 9
    value, policy = heuristic.analyse(board)
    assert expected_value == value
    assert expected_policy == policy.tolist()


def test_search_manager_reuses_node():
    """Searching from a later state keeps accumulating on the existing node."""
    start_state = TicTacToeState()
    manager = SearchManager(start_state, Playout())
    manager.search(start_state, iterations=10)
    move = manager.get_best_move()
    state2 = start_state.make_move(move)
    node = manager.current_node
    first_value_count = node.value_count
    manager.search(state2, iterations=10)
    second_value_count = node.value_count
    assert first_value_count > 0
    assert first_value_count + 10 == second_value_count


def test_search_manager_with_opponent():
    """ Like when opponent is not sharing the SearchManager. """
    start_state = TicTacToeState()
    manager = SearchManager(start_state, Playout())
    manager.search(start_state, iterations=10)
    node = manager.current_node.children[0] # Didn't call get_best_move().
    move = 0
    state2 = start_state.make_move(move)
    first_value_count = node.value_count
    manager.search(state2, iterations=10)
    second_value_count = node.value_count
    assert first_value_count > 0
    assert first_value_count + 10 == second_value_count


def test_annotate():
    """Move probabilities report the move, probability, visit count, and value."""
    start_state = TicTacToeState()
    player = MctsPlayer(start_state,
                        iteration_count=10,
                        heuristic=FirstChoiceHeuristic())
    player.choose_move(start_state)
    move_probabilities = player.get_move_probabilities(start_state)
    best_move, best_probability, best_count, best_value = move_probabilities[0]
    assert best_move == '1A'
    assert best_probability == approx(0.999013)
    assert best_count == 9
    assert best_value == approx(2/9)
def test_create_training_data():
    """create_training_data() records self-play positions and policy/value rows.

    With FirstChoiceHeuristic the whole game is deterministic, so the seven
    recorded boards and their output vectors are known exactly. Each output
    row appears to be the one-hot chosen move (9 slots) followed by ±1 —
    presumably the final game value from the mover's perspective, since it
    alternates every ply (TODO confirm against create_training_data docs).
    """
    start_state = TicTacToeState()
    manager = SearchManager(start_state, FirstChoiceHeuristic())
    # Pairs of (board spaces, expected output row), one per ply of the game.
    expected_boards, expected_outputs = zip(*[
        [start_state.get_spaces(),
         np.array([1., 0., 0., 0., 0., 0., 0., 0., 0., -1.])],
        [TicTacToeState("""\
X..
...
...
""").get_spaces(), np.array([0., 1., 0., 0., 0., 0., 0., 0., 0., 1.])],
        [TicTacToeState("""\
XO.
...
...
""").get_spaces(), np.array([0., 0., 1., 0., 0., 0., 0., 0., 0., -1.])],
        [TicTacToeState("""\
XOX
...
...
""").get_spaces(), np.array([0., 0., 0., 1., 0., 0., 0., 0., 0., 1.])],
        [TicTacToeState("""\
XOX
O..
...
""").get_spaces(), np.array([0., 0., 0., 0., 1., 0., 0., 0., 0., -1.])],
        [TicTacToeState("""\
XOX
OX.
...
""").get_spaces(), np.array([0., 0., 0., 0., 0., 1., 0., 0., 0., 1.])],
        [TicTacToeState("""\
XOX
OXO
...
""").get_spaces(), np.array([0., 0., 0., 0., 0., 0., 1., 0., 0., -1.])]])
    # Stack into single arrays; comparing repr() checks values, shape and dtype.
    expected_boards = np.stack(expected_boards)
    expected_outputs = np.stack(expected_outputs)
    boards, outputs = manager.create_training_data(iterations=1, data_size=7)
    assert repr(boards) == repr(expected_boards)
    assert repr(outputs) == repr(expected_outputs)
def test_win_scores_one():
    """Regression test: search must not continue past a game-ending position.

    From this board X wins immediately by playing square 8, so the winning
    child node must be valued at exactly 1.0 and be the chosen move.
    """
    state1 = TicTacToeState("""\
..X
XX.
OO.
""")
    player = MctsPlayer(TicTacToeState(), state1.X_PLAYER, iteration_count=100)
    chosen_move = player.choose_move(state1)

    parent_node = player.search_manager.current_node.parent
    winning_children = [child for child in parent_node.children
                        if child.move == 8]
    for child in winning_children:
        assert child.average_value == 1.0
    assert chosen_move == 8
def test_choose_move_sets_current_node():
    """Every choose_move() should leave current_node on the resulting state."""
    np.random.seed(0)
    start_state = Connect4State()
    state1 = Connect4State("""\
.......
.......
.......
.......
OXOXOXO
XOXOXOX
""")
    player = MctsPlayer(start_state, iteration_count=20)

    # Two consecutive moves; check the manager tracked each resulting state.
    first_move = player.choose_move(state1)
    node_after_first = player.search_manager.current_node
    state2 = state1.make_move(first_move)

    second_move = player.choose_move(state2)
    node_after_second = player.search_manager.current_node
    state3 = state2.make_move(second_move)

    assert node_after_first.game_state == state2
    assert node_after_second.game_state == state3
| [
"pytest.approx",
"zero_play.playout.Playout",
"zero_play.tictactoe.state.TicTacToeState",
"zero_play.mcts_player.MctsPlayer",
"zero_play.connect4.game.Connect4State",
"collections.Counter",
"numpy.stack",
"zero_play.mcts_player.SearchNode",
"numpy.array",
"numpy.random.seed",
"numpy.nonzero",
... | [((1770, 1796), 'zero_play.tictactoe.state.TicTacToeState', 'TicTacToeState', (['board_text'], {}), '(board_text)\n', (1784, 1796), False, 'from zero_play.tictactoe.state import TicTacToeState\n'), ((1908, 1925), 'zero_play.mcts_player.SearchNode', 'SearchNode', (['board'], {}), '(board)\n', (1918, 1925), False, 'from zero_play.mcts_player import SearchNode, MctsPlayer, SearchManager\n'), ((2022, 2038), 'zero_play.tictactoe.state.TicTacToeState', 'TicTacToeState', ([], {}), '()\n', (2036, 2038), False, 'from zero_play.tictactoe.state import TicTacToeState\n'), ((2052, 2068), 'zero_play.tictactoe.state.TicTacToeState', 'TicTacToeState', ([], {}), '()\n', (2066, 2068), False, 'from zero_play.tictactoe.state import TicTacToeState\n'), ((2082, 2115), 'zero_play.tictactoe.state.TicTacToeState', 'TicTacToeState', (['"""...\n.X.\n...\n"""'], {}), "('...\\n.X.\\n...\\n')\n", (2096, 2115), False, 'from zero_play.tictactoe.state import TicTacToeState\n'), ((2132, 2150), 'zero_play.mcts_player.SearchNode', 'SearchNode', (['board1'], {}), '(board1)\n', (2142, 2150), False, 'from zero_play.mcts_player import SearchNode, MctsPlayer, SearchManager\n'), ((2163, 2181), 'zero_play.mcts_player.SearchNode', 'SearchNode', (['board2'], {}), '(board2)\n', (2173, 2181), False, 'from zero_play.mcts_player import SearchNode, MctsPlayer, SearchManager\n'), ((2194, 2212), 'zero_play.mcts_player.SearchNode', 'SearchNode', (['board3'], {}), '(board3)\n', (2204, 2212), False, 'from zero_play.mcts_player import SearchNode, MctsPlayer, SearchManager\n'), ((2338, 2354), 'zero_play.tictactoe.state.TicTacToeState', 'TicTacToeState', ([], {}), '()\n', (2352, 2354), False, 'from zero_play.tictactoe.state import TicTacToeState\n'), ((2375, 2401), 'zero_play.mcts_player.SearchNode', 'SearchNode', (['expected_board'], {}), '(expected_board)\n', (2385, 2401), False, 'from zero_play.mcts_player import SearchNode, MctsPlayer, SearchManager\n'), ((2414, 2440), 'zero_play.mcts_player.SearchNode', 
'SearchNode', (['expected_board'], {}), '(expected_board)\n', (2424, 2440), False, 'from zero_play.mcts_player import SearchNode, MctsPlayer, SearchManager\n'), ((2517, 2533), 'zero_play.tictactoe.state.TicTacToeState', 'TicTacToeState', ([], {}), '()\n', (2531, 2533), False, 'from zero_play.tictactoe.state import TicTacToeState\n'), ((2545, 2561), 'zero_play.mcts_player.SearchNode', 'SearchNode', (['game'], {}), '(game)\n', (2555, 2561), False, 'from zero_play.mcts_player import SearchNode, MctsPlayer, SearchManager\n'), ((2703, 2719), 'zero_play.tictactoe.state.TicTacToeState', 'TicTacToeState', ([], {}), '()\n', (2717, 2719), False, 'from zero_play.tictactoe.state import TicTacToeState\n'), ((2791, 2822), 'zero_play.mcts_player.SearchNode', 'SearchNode', (['expected_leaf_board'], {}), '(expected_leaf_board)\n', (2801, 2822), False, 'from zero_play.mcts_player import SearchNode, MctsPlayer, SearchManager\n'), ((2835, 2858), 'zero_play.mcts_player.SearchNode', 'SearchNode', (['start_state'], {}), '(start_state)\n', (2845, 2858), False, 'from zero_play.mcts_player import SearchNode, MctsPlayer, SearchManager\n'), ((3039, 3055), 'zero_play.tictactoe.state.TicTacToeState', 'TicTacToeState', ([], {}), '()\n', (3053, 3055), False, 'from zero_play.tictactoe.state import TicTacToeState\n'), ((3127, 3158), 'zero_play.mcts_player.SearchNode', 'SearchNode', (['expected_leaf_board'], {}), '(expected_leaf_board)\n', (3137, 3158), False, 'from zero_play.mcts_player import SearchNode, MctsPlayer, SearchManager\n'), ((3171, 3194), 'zero_play.mcts_player.SearchNode', 'SearchNode', (['start_state'], {}), '(start_state)\n', (3181, 3194), False, 'from zero_play.mcts_player import SearchNode, MctsPlayer, SearchManager\n'), ((3423, 3439), 'zero_play.tictactoe.state.TicTacToeState', 'TicTacToeState', ([], {}), '()\n', (3437, 3439), False, 'from zero_play.tictactoe.state import TicTacToeState\n'), ((3466, 3499), 'zero_play.tictactoe.state.TicTacToeState', 'TicTacToeState', 
(['"""XO.\n...\n...\n"""'], {}), "('XO.\\n...\\n...\\n')\n", (3480, 3499), False, 'from zero_play.tictactoe.state import TicTacToeState\n'), ((3523, 3554), 'zero_play.mcts_player.SearchNode', 'SearchNode', (['expected_leaf_board'], {}), '(expected_leaf_board)\n', (3533, 3554), False, 'from zero_play.mcts_player import SearchNode, MctsPlayer, SearchManager\n'), ((3567, 3590), 'zero_play.mcts_player.SearchNode', 'SearchNode', (['start_state'], {}), '(start_state)\n', (3577, 3590), False, 'from zero_play.mcts_player import SearchNode, MctsPlayer, SearchManager\n'), ((3778, 3794), 'zero_play.tictactoe.state.TicTacToeState', 'TicTacToeState', ([], {}), '()\n', (3792, 3794), False, 'from zero_play.tictactoe.state import TicTacToeState\n'), ((3806, 3829), 'zero_play.mcts_player.SearchNode', 'SearchNode', (['start_state'], {}), '(start_state)\n', (3816, 3829), False, 'from zero_play.mcts_player import SearchNode, MctsPlayer, SearchManager\n'), ((4147, 4193), 'zero_play.tictactoe.state.TicTacToeState', 'TicTacToeState', (['""" ABC\n1 OX.\n2 ...\n3 ...\n"""'], {}), '(""" ABC\n1 OX.\n2 ...\n3 ...\n""")\n', (4161, 4193), False, 'from zero_play.tictactoe.state import TicTacToeState\n'), ((4216, 4247), 'zero_play.mcts_player.SearchNode', 'SearchNode', (['expected_leaf_board'], {}), '(expected_leaf_board)\n', (4226, 4247), False, 'from zero_play.mcts_player import SearchNode, MctsPlayer, SearchManager\n'), ((4364, 4397), 'zero_play.tictactoe.state.TicTacToeState', 'TicTacToeState', (['"""XOX\nOOX\n.XO\n"""'], {}), "('XOX\\nOOX\\n.XO\\n')\n", (4378, 4397), False, 'from zero_play.tictactoe.state import TicTacToeState\n'), ((4427, 4460), 'zero_play.tictactoe.state.TicTacToeState', 'TicTacToeState', (['"""XOX\nOOX\nXXO\n"""'], {}), "('XOX\\nOOX\\nXXO\\n')\n", (4441, 4460), False, 'from zero_play.tictactoe.state import TicTacToeState\n'), ((4484, 4515), 'zero_play.mcts_player.SearchNode', 'SearchNode', (['expected_leaf_board'], {}), '(expected_leaf_board)\n', (4494, 4515), False, 
'from zero_play.mcts_player import SearchNode, MctsPlayer, SearchManager\n'), ((4534, 4557), 'zero_play.mcts_player.SearchNode', 'SearchNode', (['start_board'], {}), '(start_board)\n', (4544, 4557), False, 'from zero_play.mcts_player import SearchNode, MctsPlayer, SearchManager\n'), ((4851, 4868), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (4865, 4868), True, 'import numpy as np\n'), ((4887, 4902), 'zero_play.connect4.game.Connect4State', 'Connect4State', ([], {}), '()\n', (4900, 4902), False, 'from zero_play.connect4.game import Connect4State\n'), ((4916, 4985), 'zero_play.connect4.game.Connect4State', 'Connect4State', (['""".......\n.......\n.......\n...XX..\nOXOXO..\nXOXOXOO\n"""'], {}), '(""".......\n.......\n.......\n...XX..\nOXOXO..\nXOXOXOO\n""")\n', (4929, 4985), False, 'from zero_play.connect4.game import Connect4State\n'), ((5081, 5125), 'zero_play.mcts_player.MctsPlayer', 'MctsPlayer', (['start_state'], {'iteration_count': '(200)'}), '(start_state, iteration_count=200)\n', (5091, 5125), False, 'from zero_play.mcts_player import SearchNode, MctsPlayer, SearchManager\n'), ((5324, 5339), 'zero_play.connect4.game.Connect4State', 'Connect4State', ([], {}), '()\n', (5337, 5339), False, 'from zero_play.connect4.game import Connect4State\n'), ((5353, 5422), 'zero_play.connect4.game.Connect4State', 'Connect4State', (['""".......\n.......\n.......\n...XX..\nOXOXO..\nXOXOXOO\n"""'], {}), '(""".......\n.......\n.......\n...XX..\nOXOXO..\nXOXOXOO\n""")\n', (5366, 5422), False, 'from zero_play.connect4.game import Connect4State\n'), ((5438, 5499), 'zero_play.mcts_player.MctsPlayer', 'MctsPlayer', (['start_state'], {'iteration_count': '(200)', 'process_count': '(2)'}), '(start_state, iteration_count=200, process_count=2)\n', (5448, 5499), False, 'from zero_play.mcts_player import SearchNode, MctsPlayer, SearchManager\n'), ((5808, 5825), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (5822, 5825), True, 'import numpy as np\n'), 
((5844, 5860), 'zero_play.tictactoe.state.TicTacToeState', 'TicTacToeState', ([], {}), '()\n', (5858, 5860), False, 'from zero_play.tictactoe.state import TicTacToeState\n'), ((5874, 5907), 'zero_play.tictactoe.state.TicTacToeState', 'TicTacToeState', (['"""...\n...\nX..\n"""'], {}), "('...\\n...\\nX..\\n')\n", (5888, 5907), False, 'from zero_play.tictactoe.state import TicTacToeState\n'), ((6268, 6285), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (6282, 6285), True, 'import numpy as np\n'), ((6304, 6319), 'zero_play.connect4.game.Connect4State', 'Connect4State', ([], {}), '()\n', (6317, 6319), False, 'from zero_play.connect4.game import Connect4State\n'), ((6333, 6402), 'zero_play.connect4.game.Connect4State', 'Connect4State', (['""".......\n.......\n.......\n...XX..\nOXOXO..\nXOXOXOO\n"""'], {}), '(""".......\n.......\n.......\n...XX..\nOXOXO..\nXOXOXOO\n""")\n', (6346, 6402), False, 'from zero_play.connect4.game import Connect4State\n'), ((6559, 6568), 'collections.Counter', 'Counter', ([], {}), '()\n', (6566, 6568), False, 'from collections import Counter\n'), ((6841, 6874), 'zero_play.tictactoe.state.TicTacToeState', 'TicTacToeState', (['"""OXO\nXXO\nXOX\n"""'], {}), "('OXO\\nXXO\\nXOX\\n')\n", (6855, 6874), False, 'from zero_play.tictactoe.state import TicTacToeState\n'), ((6894, 6903), 'zero_play.playout.Playout', 'Playout', ([], {}), '()\n', (6901, 6903), False, 'from zero_play.playout import Playout\n'), ((7155, 7171), 'zero_play.tictactoe.state.TicTacToeState', 'TicTacToeState', ([], {}), '()\n', (7169, 7171), False, 'from zero_play.tictactoe.state import TicTacToeState\n'), ((7721, 7737), 'zero_play.tictactoe.state.TicTacToeState', 'TicTacToeState', ([], {}), '()\n', (7735, 7737), False, 'from zero_play.tictactoe.state import TicTacToeState\n'), ((8224, 8240), 'zero_play.tictactoe.state.TicTacToeState', 'TicTacToeState', ([], {}), '()\n', (8238, 8240), False, 'from zero_play.tictactoe.state import TicTacToeState\n'), ((8759, 8775), 
'zero_play.tictactoe.state.TicTacToeState', 'TicTacToeState', ([], {}), '()\n', (8773, 8775), False, 'from zero_play.tictactoe.state import TicTacToeState\n'), ((9690, 9715), 'numpy.stack', 'np.stack', (['expected_boards'], {}), '(expected_boards)\n', (9698, 9715), True, 'import numpy as np\n'), ((9739, 9765), 'numpy.stack', 'np.stack', (['expected_outputs'], {}), '(expected_outputs)\n', (9747, 9765), True, 'import numpy as np\n'), ((10064, 10097), 'zero_play.tictactoe.state.TicTacToeState', 'TicTacToeState', (['"""..X\nXX.\nOO.\n"""'], {}), "('..X\\nXX.\\nOO.\\n')\n", (10078, 10097), False, 'from zero_play.tictactoe.state import TicTacToeState\n'), ((10481, 10498), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (10495, 10498), True, 'import numpy as np\n'), ((10517, 10532), 'zero_play.connect4.game.Connect4State', 'Connect4State', ([], {}), '()\n', (10530, 10532), False, 'from zero_play.connect4.game import Connect4State\n'), ((10546, 10615), 'zero_play.connect4.game.Connect4State', 'Connect4State', (['""".......\n.......\n.......\n.......\nOXOXOXO\nXOXOXOX\n"""'], {}), '(""".......\n.......\n.......\n.......\nOXOXOXO\nXOXOXOX\n""")\n', (10559, 10615), False, 'from zero_play.connect4.game import Connect4State\n'), ((10631, 10674), 'zero_play.mcts_player.MctsPlayer', 'MctsPlayer', (['start_state'], {'iteration_count': '(20)'}), '(start_state, iteration_count=20)\n', (10641, 10674), False, 'from zero_play.mcts_player import SearchNode, MctsPlayer, SearchManager\n'), ((1095, 1121), 'numpy.zeros_like', 'np.zeros_like', (['valid_moves'], {}), '(valid_moves)\n', (1108, 1121), True, 'import numpy as np\n'), ((6618, 6660), 'zero_play.mcts_player.MctsPlayer', 'MctsPlayer', (['start_state'], {'iteration_count': '(0)'}), '(start_state, iteration_count=0)\n', (6628, 6660), False, 'from zero_play.mcts_player import SearchNode, MctsPlayer, SearchManager\n'), ((7213, 7222), 'zero_play.playout.Playout', 'Playout', ([], {}), '()\n', (7220, 7222), False, 'from 
zero_play.playout import Playout\n'), ((7779, 7788), 'zero_play.playout.Playout', 'Playout', ([], {}), '()\n', (7786, 7788), False, 'from zero_play.playout import Playout\n'), ((8625, 8641), 'pytest.approx', 'approx', (['(0.999013)'], {}), '(0.999013)\n', (8631, 8641), False, 'from pytest import approx\n'), ((8694, 8707), 'pytest.approx', 'approx', (['(2 / 9)'], {}), '(2 / 9)\n', (8700, 8707), False, 'from pytest import approx\n'), ((10126, 10142), 'zero_play.tictactoe.state.TicTacToeState', 'TicTacToeState', ([], {}), '()\n', (10140, 10142), False, 'from zero_play.tictactoe.state import TicTacToeState\n'), ((1006, 1029), 'numpy.nonzero', 'np.nonzero', (['valid_moves'], {}), '(valid_moves)\n', (1016, 1029), True, 'import numpy as np\n'), ((8932, 8993), 'numpy.array', 'np.array', (['[1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -1.0]'], {}), '([1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -1.0])\n', (8940, 8993), True, 'import numpy as np\n'), ((9046, 9106), 'numpy.array', 'np.array', (['[0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0]'], {}), '([0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0])\n', (9054, 9106), True, 'import numpy as np\n'), ((9159, 9220), 'numpy.array', 'np.array', (['[0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -1.0]'], {}), '([0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -1.0])\n', (9167, 9220), True, 'import numpy as np\n'), ((9273, 9333), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0]'], {}), '([0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0])\n', (9281, 9333), True, 'import numpy as np\n'), ((9386, 9447), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, -1.0]'], {}), '([0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, -1.0])\n', (9394, 9447), True, 'import numpy as np\n'), ((9500, 9560), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0]'], {}), '([0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0])\n', (9508, 9560), True, 'import numpy as np\n'), 
((9613, 9674), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, -1.0]'], {}), '([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, -1.0])\n', (9621, 9674), True, 'import numpy as np\n'), ((8995, 9028), 'zero_play.tictactoe.state.TicTacToeState', 'TicTacToeState', (['"""X..\n...\n...\n"""'], {}), "('X..\\n...\\n...\\n')\n", (9009, 9028), False, 'from zero_play.tictactoe.state import TicTacToeState\n'), ((9108, 9141), 'zero_play.tictactoe.state.TicTacToeState', 'TicTacToeState', (['"""XO.\n...\n...\n"""'], {}), "('XO.\\n...\\n...\\n')\n", (9122, 9141), False, 'from zero_play.tictactoe.state import TicTacToeState\n'), ((9222, 9255), 'zero_play.tictactoe.state.TicTacToeState', 'TicTacToeState', (['"""XOX\n...\n...\n"""'], {}), "('XOX\\n...\\n...\\n')\n", (9236, 9255), False, 'from zero_play.tictactoe.state import TicTacToeState\n'), ((9335, 9368), 'zero_play.tictactoe.state.TicTacToeState', 'TicTacToeState', (['"""XOX\nO..\n...\n"""'], {}), "('XOX\\nO..\\n...\\n')\n", (9349, 9368), False, 'from zero_play.tictactoe.state import TicTacToeState\n'), ((9449, 9482), 'zero_play.tictactoe.state.TicTacToeState', 'TicTacToeState', (['"""XOX\nOX.\n...\n"""'], {}), "('XOX\\nOX.\\n...\\n')\n", (9463, 9482), False, 'from zero_play.tictactoe.state import TicTacToeState\n'), ((9562, 9595), 'zero_play.tictactoe.state.TicTacToeState', 'TicTacToeState', (['"""XOX\nOXO\n...\n"""'], {}), "('XOX\\nOXO\\n...\\n')\n", (9576, 9595), False, 'from zero_play.tictactoe.state import TicTacToeState\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: scenes/main_menu_manager.py
# -------------------
# Divine Oasis
# Text Based RPG Game
# By wsngamerz
# -------------------
import logging
import random
from divineoasis.assets import Assets
from divineoasis.audio_manager import AudioManager
from divineoasis.scene import Scene
from divineoasis.scenes.main_menu.menu_scene import MenuScene
from divineoasis.scenes.main_menu.options_scene import OptionsScene
from pyglet.graphics import Batch, OrderedGroup
from pyglet.sprite import Sprite
from pyglet.window import Window, FPSDisplay
class MainMenu(Scene):
    """Main menu scene: hosts the menu/options sub-scenes over a scrolling background."""

    def __init__(self, assets: Assets, window: Window, audio_manager: AudioManager):
        super().__init__(assets, window, audio_manager)
        self.logger = logging.getLogger(__name__)

        # Registry of sub-scenes and the one currently receiving events.
        self.current_scene = None
        self.sub_scenes = {}
        for sub_scene in (MenuScene(self.assets, self.window, self.audio_manager),
                          OptionsScene(self.assets, self.window, self.audio_manager)):
            self.add_sub_scene(sub_scene)

        # FPS counter pinned near the top of the window, drawn in white.
        self.fps_display = FPSDisplay(self.window)
        self.fps_display.label.y = 680
        self.fps_display.label.color = (255, 255, 255, 255)

        # Scrolling background image/sprite and its current scroll offset.
        self.background_image = None
        self.background_sprite = None
        self.bg_pos = [0, 0]

    def start_scene(self):
        """Prepare audio and visuals, then hand control to the menu sub-scene."""
        self.load_audio()
        self.switch_sub_scene("MenuScene")
        self.load_background()
        self.play_audio()

    def add_sub_scene(self, scene: Scene):
        """Register a sub-scene under its class name and wire up scene switching."""
        scene.switch_sub_scene = self.switch_sub_scene
        self.sub_scenes[scene.__class__.__name__] = scene

    def load_audio(self):
        """Queue the menu soundtrack in a random order."""
        songs = [
            "menu.ove_melaa_italo_unlimited",
            "menu.ove_melaa_super_ninja_assasin",
            "menu.ove_melaa_power_of_thy_yes"
        ]
        random.shuffle(songs)
        self.logger.debug(f"Loading menu songs: { songs }")
        self.audio_manager.load_songs(songs, loop=True)

    def play_audio(self):
        """Start playback of the queued songs."""
        self.audio_manager.play_songs()

    def load_background(self):
        """Fetch the background image asset and (re)build its sprite."""
        self.background_image = self.assets.get_pyglet_image("user_interface.background")
        self.initiate_background()

    def initiate_background(self):
        """Create the background sprite inside the active sub-scene's batch."""
        if not self.background_image:
            self.load_background()

        self.background_sprite = Sprite(self.background_image, x=0, y=0,
                                         batch=self.current_scene.batch,
                                         group=self.current_scene.background)

    def switch_sub_scene(self, sub_scene_name: str):
        """Move event handlers to the named sub-scene and start it."""
        if sub_scene_name in self.sub_scenes:
            self.window.remove_handlers(self.current_scene)
            self.current_scene = self.sub_scenes[sub_scene_name]
            self.window.push_handlers(self.current_scene)

            self.initiate_background()
            self.current_scene.start_scene()

    def update(self, dt: float):
        """Scroll the background, update the active sub-scene, and draw the FPS."""
        self.bg_pos[0] -= 2
        self.bg_pos[1] -= 1
        # Wrap the scroll offset so the background loops forever.
        if self.bg_pos[0] <= -4800:
            self.bg_pos = [0, 0]
        self.background_sprite.update(self.bg_pos[0], self.bg_pos[1])

        self.current_scene.update(dt)
        self.fps_display.draw()
| [
"logging.getLogger",
"random.shuffle",
"pyglet.sprite.Sprite",
"pyglet.window.FPSDisplay",
"divineoasis.scenes.main_menu.menu_scene.MenuScene",
"divineoasis.scenes.main_menu.options_scene.OptionsScene",
"divineoasis.scene.Scene.__init__"
] | [((717, 768), 'divineoasis.scene.Scene.__init__', 'Scene.__init__', (['self', 'assets', 'window', 'audio_manager'], {}), '(self, assets, window, audio_manager)\n', (731, 768), False, 'from divineoasis.scene import Scene\n'), ((792, 819), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (809, 819), False, 'import logging\n'), ((1084, 1107), 'pyglet.window.FPSDisplay', 'FPSDisplay', (['self.window'], {}), '(self.window)\n', (1094, 1107), False, 'from pyglet.window import Window, FPSDisplay\n'), ((2039, 2060), 'random.shuffle', 'random.shuffle', (['songs'], {}), '(songs)\n', (2053, 2060), False, 'import random\n'), ((2543, 2655), 'pyglet.sprite.Sprite', 'Sprite', (['self.background_image'], {'x': '(0)', 'y': '(0)', 'batch': 'self.current_scene.batch', 'group': 'self.current_scene.background'}), '(self.background_image, x=0, y=0, batch=self.current_scene.batch,\n group=self.current_scene.background)\n', (2549, 2655), False, 'from pyglet.sprite import Sprite\n'), ((912, 967), 'divineoasis.scenes.main_menu.menu_scene.MenuScene', 'MenuScene', (['self.assets', 'self.window', 'self.audio_manager'], {}), '(self.assets, self.window, self.audio_manager)\n', (921, 967), False, 'from divineoasis.scenes.main_menu.menu_scene import MenuScene\n'), ((996, 1054), 'divineoasis.scenes.main_menu.options_scene.OptionsScene', 'OptionsScene', (['self.assets', 'self.window', 'self.audio_manager'], {}), '(self.assets, self.window, self.audio_manager)\n', (1008, 1054), False, 'from divineoasis.scenes.main_menu.options_scene import OptionsScene\n')] |
"""
Custom decorators
=================
Custom decorators for various tasks and to bridge Flask with Eve
"""
from flask import current_app as app, request, Response, abort
from functools import wraps
from ext.auth.tokenauth import TokenAuth
from ext.auth.helpers import Helpers
# Because of circular import in context
from ext.app.eve_helper import eve_abort
class AuthenticationFailed(Exception):
    """Raised when a supplied token fails authentication or authorization."""
class AuthenticationNoToken(Exception):
    """Raised when a request carries no authorization token at all."""
def require_token(allowed_roles=None):
    """Decorator factory enforcing token authentication on a Flask view.

    Bridges Flask views with the Eve-style ``TokenAuth`` class: the token is
    read from HTTP basic auth (sent as the username), checked against the
    requested resource and ``allowed_roles``, and the request is aborted with
    401 on missing/bad credentials or 500 on unexpected errors.
    """
    def decorator(f):
        @wraps(f)
        def wrapped(*args, **kwargs):
            try:
                # Token travels as the basic-auth username. When the request
                # has no Authorization header, ``request.authorization`` is
                # None and the attribute access raises AttributeError.
                try:
                    authorization_token = request.authorization.get('username', None)
                except AttributeError:
                    raise AuthenticationNoToken

                # Do the authentication.
                # Strip prefix + '/' from request.path to get the Eve resource.
                auth = TokenAuth()
                auth_result = auth.check_auth(token=authorization_token,
                                              method=request.method,
                                              resource=request.path[len(app.globals.get('prefix')) + 1:],
                                              allowed_roles=allowed_roles)

                if auth_result is not True:
                    raise AuthenticationFailed

            # Both missing and rejected tokens answer with 401.
            except (AuthenticationFailed, AuthenticationNoToken):
                eve_abort(401, 'Please provide proper credentials')
            except Exception:
                eve_abort(500, 'Server error')

            return f(*args, **kwargs)

        return wrapped
    return decorator
def require_superadmin():
    """Restrict the wrapped view to superadmin users.

    Aborts with 401 when the current user id (from ``app.globals``) is not
    returned by ``Helpers.get_superadmins()``.

    @TODO: use a switch for ref [superadmin, admin,..]?
    @TODO: in ext.auth.helpers define a get_users_in_roles_by_ref(ref)?
    """
    def decorator(f):
        @wraps(f)
        def wrapped(*args, **kwargs):
            superadmins = Helpers().get_superadmins()
            current_user = int(app.globals['user_id'])
            if current_user not in superadmins:
                eve_abort(401, 'You do not have sufficient privileges')
            return f(*args, **kwargs)
        return wrapped
    return decorator
| [
"ext.auth.tokenauth.TokenAuth",
"ext.auth.helpers.Helpers",
"ext.app.eve_helper.eve_abort",
"functools.wraps",
"flask.current_app.globals.get",
"flask.request.authorization.get"
] | [((726, 734), 'functools.wraps', 'wraps', (['f'], {}), '(f)\n', (731, 734), False, 'from functools import wraps\n'), ((2326, 2334), 'functools.wraps', 'wraps', (['f'], {}), '(f)\n', (2331, 2334), False, 'from functools import wraps\n'), ((2389, 2398), 'ext.auth.helpers.Helpers', 'Helpers', ([], {}), '()\n', (2396, 2398), False, 'from ext.auth.helpers import Helpers\n'), ((1258, 1269), 'ext.auth.tokenauth.TokenAuth', 'TokenAuth', ([], {}), '()\n', (1267, 1269), False, 'from ext.auth.tokenauth import TokenAuth\n'), ((2504, 2559), 'ext.app.eve_helper.eve_abort', 'eve_abort', (['(401)', '"""You do not have sufficient privileges"""'], {}), "(401, 'You do not have sufficient privileges')\n", (2513, 2559), False, 'from ext.app.eve_helper import eve_abort\n'), ((1003, 1046), 'flask.request.authorization.get', 'request.authorization.get', (['"""username"""', 'None'], {}), "('username', None)\n", (1028, 1046), False, 'from flask import current_app as app, request, Response, abort\n'), ((1804, 1855), 'ext.app.eve_helper.eve_abort', 'eve_abort', (['(401)', '"""Please provide proper credentials"""'], {}), "(401, 'Please provide proper credentials')\n", (1813, 1855), False, 'from ext.app.eve_helper import eve_abort\n'), ((1907, 1937), 'ext.app.eve_helper.eve_abort', 'eve_abort', (['(500)', '"""Server error"""'], {}), "(500, 'Server error')\n", (1916, 1937), False, 'from ext.app.eve_helper import eve_abort\n'), ((1493, 1518), 'flask.current_app.globals.get', 'app.globals.get', (['"""prefix"""'], {}), "('prefix')\n", (1508, 1518), True, 'from flask import current_app as app, request, Response, abort\n')] |
from collections import OrderedDict
from graphene import Field # , annotate, ResolveInfo
from graphene.relay import Connection, Node
from graphene.types.objecttype import ObjectType, ObjectTypeOptions
from graphene.types.utils import yank_fields_from_attrs
from mongoengine import DoesNotExist
from .converter import convert_mongoengine_field
from .registry import Registry, get_global_registry
from .utils import get_document_fields, is_mongoengine_document, get_query
# pylint: disable=W0622,C0103
def construct_fields(document, registry, only_fields, exclude_fields):
    """Build an OrderedDict of graphene fields for *document*'s mongoengine fields.

    Fields are filtered by *only_fields* / *exclude_fields* and converted via
    ``convert_mongoengine_field`` using *registry*. Order follows the order
    returned by ``get_document_fields``.
    """
    fields = OrderedDict()
    document_fields = get_document_fields(document)
    for name, field in document_fields.items():
        is_not_in_only = only_fields and name not in only_fields
        is_excluded = name in exclude_fields
        if is_not_in_only or is_excluded:
            # Skip fields excluded explicitly or omitted from only_fields.
            continue
        # NOTE: removed a leftover debug print(name) here.
        fields[name] = convert_mongoengine_field(field, registry)

    # TODO: also convert document relationships/references, mirroring the
    # sqlalchemy implementation this module was ported from.
    return fields
class MongoEngineObjectTypeOptions(ObjectTypeOptions):
    """Meta options container for :class:`MongoEngineObjectType` subclasses."""

    document = None  # type: Document
    registry = None  # type: Registry
    connection = None  # type: Type[Connection]
    id = None  # type: str
class MongoEngineObjectType(ObjectType):
    """Base ObjectType mapping a MongoEngine Document onto a graphene type."""

    @classmethod
    def __init_subclass_with_meta__(cls, document=None, registry=None, skip_registry=False,
                                    only_fields=(), exclude_fields=(), connection=None,
                                    use_connection=None, interfaces=(), id=None, **options):
        """Configure the subclass from its Meta options.

        Converts the document's fields, registers the type in *registry*,
        and optionally creates a relay ``Connection`` type when a Node
        interface is present.
        """
        assert is_mongoengine_document(document), (
            f"You need to pass a valid MongoEngine Document in {cls.__name__}.Meta, "
            f"received '{document}'."
        )

        if not registry:
            registry = get_global_registry()

        assert isinstance(registry, Registry), (
            f'The attribute registry in {cls.__name__} needs to be an instance of '
            f'Registry, received "{registry}".'
        )

        mongoengine_fields = yank_fields_from_attrs(
            construct_fields(document, registry, only_fields, exclude_fields),
            _as=Field,
        )

        if use_connection is None and interfaces:
            # Default to a connection when a relay Node interface is declared.
            use_connection = any((issubclass(interface, Node) for interface in interfaces))

        if use_connection and not connection:
            # We create the connection automatically
            connection = Connection.create_type(f'{cls.__name__}Connection', node=cls)

        if connection is not None:
            assert issubclass(connection, Connection), (
                f'The connection must be a Connection. Received {connection.__name__}'
            )

        _meta = MongoEngineObjectTypeOptions(cls)
        _meta.document = document
        _meta.registry = registry
        _meta.fields = mongoengine_fields
        _meta.connection = connection
        _meta.id = id or 'id'

        super(MongoEngineObjectType, cls).__init_subclass_with_meta__(
            _meta=_meta,
            interfaces=interfaces,
            **options
        )

        if not skip_registry:
            registry.register(cls)

    @classmethod
    def is_type_of(cls, root, info):
        """Return True when *root* is an instance of this type's document."""
        if isinstance(root, cls):
            return True

        if not is_mongoengine_document(root):
            raise Exception(f'Received incompatible instance "{root}".')

        return isinstance(root, cls._meta.document)

    @classmethod
    def get_query(cls, info):
        """ Gets QuerySet for this type's document """
        document = cls._meta.document
        return get_query(document, info.context)

    @classmethod
    def get_node(cls, info, id):
        """ Returns document to wrap in Node """
        try:
            # Look up by primary key. A bare positional argument would be
            # misinterpreted by mongoengine's QuerySet.get(*q_objs, **query)
            # as a Q object, not an id.
            return cls.get_query(info).get(pk=id)
        except DoesNotExist:
            return None
"collections.OrderedDict",
"graphene.relay.Connection.create_type"
] | [((597, 610), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (608, 610), False, 'from collections import OrderedDict\n'), ((3211, 3272), 'graphene.relay.Connection.create_type', 'Connection.create_type', (['f"""{cls.__name__}Connection"""'], {'node': 'cls'}), "(f'{cls.__name__}Connection', node=cls)\n", (3233, 3272), False, 'from graphene.relay import Connection, Node\n')] |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Utilities for binary file manipulation"""
import os
import subprocess
from . import util
from .._ffi.base import py_str
from ..api import register_func
@register_func("tvm_callback_get_section_size")
def tvm_callback_get_section_size(binary_path, section_name):
    """Finds size of the section in the binary.

    Assumes `size` shell command exists (typically works only on Linux machines)

    Parameters
    ----------
    binary_path : str
        path of the binary file

    section_name : str
        name of section

    Return
    ------
    size : integer
        size of the section in bytes
    """
    if not os.path.isfile(binary_path):
        raise RuntimeError("no such file \"{}\"".format(binary_path))

    # We use the "-A" flag here to get the ".rodata" section's size, which is
    # not included by default.
    size_proc = subprocess.Popen(["size", "-A", binary_path], stdout=subprocess.PIPE)
    (size_output, _) = size_proc.communicate()
    if size_proc.returncode != 0:
        msg = "error in finding section size:\n"
        # Fix: the original referenced an undefined name `out` here, which
        # raised a NameError instead of the intended error message.
        msg += py_str(size_output)
        raise RuntimeError(msg)
    size_output = size_output.decode("utf-8")

    section_size = 0
    # Skip the first two header lines in the `size` output.
    for line in size_output.split("\n")[2:]:
        tokens = list(filter(lambda s: len(s) != 0, line.split(" ")))
        if len(tokens) != 3:
            continue
        entry_name = tokens[0]
        entry_size = int(tokens[1])
        if entry_name.startswith("." + section_name):
            # The `.rodata` section should be the only section for which we
            # need to collect the size from *multiple* entries in the command
            # output.
            if section_size != 0 and not entry_name.startswith(".rodata"):
                raise RuntimeError(
                    "multiple entries in `size` output for section {}".format(section_name))
            section_size += entry_size
    return section_size
@register_func("tvm_callback_relocate_binary")
def tvm_callback_relocate_binary(binary_path, text_addr, rodata_addr, data_addr, bss_addr):
    """Relocates sections in the binary to new addresses
    Parameters
    ----------
    binary_path : str
        path of the binary file
    text_addr : str
        text section address
    rodata_addr : str
        rodata section address
    data_addr : str
        data section address
    bss_addr : str
        bss section address
    Return
    ------
    rel_bin : bytearray
        the relocated binary
    """
    tmp_dir = util.tempdir()
    rel_obj = tmp_dir.relpath("relocated.o")
    # GNU ld linker script template: each %s placeholder receives one of the
    # caller-supplied section start addresses; ALIGN(8) keeps every section
    # (and its trailing wildcard sub-sections) 8-byte aligned.
    ld_script_contents = """
SECTIONS
{
  . = %s;
  . = ALIGN(8);
  .text :
  {
     *(.text)
     . = ALIGN(8);
     *(.text*)
  }
  . = %s;
  . = ALIGN(8);
  .rodata :
  {
     *(.rodata)
     . = ALIGN(8);
     *(.rodata*)
  }
  . = %s;
  . = ALIGN(8);
  .data :
  {
     *(.data)
     . = ALIGN(8);
     *(.data*)
  }
  . = %s;
  . = ALIGN(8);
  .bss :
  {
     *(.bss)
     . = ALIGN(8);
     *(.bss*)
  }
}
    """ % (text_addr, rodata_addr, data_addr, bss_addr)
    # Write the script to a temp file and let `ld` perform the relocation.
    rel_ld_script = tmp_dir.relpath("relocated.lds")
    with open(rel_ld_script, "w") as f:
        f.write(ld_script_contents)
    ld_proc = subprocess.Popen(["ld", binary_path,
                                "-T", rel_ld_script,
                                "-o", rel_obj],
                               stdout=subprocess.PIPE,
                               stderr=subprocess.STDOUT)
    (out, _) = ld_proc.communicate()
    if ld_proc.returncode != 0:
        msg = "linking error using ld:\n"
        msg += py_str(out)
        raise RuntimeError(msg)
    # Return the relocated object file's raw bytes.
    with open(rel_obj, "rb") as f:
        rel_bin = bytearray(f.read())
    return rel_bin
@register_func("tvm_callback_read_binary_section")
def tvm_callback_read_binary_section(binary, section):
    """Extract one section from a binary blob via `objcopy --dump-section`.

    Parameters
    ----------
    binary : bytearray
        contents of the binary
    section : str
        type of section
    Return
    ------
    section_bin : bytearray
        contents of the read section (empty if the section is absent)
    """
    work_dir = util.tempdir()
    input_bin = work_dir.relpath("temp.bin")
    section_file = work_dir.relpath("tmp_section.bin")
    # Materialize the in-memory binary so objcopy can operate on it.
    with open(input_bin, "wb") as out_file:
        out_file.write(bytes(binary))
    objcopy_proc = subprocess.Popen(
        ["objcopy", "--dump-section",
         ".{}={}".format(section, section_file),
         input_bin],
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT)
    (out, _) = objcopy_proc.communicate()
    if objcopy_proc.returncode != 0:
        msg = "error in using objcopy:\n"
        msg += py_str(out)
        raise RuntimeError(msg)
    # objcopy only creates the dump file when the section actually exists.
    if not os.path.isfile(section_file):
        return bytearray("", "utf-8")
    with open(section_file, "rb") as f:
        return bytearray(f.read())
@register_func("tvm_callback_get_symbol_map")
def tvm_callback_get_symbol_map(binary):
    """Build a symbol→address map for *binary* using `nm`.

    Parameters
    ----------
    binary : bytearray
        contents of the binary
    Return
    ------
    map_str : str
        map of defined symbols to addresses, encoded as a series of
        alternating newline-separated keys and values
    """
    work_dir = util.tempdir()
    obj_path = work_dir.relpath("tmp_obj.bin")
    with open(obj_path, "wb") as out_file:
        out_file.write(bytes(binary))
    nm_proc = subprocess.Popen(["nm", "-C", "--defined-only", obj_path],
                               stdout=subprocess.PIPE,
                               stderr=subprocess.STDOUT)
    (out, _) = nm_proc.communicate()
    if nm_proc.returncode != 0:
        msg = "error in using nm:\n"
        msg += py_str(out)
        raise RuntimeError(msg)
    # Each `nm` row is "<address> <kind> <symbol>"; emit "symbol\naddress\n".
    entries = []
    for row in out.decode("utf8").splitlines():
        fields = row.split()
        entries.append(fields[2] + "\n")
        entries.append(fields[0] + "\n")
    return "".join(entries)
| [
"os.path.isfile",
"subprocess.Popen"
] | [((1637, 1706), 'subprocess.Popen', 'subprocess.Popen', (["['size', '-A', binary_path]"], {'stdout': 'subprocess.PIPE'}), "(['size', '-A', binary_path], stdout=subprocess.PIPE)\n", (1653, 1706), False, 'import subprocess\n'), ((3996, 4123), 'subprocess.Popen', 'subprocess.Popen', (["['ld', binary_path, '-T', rel_ld_script, '-o', rel_obj]"], {'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.STDOUT'}), "(['ld', binary_path, '-T', rel_ld_script, '-o', rel_obj],\n stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n", (4012, 4123), False, 'import subprocess\n'), ((5620, 5647), 'os.path.isfile', 'os.path.isfile', (['tmp_section'], {}), '(tmp_section)\n', (5634, 5647), False, 'import os\n'), ((6503, 6615), 'subprocess.Popen', 'subprocess.Popen', (["['nm', '-C', '--defined-only', tmp_obj]"], {'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.STDOUT'}), "(['nm', '-C', '--defined-only', tmp_obj], stdout=subprocess\n .PIPE, stderr=subprocess.STDOUT)\n", (6519, 6615), False, 'import subprocess\n'), ((1413, 1440), 'os.path.isfile', 'os.path.isfile', (['binary_path'], {}), '(binary_path)\n', (1427, 1440), False, 'import os\n')] |
import card_dispenser
import time

# Seconds to wait between consecutive dispenses.
DISPENSE_INTERVAL_SECONDS = 10

# Hand out cards from the hardware dispenser forever, one every interval.
card_dispenser_object = card_dispenser.card_dispenser()
while True:
    card_dispenser_object.give_card()
    time.sleep(DISPENSE_INTERVAL_SECONDS)
"card_dispenser.card_dispenser",
"time.sleep"
] | [((59, 90), 'card_dispenser.card_dispenser', 'card_dispenser.card_dispenser', ([], {}), '()\n', (88, 90), False, 'import card_dispenser\n'), ((145, 159), 'time.sleep', 'time.sleep', (['(10)'], {}), '(10)\n', (155, 159), False, 'import time\n')] |
# -*- coding: utf-8 -*-
import logging
from typing import Any, Dict, List, Mapping
try:
from collections import Mapping as CollectionsMapping
except ImportError:
from collections.abc import Mapping as CollectionsMapping
from brewtils.models import Parameter, Resolvable
from brewtils.resolvers.bytes import BytesResolver
from brewtils.resolvers.chunks import ChunksResolver
from brewtils.resolvers.identity import IdentityResolver
from brewtils.schema_parser import SchemaParser
def build_resolver_map(easy_client=None):
    """Construct the ordered list of parameter resolvers.

    The identity resolver must come first so already-resolved values are
    passed through before any upload/download resolver can claim them.
    """
    resolvers = [IdentityResolver()]
    resolvers.append(BytesResolver(easy_client))
    resolvers.append(ChunksResolver(easy_client))
    return resolvers
class ResolutionManager(object):
    """Parameter resolution manager
    This class is used under-the-hood for various plugin functions. Its purpose is to
    remove all the various cleanup and housekeeping steps involved in resolving
    parameters. An example of an unresolved parameter is a dictionary which represents a
    bytes object. In this case the user wants the open file descriptor, not the random
    dictionary that they don't know how to process. The parameter resolver helps handle
    these scenarios.
    This is intended for internal use for the plugin class.
    """
    def __init__(self, **kwargs):
        # kwargs are forwarded to build_resolver_map (e.g. an easy_client).
        # Resolver order matters: the first resolver to claim a value wins.
        self.logger = logging.getLogger(__name__)
        self.resolvers = build_resolver_map(**kwargs)
    def resolve(self, values, definitions=None, upload=True):
        # type: (Mapping[str, Any], List[Parameter], bool) -> Dict[str, Any]
        """Iterate through parameters, resolving as necessary
        Args:
            values: Dictionary of request parameter values
            definitions: Parameter definitions
            upload: Controls which methods will be called on resolvers
        Returns:
            The resolved parameter dict
        """
        resolved_parameters = {}
        for key, value in values.items():
            # First find the matching Parameter definition, if possible
            # (an empty Parameter() acts as a no-op default when none matches)
            definition = Parameter()
            for param_def in definitions or []:
                if param_def.key == key:
                    definition = param_def
                    break
            # Check to see if this is a nested parameter: recurse with the
            # nested definitions so inner values get resolved too
            if isinstance(value, CollectionsMapping) and definition.parameters:
                resolved = self.resolve(
                    value, definitions=definition.parameters, upload=upload
                )
            # See if this is a multi parameter
            elif isinstance(value, list):
                # This is kind of gross because multi-parameters are kind of gross
                # We have to wrap everything into the correct form and pull it out
                resolved = []
                for item in value:
                    resolved_item = self.resolve(
                        {key: item}, definitions=definitions, upload=upload
                    )
                    resolved.append(resolved_item[key])
            # This is a simple parameter
            else:
                # See if this is a parameter that needs to be resolved;
                # the first resolver that claims the value handles it
                for resolver in self.resolvers:
                    if upload and resolver.should_upload(value, definition):
                        resolvable = resolver.upload(value, definition)
                        resolved = SchemaParser.serialize(resolvable, to_string=False)
                        break
                    elif (
                        not upload
                        and resolver.should_download(value, definition)
                        and isinstance(value, Mapping)
                    ):
                        resolvable = Resolvable(**value)
                        resolved = resolver.download(resolvable, definition)
                        break
                # Just a normal parameter (for/else: no resolver claimed it,
                # so the value passes through untouched)
                else:
                    resolved = value
            resolved_parameters[key] = resolved
        return resolved_parameters
| [
"logging.getLogger",
"brewtils.resolvers.chunks.ChunksResolver",
"brewtils.resolvers.bytes.BytesResolver",
"brewtils.models.Resolvable",
"brewtils.models.Parameter",
"brewtils.resolvers.identity.IdentityResolver",
"brewtils.schema_parser.SchemaParser.serialize"
] | [((587, 605), 'brewtils.resolvers.identity.IdentityResolver', 'IdentityResolver', ([], {}), '()\n', (603, 605), False, 'from brewtils.resolvers.identity import IdentityResolver\n'), ((646, 672), 'brewtils.resolvers.bytes.BytesResolver', 'BytesResolver', (['easy_client'], {}), '(easy_client)\n', (659, 672), False, 'from brewtils.resolvers.bytes import BytesResolver\n'), ((682, 709), 'brewtils.resolvers.chunks.ChunksResolver', 'ChunksResolver', (['easy_client'], {}), '(easy_client)\n', (696, 709), False, 'from brewtils.resolvers.chunks import ChunksResolver\n'), ((1366, 1393), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1383, 1393), False, 'import logging\n'), ((2085, 2096), 'brewtils.models.Parameter', 'Parameter', ([], {}), '()\n', (2094, 2096), False, 'from brewtils.models import Parameter, Resolvable\n'), ((3417, 3468), 'brewtils.schema_parser.SchemaParser.serialize', 'SchemaParser.serialize', (['resolvable'], {'to_string': '(False)'}), '(resolvable, to_string=False)\n', (3439, 3468), False, 'from brewtils.schema_parser import SchemaParser\n'), ((3748, 3767), 'brewtils.models.Resolvable', 'Resolvable', ([], {}), '(**value)\n', (3758, 3767), False, 'from brewtils.models import Parameter, Resolvable\n')] |
from dudes.Ranks import Ranks
import numpy as np
import sys
def printDebug(DEBUG, l):
    """Write *l* (stringified, newline-terminated) to stderr when *DEBUG* is truthy."""
    if not DEBUG:
        return
    sys.stderr.write("{}\n".format(l))
def group_max(groups, data, pre_order=None):
    """Mark the per-group maximum after sorting by (group, data).

    When *pre_order* is None the sort order is computed here and returned
    together with the boolean mask; otherwise the supplied order is reused
    and only the mask is returned (aligned with np.unique(groups)).
    """
    computed_here = pre_order is None
    order = np.lexsort((data, groups)) if computed_here else pre_order
    sorted_groups = groups[order]  # needed only when groups is unsorted
    # True at the last (== maximal, since data is the secondary sort key)
    # element of each group run.
    last_in_group = np.empty(len(sorted_groups), dtype=bool)
    last_in_group[-1] = True
    last_in_group[:-1] = sorted_groups[1:] != sorted_groups[:-1]
    if computed_here:
        return order, last_in_group
    return last_in_group
def getNameRank(rankid):
    """Return the fixed rank name for *rankid*.

    Indices past the end of Ranks.ranks (more than one no_rank/strain)
    clamp to the last entry.
    """
    return Ranks.ranks[rankid] if rankid < len(Ranks.ranks) else Ranks.ranks[-1]
def getIndexRank(rank):
    """Return the position of *rank* within the fixed Ranks.ranks list."""
    position = Ranks.ranks.index(rank)
    return position
"numpy.lexsort",
"dudes.Ranks.Ranks.ranks.index"
] | [((870, 893), 'dudes.Ranks.Ranks.ranks.index', 'Ranks.ranks.index', (['rank'], {}), '(rank)\n', (887, 893), False, 'from dudes.Ranks import Ranks\n'), ((213, 239), 'numpy.lexsort', 'np.lexsort', (['(data, groups)'], {}), '((data, groups))\n', (223, 239), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Feb 3 10:27:25 2019
@author: alishbaimran
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from imutils import paths
from sklearn.metrics import classification_report
from sklearn.metrics import accuracy_score
from keras.applications import VGG19
from keras.preprocessing.image import ImageDataGenerator
from keras.optimizers import SGD
from keras.models import Model
from keras.layers import Dropout, Flatten, Dense, GlobalAveragePooling2D
from keras.callbacks import EarlyStopping
# defining constants and variables
img_width, img_height = 128, 128  # input image size fed to the network
train_data_dir = "data/train"
validation_data_dir = "data/val"
test_data_dir = "data/test"
NB = 2  # number of output classes
BS = 64  # batch size
EPOCHS = 10  # training epochs
# creating train, validation and test data generators
# image counts drive steps_per_epoch / validation_steps below
TRAIN = len(list(paths.list_images(train_data_dir)))
VAL = len(list(paths.list_images(validation_data_dir)))
TEST = len(list(paths.list_images(test_data_dir)))
# both generators only rescale pixels to [0, 1]; no augmentation is applied
trainAug = ImageDataGenerator(rescale = 1./255,
                              fill_mode = "nearest")
valAug = ImageDataGenerator(rescale = 1./255,
                            fill_mode = "nearest")
trainGen = trainAug.flow_from_directory(
        train_data_dir,
        target_size = (img_height, img_width),
        batch_size = BS,
        shuffle = True,
        class_mode = "categorical")
# validation/test generators must NOT shuffle so predictions later line up
# with testGen.classes for the accuracy/classification reports
valGen = valAug.flow_from_directory(
        validation_data_dir,
        target_size = (img_height, img_width),
        batch_size = BS,
        shuffle = False,
        class_mode = "categorical")
testGen = valAug.flow_from_directory(
        test_data_dir,
        target_size = (img_height, img_width),
        batch_size = BS,
        shuffle = False,
        class_mode = "categorical")
# loading pre-trained model, training additional features and saving model
base_model = VGG19(weights = "imagenet", include_top=False,
                   input_shape = (img_width, img_height, 3))
# custom classification head on top of the frozen VGG19 feature extractor
x = base_model.output
x = Flatten()(x)
x = Dense(1024, activation = "relu")(x)
x = Dropout(0.4)(x)
x = Dense(256, activation = "relu")(x)
x = Dropout(0.2)(x)
preds = Dense(NB, activation = "softmax")(x)
model = Model(input = base_model.input, output = preds)
for i,layer in enumerate(model.layers):
    print(i,layer.name)
# freeze the first 16 layers (early VGG19 convolutions); fine-tune the rest
for layer in model.layers[:16]:
    layer.trainable=False
for layer in model.layers[16:]:
    layer.trainable=True
model.summary()
early = EarlyStopping(monitor = 'val_acc', min_delta = 0,
                      patience = 10, verbose= 1 , mode = 'auto')
# Fix: with one-hot categorical labels (class_mode="categorical") and a
# softmax output, the loss must be categorical_crossentropy.  Using
# binary_crossentropy here makes the "accuracy" metric dispatch to binary
# accuracy, which reports inflated/misleading values for this setup.
model.compile(loss = "categorical_crossentropy",
              optimizer = SGD(lr=0.001, momentum=0.9),
              metrics=["accuracy"])
H = model.fit_generator(
        trainGen,
        epochs = EPOCHS,
        steps_per_epoch = TRAIN // BS,
        validation_data = valGen,
        validation_steps = VAL // BS,
        callbacks = [early])
model.save('model.h5')
# generating predictions using model
testGen.reset()
# one extra step covers the final partial batch (TEST may not divide by BS)
predictions = model.predict_generator(testGen, steps = (TEST // BS) + 1)
# collapse per-class probabilities to predicted class indices
predictions = np.argmax(predictions, axis=1)
# testGen.classes is aligned with predictions because shuffle=False
print("Test set accuracy: " +
      str(accuracy_score(testGen.classes, predictions, normalize=True) * 100)
      + "%")
print(classification_report(testGen.classes, predictions,
    target_names=testGen.class_indices.keys()))
# plotting training data: loss/accuracy curves for train and validation
plt.style.use("ggplot")
plt.figure()
plt.plot(np.arange(0, EPOCHS), H.history["loss"], label="train_loss")
plt.plot(np.arange(0, EPOCHS), H.history["val_loss"], label="val_loss")
plt.plot(np.arange(0, EPOCHS), H.history["acc"], label="train_acc")
plt.plot(np.arange(0, EPOCHS), H.history["val_acc"], label="val_acc")
plt.title("Training Loss and Accuracy on Dataset")
plt.xlabel("Epoch #")
plt.ylabel("Loss/Accuracy")
plt.legend(loc="lower left")
plt.savefig("plot.jpg")
"matplotlib.pyplot.ylabel",
"keras.preprocessing.image.ImageDataGenerator",
"keras.optimizers.SGD",
"imutils.paths.list_images",
"keras.layers.Dense",
"numpy.arange",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.style.use",
"keras.models.Model",
"keras.callbacks.EarlyStopping",
"keras.applicat... | [((1032, 1090), 'keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {'rescale': '(1.0 / 255)', 'fill_mode': '"""nearest"""'}), "(rescale=1.0 / 255, fill_mode='nearest')\n", (1050, 1090), False, 'from keras.preprocessing.image import ImageDataGenerator\n'), ((1125, 1183), 'keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {'rescale': '(1.0 / 255)', 'fill_mode': '"""nearest"""'}), "(rescale=1.0 / 255, fill_mode='nearest')\n", (1143, 1183), False, 'from keras.preprocessing.image import ImageDataGenerator\n'), ((2102, 2190), 'keras.applications.VGG19', 'VGG19', ([], {'weights': '"""imagenet"""', 'include_top': '(False)', 'input_shape': '(img_width, img_height, 3)'}), "(weights='imagenet', include_top=False, input_shape=(img_width,\n img_height, 3))\n", (2107, 2190), False, 'from keras.applications import VGG19\n'), ((2433, 2476), 'keras.models.Model', 'Model', ([], {'input': 'base_model.input', 'output': 'preds'}), '(input=base_model.input, output=preds)\n', (2438, 2476), False, 'from keras.models import Model\n'), ((2700, 2787), 'keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'monitor': '"""val_acc"""', 'min_delta': '(0)', 'patience': '(10)', 'verbose': '(1)', 'mode': '"""auto"""'}), "(monitor='val_acc', min_delta=0, patience=10, verbose=1, mode=\n 'auto')\n", (2713, 2787), False, 'from keras.callbacks import EarlyStopping\n'), ((3363, 3393), 'numpy.argmax', 'np.argmax', (['predictions'], {'axis': '(1)'}), '(predictions, axis=1)\n', (3372, 3393), True, 'import numpy as np\n'), ((3689, 3712), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""ggplot"""'], {}), "('ggplot')\n", (3702, 3712), True, 'import matplotlib.pyplot as plt\n'), ((3714, 3726), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3724, 3726), True, 'import matplotlib.pyplot as plt\n'), ((4012, 4062), 'matplotlib.pyplot.title', 'plt.title', (['"""Training Loss and Accuracy on Dataset"""'], {}), "('Training Loss and 
Accuracy on Dataset')\n", (4021, 4062), True, 'import matplotlib.pyplot as plt\n'), ((4064, 4085), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epoch #"""'], {}), "('Epoch #')\n", (4074, 4085), True, 'import matplotlib.pyplot as plt\n'), ((4087, 4114), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Loss/Accuracy"""'], {}), "('Loss/Accuracy')\n", (4097, 4114), True, 'import matplotlib.pyplot as plt\n'), ((4116, 4144), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""lower left"""'}), "(loc='lower left')\n", (4126, 4144), True, 'import matplotlib.pyplot as plt\n'), ((4146, 4169), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""plot.jpg"""'], {}), "('plot.jpg')\n", (4157, 4169), True, 'import matplotlib.pyplot as plt\n'), ((2240, 2249), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (2247, 2249), False, 'from keras.layers import Dropout, Flatten, Dense, GlobalAveragePooling2D\n'), ((2258, 2288), 'keras.layers.Dense', 'Dense', (['(1024)'], {'activation': '"""relu"""'}), "(1024, activation='relu')\n", (2263, 2288), False, 'from keras.layers import Dropout, Flatten, Dense, GlobalAveragePooling2D\n'), ((2299, 2311), 'keras.layers.Dropout', 'Dropout', (['(0.4)'], {}), '(0.4)\n', (2306, 2311), False, 'from keras.layers import Dropout, Flatten, Dense, GlobalAveragePooling2D\n'), ((2320, 2349), 'keras.layers.Dense', 'Dense', (['(256)'], {'activation': '"""relu"""'}), "(256, activation='relu')\n", (2325, 2349), False, 'from keras.layers import Dropout, Flatten, Dense, GlobalAveragePooling2D\n'), ((2360, 2372), 'keras.layers.Dropout', 'Dropout', (['(0.2)'], {}), '(0.2)\n', (2367, 2372), False, 'from keras.layers import Dropout, Flatten, Dense, GlobalAveragePooling2D\n'), ((2385, 2416), 'keras.layers.Dense', 'Dense', (['NB'], {'activation': '"""softmax"""'}), "(NB, activation='softmax')\n", (2390, 2416), False, 'from keras.layers import Dropout, Flatten, Dense, GlobalAveragePooling2D\n'), ((3737, 3757), 'numpy.arange', 'np.arange', (['(0)', 'EPOCHS'], {}), 
'(0, EPOCHS)\n', (3746, 3757), True, 'import numpy as np\n'), ((3808, 3828), 'numpy.arange', 'np.arange', (['(0)', 'EPOCHS'], {}), '(0, EPOCHS)\n', (3817, 3828), True, 'import numpy as np\n'), ((3881, 3901), 'numpy.arange', 'np.arange', (['(0)', 'EPOCHS'], {}), '(0, EPOCHS)\n', (3890, 3901), True, 'import numpy as np\n'), ((3950, 3970), 'numpy.arange', 'np.arange', (['(0)', 'EPOCHS'], {}), '(0, EPOCHS)\n', (3959, 3970), True, 'import numpy as np\n'), ((873, 906), 'imutils.paths.list_images', 'paths.list_images', (['train_data_dir'], {}), '(train_data_dir)\n', (890, 906), False, 'from imutils import paths\n'), ((925, 963), 'imutils.paths.list_images', 'paths.list_images', (['validation_data_dir'], {}), '(validation_data_dir)\n', (942, 963), False, 'from imutils import paths\n'), ((983, 1015), 'imutils.paths.list_images', 'paths.list_images', (['test_data_dir'], {}), '(test_data_dir)\n', (1000, 1015), False, 'from imutils import paths\n'), ((2898, 2925), 'keras.optimizers.SGD', 'SGD', ([], {'lr': '(0.001)', 'momentum': '(0.9)'}), '(lr=0.001, momentum=0.9)\n', (2901, 2925), False, 'from keras.optimizers import SGD\n'), ((3439, 3499), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['testGen.classes', 'predictions'], {'normalize': '(True)'}), '(testGen.classes, predictions, normalize=True)\n', (3453, 3499), False, 'from sklearn.metrics import accuracy_score\n')] |
# encoding: utf-8
from antlr4 import *
from io import StringIO
from typing.io import TextIO
import sys
def serializedATN():
with StringIO() as buf:
buf.write("\3\u608b\ua72a\u8133\ub9ed\u417c\u3be7\u7786\u5964\3@")
buf.write("\u027b\4\2\t\2\4\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6\4\7\t\7")
buf.write("\4\b\t\b\4\t\t\t\4\n\t\n\4\13\t\13\4\f\t\f\4\r\t\r\4\16")
buf.write("\t\16\4\17\t\17\4\20\t\20\4\21\t\21\4\22\t\22\4\23\t\23")
buf.write("\4\24\t\24\4\25\t\25\4\26\t\26\4\27\t\27\4\30\t\30\4\31")
buf.write("\t\31\4\32\t\32\4\33\t\33\4\34\t\34\4\35\t\35\4\36\t\36")
buf.write("\4\37\t\37\4 \t \4!\t!\4\"\t\"\4#\t#\4$\t$\4%\t%\4&\t")
buf.write("&\4\'\t\'\4(\t(\4)\t)\4*\t*\4+\t+\4,\t,\4-\t-\4.\t.\4")
buf.write("/\t/\4\60\t\60\4\61\t\61\4\62\t\62\4\63\t\63\4\64\t\64")
buf.write("\4\65\t\65\4\66\t\66\4\67\t\67\48\t8\49\t9\4:\t:\4;\t")
buf.write(";\4<\t<\4=\t=\4>\t>\3\2\7\2~\n\2\f\2\16\2\u0081\13\2\3")
buf.write("\2\3\2\3\2\3\2\7\2\u0087\n\2\f\2\16\2\u008a\13\2\3\2\3")
buf.write("\2\7\2\u008e\n\2\f\2\16\2\u0091\13\2\3\2\3\2\3\3\3\3\3")
buf.write("\3\3\3\3\3\5\3\u009a\n\3\3\4\3\4\3\4\3\4\3\4\5\4\u00a1")
buf.write("\n\4\3\5\3\5\3\5\3\5\7\5\u00a7\n\5\f\5\16\5\u00aa\13\5")
buf.write("\3\5\3\5\3\6\3\6\3\6\3\6\3\7\3\7\3\7\7\7\u00b5\n\7\f\7")
buf.write("\16\7\u00b8\13\7\3\7\3\7\3\7\5\7\u00bd\n\7\3\b\3\b\3\b")
buf.write("\3\b\7\b\u00c3\n\b\f\b\16\b\u00c6\13\b\3\b\3\b\3\t\3\t")
buf.write("\3\n\3\n\5\n\u00ce\n\n\3\n\3\n\3\13\3\13\5\13\u00d4\n")
buf.write("\13\3\13\3\13\3\f\3\f\3\f\7\f\u00db\n\f\f\f\16\f\u00de")
buf.write("\13\f\3\f\5\f\u00e1\n\f\3\r\3\r\3\r\3\r\5\r\u00e7\n\r")
buf.write("\3\r\3\r\3\r\3\16\3\16\3\16\5\16\u00ef\n\16\3\17\3\17")
buf.write("\7\17\u00f3\n\17\f\17\16\17\u00f6\13\17\3\17\3\17\3\20")
buf.write("\3\20\7\20\u00fc\n\20\f\20\16\20\u00ff\13\20\3\20\3\20")
buf.write("\3\21\3\21\3\21\3\21\7\21\u0107\n\21\f\21\16\21\u010a")
buf.write("\13\21\3\22\7\22\u010d\n\22\f\22\16\22\u0110\13\22\3\23")
buf.write("\7\23\u0113\n\23\f\23\16\23\u0116\13\23\3\23\3\23\5\23")
buf.write("\u011a\n\23\3\24\7\24\u011d\n\24\f\24\16\24\u0120\13\24")
buf.write("\3\24\5\24\u0123\n\24\3\24\3\24\5\24\u0127\n\24\3\24\5")
buf.write("\24\u012a\n\24\3\24\5\24\u012d\n\24\3\24\5\24\u0130\n")
buf.write("\24\3\24\7\24\u0133\n\24\f\24\16\24\u0136\13\24\3\24\3")
buf.write("\24\3\24\3\24\3\24\3\25\7\25\u013e\n\25\f\25\16\25\u0141")
buf.write("\13\25\3\25\5\25\u0144\n\25\3\26\3\26\3\26\3\26\3\27\3")
buf.write("\27\3\27\3\30\3\30\5\30\u014f\n\30\3\31\3\31\3\31\3\32")
buf.write("\3\32\3\32\3\32\7\32\u0158\n\32\f\32\16\32\u015b\13\32")
buf.write("\3\33\3\33\3\33\3\34\3\34\3\34\3\34\3\35\6\35\u0165\n")
buf.write("\35\r\35\16\35\u0166\3\36\3\36\3\37\3\37\3 \3 \3 \7 \u0170")
buf.write("\n \f \16 \u0173\13 \3!\3!\3!\5!\u0178\n!\3\"\7\"\u017b")
buf.write("\n\"\f\"\16\"\u017e\13\"\3\"\5\"\u0181\n\"\3\"\3\"\3\"")
buf.write("\3\"\3\"\3#\3#\3$\3$\3$\7$\u018d\n$\f$\16$\u0190\13$\3")
buf.write("%\3%\5%\u0194\n%\3%\5%\u0197\n%\3&\6&\u019a\n&\r&\16&")
buf.write("\u019b\3\'\3\'\5\'\u01a0\n\'\3\'\3\'\5\'\u01a4\n\'\3\'")
buf.write("\3\'\5\'\u01a8\n\'\3\'\3\'\5\'\u01ac\n\'\5\'\u01ae\n\'")
buf.write("\3(\3(\3(\3(\5(\u01b4\n(\3)\3)\3)\3)\3*\3*\3*\3*\7*\u01be")
buf.write("\n*\f*\16*\u01c1\13*\3+\3+\3+\3+\3+\3+\5+\u01c9\n+\3,")
buf.write("\3,\5,\u01cd\n,\3-\3-\5-\u01d1\n-\3.\3.\3.\7.\u01d6\n")
buf.write(".\f.\16.\u01d9\13.\3/\5/\u01dc\n/\3/\6/\u01df\n/\r/\16")
buf.write("/\u01e0\3/\5/\u01e4\n/\3\60\3\60\5\60\u01e8\n\60\3\60")
buf.write("\3\60\5\60\u01ec\n\60\3\60\3\60\5\60\u01f0\n\60\3\60\3")
buf.write("\60\5\60\u01f4\n\60\3\60\5\60\u01f7\n\60\3\61\3\61\3\61")
buf.write("\3\61\5\61\u01fd\n\61\3\62\3\62\5\62\u0201\n\62\3\62\3")
buf.write("\62\5\62\u0205\n\62\3\62\3\62\5\62\u0209\n\62\5\62\u020b")
buf.write("\n\62\3\63\3\63\3\63\3\63\3\63\3\63\5\63\u0213\n\63\3")
buf.write("\63\5\63\u0216\n\63\3\64\3\64\3\64\3\64\3\64\5\64\u021d")
buf.write("\n\64\5\64\u021f\n\64\3\65\3\65\3\65\3\65\5\65\u0225\n")
buf.write("\65\3\66\3\66\3\66\3\66\7\66\u022b\n\66\f\66\16\66\u022e")
buf.write("\13\66\3\66\3\66\3\67\3\67\5\67\u0234\n\67\3\67\3\67\5")
buf.write("\67\u0238\n\67\3\67\3\67\5\67\u023c\n\67\38\38\58\u0240")
buf.write("\n8\38\78\u0243\n8\f8\168\u0246\138\38\58\u0249\n8\38")
buf.write("\38\38\39\39\59\u0250\n9\39\59\u0253\n9\3:\3:\3:\3:\3")
buf.write(";\3;\5;\u025b\n;\3;\3;\5;\u025f\n;\5;\u0261\n;\3<\3<\3")
buf.write("<\3<\7<\u0267\n<\f<\16<\u026a\13<\3<\3<\3=\3=\3=\3=\3")
buf.write("=\5=\u0273\n=\5=\u0275\n=\3>\3>\5>\u0279\n>\3>\2\2?\2")
buf.write("\4\6\b\n\f\16\20\22\24\26\30\32\34\36 \"$&(*,.\60\62\64")
buf.write("\668:<>@BDFHJLNPRTVXZ\\^`bdfhjlnprtvxz\2\4\4\2\23\23\27")
buf.write("\31\4\2++..\2\u02a8\2\177\3\2\2\2\4\u0099\3\2\2\2\6\u00a0")
buf.write("\3\2\2\2\b\u00a2\3\2\2\2\n\u00ad\3\2\2\2\f\u00bc\3\2\2")
buf.write("\2\16\u00be\3\2\2\2\20\u00c9\3\2\2\2\22\u00cb\3\2\2\2")
buf.write("\24\u00d1\3\2\2\2\26\u00d7\3\2\2\2\30\u00e2\3\2\2\2\32")
buf.write("\u00ee\3\2\2\2\34\u00f0\3\2\2\2\36\u00f9\3\2\2\2 \u0102")
buf.write("\3\2\2\2\"\u010e\3\2\2\2$\u0114\3\2\2\2&\u011e\3\2\2\2")
buf.write("(\u013f\3\2\2\2*\u0145\3\2\2\2,\u0149\3\2\2\2.\u014e\3")
buf.write("\2\2\2\60\u0150\3\2\2\2\62\u0153\3\2\2\2\64\u015c\3\2")
buf.write("\2\2\66\u015f\3\2\2\28\u0164\3\2\2\2:\u0168\3\2\2\2<\u016a")
buf.write("\3\2\2\2>\u016c\3\2\2\2@\u0174\3\2\2\2B\u017c\3\2\2\2")
buf.write("D\u0187\3\2\2\2F\u0189\3\2\2\2H\u0196\3\2\2\2J\u0199\3")
buf.write("\2\2\2L\u01ad\3\2\2\2N\u01af\3\2\2\2P\u01b5\3\2\2\2R\u01b9")
buf.write("\3\2\2\2T\u01c8\3\2\2\2V\u01cc\3\2\2\2X\u01d0\3\2\2\2")
buf.write("Z\u01d2\3\2\2\2\\\u01e3\3\2\2\2^\u01f6\3\2\2\2`\u01f8")
buf.write("\3\2\2\2b\u020a\3\2\2\2d\u0215\3\2\2\2f\u021e\3\2\2\2")
buf.write("h\u0224\3\2\2\2j\u0226\3\2\2\2l\u023b\3\2\2\2n\u023d\3")
buf.write("\2\2\2p\u024d\3\2\2\2r\u0254\3\2\2\2t\u0260\3\2\2\2v\u0262")
buf.write("\3\2\2\2x\u0274\3\2\2\2z\u0278\3\2\2\2|~\7\6\2\2}|\3\2")
buf.write("\2\2~\u0081\3\2\2\2\177}\3\2\2\2\177\u0080\3\2\2\2\u0080")
buf.write("\u0082\3\2\2\2\u0081\177\3\2\2\2\u0082\u0083\5\4\3\2\u0083")
buf.write("\u0084\5z>\2\u0084\u0088\7#\2\2\u0085\u0087\5\6\4\2\u0086")
buf.write("\u0085\3\2\2\2\u0087\u008a\3\2\2\2\u0088\u0086\3\2\2\2")
buf.write("\u0088\u0089\3\2\2\2\u0089\u008b\3\2\2\2\u008a\u0088\3")
buf.write("\2\2\2\u008b\u008f\5\"\22\2\u008c\u008e\5 \21\2\u008d")
buf.write("\u008c\3\2\2\2\u008e\u0091\3\2\2\2\u008f\u008d\3\2\2\2")
buf.write("\u008f\u0090\3\2\2\2\u0090\u0092\3\2\2\2\u0091\u008f\3")
buf.write("\2\2\2\u0092\u0093\7\2\2\3\u0093\3\3\2\2\2\u0094\u0095")
buf.write("\7\24\2\2\u0095\u009a\7\26\2\2\u0096\u0097\7\25\2\2\u0097")
buf.write("\u009a\7\26\2\2\u0098\u009a\7\26\2\2\u0099\u0094\3\2\2")
buf.write("\2\u0099\u0096\3\2\2\2\u0099\u0098\3\2\2\2\u009a\5\3\2")
buf.write("\2\2\u009b\u00a1\5\b\5\2\u009c\u00a1\5\16\b\2\u009d\u00a1")
buf.write("\5\22\n\2\u009e\u00a1\5\24\13\2\u009f\u00a1\5\30\r\2\u00a0")
buf.write("\u009b\3\2\2\2\u00a0\u009c\3\2\2\2\u00a0\u009d\3\2\2\2")
buf.write("\u00a0\u009e\3\2\2\2\u00a0\u009f\3\2\2\2\u00a1\7\3\2\2")
buf.write("\2\u00a2\u00a8\7\17\2\2\u00a3\u00a4\5\n\6\2\u00a4\u00a5")
buf.write("\7#\2\2\u00a5\u00a7\3\2\2\2\u00a6\u00a3\3\2\2\2\u00a7")
buf.write("\u00aa\3\2\2\2\u00a8\u00a6\3\2\2\2\u00a8\u00a9\3\2\2\2")
buf.write("\u00a9\u00ab\3\2\2\2\u00aa\u00a8\3\2\2\2\u00ab\u00ac\7")
buf.write("\'\2\2\u00ac\t\3\2\2\2\u00ad\u00ae\5z>\2\u00ae\u00af\7")
buf.write("+\2\2\u00af\u00b0\5\f\7\2\u00b0\13\3\2\2\2\u00b1\u00b6")
buf.write("\5z>\2\u00b2\u00b3\7\63\2\2\u00b3\u00b5\5z>\2\u00b4\u00b2")
buf.write("\3\2\2\2\u00b5\u00b8\3\2\2\2\u00b6\u00b4\3\2\2\2\u00b6")
buf.write("\u00b7\3\2\2\2\u00b7\u00bd\3\2\2\2\u00b8\u00b6\3\2\2\2")
buf.write("\u00b9\u00bd\7\13\2\2\u00ba\u00bd\5\34\17\2\u00bb\u00bd")
buf.write("\7\n\2\2\u00bc\u00b1\3\2\2\2\u00bc\u00b9\3\2\2\2\u00bc")
buf.write("\u00ba\3\2\2\2\u00bc\u00bb\3\2\2\2\u00bd\r\3\2\2\2\u00be")
buf.write("\u00bf\7\22\2\2\u00bf\u00c4\5\20\t\2\u00c0\u00c1\7\"\2")
buf.write("\2\u00c1\u00c3\5\20\t\2\u00c2\u00c0\3\2\2\2\u00c3\u00c6")
buf.write("\3\2\2\2\u00c4\u00c2\3\2\2\2\u00c4\u00c5\3\2\2\2\u00c5")
buf.write("\u00c7\3\2\2\2\u00c6\u00c4\3\2\2\2\u00c7\u00c8\7#\2\2")
buf.write("\u00c8\17\3\2\2\2\u00c9\u00ca\5z>\2\u00ca\21\3\2\2\2\u00cb")
buf.write("\u00cd\7\20\2\2\u00cc\u00ce\5\26\f\2\u00cd\u00cc\3\2\2")
buf.write("\2\u00cd\u00ce\3\2\2\2\u00ce\u00cf\3\2\2\2\u00cf\u00d0")
buf.write("\7\'\2\2\u00d0\23\3\2\2\2\u00d1\u00d3\7\21\2\2\u00d2\u00d4")
buf.write("\5\26\f\2\u00d3\u00d2\3\2\2\2\u00d3\u00d4\3\2\2\2\u00d4")
buf.write("\u00d5\3\2\2\2\u00d5\u00d6\7\'\2\2\u00d6\25\3\2\2\2\u00d7")
buf.write("\u00dc\5z>\2\u00d8\u00d9\7\"\2\2\u00d9\u00db\5z>\2\u00da")
buf.write("\u00d8\3\2\2\2\u00db\u00de\3\2\2\2\u00dc\u00da\3\2\2\2")
buf.write("\u00dc\u00dd\3\2\2\2\u00dd\u00e0\3\2\2\2\u00de\u00dc\3")
buf.write("\2\2\2\u00df\u00e1\7\"\2\2\u00e0\u00df\3\2\2\2\u00e0\u00e1")
buf.write("\3\2\2\2\u00e1\27\3\2\2\2\u00e2\u00e6\7\64\2\2\u00e3\u00e4")
buf.write("\5\32\16\2\u00e4\u00e5\7!\2\2\u00e5\u00e7\3\2\2\2\u00e6")
buf.write("\u00e3\3\2\2\2\u00e6\u00e7\3\2\2\2\u00e7\u00e8\3\2\2\2")
buf.write("\u00e8\u00e9\5z>\2\u00e9\u00ea\5\34\17\2\u00ea\31\3\2")
buf.write("\2\2\u00eb\u00ef\5z>\2\u00ec\u00ef\7\24\2\2\u00ed\u00ef")
buf.write("\7\25\2\2\u00ee\u00eb\3\2\2\2\u00ee\u00ec\3\2\2\2\u00ee")
buf.write("\u00ed\3\2\2\2\u00ef\33\3\2\2\2\u00f0\u00f4\7\16\2\2\u00f1")
buf.write("\u00f3\7?\2\2\u00f2\u00f1\3\2\2\2\u00f3\u00f6\3\2\2\2")
buf.write("\u00f4\u00f2\3\2\2\2\u00f4\u00f5\3\2\2\2\u00f5\u00f7\3")
buf.write("\2\2\2\u00f6\u00f4\3\2\2\2\u00f7\u00f8\7=\2\2\u00f8\35")
buf.write("\3\2\2\2\u00f9\u00fd\7\r\2\2\u00fa\u00fc\7<\2\2\u00fb")
buf.write("\u00fa\3\2\2\2\u00fc\u00ff\3\2\2\2\u00fd\u00fb\3\2\2\2")
buf.write("\u00fd\u00fe\3\2\2\2\u00fe\u0100\3\2\2\2\u00ff\u00fd\3")
buf.write("\2\2\2\u0100\u0101\7:\2\2\u0101\37\3\2\2\2\u0102\u0103")
buf.write("\7\37\2\2\u0103\u0104\5z>\2\u0104\u0108\7#\2\2\u0105\u0107")
buf.write("\5B\"\2\u0106\u0105\3\2\2\2\u0107\u010a\3\2\2\2\u0108")
buf.write("\u0106\3\2\2\2\u0108\u0109\3\2\2\2\u0109!\3\2\2\2\u010a")
buf.write("\u0108\3\2\2\2\u010b\u010d\5$\23\2\u010c\u010b\3\2\2\2")
buf.write("\u010d\u0110\3\2\2\2\u010e\u010c\3\2\2\2\u010e\u010f\3")
buf.write("\2\2\2\u010f#\3\2\2\2\u0110\u010e\3\2\2\2\u0111\u0113")
buf.write("\7\7\2\2\u0112\u0111\3\2\2\2\u0113\u0116\3\2\2\2\u0114")
buf.write("\u0112\3\2\2\2\u0114\u0115\3\2\2\2\u0115\u0119\3\2\2\2")
buf.write("\u0116\u0114\3\2\2\2\u0117\u011a\5&\24\2\u0118\u011a\5")
buf.write("B\"\2\u0119\u0117\3\2\2\2\u0119\u0118\3\2\2\2\u011a%\3")
buf.write("\2\2\2\u011b\u011d\7\6\2\2\u011c\u011b\3\2\2\2\u011d\u0120")
buf.write("\3\2\2\2\u011e\u011c\3\2\2\2\u011e\u011f\3\2\2\2\u011f")
buf.write("\u0122\3\2\2\2\u0120\u011e\3\2\2\2\u0121\u0123\58\35\2")
buf.write("\u0122\u0121\3\2\2\2\u0122\u0123\3\2\2\2\u0123\u0124\3")
buf.write("\2\2\2\u0124\u0126\7\4\2\2\u0125\u0127\5\36\20\2\u0126")
buf.write("\u0125\3\2\2\2\u0126\u0127\3\2\2\2\u0127\u0129\3\2\2\2")
buf.write("\u0128\u012a\5\60\31\2\u0129\u0128\3\2\2\2\u0129\u012a")
buf.write("\3\2\2\2\u012a\u012c\3\2\2\2\u012b\u012d\5\62\32\2\u012c")
buf.write("\u012b\3\2\2\2\u012c\u012d\3\2\2\2\u012d\u012f\3\2\2\2")
buf.write("\u012e\u0130\5\64\33\2\u012f\u012e\3\2\2\2\u012f\u0130")
buf.write("\3\2\2\2\u0130\u0134\3\2\2\2\u0131\u0133\5.\30\2\u0132")
buf.write("\u0131\3\2\2\2\u0133\u0136\3\2\2\2\u0134\u0132\3\2\2\2")
buf.write("\u0134\u0135\3\2\2\2\u0135\u0137\3\2\2\2\u0136\u0134\3")
buf.write("\2\2\2\u0137\u0138\7 \2\2\u0138\u0139\5<\37\2\u0139\u013a")
buf.write("\7#\2\2\u013a\u013b\5(\25\2\u013b\'\3\2\2\2\u013c\u013e")
buf.write("\5*\26\2\u013d\u013c\3\2\2\2\u013e\u0141\3\2\2\2\u013f")
buf.write("\u013d\3\2\2\2\u013f\u0140\3\2\2\2\u0140\u0143\3\2\2\2")
buf.write("\u0141\u013f\3\2\2\2\u0142\u0144\5,\27\2\u0143\u0142\3")
buf.write("\2\2\2\u0143\u0144\3\2\2\2\u0144)\3\2\2\2\u0145\u0146")
buf.write("\7\35\2\2\u0146\u0147\5\36\20\2\u0147\u0148\5\34\17\2")
buf.write("\u0148+\3\2\2\2\u0149\u014a\7\36\2\2\u014a\u014b\5\34")
buf.write("\17\2\u014b-\3\2\2\2\u014c\u014f\5\b\5\2\u014d\u014f\5")
buf.write("\66\34\2\u014e\u014c\3\2\2\2\u014e\u014d\3\2\2\2\u014f")
buf.write("/\3\2\2\2\u0150\u0151\7\32\2\2\u0151\u0152\5\36\20\2\u0152")
buf.write("\61\3\2\2\2\u0153\u0154\7\34\2\2\u0154\u0159\5z>\2\u0155")
buf.write("\u0156\7\"\2\2\u0156\u0158\5z>\2\u0157\u0155\3\2\2\2\u0158")
buf.write("\u015b\3\2\2\2\u0159\u0157\3\2\2\2\u0159\u015a\3\2\2\2")
buf.write("\u015a\63\3\2\2\2\u015b\u0159\3\2\2\2\u015c\u015d\7\33")
buf.write("\2\2\u015d\u015e\5\36\20\2\u015e\65\3\2\2\2\u015f\u0160")
buf.write("\7\64\2\2\u0160\u0161\5z>\2\u0161\u0162\5\34\17\2\u0162")
buf.write("\67\3\2\2\2\u0163\u0165\5:\36\2\u0164\u0163\3\2\2\2\u0165")
buf.write("\u0166\3\2\2\2\u0166\u0164\3\2\2\2\u0166\u0167\3\2\2\2")
buf.write("\u01679\3\2\2\2\u0168\u0169\t\2\2\2\u0169;\3\2\2\2\u016a")
buf.write("\u016b\5> \2\u016b=\3\2\2\2\u016c\u0171\5@!\2\u016d\u016e")
buf.write("\7\60\2\2\u016e\u0170\5@!\2\u016f\u016d\3\2\2\2\u0170")
buf.write("\u0173\3\2\2\2\u0171\u016f\3\2\2\2\u0171\u0172\3\2\2\2")
buf.write("\u0172?\3\2\2\2\u0173\u0171\3\2\2\2\u0174\u0177\5\\/\2")
buf.write("\u0175\u0176\7\65\2\2\u0176\u0178\5z>\2\u0177\u0175\3")
buf.write("\2\2\2\u0177\u0178\3\2\2\2\u0178A\3\2\2\2\u0179\u017b")
buf.write("\7\6\2\2\u017a\u0179\3\2\2\2\u017b\u017e\3\2\2\2\u017c")
buf.write("\u017a\3\2\2\2\u017c\u017d\3\2\2\2\u017d\u0180\3\2\2\2")
buf.write("\u017e\u017c\3\2\2\2\u017f\u0181\7\23\2\2\u0180\u017f")
buf.write("\3\2\2\2\u0180\u0181\3\2\2\2\u0181\u0182\3\2\2\2\u0182")
buf.write("\u0183\7\3\2\2\u0183\u0184\7 \2\2\u0184\u0185\5D#\2\u0185")
buf.write("\u0186\7#\2\2\u0186C\3\2\2\2\u0187\u0188\5F$\2\u0188E")
buf.write("\3\2\2\2\u0189\u018e\5H%\2\u018a\u018b\7\60\2\2\u018b")
buf.write("\u018d\5H%\2\u018c\u018a\3\2\2\2\u018d\u0190\3\2\2\2\u018e")
buf.write("\u018c\3\2\2\2\u018e\u018f\3\2\2\2\u018fG\3\2\2\2\u0190")
buf.write("\u018e\3\2\2\2\u0191\u0193\5J&\2\u0192\u0194\5R*\2\u0193")
buf.write("\u0192\3\2\2\2\u0193\u0194\3\2\2\2\u0194\u0197\3\2\2\2")
buf.write("\u0195\u0197\3\2\2\2\u0196\u0191\3\2\2\2\u0196\u0195\3")
buf.write("\2\2\2\u0197I\3\2\2\2\u0198\u019a\5L\'\2\u0199\u0198\3")
buf.write("\2\2\2\u019a\u019b\3\2\2\2\u019b\u0199\3\2\2\2\u019b\u019c")
buf.write("\3\2\2\2\u019cK\3\2\2\2\u019d\u019f\5N(\2\u019e\u01a0")
buf.write("\5b\62\2\u019f\u019e\3\2\2\2\u019f\u01a0\3\2\2\2\u01a0")
buf.write("\u01ae\3\2\2\2\u01a1\u01a3\5d\63\2\u01a2\u01a4\5b\62\2")
buf.write("\u01a3\u01a2\3\2\2\2\u01a3\u01a4\3\2\2\2\u01a4\u01ae\3")
buf.write("\2\2\2\u01a5\u01a7\5P)\2\u01a6\u01a8\5b\62\2\u01a7\u01a6")
buf.write("\3\2\2\2\u01a7\u01a8\3\2\2\2\u01a8\u01ae\3\2\2\2\u01a9")
buf.write("\u01ab\5\34\17\2\u01aa\u01ac\7,\2\2\u01ab\u01aa\3\2\2")
buf.write("\2\u01ab\u01ac\3\2\2\2\u01ac\u01ae\3\2\2\2\u01ad\u019d")
buf.write("\3\2\2\2\u01ad\u01a1\3\2\2\2\u01ad\u01a5\3\2\2\2\u01ad")
buf.write("\u01a9\3\2\2\2\u01aeM\3\2\2\2\u01af\u01b0\5z>\2\u01b0")
buf.write("\u01b3\t\3\2\2\u01b1\u01b4\5d\63\2\u01b2\u01b4\5P)\2\u01b3")
buf.write("\u01b1\3\2\2\2\u01b3\u01b2\3\2\2\2\u01b4O\3\2\2\2\u01b5")
buf.write("\u01b6\7$\2\2\u01b6\u01b7\5F$\2\u01b7\u01b8\7%\2\2\u01b8")
buf.write("Q\3\2\2\2\u01b9\u01ba\7(\2\2\u01ba\u01bf\5T+\2\u01bb\u01bc")
buf.write("\7\"\2\2\u01bc\u01be\5T+\2\u01bd\u01bb\3\2\2\2\u01be\u01c1")
buf.write("\3\2\2\2\u01bf\u01bd\3\2\2\2\u01bf\u01c0\3\2\2\2\u01c0")
buf.write("S\3\2\2\2\u01c1\u01bf\3\2\2\2\u01c2\u01c3\5V,\2\u01c3")
buf.write("\u01c4\7$\2\2\u01c4\u01c5\5X-\2\u01c5\u01c6\7%\2\2\u01c6")
buf.write("\u01c9\3\2\2\2\u01c7\u01c9\5V,\2\u01c8\u01c2\3\2\2\2\u01c8")
buf.write("\u01c7\3\2\2\2\u01c9U\3\2\2\2\u01ca\u01cd\5z>\2\u01cb")
buf.write("\u01cd\7\37\2\2\u01cc\u01ca\3\2\2\2\u01cc\u01cb\3\2\2")
buf.write("\2\u01cdW\3\2\2\2\u01ce\u01d1\5z>\2\u01cf\u01d1\7\n\2")
buf.write("\2\u01d0\u01ce\3\2\2\2\u01d0\u01cf\3\2\2\2\u01d1Y\3\2")
buf.write("\2\2\u01d2\u01d7\5\\/\2\u01d3\u01d4\7\60\2\2\u01d4\u01d6")
buf.write("\5\\/\2\u01d5\u01d3\3\2\2\2\u01d6\u01d9\3\2\2\2\u01d7")
buf.write("\u01d5\3\2\2\2\u01d7\u01d8\3\2\2\2\u01d8[\3\2\2\2\u01d9")
buf.write("\u01d7\3\2\2\2\u01da\u01dc\5v<\2\u01db\u01da\3\2\2\2\u01db")
buf.write("\u01dc\3\2\2\2\u01dc\u01de\3\2\2\2\u01dd\u01df\5^\60\2")
buf.write("\u01de\u01dd\3\2\2\2\u01df\u01e0\3\2\2\2\u01e0\u01de\3")
buf.write("\2\2\2\u01e0\u01e1\3\2\2\2\u01e1\u01e4\3\2\2\2\u01e2\u01e4")
buf.write("\3\2\2\2\u01e3\u01db\3\2\2\2\u01e3\u01e2\3\2\2\2\u01e4")
buf.write("]\3\2\2\2\u01e5\u01e7\5`\61\2\u01e6\u01e8\5b\62\2\u01e7")
buf.write("\u01e6\3\2\2\2\u01e7\u01e8\3\2\2\2\u01e8\u01f7\3\2\2\2")
buf.write("\u01e9\u01eb\5f\64\2\u01ea\u01ec\5b\62\2\u01eb\u01ea\3")
buf.write("\2\2\2\u01eb\u01ec\3\2\2\2\u01ec\u01f7\3\2\2\2\u01ed\u01ef")
buf.write("\5n8\2\u01ee\u01f0\5b\62\2\u01ef\u01ee\3\2\2\2\u01ef\u01f0")
buf.write("\3\2\2\2\u01f0\u01f7\3\2\2\2\u01f1\u01f3\5\34\17\2\u01f2")
buf.write("\u01f4\7,\2\2\u01f3\u01f2\3\2\2\2\u01f3\u01f4\3\2\2\2")
buf.write("\u01f4\u01f7\3\2\2\2\u01f5\u01f7\7\6\2\2\u01f6\u01e5\3")
buf.write("\2\2\2\u01f6\u01e9\3\2\2\2\u01f6\u01ed\3\2\2\2\u01f6\u01f1")
buf.write("\3\2\2\2\u01f6\u01f5\3\2\2\2\u01f7_\3\2\2\2\u01f8\u01f9")
buf.write("\5z>\2\u01f9\u01fc\t\3\2\2\u01fa\u01fd\5f\64\2\u01fb\u01fd")
buf.write("\5n8\2\u01fc\u01fa\3\2\2\2\u01fc\u01fb\3\2\2\2\u01fda")
buf.write("\3\2\2\2\u01fe\u0200\7,\2\2\u01ff\u0201\7,\2\2\u0200\u01ff")
buf.write("\3\2\2\2\u0200\u0201\3\2\2\2\u0201\u020b\3\2\2\2\u0202")
buf.write("\u0204\7-\2\2\u0203\u0205\7,\2\2\u0204\u0203\3\2\2\2\u0204")
buf.write("\u0205\3\2\2\2\u0205\u020b\3\2\2\2\u0206\u0208\7/\2\2")
buf.write("\u0207\u0209\7,\2\2\u0208\u0207\3\2\2\2\u0208\u0209\3")
buf.write("\2\2\2\u0209\u020b\3\2\2\2\u020a\u01fe\3\2\2\2\u020a\u0202")
buf.write("\3\2\2\2\u020a\u0206\3\2\2\2\u020bc\3\2\2\2\u020c\u0216")
buf.write("\5r:\2\u020d\u0216\5t;\2\u020e\u0216\5h\65\2\u020f\u0216")
buf.write("\7\5\2\2\u0210\u0212\7\63\2\2\u0211\u0213\5v<\2\u0212")
buf.write("\u0211\3\2\2\2\u0212\u0213\3\2\2\2\u0213\u0216\3\2\2\2")
buf.write("\u0214\u0216\7\6\2\2\u0215\u020c\3\2\2\2\u0215\u020d\3")
buf.write("\2\2\2\u0215\u020e\3\2\2\2\u0215\u020f\3\2\2\2\u0215\u0210")
buf.write("\3\2\2\2\u0215\u0214\3\2\2\2\u0216e\3\2\2\2\u0217\u021f")
buf.write("\5t;\2\u0218\u021f\5p9\2\u0219\u021f\5h\65\2\u021a\u021c")
buf.write("\7\63\2\2\u021b\u021d\5v<\2\u021c\u021b\3\2\2\2\u021c")
buf.write("\u021d\3\2\2\2\u021d\u021f\3\2\2\2\u021e\u0217\3\2\2\2")
buf.write("\u021e\u0218\3\2\2\2\u021e\u0219\3\2\2\2\u021e\u021a\3")
buf.write("\2\2\2\u021fg\3\2\2\2\u0220\u0221\7\66\2\2\u0221\u0225")
buf.write("\5l\67\2\u0222\u0223\7\66\2\2\u0223\u0225\5j\66\2\u0224")
buf.write("\u0220\3\2\2\2\u0224\u0222\3\2\2\2\u0225i\3\2\2\2\u0226")
buf.write("\u0227\7$\2\2\u0227\u022c\5l\67\2\u0228\u0229\7\60\2\2")
buf.write("\u0229\u022b\5l\67\2\u022a\u0228\3\2\2\2\u022b\u022e\3")
buf.write("\2\2\2\u022c\u022a\3\2\2\2\u022c\u022d\3\2\2\2\u022d\u022f")
buf.write("\3\2\2\2\u022e\u022c\3\2\2\2\u022f\u0230\7%\2\2\u0230")
buf.write("k\3\2\2\2\u0231\u0233\7\3\2\2\u0232\u0234\5v<\2\u0233")
buf.write("\u0232\3\2\2\2\u0233\u0234\3\2\2\2\u0234\u023c\3\2\2\2")
buf.write("\u0235\u0237\7\13\2\2\u0236\u0238\5v<\2\u0237\u0236\3")
buf.write("\2\2\2\u0237\u0238\3\2\2\2\u0238\u023c\3\2\2\2\u0239\u023c")
buf.write("\5r:\2\u023a\u023c\7\5\2\2\u023b\u0231\3\2\2\2\u023b\u0235")
buf.write("\3\2\2\2\u023b\u0239\3\2\2\2\u023b\u023a\3\2\2\2\u023c")
buf.write("m\3\2\2\2\u023d\u0248\7$\2\2\u023e\u0240\5\b\5\2\u023f")
buf.write("\u023e\3\2\2\2\u023f\u0240\3\2\2\2\u0240\u0244\3\2\2\2")
buf.write("\u0241\u0243\5\66\34\2\u0242\u0241\3\2\2\2\u0243\u0246")
buf.write("\3\2\2\2\u0244\u0242\3\2\2\2\u0244\u0245\3\2\2\2\u0245")
buf.write("\u0247\3\2\2\2\u0246\u0244\3\2\2\2\u0247\u0249\7 \2\2")
buf.write("\u0248\u023f\3\2\2\2\u0248\u0249\3\2\2\2\u0249\u024a\3")
buf.write("\2\2\2\u024a\u024b\5Z.\2\u024b\u024c\7%\2\2\u024co\3\2")
buf.write("\2\2\u024d\u024f\7\4\2\2\u024e\u0250\5\36\20\2\u024f\u024e")
buf.write("\3\2\2\2\u024f\u0250\3\2\2\2\u0250\u0252\3\2\2\2\u0251")
buf.write("\u0253\5v<\2\u0252\u0251\3\2\2\2\u0252\u0253\3\2\2\2\u0253")
buf.write("q\3\2\2\2\u0254\u0255\7\13\2\2\u0255\u0256\7\62\2\2\u0256")
buf.write("\u0257\7\13\2\2\u0257s\3\2\2\2\u0258\u025a\7\3\2\2\u0259")
buf.write("\u025b\5v<\2\u025a\u0259\3\2\2\2\u025a\u025b\3\2\2\2\u025b")
buf.write("\u0261\3\2\2\2\u025c\u025e\7\13\2\2\u025d\u025f\5v<\2")
buf.write("\u025e\u025d\3\2\2\2\u025e\u025f\3\2\2\2\u025f\u0261\3")
buf.write("\2\2\2\u0260\u0258\3\2\2\2\u0260\u025c\3\2\2\2\u0261u")
buf.write("\3\2\2\2\u0262\u0263\7)\2\2\u0263\u0268\5x=\2\u0264\u0265")
buf.write("\7\"\2\2\u0265\u0267\5x=\2\u0266\u0264\3\2\2\2\u0267\u026a")
buf.write("\3\2\2\2\u0268\u0266\3\2\2\2\u0268\u0269\3\2\2\2\u0269")
buf.write("\u026b\3\2\2\2\u026a\u0268\3\2\2\2\u026b\u026c\7*\2\2")
buf.write("\u026cw\3\2\2\2\u026d\u0275\5z>\2\u026e\u026f\5z>\2\u026f")
buf.write("\u0272\7+\2\2\u0270\u0273\5z>\2\u0271\u0273\7\13\2\2\u0272")
buf.write("\u0270\3\2\2\2\u0272\u0271\3\2\2\2\u0273\u0275\3\2\2\2")
buf.write("\u0274\u026d\3\2\2\2\u0274\u026e\3\2\2\2\u0275y\3\2\2")
buf.write("\2\u0276\u0279\7\4\2\2\u0277\u0279\7\3\2\2\u0278\u0276")
buf.write("\3\2\2\2\u0278\u0277\3\2\2\2\u0279{\3\2\2\2X\177\u0088")
buf.write("\u008f\u0099\u00a0\u00a8\u00b6\u00bc\u00c4\u00cd\u00d3")
buf.write("\u00dc\u00e0\u00e6\u00ee\u00f4\u00fd\u0108\u010e\u0114")
buf.write("\u0119\u011e\u0122\u0126\u0129\u012c\u012f\u0134\u013f")
buf.write("\u0143\u014e\u0159\u0166\u0171\u0177\u017c\u0180\u018e")
buf.write("\u0193\u0196\u019b\u019f\u01a3\u01a7\u01ab\u01ad\u01b3")
buf.write("\u01bf\u01c8\u01cc\u01d0\u01d7\u01db\u01e0\u01e3\u01e7")
buf.write("\u01eb\u01ef\u01f3\u01f6\u01fc\u0200\u0204\u0208\u020a")
buf.write("\u0212\u0215\u021c\u021e\u0224\u022c\u0233\u0237\u023b")
buf.write("\u023f\u0244\u0248\u024f\u0252\u025a\u025e\u0260\u0268")
buf.write("\u0272\u0274\u0278")
return buf.getvalue()
class ANTLRv4Parser ( Parser ):
    # NOTE(review): this class is machine-generated by the ANTLR tool from
    # ANTLRv4Parser.g4 -- do not hand-edit; regenerate from the grammar instead.

    grammarFileName = "ANTLRv4Parser.g4"

    # Deserialize the ATN (augmented transition network) once at class-creation
    # time; it is shared by every parser instance.
    atn = ATNDeserializer().deserialize(serializedATN())

    # One DFA per decision point in the ATN, also shared across instances.
    decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ]

    # Shared cache of prediction contexts used by the ATN simulator.
    sharedContextCache = PredictionContextCache()

    # Display strings for tokens with a fixed literal form, indexed by token
    # type ("<INVALID>" where the token has no single literal).
    literalNames = [ "<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
                     "<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
                     "<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
                     "<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
                     "'import'", "'fragment'", "'lexer'", "'parser'", "'grammar'",
                     "'protected'", "'public'", "'private'", "'returns'",
                     "'locals'", "'throws'", "'catch'", "'finally'", "'mode'" ]

    # Symbolic token names, indexed by token type (parallel to literalNames).
    symbolicNames = [ "<INVALID>", "TOKEN_REF", "RULE_REF", "LEXER_CHAR_SET",
                      "DOC_COMMENT", "HEADER", "BLOCK_COMMENT", "LINE_COMMENT",
                      "INT", "STRING_LITERAL", "UNTERMINATED_STRING_LITERAL",
                      "BEGIN_ARGUMENT", "BEGIN_ACTION", "OPTIONS", "TOKENS",
                      "CHANNELS", "IMPORT", "FRAGMENT", "LEXER", "PARSER",
                      "GRAMMAR", "PROTECTED", "PUBLIC", "PRIVATE", "RETURNS",
                      "LOCALS", "THROWS", "CATCH", "FINALLY", "MODE", "COLON",
                      "COLONCOLON", "COMMA", "SEMI", "LPAREN", "RPAREN",
                      "LBRACE", "RBRACE", "RARROW", "LT", "GT", "ASSIGN",
                      "QUESTION", "STAR", "PLUS_ASSIGN", "PLUS", "OR", "DOLLAR",
                      "RANGE", "DOT", "AT", "POUND", "NOT", "ID", "WS",
                      "ERRCHAR", "END_ARGUMENT", "UNTERMINATED_ARGUMENT",
                      "ARGUMENT_CONTENT", "END_ACTION", "UNTERMINATED_ACTION",
                      "ACTION_CONTENT", "UNTERMINATED_CHAR_SET" ]

    # Grammar-rule indices (parallel to ruleNames below).
    RULE_grammarSpec = 0
    RULE_grammarType = 1
    RULE_prequelConstruct = 2
    RULE_optionsSpec = 3
    RULE_option = 4
    RULE_optionValue = 5
    RULE_delegateGrammars = 6
    RULE_delegateGrammar = 7
    RULE_tokensSpec = 8
    RULE_channelsSpec = 9
    RULE_idList = 10
    RULE_action = 11
    RULE_actionScopeName = 12
    RULE_actionBlock = 13
    RULE_argActionBlock = 14
    RULE_modeSpec = 15
    RULE_rules = 16
    RULE_ruleSpec = 17
    RULE_parserRuleSpec = 18
    RULE_exceptionGroup = 19
    RULE_exceptionHandler = 20
    RULE_finallyClause = 21
    RULE_rulePrequel = 22
    RULE_ruleReturns = 23
    RULE_throwsSpec = 24
    RULE_localsSpec = 25
    RULE_ruleAction = 26
    RULE_ruleModifiers = 27
    RULE_ruleModifier = 28
    RULE_ruleBlock = 29
    RULE_ruleAltList = 30
    RULE_labeledAlt = 31
    RULE_lexerRuleSpec = 32
    RULE_lexerRuleBlock = 33
    RULE_lexerAltList = 34
    RULE_lexerAlt = 35
    RULE_lexerElements = 36
    RULE_lexerElement = 37
    RULE_labeledLexerElement = 38
    RULE_lexerBlock = 39
    RULE_lexerCommands = 40
    RULE_lexerCommand = 41
    RULE_lexerCommandName = 42
    RULE_lexerCommandExpr = 43
    RULE_altList = 44
    RULE_alternative = 45
    RULE_element = 46
    RULE_labeledElement = 47
    RULE_ebnfSuffix = 48
    RULE_lexerAtom = 49
    RULE_atom = 50
    RULE_notSet = 51
    RULE_blockSet = 52
    RULE_setElement = 53
    RULE_block = 54
    RULE_ruleref = 55
    RULE_characterRange = 56
    RULE_terminal = 57
    RULE_elementOptions = 58
    RULE_elementOption = 59
    RULE_identifier = 60

    # Rule names, indexed by the RULE_* constants above.
    ruleNames = [ "grammarSpec", "grammarType", "prequelConstruct", "optionsSpec",
                  "option", "optionValue", "delegateGrammars", "delegateGrammar",
                  "tokensSpec", "channelsSpec", "idList", "action", "actionScopeName",
                  "actionBlock", "argActionBlock", "modeSpec", "rules",
                  "ruleSpec", "parserRuleSpec", "exceptionGroup", "exceptionHandler",
                  "finallyClause", "rulePrequel", "ruleReturns", "throwsSpec",
                  "localsSpec", "ruleAction", "ruleModifiers", "ruleModifier",
                  "ruleBlock", "ruleAltList", "labeledAlt", "lexerRuleSpec",
                  "lexerRuleBlock", "lexerAltList", "lexerAlt", "lexerElements",
                  "lexerElement", "labeledLexerElement", "lexerBlock",
                  "lexerCommands", "lexerCommand", "lexerCommandName",
                  "lexerCommandExpr", "altList", "alternative", "element",
                  "labeledElement", "ebnfSuffix", "lexerAtom", "atom",
                  "notSet", "blockSet", "setElement", "block", "ruleref",
                  "characterRange", "terminal", "elementOptions", "elementOption",
                  "identifier" ]

    # Token-type constants (parallel to symbolicNames above).
    EOF = Token.EOF
    TOKEN_REF=1
    RULE_REF=2
    LEXER_CHAR_SET=3
    DOC_COMMENT=4
    HEADER=5
    BLOCK_COMMENT=6
    LINE_COMMENT=7
    INT=8
    STRING_LITERAL=9
    UNTERMINATED_STRING_LITERAL=10
    BEGIN_ARGUMENT=11
    BEGIN_ACTION=12
    OPTIONS=13
    TOKENS=14
    CHANNELS=15
    IMPORT=16
    FRAGMENT=17
    LEXER=18
    PARSER=19
    GRAMMAR=20
    PROTECTED=21
    PUBLIC=22
    PRIVATE=23
    RETURNS=24
    LOCALS=25
    THROWS=26
    CATCH=27
    FINALLY=28
    MODE=29
    COLON=30
    COLONCOLON=31
    COMMA=32
    SEMI=33
    LPAREN=34
    RPAREN=35
    LBRACE=36
    RBRACE=37
    RARROW=38
    LT=39
    GT=40
    ASSIGN=41
    QUESTION=42
    STAR=43
    PLUS_ASSIGN=44
    PLUS=45
    OR=46
    DOLLAR=47
    RANGE=48
    DOT=49
    AT=50
    POUND=51
    NOT=52
    ID=53
    WS=54
    ERRCHAR=55
    END_ARGUMENT=56
    UNTERMINATED_ARGUMENT=57
    ARGUMENT_CONTENT=58
    END_ACTION=59
    UNTERMINATED_ACTION=60
    ACTION_CONTENT=61
    UNTERMINATED_CHAR_SET=62
def __init__(self, input:TokenStream, output:TextIO = sys.stdout):
    """Create a parser reading tokens from *input*, reporting to *output*."""
    super().__init__(input, output)
    # Generated against ANTLR runtime version 4.7; aborts on a mismatch.
    self.checkVersion("4.7")
    # Wire the class-level ATN and shared caches into this instance's simulator.
    self._interp = ParserATNSimulator(self, self.atn, self.decisionsToDFA, self.sharedContextCache)
    self._predicates = None
class GrammarSpecContext(ParserRuleContext):
    """Parse-tree context for the ``grammarSpec`` rule (the whole grammar file)."""

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser
        self._DOC_COMMENT = None # Token
        self.docs = list() # of Tokens
        self.gtype = None # GrammarTypeContext
        self.gname = None # IdentifierContext

    # --- Accessors for child tokens / sub-contexts, generated per grammar element ---

    def SEMI(self):
        return self.getToken(ANTLRv4Parser.SEMI, 0)

    def rules(self):
        return self.getTypedRuleContext(ANTLRv4Parser.RulesContext,0)

    def EOF(self):
        return self.getToken(ANTLRv4Parser.EOF, 0)

    def grammarType(self):
        return self.getTypedRuleContext(ANTLRv4Parser.GrammarTypeContext,0)

    def identifier(self):
        return self.getTypedRuleContext(ANTLRv4Parser.IdentifierContext,0)

    def prequelConstruct(self, i:int=None):
        # i is None -> all occurrences; otherwise the i-th occurrence.
        if i is None:
            return self.getTypedRuleContexts(ANTLRv4Parser.PrequelConstructContext)
        else:
            return self.getTypedRuleContext(ANTLRv4Parser.PrequelConstructContext,i)

    def modeSpec(self, i:int=None):
        if i is None:
            return self.getTypedRuleContexts(ANTLRv4Parser.ModeSpecContext)
        else:
            return self.getTypedRuleContext(ANTLRv4Parser.ModeSpecContext,i)

    def DOC_COMMENT(self, i:int=None):
        if i is None:
            return self.getTokens(ANTLRv4Parser.DOC_COMMENT)
        else:
            return self.getToken(ANTLRv4Parser.DOC_COMMENT, i)

    def getRuleIndex(self):
        return ANTLRv4Parser.RULE_grammarSpec

    # Listener/visitor dispatch; hasattr guards let partial listeners/visitors work.
    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterGrammarSpec" ):
            listener.enterGrammarSpec(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitGrammarSpec" ):
            listener.exitGrammarSpec(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitGrammarSpec" ):
            return visitor.visitGrammarSpec(self)
        else:
            return visitor.visitChildren(self)
def grammarSpec(self):
    """Parse the ``grammarSpec`` rule.

    Shape (from the accessors on GrammarSpecContext): optional DOC_COMMENTs,
    then grammarType, identifier, SEMI, zero or more prequelConstructs, the
    rules section, zero or more modeSpecs, then EOF.

    NOTE: the ``self.state = N`` assignments are ATN state numbers emitted by
    the ANTLR tool; do not change them by hand.
    """
    localctx = ANTLRv4Parser.GrammarSpecContext(self, self._ctx, self.state)
    self.enterRule(localctx, 0, self.RULE_grammarSpec)
    self._la = 0 # Token type
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 125
        self._errHandler.sync(self)
        _la = self._input.LA(1)
        # Collect leading DOC_COMMENT tokens into localctx.docs.
        while _la==ANTLRv4Parser.DOC_COMMENT:
            self.state = 122
            localctx._DOC_COMMENT = self.match(ANTLRv4Parser.DOC_COMMENT)
            localctx.docs.append(localctx._DOC_COMMENT)
            self.state = 127
            self._errHandler.sync(self)
            _la = self._input.LA(1)

        self.state = 128
        localctx.gtype = self.grammarType()
        self.state = 129
        localctx.gname = self.identifier()
        self.state = 130
        self.match(ANTLRv4Parser.SEMI)
        self.state = 134
        self._errHandler.sync(self)
        _la = self._input.LA(1)
        # Bit-set membership test: loop while lookahead is one of
        # OPTIONS, TOKENS, CHANNELS, IMPORT, AT (the prequel starters).
        while (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << ANTLRv4Parser.OPTIONS) | (1 << ANTLRv4Parser.TOKENS) | (1 << ANTLRv4Parser.CHANNELS) | (1 << ANTLRv4Parser.IMPORT) | (1 << ANTLRv4Parser.AT))) != 0):
            self.state = 131
            self.prequelConstruct()
            self.state = 136
            self._errHandler.sync(self)
            _la = self._input.LA(1)

        self.state = 137
        self.rules()
        self.state = 141
        self._errHandler.sync(self)
        _la = self._input.LA(1)
        # Optional trailing lexical modes.
        while _la==ANTLRv4Parser.MODE:
            self.state = 138
            self.modeSpec()
            self.state = 143
            self._errHandler.sync(self)
            _la = self._input.LA(1)

        self.state = 144
        self.match(ANTLRv4Parser.EOF)
    except RecognitionException as re:
        # Standard generated recovery: record, report, resynchronize.
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class GrammarTypeContext(ParserRuleContext):
    """Parse-tree context for the ``grammarType`` rule (lexer/parser/plain grammar header)."""

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    # --- Token accessors ---

    def LEXER(self):
        return self.getToken(ANTLRv4Parser.LEXER, 0)

    def GRAMMAR(self):
        return self.getToken(ANTLRv4Parser.GRAMMAR, 0)

    def PARSER(self):
        return self.getToken(ANTLRv4Parser.PARSER, 0)

    def getRuleIndex(self):
        return ANTLRv4Parser.RULE_grammarType

    # Listener/visitor dispatch; hasattr guards let partial listeners/visitors work.
    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterGrammarType" ):
            listener.enterGrammarType(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitGrammarType" ):
            listener.exitGrammarType(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitGrammarType" ):
            return visitor.visitGrammarType(self)
        else:
            return visitor.visitChildren(self)
def grammarType(self):
    """Parse the ``grammarType`` rule.

    Three alternatives, chosen on one token of lookahead:
    ``LEXER GRAMMAR`` | ``PARSER GRAMMAR`` | ``GRAMMAR``.
    State numbers are ATN states emitted by the ANTLR tool.
    """
    localctx = ANTLRv4Parser.GrammarTypeContext(self, self._ctx, self.state)
    self.enterRule(localctx, 2, self.RULE_grammarType)
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 151
        self._errHandler.sync(self)
        token = self._input.LA(1)
        if token in [ANTLRv4Parser.LEXER]:
            self.state = 146
            self.match(ANTLRv4Parser.LEXER)
            self.state = 147
            self.match(ANTLRv4Parser.GRAMMAR)
            pass
        elif token in [ANTLRv4Parser.PARSER]:
            self.state = 148
            self.match(ANTLRv4Parser.PARSER)
            self.state = 149
            self.match(ANTLRv4Parser.GRAMMAR)
            pass
        elif token in [ANTLRv4Parser.GRAMMAR]:
            self.state = 150
            self.match(ANTLRv4Parser.GRAMMAR)
            pass
        else:
            # Lookahead matches none of the alternatives.
            raise NoViableAltException(self)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class PrequelConstructContext(ParserRuleContext):
    """Parse-tree context for the ``prequelConstruct`` rule (pre-rule grammar sections)."""

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    # --- Sub-context accessors, one per alternative ---

    def optionsSpec(self):
        return self.getTypedRuleContext(ANTLRv4Parser.OptionsSpecContext,0)

    def delegateGrammars(self):
        return self.getTypedRuleContext(ANTLRv4Parser.DelegateGrammarsContext,0)

    def tokensSpec(self):
        return self.getTypedRuleContext(ANTLRv4Parser.TokensSpecContext,0)

    def channelsSpec(self):
        return self.getTypedRuleContext(ANTLRv4Parser.ChannelsSpecContext,0)

    def action(self):
        return self.getTypedRuleContext(ANTLRv4Parser.ActionContext,0)

    def getRuleIndex(self):
        return ANTLRv4Parser.RULE_prequelConstruct

    # Listener/visitor dispatch; hasattr guards let partial listeners/visitors work.
    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterPrequelConstruct" ):
            listener.enterPrequelConstruct(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitPrequelConstruct" ):
            listener.exitPrequelConstruct(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitPrequelConstruct" ):
            return visitor.visitPrequelConstruct(self)
        else:
            return visitor.visitChildren(self)
def prequelConstruct(self):
    """Parse the ``prequelConstruct`` rule.

    Five alternatives selected on one token of lookahead:
    optionsSpec | delegateGrammars | tokensSpec | channelsSpec | action.
    State numbers are ATN states emitted by the ANTLR tool.
    """
    localctx = ANTLRv4Parser.PrequelConstructContext(self, self._ctx, self.state)
    self.enterRule(localctx, 4, self.RULE_prequelConstruct)
    try:
        self.state = 158
        self._errHandler.sync(self)
        token = self._input.LA(1)
        if token in [ANTLRv4Parser.OPTIONS]:
            self.enterOuterAlt(localctx, 1)
            self.state = 153
            self.optionsSpec()
            pass
        elif token in [ANTLRv4Parser.IMPORT]:
            self.enterOuterAlt(localctx, 2)
            self.state = 154
            self.delegateGrammars()
            pass
        elif token in [ANTLRv4Parser.TOKENS]:
            self.enterOuterAlt(localctx, 3)
            self.state = 155
            self.tokensSpec()
            pass
        elif token in [ANTLRv4Parser.CHANNELS]:
            self.enterOuterAlt(localctx, 4)
            self.state = 156
            self.channelsSpec()
            pass
        elif token in [ANTLRv4Parser.AT]:
            self.enterOuterAlt(localctx, 5)
            self.state = 157
            self.action()
            pass
        else:
            # Lookahead matches none of the alternatives.
            raise NoViableAltException(self)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class OptionsSpecContext(ParserRuleContext):
    """Parse-tree context for the ``optionsSpec`` rule (an ``options {...}`` block)."""

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    # --- Token / sub-context accessors ---

    def OPTIONS(self):
        return self.getToken(ANTLRv4Parser.OPTIONS, 0)

    def RBRACE(self):
        return self.getToken(ANTLRv4Parser.RBRACE, 0)

    def option(self, i:int=None):
        # i is None -> all occurrences; otherwise the i-th occurrence.
        if i is None:
            return self.getTypedRuleContexts(ANTLRv4Parser.OptionContext)
        else:
            return self.getTypedRuleContext(ANTLRv4Parser.OptionContext,i)

    def SEMI(self, i:int=None):
        if i is None:
            return self.getTokens(ANTLRv4Parser.SEMI)
        else:
            return self.getToken(ANTLRv4Parser.SEMI, i)

    def getRuleIndex(self):
        return ANTLRv4Parser.RULE_optionsSpec

    # Listener/visitor dispatch; hasattr guards let partial listeners/visitors work.
    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterOptionsSpec" ):
            listener.enterOptionsSpec(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitOptionsSpec" ):
            listener.exitOptionsSpec(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitOptionsSpec" ):
            return visitor.visitOptionsSpec(self)
        else:
            return visitor.visitChildren(self)
def optionsSpec(self):
    """Parse the ``optionsSpec`` rule: ``OPTIONS (option SEMI)* RBRACE``.

    State numbers are ATN states emitted by the ANTLR tool.
    """
    localctx = ANTLRv4Parser.OptionsSpecContext(self, self._ctx, self.state)
    self.enterRule(localctx, 6, self.RULE_optionsSpec)
    self._la = 0 # Token type
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 160
        self.match(ANTLRv4Parser.OPTIONS)
        self.state = 166
        self._errHandler.sync(self)
        _la = self._input.LA(1)
        # Each option starts with an identifier (TOKEN_REF or RULE_REF).
        while _la==ANTLRv4Parser.TOKEN_REF or _la==ANTLRv4Parser.RULE_REF:
            self.state = 161
            self.option()
            self.state = 162
            self.match(ANTLRv4Parser.SEMI)
            self.state = 168
            self._errHandler.sync(self)
            _la = self._input.LA(1)

        self.state = 169
        self.match(ANTLRv4Parser.RBRACE)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class OptionContext(ParserRuleContext):
    """Parse-tree context for the ``option`` rule (``name = value`` inside options)."""

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser
        self.name = None # IdentifierContext
        self.value = None # OptionValueContext

    # --- Token / sub-context accessors ---

    def ASSIGN(self):
        return self.getToken(ANTLRv4Parser.ASSIGN, 0)

    def identifier(self):
        return self.getTypedRuleContext(ANTLRv4Parser.IdentifierContext,0)

    def optionValue(self):
        return self.getTypedRuleContext(ANTLRv4Parser.OptionValueContext,0)

    def getRuleIndex(self):
        return ANTLRv4Parser.RULE_option

    # Listener/visitor dispatch; hasattr guards let partial listeners/visitors work.
    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterOption" ):
            listener.enterOption(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitOption" ):
            listener.exitOption(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitOption" ):
            return visitor.visitOption(self)
        else:
            return visitor.visitChildren(self)
def option(self):
    """Parse the ``option`` rule: ``identifier ASSIGN optionValue``.

    State numbers are ATN states emitted by the ANTLR tool.
    """
    localctx = ANTLRv4Parser.OptionContext(self, self._ctx, self.state)
    self.enterRule(localctx, 8, self.RULE_option)
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 171
        localctx.name = self.identifier()
        self.state = 172
        self.match(ANTLRv4Parser.ASSIGN)
        self.state = 173
        localctx.value = self.optionValue()
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class OptionValueContext(ParserRuleContext):
    """Base parse-tree context for the ``optionValue`` rule.

    The rule has labeled alternatives, so concrete results are one of the
    subclasses below (PathOption / StringOption / ActionOption / IntOption);
    ``copyFrom`` re-parents this base context into the chosen subclass.
    """

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def getRuleIndex(self):
        return ANTLRv4Parser.RULE_optionValue

    def copyFrom(self, ctx:ParserRuleContext):
        super().copyFrom(ctx)


class StringOptionContext(OptionValueContext):
    """``optionValue`` alternative: a STRING_LITERAL value."""

    def __init__(self, parser, ctx:ParserRuleContext): # actually a ANTLRv4Parser.OptionValueContext
        super().__init__(parser)
        self.value = None # Token
        self.copyFrom(ctx)

    def STRING_LITERAL(self):
        return self.getToken(ANTLRv4Parser.STRING_LITERAL, 0)

    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterStringOption" ):
            listener.enterStringOption(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitStringOption" ):
            listener.exitStringOption(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitStringOption" ):
            return visitor.visitStringOption(self)
        else:
            return visitor.visitChildren(self)


class IntOptionContext(OptionValueContext):
    """``optionValue`` alternative: an INT value."""

    def __init__(self, parser, ctx:ParserRuleContext): # actually a ANTLRv4Parser.OptionValueContext
        super().__init__(parser)
        self.value = None # Token
        self.copyFrom(ctx)

    def INT(self):
        return self.getToken(ANTLRv4Parser.INT, 0)

    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterIntOption" ):
            listener.enterIntOption(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitIntOption" ):
            listener.exitIntOption(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitIntOption" ):
            return visitor.visitIntOption(self)
        else:
            return visitor.visitChildren(self)


class ActionOptionContext(OptionValueContext):
    """``optionValue`` alternative: an action block value."""

    def __init__(self, parser, ctx:ParserRuleContext): # actually a ANTLRv4Parser.OptionValueContext
        super().__init__(parser)
        self.value = None # ActionBlockContext
        self.copyFrom(ctx)

    def actionBlock(self):
        return self.getTypedRuleContext(ANTLRv4Parser.ActionBlockContext,0)

    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterActionOption" ):
            listener.enterActionOption(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitActionOption" ):
            listener.exitActionOption(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitActionOption" ):
            return visitor.visitActionOption(self)
        else:
            return visitor.visitChildren(self)


class PathOptionContext(OptionValueContext):
    """``optionValue`` alternative: a dotted identifier path (``a.b.c``)."""

    def __init__(self, parser, ctx:ParserRuleContext): # actually a ANTLRv4Parser.OptionValueContext
        super().__init__(parser)
        self._identifier = None # IdentifierContext
        self.value = list() # of IdentifierContexts
        self.copyFrom(ctx)

    def identifier(self, i:int=None):
        # i is None -> all occurrences; otherwise the i-th occurrence.
        if i is None:
            return self.getTypedRuleContexts(ANTLRv4Parser.IdentifierContext)
        else:
            return self.getTypedRuleContext(ANTLRv4Parser.IdentifierContext,i)

    def DOT(self, i:int=None):
        if i is None:
            return self.getTokens(ANTLRv4Parser.DOT)
        else:
            return self.getToken(ANTLRv4Parser.DOT, i)

    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterPathOption" ):
            listener.enterPathOption(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitPathOption" ):
            listener.exitPathOption(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitPathOption" ):
            return visitor.visitPathOption(self)
        else:
            return visitor.visitChildren(self)
def optionValue(self):
    """Parse the ``optionValue`` rule.

    Four labeled alternatives selected on one token of lookahead:
    dotted identifier path | STRING_LITERAL | action block | INT.
    ``localctx`` is re-wrapped in the matching labeled-alternative context
    subclass before the alternative is parsed.  State numbers are ATN states
    emitted by the ANTLR tool.
    """
    localctx = ANTLRv4Parser.OptionValueContext(self, self._ctx, self.state)
    self.enterRule(localctx, 10, self.RULE_optionValue)
    self._la = 0 # Token type
    try:
        self.state = 186
        self._errHandler.sync(self)
        token = self._input.LA(1)
        if token in [ANTLRv4Parser.TOKEN_REF, ANTLRv4Parser.RULE_REF]:
            localctx = ANTLRv4Parser.PathOptionContext(self, localctx)
            self.enterOuterAlt(localctx, 1)
            self.state = 175
            localctx._identifier = self.identifier()
            localctx.value.append(localctx._identifier)
            self.state = 180
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            # Remaining ``.identifier`` segments of the path.
            while _la==ANTLRv4Parser.DOT:
                self.state = 176
                self.match(ANTLRv4Parser.DOT)
                self.state = 177
                localctx._identifier = self.identifier()
                localctx.value.append(localctx._identifier)
                self.state = 182
                self._errHandler.sync(self)
                _la = self._input.LA(1)

            pass
        elif token in [ANTLRv4Parser.STRING_LITERAL]:
            localctx = ANTLRv4Parser.StringOptionContext(self, localctx)
            self.enterOuterAlt(localctx, 2)
            self.state = 183
            localctx.value = self.match(ANTLRv4Parser.STRING_LITERAL)
            pass
        elif token in [ANTLRv4Parser.BEGIN_ACTION]:
            localctx = ANTLRv4Parser.ActionOptionContext(self, localctx)
            self.enterOuterAlt(localctx, 3)
            self.state = 184
            localctx.value = self.actionBlock()
            pass
        elif token in [ANTLRv4Parser.INT]:
            localctx = ANTLRv4Parser.IntOptionContext(self, localctx)
            self.enterOuterAlt(localctx, 4)
            self.state = 185
            localctx.value = self.match(ANTLRv4Parser.INT)
            pass
        else:
            # Lookahead matches none of the alternatives.
            raise NoViableAltException(self)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class DelegateGrammarsContext(ParserRuleContext):
    """Parse-tree context for the ``delegateGrammars`` rule (an ``import`` statement)."""

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    # --- Token / sub-context accessors ---

    def IMPORT(self):
        return self.getToken(ANTLRv4Parser.IMPORT, 0)

    def delegateGrammar(self, i:int=None):
        # i is None -> all occurrences; otherwise the i-th occurrence.
        if i is None:
            return self.getTypedRuleContexts(ANTLRv4Parser.DelegateGrammarContext)
        else:
            return self.getTypedRuleContext(ANTLRv4Parser.DelegateGrammarContext,i)

    def SEMI(self):
        return self.getToken(ANTLRv4Parser.SEMI, 0)

    def COMMA(self, i:int=None):
        if i is None:
            return self.getTokens(ANTLRv4Parser.COMMA)
        else:
            return self.getToken(ANTLRv4Parser.COMMA, i)

    def getRuleIndex(self):
        return ANTLRv4Parser.RULE_delegateGrammars

    # Listener/visitor dispatch; hasattr guards let partial listeners/visitors work.
    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterDelegateGrammars" ):
            listener.enterDelegateGrammars(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitDelegateGrammars" ):
            listener.exitDelegateGrammars(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitDelegateGrammars" ):
            return visitor.visitDelegateGrammars(self)
        else:
            return visitor.visitChildren(self)
def delegateGrammars(self):
    """Parse the ``delegateGrammars`` rule: IMPORT delegateGrammar (COMMA delegateGrammar)* SEMI.

    ANTLR-generated: the ``self.state`` assignments are serialized-ATN state
    numbers and must not be reordered or edited by hand.
    """
    localctx = ANTLRv4Parser.DelegateGrammarsContext(self, self._ctx, self.state)
    self.enterRule(localctx, 12, self.RULE_delegateGrammars)
    self._la = 0 # Token type
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 188
        self.match(ANTLRv4Parser.IMPORT)
        self.state = 189
        self.delegateGrammar()
        self.state = 194
        self._errHandler.sync(self)
        _la = self._input.LA(1)
        # Zero or more additional comma-separated delegate grammars.
        while _la==ANTLRv4Parser.COMMA:
            self.state = 190
            self.match(ANTLRv4Parser.COMMA)
            self.state = 191
            self.delegateGrammar()
            self.state = 196
            self._errHandler.sync(self)
            _la = self._input.LA(1)
        self.state = 197
        self.match(ANTLRv4Parser.SEMI)
    except RecognitionException as re:
        # Standard ANTLR error handling: record, report, then resynchronize.
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class DelegateGrammarContext(ParserRuleContext):
    """Parse-tree node for ``delegateGrammar``: a single imported-grammar identifier."""

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser
        self.value = None # IdentifierContext

    def identifier(self):
        # The grammar-name child context.
        return self.getTypedRuleContext(ANTLRv4Parser.IdentifierContext, 0)

    def getRuleIndex(self):
        return ANTLRv4Parser.RULE_delegateGrammar

    def enterRule(self, listener:ParseTreeListener):
        # Dispatch only when the listener implements the hook (EAFP form of hasattr).
        try:
            hook = listener.enterDelegateGrammar
        except AttributeError:
            return
        hook(self)

    def exitRule(self, listener:ParseTreeListener):
        try:
            hook = listener.exitDelegateGrammar
        except AttributeError:
            return
        hook(self)

    def accept(self, visitor:ParseTreeVisitor):
        # Prefer the dedicated visit method; fall back to generic child visiting.
        try:
            visit = visitor.visitDelegateGrammar
        except AttributeError:
            return visitor.visitChildren(self)
        return visit(self)
def delegateGrammar(self):
    """Parse the ``delegateGrammar`` rule: a single identifier naming an imported grammar.

    ANTLR-generated: ``self.state`` values are serialized-ATN state numbers.
    """
    localctx = ANTLRv4Parser.DelegateGrammarContext(self, self._ctx, self.state)
    self.enterRule(localctx, 14, self.RULE_delegateGrammar)
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 199
        # Stash the parsed identifier on the context for later retrieval.
        localctx.value = self.identifier()
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class TokensSpecContext(ParserRuleContext):
    """Parse-tree node for ``tokensSpec``: TOKENS idList? RBRACE."""

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser
        self.defs = None # IdListContext

    def TOKENS(self):
        # The TOKENS keyword token.
        return self.getToken(ANTLRv4Parser.TOKENS, 0)

    def RBRACE(self):
        # The closing brace token.
        return self.getToken(ANTLRv4Parser.RBRACE, 0)

    def idList(self):
        # The optional list of declared token names.
        return self.getTypedRuleContext(ANTLRv4Parser.IdListContext, 0)

    def getRuleIndex(self):
        return ANTLRv4Parser.RULE_tokensSpec

    def enterRule(self, listener:ParseTreeListener):
        # Dispatch only when the listener implements the hook (EAFP form of hasattr).
        try:
            hook = listener.enterTokensSpec
        except AttributeError:
            return
        hook(self)

    def exitRule(self, listener:ParseTreeListener):
        try:
            hook = listener.exitTokensSpec
        except AttributeError:
            return
        hook(self)

    def accept(self, visitor:ParseTreeVisitor):
        # Prefer the dedicated visit method; fall back to generic child visiting.
        try:
            visit = visitor.visitTokensSpec
        except AttributeError:
            return visitor.visitChildren(self)
        return visit(self)
def tokensSpec(self):
    """Parse the ``tokensSpec`` rule: TOKENS idList? RBRACE.

    ANTLR-generated: ``self.state`` values are serialized-ATN state numbers.
    """
    localctx = ANTLRv4Parser.TokensSpecContext(self, self._ctx, self.state)
    self.enterRule(localctx, 16, self.RULE_tokensSpec)
    self._la = 0 # Token type
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 201
        self.match(ANTLRv4Parser.TOKENS)
        self.state = 203
        self._errHandler.sync(self)
        _la = self._input.LA(1)
        # The id list is optional: parse it only if the next token can start one.
        if _la==ANTLRv4Parser.TOKEN_REF or _la==ANTLRv4Parser.RULE_REF:
            self.state = 202
            localctx.defs = self.idList()
        self.state = 205
        self.match(ANTLRv4Parser.RBRACE)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class ChannelsSpecContext(ParserRuleContext):
    """Parse-tree node for ``channelsSpec``: CHANNELS idList? RBRACE."""

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def CHANNELS(self):
        # The CHANNELS keyword token.
        return self.getToken(ANTLRv4Parser.CHANNELS, 0)

    def RBRACE(self):
        # The closing brace token.
        return self.getToken(ANTLRv4Parser.RBRACE, 0)

    def idList(self):
        # The optional list of declared channel names.
        return self.getTypedRuleContext(ANTLRv4Parser.IdListContext, 0)

    def getRuleIndex(self):
        return ANTLRv4Parser.RULE_channelsSpec

    def enterRule(self, listener:ParseTreeListener):
        # Dispatch only when the listener implements the hook (EAFP form of hasattr).
        try:
            hook = listener.enterChannelsSpec
        except AttributeError:
            return
        hook(self)

    def exitRule(self, listener:ParseTreeListener):
        try:
            hook = listener.exitChannelsSpec
        except AttributeError:
            return
        hook(self)

    def accept(self, visitor:ParseTreeVisitor):
        # Prefer the dedicated visit method; fall back to generic child visiting.
        try:
            visit = visitor.visitChannelsSpec
        except AttributeError:
            return visitor.visitChildren(self)
        return visit(self)
def channelsSpec(self):
    """Parse the ``channelsSpec`` rule: CHANNELS idList? RBRACE.

    ANTLR-generated: ``self.state`` values are serialized-ATN state numbers.
    """
    localctx = ANTLRv4Parser.ChannelsSpecContext(self, self._ctx, self.state)
    self.enterRule(localctx, 18, self.RULE_channelsSpec)
    self._la = 0 # Token type
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 207
        self.match(ANTLRv4Parser.CHANNELS)
        self.state = 209
        self._errHandler.sync(self)
        _la = self._input.LA(1)
        # The id list is optional: parse it only if the next token can start one.
        if _la==ANTLRv4Parser.TOKEN_REF or _la==ANTLRv4Parser.RULE_REF:
            self.state = 208
            self.idList()
        self.state = 211
        self.match(ANTLRv4Parser.RBRACE)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class IdListContext(ParserRuleContext):
    """Parse-tree node for ``idList``: identifier (COMMA identifier)* COMMA?."""

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser
        self._identifier = None # IdentifierContext
        self.defs = [] # of IdentifierContexts

    def identifier(self, i:int=None):
        # All identifier children when i is None, otherwise the i-th one.
        if i is None:
            return self.getTypedRuleContexts(ANTLRv4Parser.IdentifierContext)
        return self.getTypedRuleContext(ANTLRv4Parser.IdentifierContext, i)

    def COMMA(self, i:int=None):
        # Every separator comma when i is None, otherwise the i-th one.
        return self.getTokens(ANTLRv4Parser.COMMA) if i is None else self.getToken(ANTLRv4Parser.COMMA, i)

    def getRuleIndex(self):
        return ANTLRv4Parser.RULE_idList

    def enterRule(self, listener:ParseTreeListener):
        # Dispatch only when the listener implements the hook (EAFP form of hasattr).
        try:
            hook = listener.enterIdList
        except AttributeError:
            return
        hook(self)

    def exitRule(self, listener:ParseTreeListener):
        try:
            hook = listener.exitIdList
        except AttributeError:
            return
        hook(self)

    def accept(self, visitor:ParseTreeVisitor):
        # Prefer the dedicated visit method; fall back to generic child visiting.
        try:
            visit = visitor.visitIdList
        except AttributeError:
            return visitor.visitChildren(self)
        return visit(self)
def idList(self):
    """Parse the ``idList`` rule: identifier (COMMA identifier)* COMMA?.

    ANTLR-generated: ``self.state`` values are serialized-ATN state numbers,
    and the ``adaptivePredict`` calls disambiguate a separator comma from the
    optional trailing comma.
    """
    localctx = ANTLRv4Parser.IdListContext(self, self._ctx, self.state)
    self.enterRule(localctx, 20, self.RULE_idList)
    self._la = 0 # Token type
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 213
        localctx._identifier = self.identifier()
        localctx.defs.append(localctx._identifier)
        self.state = 218
        self._errHandler.sync(self)
        _alt = self._interp.adaptivePredict(self._input,11,self._ctx)
        # Loop over ", identifier" pairs; _alt==2 means "exit the loop".
        while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
            if _alt==1:
                self.state = 214
                self.match(ANTLRv4Parser.COMMA)
                self.state = 215
                localctx._identifier = self.identifier()
                localctx.defs.append(localctx._identifier)
            self.state = 220
            self._errHandler.sync(self)
            _alt = self._interp.adaptivePredict(self._input,11,self._ctx)
        self.state = 222
        self._errHandler.sync(self)
        _la = self._input.LA(1)
        # Optional trailing comma.
        if _la==ANTLRv4Parser.COMMA:
            self.state = 221
            self.match(ANTLRv4Parser.COMMA)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class ActionContext(ParserRuleContext):
    """Parse-tree node for ``action``: AT (actionScopeName COLONCOLON)? identifier actionBlock."""

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def AT(self):
        # The leading '@' token.
        return self.getToken(ANTLRv4Parser.AT, 0)

    def identifier(self):
        # The action-name child context.
        return self.getTypedRuleContext(ANTLRv4Parser.IdentifierContext, 0)

    def actionBlock(self):
        # The action body child context.
        return self.getTypedRuleContext(ANTLRv4Parser.ActionBlockContext, 0)

    def actionScopeName(self):
        # The optional scope-name child context.
        return self.getTypedRuleContext(ANTLRv4Parser.ActionScopeNameContext, 0)

    def COLONCOLON(self):
        # The optional '::' separator token.
        return self.getToken(ANTLRv4Parser.COLONCOLON, 0)

    def getRuleIndex(self):
        return ANTLRv4Parser.RULE_action

    def enterRule(self, listener:ParseTreeListener):
        # Dispatch only when the listener implements the hook (EAFP form of hasattr).
        try:
            hook = listener.enterAction
        except AttributeError:
            return
        hook(self)

    def exitRule(self, listener:ParseTreeListener):
        try:
            hook = listener.exitAction
        except AttributeError:
            return
        hook(self)

    def accept(self, visitor:ParseTreeVisitor):
        # Prefer the dedicated visit method; fall back to generic child visiting.
        try:
            visit = visitor.visitAction
        except AttributeError:
            return visitor.visitChildren(self)
        return visit(self)
def action(self):
    """Parse the ``action`` rule: AT (actionScopeName COLONCOLON)? identifier actionBlock.

    ANTLR-generated: ``self.state`` values are serialized-ATN state numbers.
    """
    localctx = ANTLRv4Parser.ActionContext(self, self._ctx, self.state)
    self.enterRule(localctx, 22, self.RULE_action)
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 224
        self.match(ANTLRv4Parser.AT)
        self.state = 228
        self._errHandler.sync(self)
        # Predict whether the optional "scopeName ::" prefix is present.
        la_ = self._interp.adaptivePredict(self._input,13,self._ctx)
        if la_ == 1:
            self.state = 225
            self.actionScopeName()
            self.state = 226
            self.match(ANTLRv4Parser.COLONCOLON)
        self.state = 230
        self.identifier()
        self.state = 231
        self.actionBlock()
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class ActionScopeNameContext(ParserRuleContext):
    """Parse-tree node for ``actionScopeName``: identifier | LEXER | PARSER."""

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def identifier(self):
        # Present when the scope is a plain identifier.
        return self.getTypedRuleContext(ANTLRv4Parser.IdentifierContext, 0)

    def LEXER(self):
        # Present when the scope is the LEXER keyword.
        return self.getToken(ANTLRv4Parser.LEXER, 0)

    def PARSER(self):
        # Present when the scope is the PARSER keyword.
        return self.getToken(ANTLRv4Parser.PARSER, 0)

    def getRuleIndex(self):
        return ANTLRv4Parser.RULE_actionScopeName

    def enterRule(self, listener:ParseTreeListener):
        # Dispatch only when the listener implements the hook (EAFP form of hasattr).
        try:
            hook = listener.enterActionScopeName
        except AttributeError:
            return
        hook(self)

    def exitRule(self, listener:ParseTreeListener):
        try:
            hook = listener.exitActionScopeName
        except AttributeError:
            return
        hook(self)

    def accept(self, visitor:ParseTreeVisitor):
        # Prefer the dedicated visit method; fall back to generic child visiting.
        try:
            visit = visitor.visitActionScopeName
        except AttributeError:
            return visitor.visitChildren(self)
        return visit(self)
def actionScopeName(self):
    """Parse the ``actionScopeName`` rule: identifier | LEXER | PARSER.

    ANTLR-generated: ``self.state`` values are serialized-ATN state numbers;
    the alternative is chosen on one token of lookahead.
    """
    localctx = ANTLRv4Parser.ActionScopeNameContext(self, self._ctx, self.state)
    self.enterRule(localctx, 24, self.RULE_actionScopeName)
    try:
        self.state = 236
        self._errHandler.sync(self)
        token = self._input.LA(1)
        if token in [ANTLRv4Parser.TOKEN_REF, ANTLRv4Parser.RULE_REF]:
            self.enterOuterAlt(localctx, 1)
            self.state = 233
            self.identifier()
            pass
        elif token in [ANTLRv4Parser.LEXER]:
            self.enterOuterAlt(localctx, 2)
            self.state = 234
            self.match(ANTLRv4Parser.LEXER)
            pass
        elif token in [ANTLRv4Parser.PARSER]:
            self.enterOuterAlt(localctx, 3)
            self.state = 235
            self.match(ANTLRv4Parser.PARSER)
            pass
        else:
            # No alternative matches the lookahead token.
            raise NoViableAltException(self)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class ActionBlockContext(ParserRuleContext):
    """Parse-tree node for ``actionBlock``: BEGIN_ACTION ACTION_CONTENT* END_ACTION."""

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def BEGIN_ACTION(self):
        # The opening action-delimiter token.
        return self.getToken(ANTLRv4Parser.BEGIN_ACTION, 0)

    def END_ACTION(self):
        # The closing action-delimiter token.
        return self.getToken(ANTLRv4Parser.END_ACTION, 0)

    def ACTION_CONTENT(self, i:int=None):
        # Every content token when i is None, otherwise the i-th one.
        return self.getTokens(ANTLRv4Parser.ACTION_CONTENT) if i is None else self.getToken(ANTLRv4Parser.ACTION_CONTENT, i)

    def getRuleIndex(self):
        return ANTLRv4Parser.RULE_actionBlock

    def enterRule(self, listener:ParseTreeListener):
        # Dispatch only when the listener implements the hook (EAFP form of hasattr).
        try:
            hook = listener.enterActionBlock
        except AttributeError:
            return
        hook(self)

    def exitRule(self, listener:ParseTreeListener):
        try:
            hook = listener.exitActionBlock
        except AttributeError:
            return
        hook(self)

    def accept(self, visitor:ParseTreeVisitor):
        # Prefer the dedicated visit method; fall back to generic child visiting.
        try:
            visit = visitor.visitActionBlock
        except AttributeError:
            return visitor.visitChildren(self)
        return visit(self)
def actionBlock(self):
    """Parse the ``actionBlock`` rule: BEGIN_ACTION ACTION_CONTENT* END_ACTION.

    ANTLR-generated: ``self.state`` values are serialized-ATN state numbers.
    """
    localctx = ANTLRv4Parser.ActionBlockContext(self, self._ctx, self.state)
    self.enterRule(localctx, 26, self.RULE_actionBlock)
    self._la = 0 # Token type
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 238
        self.match(ANTLRv4Parser.BEGIN_ACTION)
        self.state = 242
        self._errHandler.sync(self)
        _la = self._input.LA(1)
        # Consume arbitrary action-body content tokens until the closing delimiter.
        while _la==ANTLRv4Parser.ACTION_CONTENT:
            self.state = 239
            self.match(ANTLRv4Parser.ACTION_CONTENT)
            self.state = 244
            self._errHandler.sync(self)
            _la = self._input.LA(1)
        self.state = 245
        self.match(ANTLRv4Parser.END_ACTION)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class ArgActionBlockContext(ParserRuleContext):
    """Parse-tree node for ``argActionBlock``: BEGIN_ARGUMENT ARGUMENT_CONTENT* END_ARGUMENT."""

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def BEGIN_ARGUMENT(self):
        # The opening argument-delimiter token.
        return self.getToken(ANTLRv4Parser.BEGIN_ARGUMENT, 0)

    def END_ARGUMENT(self):
        # The closing argument-delimiter token.
        return self.getToken(ANTLRv4Parser.END_ARGUMENT, 0)

    def ARGUMENT_CONTENT(self, i:int=None):
        # Every content token when i is None, otherwise the i-th one.
        return self.getTokens(ANTLRv4Parser.ARGUMENT_CONTENT) if i is None else self.getToken(ANTLRv4Parser.ARGUMENT_CONTENT, i)

    def getRuleIndex(self):
        return ANTLRv4Parser.RULE_argActionBlock

    def enterRule(self, listener:ParseTreeListener):
        # Dispatch only when the listener implements the hook (EAFP form of hasattr).
        try:
            hook = listener.enterArgActionBlock
        except AttributeError:
            return
        hook(self)

    def exitRule(self, listener:ParseTreeListener):
        try:
            hook = listener.exitArgActionBlock
        except AttributeError:
            return
        hook(self)

    def accept(self, visitor:ParseTreeVisitor):
        # Prefer the dedicated visit method; fall back to generic child visiting.
        try:
            visit = visitor.visitArgActionBlock
        except AttributeError:
            return visitor.visitChildren(self)
        return visit(self)
def argActionBlock(self):
    """Parse the ``argActionBlock`` rule: BEGIN_ARGUMENT ARGUMENT_CONTENT* END_ARGUMENT.

    ANTLR-generated: ``self.state`` values are serialized-ATN state numbers.
    """
    localctx = ANTLRv4Parser.ArgActionBlockContext(self, self._ctx, self.state)
    self.enterRule(localctx, 28, self.RULE_argActionBlock)
    self._la = 0 # Token type
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 247
        self.match(ANTLRv4Parser.BEGIN_ARGUMENT)
        self.state = 251
        self._errHandler.sync(self)
        _la = self._input.LA(1)
        # Consume arbitrary argument-body content tokens until the closing delimiter.
        while _la==ANTLRv4Parser.ARGUMENT_CONTENT:
            self.state = 248
            self.match(ANTLRv4Parser.ARGUMENT_CONTENT)
            self.state = 253
            self._errHandler.sync(self)
            _la = self._input.LA(1)
        self.state = 254
        self.match(ANTLRv4Parser.END_ARGUMENT)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class ModeSpecContext(ParserRuleContext):
    """Parse-tree node for ``modeSpec``: MODE identifier SEMI lexerRuleSpec*."""

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def MODE(self):
        # The MODE keyword token.
        return self.getToken(ANTLRv4Parser.MODE, 0)

    def identifier(self):
        # The mode-name child context.
        return self.getTypedRuleContext(ANTLRv4Parser.IdentifierContext, 0)

    def SEMI(self):
        # The semicolon after the mode name.
        return self.getToken(ANTLRv4Parser.SEMI, 0)

    def lexerRuleSpec(self, i:int=None):
        # All lexer-rule children when i is None, otherwise the i-th one.
        if i is None:
            return self.getTypedRuleContexts(ANTLRv4Parser.LexerRuleSpecContext)
        return self.getTypedRuleContext(ANTLRv4Parser.LexerRuleSpecContext, i)

    def getRuleIndex(self):
        return ANTLRv4Parser.RULE_modeSpec

    def enterRule(self, listener:ParseTreeListener):
        # Dispatch only when the listener implements the hook (EAFP form of hasattr).
        try:
            hook = listener.enterModeSpec
        except AttributeError:
            return
        hook(self)

    def exitRule(self, listener:ParseTreeListener):
        try:
            hook = listener.exitModeSpec
        except AttributeError:
            return
        hook(self)

    def accept(self, visitor:ParseTreeVisitor):
        # Prefer the dedicated visit method; fall back to generic child visiting.
        try:
            visit = visitor.visitModeSpec
        except AttributeError:
            return visitor.visitChildren(self)
        return visit(self)
def modeSpec(self):
    """Parse the ``modeSpec`` rule: MODE identifier SEMI lexerRuleSpec*.

    ANTLR-generated: ``self.state`` values are serialized-ATN state numbers,
    and the while-condition is a generated bit-set test over token types.
    """
    localctx = ANTLRv4Parser.ModeSpecContext(self, self._ctx, self.state)
    self.enterRule(localctx, 30, self.RULE_modeSpec)
    self._la = 0 # Token type
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 256
        self.match(ANTLRv4Parser.MODE)
        self.state = 257
        self.identifier()
        self.state = 258
        self.match(ANTLRv4Parser.SEMI)
        self.state = 262
        self._errHandler.sync(self)
        _la = self._input.LA(1)
        # Loop while the lookahead can start a lexer rule (TOKEN_REF / DOC_COMMENT / FRAGMENT).
        while (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << ANTLRv4Parser.TOKEN_REF) | (1 << ANTLRv4Parser.DOC_COMMENT) | (1 << ANTLRv4Parser.FRAGMENT))) != 0):
            self.state = 259
            self.lexerRuleSpec()
            self.state = 264
            self._errHandler.sync(self)
            _la = self._input.LA(1)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class RulesContext(ParserRuleContext):
    """Parse-tree node for ``rules``: ruleSpec*."""

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def ruleSpec(self, i:int=None):
        # All rule-spec children when i is None, otherwise the i-th one.
        if i is None:
            return self.getTypedRuleContexts(ANTLRv4Parser.RuleSpecContext)
        return self.getTypedRuleContext(ANTLRv4Parser.RuleSpecContext, i)

    def getRuleIndex(self):
        return ANTLRv4Parser.RULE_rules

    def enterRule(self, listener:ParseTreeListener):
        # Dispatch only when the listener implements the hook (EAFP form of hasattr).
        try:
            hook = listener.enterRules
        except AttributeError:
            return
        hook(self)

    def exitRule(self, listener:ParseTreeListener):
        try:
            hook = listener.exitRules
        except AttributeError:
            return
        hook(self)

    def accept(self, visitor:ParseTreeVisitor):
        # Prefer the dedicated visit method; fall back to generic child visiting.
        try:
            visit = visitor.visitRules
        except AttributeError:
            return visitor.visitChildren(self)
        return visit(self)
def rules(self):
    """Parse the ``rules`` rule: ruleSpec*.

    ANTLR-generated: ``self.state`` values are serialized-ATN state numbers,
    and the while-condition is a generated bit-set test over token types.
    """
    localctx = ANTLRv4Parser.RulesContext(self, self._ctx, self.state)
    self.enterRule(localctx, 32, self.RULE_rules)
    self._la = 0 # Token type
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 268
        self._errHandler.sync(self)
        _la = self._input.LA(1)
        # Loop while the lookahead can start a rule spec (refs, doc comments, modifiers).
        while (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << ANTLRv4Parser.TOKEN_REF) | (1 << ANTLRv4Parser.RULE_REF) | (1 << ANTLRv4Parser.DOC_COMMENT) | (1 << ANTLRv4Parser.HEADER) | (1 << ANTLRv4Parser.FRAGMENT) | (1 << ANTLRv4Parser.PROTECTED) | (1 << ANTLRv4Parser.PUBLIC) | (1 << ANTLRv4Parser.PRIVATE))) != 0):
            self.state = 265
            self.ruleSpec()
            self.state = 270
            self._errHandler.sync(self)
            _la = self._input.LA(1)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class RuleSpecContext(ParserRuleContext):
    """Parse-tree node for ``ruleSpec``: HEADER* (parserRuleSpec | lexerRuleSpec)."""

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser
        self._HEADER = None # Token
        self.headers = [] # of Tokens

    def parserRuleSpec(self):
        # Present when this spec is a parser rule.
        return self.getTypedRuleContext(ANTLRv4Parser.ParserRuleSpecContext, 0)

    def lexerRuleSpec(self):
        # Present when this spec is a lexer rule.
        return self.getTypedRuleContext(ANTLRv4Parser.LexerRuleSpecContext, 0)

    def HEADER(self, i:int=None):
        # Every HEADER token when i is None, otherwise the i-th one.
        return self.getTokens(ANTLRv4Parser.HEADER) if i is None else self.getToken(ANTLRv4Parser.HEADER, i)

    def getRuleIndex(self):
        return ANTLRv4Parser.RULE_ruleSpec

    def enterRule(self, listener:ParseTreeListener):
        # Dispatch only when the listener implements the hook (EAFP form of hasattr).
        try:
            hook = listener.enterRuleSpec
        except AttributeError:
            return
        hook(self)

    def exitRule(self, listener:ParseTreeListener):
        try:
            hook = listener.exitRuleSpec
        except AttributeError:
            return
        hook(self)

    def accept(self, visitor:ParseTreeVisitor):
        # Prefer the dedicated visit method; fall back to generic child visiting.
        try:
            visit = visitor.visitRuleSpec
        except AttributeError:
            return visitor.visitChildren(self)
        return visit(self)
def ruleSpec(self):
    """Parse the ``ruleSpec`` rule: HEADER* (parserRuleSpec | lexerRuleSpec).

    ANTLR-generated: ``self.state`` values are serialized-ATN state numbers;
    ``adaptivePredict`` chooses between the parser-rule and lexer-rule branch.
    """
    localctx = ANTLRv4Parser.RuleSpecContext(self, self._ctx, self.state)
    self.enterRule(localctx, 34, self.RULE_ruleSpec)
    self._la = 0 # Token type
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 274
        self._errHandler.sync(self)
        _la = self._input.LA(1)
        # Collect any leading HEADER tokens.
        while _la==ANTLRv4Parser.HEADER:
            self.state = 271
            localctx._HEADER = self.match(ANTLRv4Parser.HEADER)
            localctx.headers.append(localctx._HEADER)
            self.state = 276
            self._errHandler.sync(self)
            _la = self._input.LA(1)
        self.state = 279
        self._errHandler.sync(self)
        la_ = self._interp.adaptivePredict(self._input,20,self._ctx)
        if la_ == 1:
            self.state = 277
            self.parserRuleSpec()
            pass
        elif la_ == 2:
            self.state = 278
            self.lexerRuleSpec()
            pass
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class ParserRuleSpecContext(ParserRuleContext):
    """Parse-tree node for ``parserRuleSpec``: a full parser-rule declaration.

    Shape: DOC_COMMENT* ruleModifiers? RULE_REF argActionBlock? ruleReturns?
    throwsSpec? localsSpec? rulePrequel* COLON ruleBlock SEMI exceptionGroup.
    """

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser
        self._DOC_COMMENT = None # Token
        self.docs = [] # of Tokens
        self.name = None # Token

    def COLON(self):
        # The colon introducing the rule body.
        return self.getToken(ANTLRv4Parser.COLON, 0)

    def ruleBlock(self):
        # The rule's alternatives block.
        return self.getTypedRuleContext(ANTLRv4Parser.RuleBlockContext, 0)

    def SEMI(self):
        # The terminating semicolon token.
        return self.getToken(ANTLRv4Parser.SEMI, 0)

    def exceptionGroup(self):
        # The trailing catch/finally group (possibly empty).
        return self.getTypedRuleContext(ANTLRv4Parser.ExceptionGroupContext, 0)

    def RULE_REF(self):
        # The rule-name token.
        return self.getToken(ANTLRv4Parser.RULE_REF, 0)

    def ruleModifiers(self):
        # Optional visibility/fragment modifiers.
        return self.getTypedRuleContext(ANTLRv4Parser.RuleModifiersContext, 0)

    def argActionBlock(self):
        # Optional rule-argument block.
        return self.getTypedRuleContext(ANTLRv4Parser.ArgActionBlockContext, 0)

    def ruleReturns(self):
        # Optional returns clause.
        return self.getTypedRuleContext(ANTLRv4Parser.RuleReturnsContext, 0)

    def throwsSpec(self):
        # Optional throws clause.
        return self.getTypedRuleContext(ANTLRv4Parser.ThrowsSpecContext, 0)

    def localsSpec(self):
        # Optional locals clause.
        return self.getTypedRuleContext(ANTLRv4Parser.LocalsSpecContext, 0)

    def rulePrequel(self, i:int=None):
        # All prequel children when i is None, otherwise the i-th one.
        if i is None:
            return self.getTypedRuleContexts(ANTLRv4Parser.RulePrequelContext)
        return self.getTypedRuleContext(ANTLRv4Parser.RulePrequelContext, i)

    def DOC_COMMENT(self, i:int=None):
        # Every doc-comment token when i is None, otherwise the i-th one.
        return self.getTokens(ANTLRv4Parser.DOC_COMMENT) if i is None else self.getToken(ANTLRv4Parser.DOC_COMMENT, i)

    def getRuleIndex(self):
        return ANTLRv4Parser.RULE_parserRuleSpec

    def enterRule(self, listener:ParseTreeListener):
        # Dispatch only when the listener implements the hook (EAFP form of hasattr).
        try:
            hook = listener.enterParserRuleSpec
        except AttributeError:
            return
        hook(self)

    def exitRule(self, listener:ParseTreeListener):
        try:
            hook = listener.exitParserRuleSpec
        except AttributeError:
            return
        hook(self)

    def accept(self, visitor:ParseTreeVisitor):
        # Prefer the dedicated visit method; fall back to generic child visiting.
        try:
            visit = visitor.visitParserRuleSpec
        except AttributeError:
            return visitor.visitChildren(self)
        return visit(self)
def parserRuleSpec(self):
    """Parse the ``parserRuleSpec`` rule.

    Shape: DOC_COMMENT* ruleModifiers? RULE_REF argActionBlock? ruleReturns?
    throwsSpec? localsSpec? rulePrequel* COLON ruleBlock SEMI exceptionGroup.

    ANTLR-generated: ``self.state`` values are serialized-ATN state numbers
    and must not be reordered by hand.
    """
    localctx = ANTLRv4Parser.ParserRuleSpecContext(self, self._ctx, self.state)
    self.enterRule(localctx, 36, self.RULE_parserRuleSpec)
    self._la = 0 # Token type
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 284
        self._errHandler.sync(self)
        _la = self._input.LA(1)
        # Collect any leading doc comments.
        while _la==ANTLRv4Parser.DOC_COMMENT:
            self.state = 281
            localctx._DOC_COMMENT = self.match(ANTLRv4Parser.DOC_COMMENT)
            localctx.docs.append(localctx._DOC_COMMENT)
            self.state = 286
            self._errHandler.sync(self)
            _la = self._input.LA(1)
        self.state = 288
        self._errHandler.sync(self)
        _la = self._input.LA(1)
        # Optional rule modifiers (fragment / protected / public / private).
        if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << ANTLRv4Parser.FRAGMENT) | (1 << ANTLRv4Parser.PROTECTED) | (1 << ANTLRv4Parser.PUBLIC) | (1 << ANTLRv4Parser.PRIVATE))) != 0):
            self.state = 287
            self.ruleModifiers()
        self.state = 290
        localctx.name = self.match(ANTLRv4Parser.RULE_REF)
        self.state = 292
        self._errHandler.sync(self)
        _la = self._input.LA(1)
        # Optional argument block.
        if _la==ANTLRv4Parser.BEGIN_ARGUMENT:
            self.state = 291
            self.argActionBlock()
        self.state = 295
        self._errHandler.sync(self)
        _la = self._input.LA(1)
        # Optional returns clause.
        if _la==ANTLRv4Parser.RETURNS:
            self.state = 294
            self.ruleReturns()
        self.state = 298
        self._errHandler.sync(self)
        _la = self._input.LA(1)
        # Optional throws clause.
        if _la==ANTLRv4Parser.THROWS:
            self.state = 297
            self.throwsSpec()
        self.state = 301
        self._errHandler.sync(self)
        _la = self._input.LA(1)
        # Optional locals clause.
        if _la==ANTLRv4Parser.LOCALS:
            self.state = 300
            self.localsSpec()
        self.state = 306
        self._errHandler.sync(self)
        _la = self._input.LA(1)
        # Zero or more prequels (options specs or rule actions).
        while _la==ANTLRv4Parser.OPTIONS or _la==ANTLRv4Parser.AT:
            self.state = 303
            self.rulePrequel()
            self.state = 308
            self._errHandler.sync(self)
            _la = self._input.LA(1)
        self.state = 309
        self.match(ANTLRv4Parser.COLON)
        self.state = 310
        self.ruleBlock()
        self.state = 311
        self.match(ANTLRv4Parser.SEMI)
        self.state = 312
        self.exceptionGroup()
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class ExceptionGroupContext(ParserRuleContext):
    """Parse-tree node for ``exceptionGroup``: exceptionHandler* finallyClause?."""

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def exceptionHandler(self, i:int=None):
        # All handler children when i is None, otherwise the i-th one.
        if i is None:
            return self.getTypedRuleContexts(ANTLRv4Parser.ExceptionHandlerContext)
        return self.getTypedRuleContext(ANTLRv4Parser.ExceptionHandlerContext, i)

    def finallyClause(self):
        # The optional finally clause.
        return self.getTypedRuleContext(ANTLRv4Parser.FinallyClauseContext, 0)

    def getRuleIndex(self):
        return ANTLRv4Parser.RULE_exceptionGroup

    def enterRule(self, listener:ParseTreeListener):
        # Dispatch only when the listener implements the hook (EAFP form of hasattr).
        try:
            hook = listener.enterExceptionGroup
        except AttributeError:
            return
        hook(self)

    def exitRule(self, listener:ParseTreeListener):
        try:
            hook = listener.exitExceptionGroup
        except AttributeError:
            return
        hook(self)

    def accept(self, visitor:ParseTreeVisitor):
        # Prefer the dedicated visit method; fall back to generic child visiting.
        try:
            visit = visitor.visitExceptionGroup
        except AttributeError:
            return visitor.visitChildren(self)
        return visit(self)
def exceptionGroup(self):
    """Parse the ``exceptionGroup`` rule: exceptionHandler* finallyClause?.

    ANTLR-generated: ``self.state`` values are serialized-ATN state numbers.
    """
    localctx = ANTLRv4Parser.ExceptionGroupContext(self, self._ctx, self.state)
    self.enterRule(localctx, 38, self.RULE_exceptionGroup)
    self._la = 0 # Token type
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 317
        self._errHandler.sync(self)
        _la = self._input.LA(1)
        # Zero or more catch handlers.
        while _la==ANTLRv4Parser.CATCH:
            self.state = 314
            self.exceptionHandler()
            self.state = 319
            self._errHandler.sync(self)
            _la = self._input.LA(1)
        self.state = 321
        self._errHandler.sync(self)
        _la = self._input.LA(1)
        # Optional finally clause.
        if _la==ANTLRv4Parser.FINALLY:
            self.state = 320
            self.finallyClause()
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class ExceptionHandlerContext(ParserRuleContext):
    """Parse-tree node for ``exceptionHandler``: CATCH argActionBlock actionBlock."""

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def CATCH(self):
        # The CATCH keyword token.
        return self.getToken(ANTLRv4Parser.CATCH, 0)

    def argActionBlock(self):
        # The caught-exception declaration block.
        return self.getTypedRuleContext(ANTLRv4Parser.ArgActionBlockContext, 0)

    def actionBlock(self):
        # The handler body block.
        return self.getTypedRuleContext(ANTLRv4Parser.ActionBlockContext, 0)

    def getRuleIndex(self):
        return ANTLRv4Parser.RULE_exceptionHandler

    def enterRule(self, listener:ParseTreeListener):
        # Dispatch only when the listener implements the hook (EAFP form of hasattr).
        try:
            hook = listener.enterExceptionHandler
        except AttributeError:
            return
        hook(self)

    def exitRule(self, listener:ParseTreeListener):
        try:
            hook = listener.exitExceptionHandler
        except AttributeError:
            return
        hook(self)

    def accept(self, visitor:ParseTreeVisitor):
        # Prefer the dedicated visit method; fall back to generic child visiting.
        try:
            visit = visitor.visitExceptionHandler
        except AttributeError:
            return visitor.visitChildren(self)
        return visit(self)
def exceptionHandler(self):
    """Parse the ``exceptionHandler`` rule: CATCH argActionBlock actionBlock.

    ANTLR-generated: ``self.state`` values are serialized-ATN state numbers.
    """
    localctx = ANTLRv4Parser.ExceptionHandlerContext(self, self._ctx, self.state)
    self.enterRule(localctx, 40, self.RULE_exceptionHandler)
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 323
        self.match(ANTLRv4Parser.CATCH)
        self.state = 324
        self.argActionBlock()
        self.state = 325
        self.actionBlock()
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class FinallyClauseContext(ParserRuleContext):
    """Parse-tree node for ``finallyClause``: FINALLY actionBlock."""

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def FINALLY(self):
        # The FINALLY keyword token.
        return self.getToken(ANTLRv4Parser.FINALLY, 0)

    def actionBlock(self):
        # The finally body block.
        return self.getTypedRuleContext(ANTLRv4Parser.ActionBlockContext, 0)

    def getRuleIndex(self):
        return ANTLRv4Parser.RULE_finallyClause

    def enterRule(self, listener:ParseTreeListener):
        # Dispatch only when the listener implements the hook (EAFP form of hasattr).
        try:
            hook = listener.enterFinallyClause
        except AttributeError:
            return
        hook(self)

    def exitRule(self, listener:ParseTreeListener):
        try:
            hook = listener.exitFinallyClause
        except AttributeError:
            return
        hook(self)

    def accept(self, visitor:ParseTreeVisitor):
        # Prefer the dedicated visit method; fall back to generic child visiting.
        try:
            visit = visitor.visitFinallyClause
        except AttributeError:
            return visitor.visitChildren(self)
        return visit(self)
def finallyClause(self):
    """Parse the ``finallyClause`` rule: FINALLY actionBlock.

    ANTLR-generated: ``self.state`` values are serialized-ATN state numbers.
    """
    localctx = ANTLRv4Parser.FinallyClauseContext(self, self._ctx, self.state)
    self.enterRule(localctx, 42, self.RULE_finallyClause)
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 327
        self.match(ANTLRv4Parser.FINALLY)
        self.state = 328
        self.actionBlock()
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class RulePrequelContext(ParserRuleContext):
    """Parse-tree node for ``rulePrequel``: optionsSpec | ruleAction."""

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def optionsSpec(self):
        # Present when the prequel is an options spec.
        return self.getTypedRuleContext(ANTLRv4Parser.OptionsSpecContext, 0)

    def ruleAction(self):
        # Present when the prequel is a rule action.
        return self.getTypedRuleContext(ANTLRv4Parser.RuleActionContext, 0)

    def getRuleIndex(self):
        return ANTLRv4Parser.RULE_rulePrequel

    def enterRule(self, listener:ParseTreeListener):
        # Dispatch only when the listener implements the hook (EAFP form of hasattr).
        try:
            hook = listener.enterRulePrequel
        except AttributeError:
            return
        hook(self)

    def exitRule(self, listener:ParseTreeListener):
        try:
            hook = listener.exitRulePrequel
        except AttributeError:
            return
        hook(self)

    def accept(self, visitor:ParseTreeVisitor):
        # Prefer the dedicated visit method; fall back to generic child visiting.
        try:
            visit = visitor.visitRulePrequel
        except AttributeError:
            return visitor.visitChildren(self)
        return visit(self)
def rulePrequel(self):
    """Parse the ``rulePrequel`` rule: optionsSpec | ruleAction.

    ANTLR-generated: ``self.state`` values are serialized-ATN state numbers;
    the alternative is chosen on one token of lookahead.
    """
    localctx = ANTLRv4Parser.RulePrequelContext(self, self._ctx, self.state)
    self.enterRule(localctx, 44, self.RULE_rulePrequel)
    try:
        self.state = 332
        self._errHandler.sync(self)
        token = self._input.LA(1)
        if token in [ANTLRv4Parser.OPTIONS]:
            self.enterOuterAlt(localctx, 1)
            self.state = 330
            self.optionsSpec()
            pass
        elif token in [ANTLRv4Parser.AT]:
            self.enterOuterAlt(localctx, 2)
            self.state = 331
            self.ruleAction()
            pass
        else:
            # No alternative matches the lookahead token.
            raise NoViableAltException(self)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class RuleReturnsContext(ParserRuleContext):
    """Parse-tree node for ``ruleReturns``: RETURNS argActionBlock."""

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def RETURNS(self):
        # The RETURNS keyword token.
        return self.getToken(ANTLRv4Parser.RETURNS, 0)

    def argActionBlock(self):
        # The return-value declaration block.
        return self.getTypedRuleContext(ANTLRv4Parser.ArgActionBlockContext, 0)

    def getRuleIndex(self):
        return ANTLRv4Parser.RULE_ruleReturns

    def enterRule(self, listener:ParseTreeListener):
        # Dispatch only when the listener implements the hook (EAFP form of hasattr).
        try:
            hook = listener.enterRuleReturns
        except AttributeError:
            return
        hook(self)

    def exitRule(self, listener:ParseTreeListener):
        try:
            hook = listener.exitRuleReturns
        except AttributeError:
            return
        hook(self)

    def accept(self, visitor:ParseTreeVisitor):
        # Prefer the dedicated visit method; fall back to generic child visiting.
        try:
            visit = visitor.visitRuleReturns
        except AttributeError:
            return visitor.visitChildren(self)
        return visit(self)
def ruleReturns(self):
    """Parse the ``ruleReturns`` rule: RETURNS argActionBlock.

    ANTLR-generated: ``self.state`` values are serialized-ATN state numbers.
    """
    localctx = ANTLRv4Parser.RuleReturnsContext(self, self._ctx, self.state)
    self.enterRule(localctx, 46, self.RULE_ruleReturns)
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 334
        self.match(ANTLRv4Parser.RETURNS)
        self.state = 335
        self.argActionBlock()
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class ThrowsSpecContext(ParserRuleContext):
    """Parse-tree node for ``throwsSpec``: THROWS identifier (COMMA identifier)*."""

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def THROWS(self):
        # The THROWS keyword token.
        return self.getToken(ANTLRv4Parser.THROWS, 0)

    def identifier(self, i:int=None):
        # All thrown-exception identifiers when i is None, otherwise the i-th one.
        if i is None:
            return self.getTypedRuleContexts(ANTLRv4Parser.IdentifierContext)
        return self.getTypedRuleContext(ANTLRv4Parser.IdentifierContext, i)

    def COMMA(self, i:int=None):
        # Every separator comma when i is None, otherwise the i-th one.
        return self.getTokens(ANTLRv4Parser.COMMA) if i is None else self.getToken(ANTLRv4Parser.COMMA, i)

    def getRuleIndex(self):
        return ANTLRv4Parser.RULE_throwsSpec

    def enterRule(self, listener:ParseTreeListener):
        # Dispatch only when the listener implements the hook (EAFP form of hasattr).
        try:
            hook = listener.enterThrowsSpec
        except AttributeError:
            return
        hook(self)

    def exitRule(self, listener:ParseTreeListener):
        try:
            hook = listener.exitThrowsSpec
        except AttributeError:
            return
        hook(self)

    def accept(self, visitor:ParseTreeVisitor):
        # Prefer the dedicated visit method; fall back to generic child visiting.
        try:
            visit = visitor.visitThrowsSpec
        except AttributeError:
            return visitor.visitChildren(self)
        return visit(self)
def throwsSpec(self):
    """Parse rule 'throwsSpec': THROWS identifier (COMMA identifier)*. (ANTLR-generated)"""
    localctx = ANTLRv4Parser.ThrowsSpecContext(self, self._ctx, self.state)
    self.enterRule(localctx, 48, self.RULE_throwsSpec)
    self._la = 0 # Token type
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 337
        self.match(ANTLRv4Parser.THROWS)
        self.state = 338
        self.identifier()
        self.state = 343
        self._errHandler.sync(self)
        _la = self._input.LA(1)
        # Zero-or-more additional identifiers, comma separated.
        while _la==ANTLRv4Parser.COMMA:
            self.state = 339
            self.match(ANTLRv4Parser.COMMA)
            self.state = 340
            self.identifier()
            self.state = 345
            self._errHandler.sync(self)
            _la = self._input.LA(1)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class LocalsSpecContext(ParserRuleContext):
    """Parse-tree node for the 'localsSpec' rule: LOCALS argActionBlock."""
    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser
    def LOCALS(self):
        return self.getToken(ANTLRv4Parser.LOCALS, 0)
    def argActionBlock(self):
        return self.getTypedRuleContext(ANTLRv4Parser.ArgActionBlockContext,0)
    def getRuleIndex(self):
        return ANTLRv4Parser.RULE_localsSpec
    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterLocalsSpec" ):
            listener.enterLocalsSpec(self)
    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitLocalsSpec" ):
            listener.exitLocalsSpec(self)
    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitLocalsSpec" ):
            return visitor.visitLocalsSpec(self)
        else:
            return visitor.visitChildren(self)
def localsSpec(self):
    """Parse rule 'localsSpec': LOCALS argActionBlock. (ANTLR-generated)"""
    localctx = ANTLRv4Parser.LocalsSpecContext(self, self._ctx, self.state)
    self.enterRule(localctx, 50, self.RULE_localsSpec)
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 346
        self.match(ANTLRv4Parser.LOCALS)
        self.state = 347
        self.argActionBlock()
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class RuleActionContext(ParserRuleContext):
    """Parse-tree node for the 'ruleAction' rule: AT identifier actionBlock."""
    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser
    def AT(self):
        return self.getToken(ANTLRv4Parser.AT, 0)
    def identifier(self):
        return self.getTypedRuleContext(ANTLRv4Parser.IdentifierContext,0)
    def actionBlock(self):
        return self.getTypedRuleContext(ANTLRv4Parser.ActionBlockContext,0)
    def getRuleIndex(self):
        return ANTLRv4Parser.RULE_ruleAction
    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterRuleAction" ):
            listener.enterRuleAction(self)
    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitRuleAction" ):
            listener.exitRuleAction(self)
    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitRuleAction" ):
            return visitor.visitRuleAction(self)
        else:
            return visitor.visitChildren(self)
def ruleAction(self):
    """Parse rule 'ruleAction': AT identifier actionBlock. (ANTLR-generated)"""
    localctx = ANTLRv4Parser.RuleActionContext(self, self._ctx, self.state)
    self.enterRule(localctx, 52, self.RULE_ruleAction)
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 349
        self.match(ANTLRv4Parser.AT)
        self.state = 350
        self.identifier()
        self.state = 351
        self.actionBlock()
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class RuleModifiersContext(ParserRuleContext):
    """Parse-tree node for the 'ruleModifiers' rule: ruleModifier+."""
    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser
    def ruleModifier(self, i:int=None):
        # i is None -> list of all ruleModifier children; otherwise the i-th one.
        if i is None:
            return self.getTypedRuleContexts(ANTLRv4Parser.RuleModifierContext)
        else:
            return self.getTypedRuleContext(ANTLRv4Parser.RuleModifierContext,i)
    def getRuleIndex(self):
        return ANTLRv4Parser.RULE_ruleModifiers
    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterRuleModifiers" ):
            listener.enterRuleModifiers(self)
    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitRuleModifiers" ):
            listener.exitRuleModifiers(self)
    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitRuleModifiers" ):
            return visitor.visitRuleModifiers(self)
        else:
            return visitor.visitChildren(self)
def ruleModifiers(self):
    """Parse rule 'ruleModifiers': ruleModifier+. (ANTLR-generated)"""
    localctx = ANTLRv4Parser.RuleModifiersContext(self, self._ctx, self.state)
    self.enterRule(localctx, 54, self.RULE_ruleModifiers)
    self._la = 0 # Token type
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 354
        self._errHandler.sync(self)
        _la = self._input.LA(1)
        # One-or-more loop: repeat while lookahead is one of the modifier tokens.
        while True:
            self.state = 353
            self.ruleModifier()
            self.state = 356
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            # Bitmask membership test over {FRAGMENT, PROTECTED, PUBLIC, PRIVATE}.
            if not ((((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << ANTLRv4Parser.FRAGMENT) | (1 << ANTLRv4Parser.PROTECTED) | (1 << ANTLRv4Parser.PUBLIC) | (1 << ANTLRv4Parser.PRIVATE))) != 0)):
                break
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class RuleModifierContext(ParserRuleContext):
    """Parse-tree node for the 'ruleModifier' rule: PUBLIC | PRIVATE | PROTECTED | FRAGMENT."""
    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser
    def PUBLIC(self):
        return self.getToken(ANTLRv4Parser.PUBLIC, 0)
    def PRIVATE(self):
        return self.getToken(ANTLRv4Parser.PRIVATE, 0)
    def PROTECTED(self):
        return self.getToken(ANTLRv4Parser.PROTECTED, 0)
    def FRAGMENT(self):
        return self.getToken(ANTLRv4Parser.FRAGMENT, 0)
    def getRuleIndex(self):
        return ANTLRv4Parser.RULE_ruleModifier
    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterRuleModifier" ):
            listener.enterRuleModifier(self)
    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitRuleModifier" ):
            listener.exitRuleModifier(self)
    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitRuleModifier" ):
            return visitor.visitRuleModifier(self)
        else:
            return visitor.visitChildren(self)
def ruleModifier(self):
    """Parse rule 'ruleModifier': one of PUBLIC | PRIVATE | PROTECTED | FRAGMENT. (ANTLR-generated)"""
    localctx = ANTLRv4Parser.RuleModifierContext(self, self._ctx, self.state)
    self.enterRule(localctx, 56, self.RULE_ruleModifier)
    self._la = 0 # Token type
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 358
        _la = self._input.LA(1)
        # Bitmask set-membership: accept exactly one of the four modifier tokens.
        if not((((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << ANTLRv4Parser.FRAGMENT) | (1 << ANTLRv4Parser.PROTECTED) | (1 << ANTLRv4Parser.PUBLIC) | (1 << ANTLRv4Parser.PRIVATE))) != 0)):
            self._errHandler.recoverInline(self)
        else:
            self._errHandler.reportMatch(self)
            self.consume()
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class RuleBlockContext(ParserRuleContext):
    """Parse-tree node for the 'ruleBlock' rule: ruleAltList."""
    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser
    def ruleAltList(self):
        return self.getTypedRuleContext(ANTLRv4Parser.RuleAltListContext,0)
    def getRuleIndex(self):
        return ANTLRv4Parser.RULE_ruleBlock
    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterRuleBlock" ):
            listener.enterRuleBlock(self)
    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitRuleBlock" ):
            listener.exitRuleBlock(self)
    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitRuleBlock" ):
            return visitor.visitRuleBlock(self)
        else:
            return visitor.visitChildren(self)
def ruleBlock(self):
    """Parse rule 'ruleBlock': ruleAltList. (ANTLR-generated)"""
    localctx = ANTLRv4Parser.RuleBlockContext(self, self._ctx, self.state)
    self.enterRule(localctx, 58, self.RULE_ruleBlock)
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 360
        self.ruleAltList()
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class RuleAltListContext(ParserRuleContext):
    """Parse-tree node for the 'ruleAltList' rule: labeledAlt (OR labeledAlt)*."""
    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser
        self._labeledAlt = None # LabeledAltContext
        self.alts = list() # of LabeledAltContexts
    def labeledAlt(self, i:int=None):
        # i is None -> list of all labeledAlt children; otherwise the i-th one.
        if i is None:
            return self.getTypedRuleContexts(ANTLRv4Parser.LabeledAltContext)
        else:
            return self.getTypedRuleContext(ANTLRv4Parser.LabeledAltContext,i)
    def OR(self, i:int=None):
        if i is None:
            return self.getTokens(ANTLRv4Parser.OR)
        else:
            return self.getToken(ANTLRv4Parser.OR, i)
    def getRuleIndex(self):
        return ANTLRv4Parser.RULE_ruleAltList
    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterRuleAltList" ):
            listener.enterRuleAltList(self)
    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitRuleAltList" ):
            listener.exitRuleAltList(self)
    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitRuleAltList" ):
            return visitor.visitRuleAltList(self)
        else:
            return visitor.visitChildren(self)
def ruleAltList(self):
    """Parse rule 'ruleAltList': labeledAlt (OR labeledAlt)*; collects alts into localctx.alts. (ANTLR-generated)"""
    localctx = ANTLRv4Parser.RuleAltListContext(self, self._ctx, self.state)
    self.enterRule(localctx, 60, self.RULE_ruleAltList)
    self._la = 0 # Token type
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 362
        localctx._labeledAlt = self.labeledAlt()
        localctx.alts.append(localctx._labeledAlt)
        self.state = 367
        self._errHandler.sync(self)
        _la = self._input.LA(1)
        # Zero-or-more additional alternatives separated by OR ('|').
        while _la==ANTLRv4Parser.OR:
            self.state = 363
            self.match(ANTLRv4Parser.OR)
            self.state = 364
            localctx._labeledAlt = self.labeledAlt()
            localctx.alts.append(localctx._labeledAlt)
            self.state = 369
            self._errHandler.sync(self)
            _la = self._input.LA(1)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class LabeledAltContext(ParserRuleContext):
    """Parse-tree node for the 'labeledAlt' rule: alternative (POUND identifier)?."""
    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser
    def alternative(self):
        return self.getTypedRuleContext(ANTLRv4Parser.AlternativeContext,0)
    def POUND(self):
        return self.getToken(ANTLRv4Parser.POUND, 0)
    def identifier(self):
        return self.getTypedRuleContext(ANTLRv4Parser.IdentifierContext,0)
    def getRuleIndex(self):
        return ANTLRv4Parser.RULE_labeledAlt
    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterLabeledAlt" ):
            listener.enterLabeledAlt(self)
    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitLabeledAlt" ):
            listener.exitLabeledAlt(self)
    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitLabeledAlt" ):
            return visitor.visitLabeledAlt(self)
        else:
            return visitor.visitChildren(self)
def labeledAlt(self):
    """Parse rule 'labeledAlt': alternative (POUND identifier)?. (ANTLR-generated)"""
    localctx = ANTLRv4Parser.LabeledAltContext(self, self._ctx, self.state)
    self.enterRule(localctx, 62, self.RULE_labeledAlt)
    self._la = 0 # Token type
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 370
        self.alternative()
        self.state = 373
        self._errHandler.sync(self)
        _la = self._input.LA(1)
        # Optional '# label' suffix on the alternative.
        if _la==ANTLRv4Parser.POUND:
            self.state = 371
            self.match(ANTLRv4Parser.POUND)
            self.state = 372
            self.identifier()
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class LexerRuleSpecContext(ParserRuleContext):
    """Parse-tree node for the 'lexerRuleSpec' rule: DOC_COMMENT* FRAGMENT? TOKEN_REF COLON lexerRuleBlock SEMI."""
    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser
        self._DOC_COMMENT = None # Token
        self.docs = list() # of Tokens
        self.frag = None # Token
        self.name = None # Token
    def COLON(self):
        return self.getToken(ANTLRv4Parser.COLON, 0)
    def lexerRuleBlock(self):
        return self.getTypedRuleContext(ANTLRv4Parser.LexerRuleBlockContext,0)
    def SEMI(self):
        return self.getToken(ANTLRv4Parser.SEMI, 0)
    def TOKEN_REF(self):
        return self.getToken(ANTLRv4Parser.TOKEN_REF, 0)
    def DOC_COMMENT(self, i:int=None):
        # i is None -> all DOC_COMMENT tokens; otherwise the i-th one.
        if i is None:
            return self.getTokens(ANTLRv4Parser.DOC_COMMENT)
        else:
            return self.getToken(ANTLRv4Parser.DOC_COMMENT, i)
    def FRAGMENT(self):
        return self.getToken(ANTLRv4Parser.FRAGMENT, 0)
    def getRuleIndex(self):
        return ANTLRv4Parser.RULE_lexerRuleSpec
    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterLexerRuleSpec" ):
            listener.enterLexerRuleSpec(self)
    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitLexerRuleSpec" ):
            listener.exitLexerRuleSpec(self)
    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitLexerRuleSpec" ):
            return visitor.visitLexerRuleSpec(self)
        else:
            return visitor.visitChildren(self)
def lexerRuleSpec(self):
    """Parse rule 'lexerRuleSpec': DOC_COMMENT* FRAGMENT? TOKEN_REF COLON lexerRuleBlock SEMI. (ANTLR-generated)"""
    localctx = ANTLRv4Parser.LexerRuleSpecContext(self, self._ctx, self.state)
    self.enterRule(localctx, 64, self.RULE_lexerRuleSpec)
    self._la = 0 # Token type
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 378
        self._errHandler.sync(self)
        _la = self._input.LA(1)
        # Collect leading doc comments into localctx.docs.
        while _la==ANTLRv4Parser.DOC_COMMENT:
            self.state = 375
            localctx._DOC_COMMENT = self.match(ANTLRv4Parser.DOC_COMMENT)
            localctx.docs.append(localctx._DOC_COMMENT)
            self.state = 380
            self._errHandler.sync(self)
            _la = self._input.LA(1)
        self.state = 382
        self._errHandler.sync(self)
        _la = self._input.LA(1)
        # Optional 'fragment' modifier, stored as localctx.frag.
        if _la==ANTLRv4Parser.FRAGMENT:
            self.state = 381
            localctx.frag = self.match(ANTLRv4Parser.FRAGMENT)
        self.state = 384
        localctx.name = self.match(ANTLRv4Parser.TOKEN_REF)
        self.state = 385
        self.match(ANTLRv4Parser.COLON)
        self.state = 386
        self.lexerRuleBlock()
        self.state = 387
        self.match(ANTLRv4Parser.SEMI)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class LexerRuleBlockContext(ParserRuleContext):
    """Parse-tree node for the 'lexerRuleBlock' rule: lexerAltList."""
    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser
    def lexerAltList(self):
        return self.getTypedRuleContext(ANTLRv4Parser.LexerAltListContext,0)
    def getRuleIndex(self):
        return ANTLRv4Parser.RULE_lexerRuleBlock
    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterLexerRuleBlock" ):
            listener.enterLexerRuleBlock(self)
    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitLexerRuleBlock" ):
            listener.exitLexerRuleBlock(self)
    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitLexerRuleBlock" ):
            return visitor.visitLexerRuleBlock(self)
        else:
            return visitor.visitChildren(self)
def lexerRuleBlock(self):
    """Parse rule 'lexerRuleBlock': lexerAltList. (ANTLR-generated)"""
    localctx = ANTLRv4Parser.LexerRuleBlockContext(self, self._ctx, self.state)
    self.enterRule(localctx, 66, self.RULE_lexerRuleBlock)
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 389
        self.lexerAltList()
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class LexerAltListContext(ParserRuleContext):
    """Parse-tree node for the 'lexerAltList' rule: lexerAlt (OR lexerAlt)*."""
    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser
        self._lexerAlt = None # LexerAltContext
        self.alts = list() # of LexerAltContexts
    def lexerAlt(self, i:int=None):
        # i is None -> list of all lexerAlt children; otherwise the i-th one.
        if i is None:
            return self.getTypedRuleContexts(ANTLRv4Parser.LexerAltContext)
        else:
            return self.getTypedRuleContext(ANTLRv4Parser.LexerAltContext,i)
    def OR(self, i:int=None):
        if i is None:
            return self.getTokens(ANTLRv4Parser.OR)
        else:
            return self.getToken(ANTLRv4Parser.OR, i)
    def getRuleIndex(self):
        return ANTLRv4Parser.RULE_lexerAltList
    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterLexerAltList" ):
            listener.enterLexerAltList(self)
    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitLexerAltList" ):
            listener.exitLexerAltList(self)
    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitLexerAltList" ):
            return visitor.visitLexerAltList(self)
        else:
            return visitor.visitChildren(self)
def lexerAltList(self):
    """Parse rule 'lexerAltList': lexerAlt (OR lexerAlt)*; collects alts into localctx.alts. (ANTLR-generated)"""
    localctx = ANTLRv4Parser.LexerAltListContext(self, self._ctx, self.state)
    self.enterRule(localctx, 68, self.RULE_lexerAltList)
    self._la = 0 # Token type
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 391
        localctx._lexerAlt = self.lexerAlt()
        localctx.alts.append(localctx._lexerAlt)
        self.state = 396
        self._errHandler.sync(self)
        _la = self._input.LA(1)
        # Zero-or-more additional alternatives separated by OR ('|').
        while _la==ANTLRv4Parser.OR:
            self.state = 392
            self.match(ANTLRv4Parser.OR)
            self.state = 393
            localctx._lexerAlt = self.lexerAlt()
            localctx.alts.append(localctx._lexerAlt)
            self.state = 398
            self._errHandler.sync(self)
            _la = self._input.LA(1)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class LexerAltContext(ParserRuleContext):
    """Parse-tree node for the 'lexerAlt' rule: lexerElements lexerCommands? | (empty)."""
    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser
    def lexerElements(self):
        return self.getTypedRuleContext(ANTLRv4Parser.LexerElementsContext,0)
    def lexerCommands(self):
        return self.getTypedRuleContext(ANTLRv4Parser.LexerCommandsContext,0)
    def getRuleIndex(self):
        return ANTLRv4Parser.RULE_lexerAlt
    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterLexerAlt" ):
            listener.enterLexerAlt(self)
    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitLexerAlt" ):
            listener.exitLexerAlt(self)
    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitLexerAlt" ):
            return visitor.visitLexerAlt(self)
        else:
            return visitor.visitChildren(self)
def lexerAlt(self):
    """Parse rule 'lexerAlt': lexerElements lexerCommands? | (empty alternative). (ANTLR-generated)"""
    localctx = ANTLRv4Parser.LexerAltContext(self, self._ctx, self.state)
    self.enterRule(localctx, 70, self.RULE_lexerAlt)
    self._la = 0 # Token type
    try:
        self.state = 404
        self._errHandler.sync(self)
        token = self._input.LA(1)
        # Alt 1: a non-empty element list, optionally followed by '-> command(s)'.
        if token in [ANTLRv4Parser.TOKEN_REF, ANTLRv4Parser.RULE_REF, ANTLRv4Parser.LEXER_CHAR_SET, ANTLRv4Parser.DOC_COMMENT, ANTLRv4Parser.STRING_LITERAL, ANTLRv4Parser.BEGIN_ACTION, ANTLRv4Parser.LPAREN, ANTLRv4Parser.DOT, ANTLRv4Parser.NOT]:
            self.enterOuterAlt(localctx, 1)
            self.state = 399
            self.lexerElements()
            self.state = 401
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            if _la==ANTLRv4Parser.RARROW:
                self.state = 400
                self.lexerCommands()
            pass
        # Alt 2: empty alternative (follow set: ';', ')', '|').
        elif token in [ANTLRv4Parser.SEMI, ANTLRv4Parser.RPAREN, ANTLRv4Parser.OR]:
            self.enterOuterAlt(localctx, 2)
            pass
        else:
            raise NoViableAltException(self)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class LexerElementsContext(ParserRuleContext):
    """Parse-tree node for the 'lexerElements' rule: lexerElement+."""
    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser
        self._lexerElement = None # LexerElementContext
        self.elements = list() # of LexerElementContexts
    def lexerElement(self, i:int=None):
        # i is None -> list of all lexerElement children; otherwise the i-th one.
        if i is None:
            return self.getTypedRuleContexts(ANTLRv4Parser.LexerElementContext)
        else:
            return self.getTypedRuleContext(ANTLRv4Parser.LexerElementContext,i)
    def getRuleIndex(self):
        return ANTLRv4Parser.RULE_lexerElements
    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterLexerElements" ):
            listener.enterLexerElements(self)
    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitLexerElements" ):
            listener.exitLexerElements(self)
    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitLexerElements" ):
            return visitor.visitLexerElements(self)
        else:
            return visitor.visitChildren(self)
def lexerElements(self):
    """Parse rule 'lexerElements': lexerElement+; collects elements into localctx.elements. (ANTLR-generated)"""
    localctx = ANTLRv4Parser.LexerElementsContext(self, self._ctx, self.state)
    self.enterRule(localctx, 72, self.RULE_lexerElements)
    self._la = 0 # Token type
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 407
        self._errHandler.sync(self)
        _la = self._input.LA(1)
        # One-or-more loop: repeat while lookahead can start a lexerElement.
        while True:
            self.state = 406
            localctx._lexerElement = self.lexerElement()
            localctx.elements.append(localctx._lexerElement)
            self.state = 409
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            # Bitmask membership test over the FIRST set of lexerElement.
            if not ((((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << ANTLRv4Parser.TOKEN_REF) | (1 << ANTLRv4Parser.RULE_REF) | (1 << ANTLRv4Parser.LEXER_CHAR_SET) | (1 << ANTLRv4Parser.DOC_COMMENT) | (1 << ANTLRv4Parser.STRING_LITERAL) | (1 << ANTLRv4Parser.BEGIN_ACTION) | (1 << ANTLRv4Parser.LPAREN) | (1 << ANTLRv4Parser.DOT) | (1 << ANTLRv4Parser.NOT))) != 0)):
                break
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class LexerElementContext(ParserRuleContext):
    """Base parse-tree node for the 'lexerElement' rule; concrete alternatives subclass it."""
    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser
    def getRuleIndex(self):
        return ANTLRv4Parser.RULE_lexerElement
    def copyFrom(self, ctx:ParserRuleContext):
        # Re-parent children from the generic context into this labeled subclass.
        super().copyFrom(ctx)
class LexerElementLabeledContext(LexerElementContext):
    """'lexerElement' alternative: labeledLexerElement ebnfSuffix?."""
    def __init__(self, parser, ctx:ParserRuleContext): # actually a ANTLRv4Parser.LexerElementContext
        super().__init__(parser)
        self.value = None # LabeledLexerElementContext
        self.suffix = None # EbnfSuffixContext
        self.copyFrom(ctx)
    def labeledLexerElement(self):
        return self.getTypedRuleContext(ANTLRv4Parser.LabeledLexerElementContext,0)
    def ebnfSuffix(self):
        return self.getTypedRuleContext(ANTLRv4Parser.EbnfSuffixContext,0)
    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterLexerElementLabeled" ):
            listener.enterLexerElementLabeled(self)
    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitLexerElementLabeled" ):
            listener.exitLexerElementLabeled(self)
    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitLexerElementLabeled" ):
            return visitor.visitLexerElementLabeled(self)
        else:
            return visitor.visitChildren(self)
class LexerElementBlockContext(LexerElementContext):
    """'lexerElement' alternative: lexerBlock ebnfSuffix?."""
    def __init__(self, parser, ctx:ParserRuleContext): # actually a ANTLRv4Parser.LexerElementContext
        super().__init__(parser)
        self.value = None # LexerBlockContext
        self.suffix = None # EbnfSuffixContext
        self.copyFrom(ctx)
    def lexerBlock(self):
        return self.getTypedRuleContext(ANTLRv4Parser.LexerBlockContext,0)
    def ebnfSuffix(self):
        return self.getTypedRuleContext(ANTLRv4Parser.EbnfSuffixContext,0)
    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterLexerElementBlock" ):
            listener.enterLexerElementBlock(self)
    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitLexerElementBlock" ):
            listener.exitLexerElementBlock(self)
    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitLexerElementBlock" ):
            return visitor.visitLexerElementBlock(self)
        else:
            return visitor.visitChildren(self)
class LexerElementActionContext(LexerElementContext):
    """'lexerElement' alternative: actionBlock QUESTION?."""
    def __init__(self, parser, ctx:ParserRuleContext): # actually a ANTLRv4Parser.LexerElementContext
        super().__init__(parser)
        self.copyFrom(ctx)
    def actionBlock(self):
        return self.getTypedRuleContext(ANTLRv4Parser.ActionBlockContext,0)
    def QUESTION(self):
        return self.getToken(ANTLRv4Parser.QUESTION, 0)
    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterLexerElementAction" ):
            listener.enterLexerElementAction(self)
    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitLexerElementAction" ):
            listener.exitLexerElementAction(self)
    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitLexerElementAction" ):
            return visitor.visitLexerElementAction(self)
        else:
            return visitor.visitChildren(self)
class LexerElementAtomContext(LexerElementContext):
    """'lexerElement' alternative: lexerAtom ebnfSuffix?."""
    def __init__(self, parser, ctx:ParserRuleContext): # actually a ANTLRv4Parser.LexerElementContext
        super().__init__(parser)
        self.value = None # LexerAtomContext
        self.suffix = None # EbnfSuffixContext
        self.copyFrom(ctx)
    def lexerAtom(self):
        return self.getTypedRuleContext(ANTLRv4Parser.LexerAtomContext,0)
    def ebnfSuffix(self):
        return self.getTypedRuleContext(ANTLRv4Parser.EbnfSuffixContext,0)
    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterLexerElementAtom" ):
            listener.enterLexerElementAtom(self)
    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitLexerElementAtom" ):
            listener.exitLexerElementAtom(self)
    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitLexerElementAtom" ):
            return visitor.visitLexerElementAtom(self)
        else:
            return visitor.visitChildren(self)
def lexerElement(self):
    """Parse rule 'lexerElement'; one of four labeled alternatives chosen by adaptive prediction:

    1. labeledLexerElement ebnfSuffix?   -> LexerElementLabeledContext
    2. lexerAtom ebnfSuffix?             -> LexerElementAtomContext
    3. lexerBlock ebnfSuffix?            -> LexerElementBlockContext
    4. actionBlock QUESTION?             -> LexerElementActionContext
    (ANTLR-generated)
    """
    localctx = ANTLRv4Parser.LexerElementContext(self, self._ctx, self.state)
    self.enterRule(localctx, 74, self.RULE_lexerElement)
    self._la = 0 # Token type
    try:
        self.state = 427
        self._errHandler.sync(self)
        # ALL(*) adaptive prediction over decision 45 picks the alternative.
        la_ = self._interp.adaptivePredict(self._input,45,self._ctx)
        if la_ == 1:
            localctx = ANTLRv4Parser.LexerElementLabeledContext(self, localctx)
            self.enterOuterAlt(localctx, 1)
            self.state = 411
            localctx.value = self.labeledLexerElement()
            self.state = 413
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            # Optional EBNF suffix: '?', '*' or '+'.
            if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << ANTLRv4Parser.QUESTION) | (1 << ANTLRv4Parser.STAR) | (1 << ANTLRv4Parser.PLUS))) != 0):
                self.state = 412
                localctx.suffix = self.ebnfSuffix()
            pass
        elif la_ == 2:
            localctx = ANTLRv4Parser.LexerElementAtomContext(self, localctx)
            self.enterOuterAlt(localctx, 2)
            self.state = 415
            localctx.value = self.lexerAtom()
            self.state = 417
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << ANTLRv4Parser.QUESTION) | (1 << ANTLRv4Parser.STAR) | (1 << ANTLRv4Parser.PLUS))) != 0):
                self.state = 416
                localctx.suffix = self.ebnfSuffix()
            pass
        elif la_ == 3:
            localctx = ANTLRv4Parser.LexerElementBlockContext(self, localctx)
            self.enterOuterAlt(localctx, 3)
            self.state = 419
            localctx.value = self.lexerBlock()
            self.state = 421
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << ANTLRv4Parser.QUESTION) | (1 << ANTLRv4Parser.STAR) | (1 << ANTLRv4Parser.PLUS))) != 0):
                self.state = 420
                localctx.suffix = self.ebnfSuffix()
            pass
        elif la_ == 4:
            localctx = ANTLRv4Parser.LexerElementActionContext(self, localctx)
            self.enterOuterAlt(localctx, 4)
            self.state = 423
            self.actionBlock()
            self.state = 425
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            # Optional '?' marks a semantic predicate rather than a plain action.
            if _la==ANTLRv4Parser.QUESTION:
                self.state = 424
                self.match(ANTLRv4Parser.QUESTION)
            pass
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class LabeledLexerElementContext(ParserRuleContext):
    """Parse-tree node for the 'labeledLexerElement' rule: identifier (ASSIGN|PLUS_ASSIGN) (lexerAtom|lexerBlock)."""
    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser
    def identifier(self):
        return self.getTypedRuleContext(ANTLRv4Parser.IdentifierContext,0)
    def ASSIGN(self):
        return self.getToken(ANTLRv4Parser.ASSIGN, 0)
    def PLUS_ASSIGN(self):
        return self.getToken(ANTLRv4Parser.PLUS_ASSIGN, 0)
    def lexerAtom(self):
        return self.getTypedRuleContext(ANTLRv4Parser.LexerAtomContext,0)
    def lexerBlock(self):
        return self.getTypedRuleContext(ANTLRv4Parser.LexerBlockContext,0)
    def getRuleIndex(self):
        return ANTLRv4Parser.RULE_labeledLexerElement
    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterLabeledLexerElement" ):
            listener.enterLabeledLexerElement(self)
    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitLabeledLexerElement" ):
            listener.exitLabeledLexerElement(self)
    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitLabeledLexerElement" ):
            return visitor.visitLabeledLexerElement(self)
        else:
            return visitor.visitChildren(self)
def labeledLexerElement(self):
    """Parse rule 'labeledLexerElement': identifier ('='|'+=') (lexerAtom | lexerBlock). (ANTLR-generated)"""
    localctx = ANTLRv4Parser.LabeledLexerElementContext(self, self._ctx, self.state)
    self.enterRule(localctx, 76, self.RULE_labeledLexerElement)
    self._la = 0 # Token type
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 429
        self.identifier()
        self.state = 430
        _la = self._input.LA(1)
        # Require either ASSIGN ('=') or PLUS_ASSIGN ('+=') after the label.
        if not(_la==ANTLRv4Parser.ASSIGN or _la==ANTLRv4Parser.PLUS_ASSIGN):
            self._errHandler.recoverInline(self)
        else:
            self._errHandler.reportMatch(self)
            self.consume()
        self.state = 433
        self._errHandler.sync(self)
        token = self._input.LA(1)
        # Labeled value is either a lexer atom or a parenthesized block.
        if token in [ANTLRv4Parser.TOKEN_REF, ANTLRv4Parser.LEXER_CHAR_SET, ANTLRv4Parser.DOC_COMMENT, ANTLRv4Parser.STRING_LITERAL, ANTLRv4Parser.DOT, ANTLRv4Parser.NOT]:
            self.state = 431
            self.lexerAtom()
            pass
        elif token in [ANTLRv4Parser.LPAREN]:
            self.state = 432
            self.lexerBlock()
            pass
        else:
            raise NoViableAltException(self)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class LexerBlockContext(ParserRuleContext):
    """Parse-tree node for the 'lexerBlock' rule: LPAREN lexerAltList RPAREN."""
    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser
    def LPAREN(self):
        return self.getToken(ANTLRv4Parser.LPAREN, 0)
    def lexerAltList(self):
        return self.getTypedRuleContext(ANTLRv4Parser.LexerAltListContext,0)
    def RPAREN(self):
        return self.getToken(ANTLRv4Parser.RPAREN, 0)
    def getRuleIndex(self):
        return ANTLRv4Parser.RULE_lexerBlock
    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterLexerBlock" ):
            listener.enterLexerBlock(self)
    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitLexerBlock" ):
            listener.exitLexerBlock(self)
    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitLexerBlock" ):
            return visitor.visitLexerBlock(self)
        else:
            return visitor.visitChildren(self)
def lexerBlock(self):
    """Parse rule 'lexerBlock': LPAREN lexerAltList RPAREN. (ANTLR-generated)"""
    localctx = ANTLRv4Parser.LexerBlockContext(self, self._ctx, self.state)
    self.enterRule(localctx, 78, self.RULE_lexerBlock)
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 435
        self.match(ANTLRv4Parser.LPAREN)
        self.state = 436
        self.lexerAltList()
        self.state = 437
        self.match(ANTLRv4Parser.RPAREN)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class LexerCommandsContext(ParserRuleContext):
    """Parse-tree node for the 'lexerCommands' rule: RARROW lexerCommand (COMMA lexerCommand)*."""
    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser
    def RARROW(self):
        return self.getToken(ANTLRv4Parser.RARROW, 0)
    def lexerCommand(self, i:int=None):
        # i is None -> list of all lexerCommand children; otherwise the i-th one.
        if i is None:
            return self.getTypedRuleContexts(ANTLRv4Parser.LexerCommandContext)
        else:
            return self.getTypedRuleContext(ANTLRv4Parser.LexerCommandContext,i)
    def COMMA(self, i:int=None):
        if i is None:
            return self.getTokens(ANTLRv4Parser.COMMA)
        else:
            return self.getToken(ANTLRv4Parser.COMMA, i)
    def getRuleIndex(self):
        return ANTLRv4Parser.RULE_lexerCommands
    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterLexerCommands" ):
            listener.enterLexerCommands(self)
    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitLexerCommands" ):
            listener.exitLexerCommands(self)
    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitLexerCommands" ):
            return visitor.visitLexerCommands(self)
        else:
            return visitor.visitChildren(self)
def lexerCommands(self):
    """Parse rule 'lexerCommands': RARROW lexerCommand (COMMA lexerCommand)*. (ANTLR-generated)"""
    localctx = ANTLRv4Parser.LexerCommandsContext(self, self._ctx, self.state)
    self.enterRule(localctx, 80, self.RULE_lexerCommands)
    self._la = 0 # Token type
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 439
        self.match(ANTLRv4Parser.RARROW)
        self.state = 440
        self.lexerCommand()
        self.state = 445
        self._errHandler.sync(self)
        _la = self._input.LA(1)
        # Zero-or-more additional commands, comma separated.
        while _la==ANTLRv4Parser.COMMA:
            self.state = 441
            self.match(ANTLRv4Parser.COMMA)
            self.state = 442
            self.lexerCommand()
            self.state = 447
            self._errHandler.sync(self)
            _la = self._input.LA(1)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class LexerCommandContext(ParserRuleContext):
    """Parse-tree node for the 'lexerCommand' rule: lexerCommandName (LPAREN lexerCommandExpr RPAREN)?."""
    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser
    def lexerCommandName(self):
        return self.getTypedRuleContext(ANTLRv4Parser.LexerCommandNameContext,0)
    def LPAREN(self):
        return self.getToken(ANTLRv4Parser.LPAREN, 0)
    def lexerCommandExpr(self):
        return self.getTypedRuleContext(ANTLRv4Parser.LexerCommandExprContext,0)
    def RPAREN(self):
        return self.getToken(ANTLRv4Parser.RPAREN, 0)
    def getRuleIndex(self):
        return ANTLRv4Parser.RULE_lexerCommand
    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterLexerCommand" ):
            listener.enterLexerCommand(self)
    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitLexerCommand" ):
            listener.exitLexerCommand(self)
    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitLexerCommand" ):
            return visitor.visitLexerCommand(self)
        else:
            return visitor.visitChildren(self)
    def lexerCommand(self):
        """Parse rule ``lexerCommand``: lexerCommandName (LPAREN lexerCommandExpr RPAREN)?."""
        localctx = ANTLRv4Parser.LexerCommandContext(self, self._ctx, self.state)
        self.enterRule(localctx, 82, self.RULE_lexerCommand)
        try:
            self.state = 454
            self._errHandler.sync(self)
            # Adaptive prediction picks between call-with-argument and bare-name forms.
            la_ = self._interp.adaptivePredict(self._input,48,self._ctx)
            if la_ == 1:
                self.enterOuterAlt(localctx, 1)
                self.state = 448
                self.lexerCommandName()
                self.state = 449
                self.match(ANTLRv4Parser.LPAREN)
                self.state = 450
                self.lexerCommandExpr()
                self.state = 451
                self.match(ANTLRv4Parser.RPAREN)
                pass
            elif la_ == 2:
                self.enterOuterAlt(localctx, 2)
                self.state = 453
                self.lexerCommandName()
                pass
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
class LexerCommandNameContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def identifier(self):
return self.getTypedRuleContext(ANTLRv4Parser.IdentifierContext,0)
def MODE(self):
return self.getToken(ANTLRv4Parser.MODE, 0)
def getRuleIndex(self):
return ANTLRv4Parser.RULE_lexerCommandName
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterLexerCommandName" ):
listener.enterLexerCommandName(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitLexerCommandName" ):
listener.exitLexerCommandName(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitLexerCommandName" ):
return visitor.visitLexerCommandName(self)
else:
return visitor.visitChildren(self)
    def lexerCommandName(self):
        """Parse rule ``lexerCommandName``: identifier | MODE."""
        localctx = ANTLRv4Parser.LexerCommandNameContext(self, self._ctx, self.state)
        self.enterRule(localctx, 84, self.RULE_lexerCommandName)
        try:
            self.state = 458
            self._errHandler.sync(self)
            token = self._input.LA(1)
            if token in [ANTLRv4Parser.TOKEN_REF, ANTLRv4Parser.RULE_REF]:
                self.enterOuterAlt(localctx, 1)
                self.state = 456
                self.identifier()
                pass
            elif token in [ANTLRv4Parser.MODE]:
                self.enterOuterAlt(localctx, 2)
                self.state = 457
                self.match(ANTLRv4Parser.MODE)
                pass
            else:
                raise NoViableAltException(self)
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
class LexerCommandExprContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def identifier(self):
return self.getTypedRuleContext(ANTLRv4Parser.IdentifierContext,0)
def INT(self):
return self.getToken(ANTLRv4Parser.INT, 0)
def getRuleIndex(self):
return ANTLRv4Parser.RULE_lexerCommandExpr
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterLexerCommandExpr" ):
listener.enterLexerCommandExpr(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitLexerCommandExpr" ):
listener.exitLexerCommandExpr(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitLexerCommandExpr" ):
return visitor.visitLexerCommandExpr(self)
else:
return visitor.visitChildren(self)
    def lexerCommandExpr(self):
        """Parse rule ``lexerCommandExpr``: identifier | INT."""
        localctx = ANTLRv4Parser.LexerCommandExprContext(self, self._ctx, self.state)
        self.enterRule(localctx, 86, self.RULE_lexerCommandExpr)
        try:
            self.state = 462
            self._errHandler.sync(self)
            token = self._input.LA(1)
            if token in [ANTLRv4Parser.TOKEN_REF, ANTLRv4Parser.RULE_REF]:
                self.enterOuterAlt(localctx, 1)
                self.state = 460
                self.identifier()
                pass
            elif token in [ANTLRv4Parser.INT]:
                self.enterOuterAlt(localctx, 2)
                self.state = 461
                self.match(ANTLRv4Parser.INT)
                pass
            else:
                raise NoViableAltException(self)
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
class AltListContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
self._alternative = None # AlternativeContext
self.alts = list() # of AlternativeContexts
def alternative(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(ANTLRv4Parser.AlternativeContext)
else:
return self.getTypedRuleContext(ANTLRv4Parser.AlternativeContext,i)
def OR(self, i:int=None):
if i is None:
return self.getTokens(ANTLRv4Parser.OR)
else:
return self.getToken(ANTLRv4Parser.OR, i)
def getRuleIndex(self):
return ANTLRv4Parser.RULE_altList
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterAltList" ):
listener.enterAltList(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitAltList" ):
listener.exitAltList(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitAltList" ):
return visitor.visitAltList(self)
else:
return visitor.visitChildren(self)
    def altList(self):
        """Parse rule ``altList``: alternative (OR alternative)*.

        Each matched alternative is appended to ``localctx.alts``.
        """
        localctx = ANTLRv4Parser.AltListContext(self, self._ctx, self.state)
        self.enterRule(localctx, 88, self.RULE_altList)
        self._la = 0 # Token type
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 464
            localctx._alternative = self.alternative()
            localctx.alts.append(localctx._alternative)
            self.state = 469
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            # Further alternatives are separated by '|'.
            while _la==ANTLRv4Parser.OR:
                self.state = 465
                self.match(ANTLRv4Parser.OR)
                self.state = 466
                localctx._alternative = self.alternative()
                localctx.alts.append(localctx._alternative)
                self.state = 471
                self._errHandler.sync(self)
                _la = self._input.LA(1)
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
class AlternativeContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
self._element = None # ElementContext
self.elements = list() # of ElementContexts
def elementOptions(self):
return self.getTypedRuleContext(ANTLRv4Parser.ElementOptionsContext,0)
def element(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(ANTLRv4Parser.ElementContext)
else:
return self.getTypedRuleContext(ANTLRv4Parser.ElementContext,i)
def getRuleIndex(self):
return ANTLRv4Parser.RULE_alternative
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterAlternative" ):
listener.enterAlternative(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitAlternative" ):
listener.exitAlternative(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitAlternative" ):
return visitor.visitAlternative(self)
else:
return visitor.visitChildren(self)
    def alternative(self):
        """Parse rule ``alternative``: elementOptions? element+ | (empty).

        The empty alternative is taken when the lookahead is a token that can
        only follow an alternative (SEMI, RPAREN, OR, POUND).
        """
        localctx = ANTLRv4Parser.AlternativeContext(self, self._ctx, self.state)
        self.enterRule(localctx, 90, self.RULE_alternative)
        self._la = 0 # Token type
        try:
            self.state = 481
            self._errHandler.sync(self)
            token = self._input.LA(1)
            if token in [ANTLRv4Parser.TOKEN_REF, ANTLRv4Parser.RULE_REF, ANTLRv4Parser.DOC_COMMENT, ANTLRv4Parser.STRING_LITERAL, ANTLRv4Parser.BEGIN_ACTION, ANTLRv4Parser.LPAREN, ANTLRv4Parser.LT, ANTLRv4Parser.DOT, ANTLRv4Parser.NOT]:
                self.enterOuterAlt(localctx, 1)
                self.state = 473
                self._errHandler.sync(self)
                _la = self._input.LA(1)
                # Optional <...> element options.
                if _la==ANTLRv4Parser.LT:
                    self.state = 472
                    self.elementOptions()
                self.state = 476
                self._errHandler.sync(self)
                _la = self._input.LA(1)
                # One or more elements; loop until the lookahead token can no
                # longer start an element (bit-mask generated from the grammar).
                while True:
                    self.state = 475
                    localctx._element = self.element()
                    localctx.elements.append(localctx._element)
                    self.state = 478
                    self._errHandler.sync(self)
                    _la = self._input.LA(1)
                    if not ((((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << ANTLRv4Parser.TOKEN_REF) | (1 << ANTLRv4Parser.RULE_REF) | (1 << ANTLRv4Parser.DOC_COMMENT) | (1 << ANTLRv4Parser.STRING_LITERAL) | (1 << ANTLRv4Parser.BEGIN_ACTION) | (1 << ANTLRv4Parser.LPAREN) | (1 << ANTLRv4Parser.DOT) | (1 << ANTLRv4Parser.NOT))) != 0)):
                        break
                pass
            elif token in [ANTLRv4Parser.SEMI, ANTLRv4Parser.RPAREN, ANTLRv4Parser.OR, ANTLRv4Parser.POUND]:
                self.enterOuterAlt(localctx, 2)
                pass
            else:
                raise NoViableAltException(self)
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
class ElementContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def getRuleIndex(self):
return ANTLRv4Parser.RULE_element
def copyFrom(self, ctx:ParserRuleContext):
super().copyFrom(ctx)
class ParserElementLabeledContext(ElementContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a ANTLRv4Parser.ElementContext
super().__init__(parser)
self.value = None # LabeledElementContext
self.suffix = None # EbnfSuffixContext
self.copyFrom(ctx)
def labeledElement(self):
return self.getTypedRuleContext(ANTLRv4Parser.LabeledElementContext,0)
def ebnfSuffix(self):
return self.getTypedRuleContext(ANTLRv4Parser.EbnfSuffixContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterParserElementLabeled" ):
listener.enterParserElementLabeled(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitParserElementLabeled" ):
listener.exitParserElementLabeled(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitParserElementLabeled" ):
return visitor.visitParserElementLabeled(self)
else:
return visitor.visitChildren(self)
class ParserElementBlockContext(ElementContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a ANTLRv4Parser.ElementContext
super().__init__(parser)
self.value = None # BlockContext
self.suffix = None # EbnfSuffixContext
self.copyFrom(ctx)
def block(self):
return self.getTypedRuleContext(ANTLRv4Parser.BlockContext,0)
def ebnfSuffix(self):
return self.getTypedRuleContext(ANTLRv4Parser.EbnfSuffixContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterParserElementBlock" ):
listener.enterParserElementBlock(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitParserElementBlock" ):
listener.exitParserElementBlock(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitParserElementBlock" ):
return visitor.visitParserElementBlock(self)
else:
return visitor.visitChildren(self)
class ParserElementAtomContext(ElementContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a ANTLRv4Parser.ElementContext
super().__init__(parser)
self.value = None # AtomContext
self.suffix = None # EbnfSuffixContext
self.copyFrom(ctx)
def atom(self):
return self.getTypedRuleContext(ANTLRv4Parser.AtomContext,0)
def ebnfSuffix(self):
return self.getTypedRuleContext(ANTLRv4Parser.EbnfSuffixContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterParserElementAtom" ):
listener.enterParserElementAtom(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitParserElementAtom" ):
listener.exitParserElementAtom(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitParserElementAtom" ):
return visitor.visitParserElementAtom(self)
else:
return visitor.visitChildren(self)
class ParserInlineDocContext(ElementContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a ANTLRv4Parser.ElementContext
super().__init__(parser)
self.value = None # Token
self.copyFrom(ctx)
def DOC_COMMENT(self):
return self.getToken(ANTLRv4Parser.DOC_COMMENT, 0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterParserInlineDoc" ):
listener.enterParserInlineDoc(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitParserInlineDoc" ):
listener.exitParserInlineDoc(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitParserInlineDoc" ):
return visitor.visitParserInlineDoc(self)
else:
return visitor.visitChildren(self)
class ParserElementActionContext(ElementContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a ANTLRv4Parser.ElementContext
super().__init__(parser)
self.copyFrom(ctx)
def actionBlock(self):
return self.getTypedRuleContext(ANTLRv4Parser.ActionBlockContext,0)
def QUESTION(self):
return self.getToken(ANTLRv4Parser.QUESTION, 0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterParserElementAction" ):
listener.enterParserElementAction(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitParserElementAction" ):
listener.exitParserElementAction(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitParserElementAction" ):
return visitor.visitParserElementAction(self)
else:
return visitor.visitChildren(self)
    def element(self):
        """Parse rule ``element``.

        Alternatives, chosen by adaptive prediction:
          1. labeledElement ebnfSuffix?
          2. atom ebnfSuffix?
          3. block ebnfSuffix?
          4. actionBlock QUESTION?
          5. DOC_COMMENT
        """
        localctx = ANTLRv4Parser.ElementContext(self, self._ctx, self.state)
        self.enterRule(localctx, 92, self.RULE_element)
        self._la = 0 # Token type
        try:
            self.state = 500
            self._errHandler.sync(self)
            la_ = self._interp.adaptivePredict(self._input,59,self._ctx)
            if la_ == 1:
                localctx = ANTLRv4Parser.ParserElementLabeledContext(self, localctx)
                self.enterOuterAlt(localctx, 1)
                self.state = 483
                localctx.value = self.labeledElement()
                self.state = 485
                self._errHandler.sync(self)
                _la = self._input.LA(1)
                # Optional ?/*/+ suffix.
                if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << ANTLRv4Parser.QUESTION) | (1 << ANTLRv4Parser.STAR) | (1 << ANTLRv4Parser.PLUS))) != 0):
                    self.state = 484
                    localctx.suffix = self.ebnfSuffix()
                pass
            elif la_ == 2:
                localctx = ANTLRv4Parser.ParserElementAtomContext(self, localctx)
                self.enterOuterAlt(localctx, 2)
                self.state = 487
                localctx.value = self.atom()
                self.state = 489
                self._errHandler.sync(self)
                _la = self._input.LA(1)
                # Optional ?/*/+ suffix.
                if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << ANTLRv4Parser.QUESTION) | (1 << ANTLRv4Parser.STAR) | (1 << ANTLRv4Parser.PLUS))) != 0):
                    self.state = 488
                    localctx.suffix = self.ebnfSuffix()
                pass
            elif la_ == 3:
                localctx = ANTLRv4Parser.ParserElementBlockContext(self, localctx)
                self.enterOuterAlt(localctx, 3)
                self.state = 491
                localctx.value = self.block()
                self.state = 493
                self._errHandler.sync(self)
                _la = self._input.LA(1)
                # Optional ?/*/+ suffix.
                if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << ANTLRv4Parser.QUESTION) | (1 << ANTLRv4Parser.STAR) | (1 << ANTLRv4Parser.PLUS))) != 0):
                    self.state = 492
                    localctx.suffix = self.ebnfSuffix()
                pass
            elif la_ == 4:
                localctx = ANTLRv4Parser.ParserElementActionContext(self, localctx)
                self.enterOuterAlt(localctx, 4)
                self.state = 495
                self.actionBlock()
                self.state = 497
                self._errHandler.sync(self)
                _la = self._input.LA(1)
                if _la==ANTLRv4Parser.QUESTION:
                    self.state = 496
                    self.match(ANTLRv4Parser.QUESTION)
                pass
            elif la_ == 5:
                localctx = ANTLRv4Parser.ParserInlineDocContext(self, localctx)
                self.enterOuterAlt(localctx, 5)
                self.state = 499
                localctx.value = self.match(ANTLRv4Parser.DOC_COMMENT)
                pass
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
class LabeledElementContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def identifier(self):
return self.getTypedRuleContext(ANTLRv4Parser.IdentifierContext,0)
def ASSIGN(self):
return self.getToken(ANTLRv4Parser.ASSIGN, 0)
def PLUS_ASSIGN(self):
return self.getToken(ANTLRv4Parser.PLUS_ASSIGN, 0)
def atom(self):
return self.getTypedRuleContext(ANTLRv4Parser.AtomContext,0)
def block(self):
return self.getTypedRuleContext(ANTLRv4Parser.BlockContext,0)
def getRuleIndex(self):
return ANTLRv4Parser.RULE_labeledElement
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterLabeledElement" ):
listener.enterLabeledElement(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitLabeledElement" ):
listener.exitLabeledElement(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitLabeledElement" ):
return visitor.visitLabeledElement(self)
else:
return visitor.visitChildren(self)
    def labeledElement(self):
        """Parse rule ``labeledElement``: identifier (ASSIGN | PLUS_ASSIGN) (atom | block)."""
        localctx = ANTLRv4Parser.LabeledElementContext(self, self._ctx, self.state)
        self.enterRule(localctx, 94, self.RULE_labeledElement)
        self._la = 0 # Token type
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 502
            self.identifier()
            self.state = 503
            _la = self._input.LA(1)
            # Expect '=' or '+='; otherwise attempt single-token recovery.
            if not(_la==ANTLRv4Parser.ASSIGN or _la==ANTLRv4Parser.PLUS_ASSIGN):
                self._errHandler.recoverInline(self)
            else:
                self._errHandler.reportMatch(self)
                self.consume()
            self.state = 506
            self._errHandler.sync(self)
            token = self._input.LA(1)
            if token in [ANTLRv4Parser.TOKEN_REF, ANTLRv4Parser.RULE_REF, ANTLRv4Parser.STRING_LITERAL, ANTLRv4Parser.DOT, ANTLRv4Parser.NOT]:
                self.state = 504
                self.atom()
                pass
            elif token in [ANTLRv4Parser.LPAREN]:
                self.state = 505
                self.block()
                pass
            else:
                raise NoViableAltException(self)
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
class EbnfSuffixContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def QUESTION(self, i:int=None):
if i is None:
return self.getTokens(ANTLRv4Parser.QUESTION)
else:
return self.getToken(ANTLRv4Parser.QUESTION, i)
def STAR(self):
return self.getToken(ANTLRv4Parser.STAR, 0)
def PLUS(self):
return self.getToken(ANTLRv4Parser.PLUS, 0)
def getRuleIndex(self):
return ANTLRv4Parser.RULE_ebnfSuffix
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterEbnfSuffix" ):
listener.enterEbnfSuffix(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitEbnfSuffix" ):
listener.exitEbnfSuffix(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitEbnfSuffix" ):
return visitor.visitEbnfSuffix(self)
else:
return visitor.visitChildren(self)
    def ebnfSuffix(self):
        """Parse rule ``ebnfSuffix``: QUESTION QUESTION? | STAR QUESTION? | PLUS QUESTION?.

        The optional trailing '?' marks a non-greedy suffix.
        """
        localctx = ANTLRv4Parser.EbnfSuffixContext(self, self._ctx, self.state)
        self.enterRule(localctx, 96, self.RULE_ebnfSuffix)
        self._la = 0 # Token type
        try:
            self.state = 520
            self._errHandler.sync(self)
            token = self._input.LA(1)
            if token in [ANTLRv4Parser.QUESTION]:
                self.enterOuterAlt(localctx, 1)
                self.state = 508
                self.match(ANTLRv4Parser.QUESTION)
                self.state = 510
                self._errHandler.sync(self)
                _la = self._input.LA(1)
                if _la==ANTLRv4Parser.QUESTION:
                    self.state = 509
                    self.match(ANTLRv4Parser.QUESTION)
                pass
            elif token in [ANTLRv4Parser.STAR]:
                self.enterOuterAlt(localctx, 2)
                self.state = 512
                self.match(ANTLRv4Parser.STAR)
                self.state = 514
                self._errHandler.sync(self)
                _la = self._input.LA(1)
                if _la==ANTLRv4Parser.QUESTION:
                    self.state = 513
                    self.match(ANTLRv4Parser.QUESTION)
                pass
            elif token in [ANTLRv4Parser.PLUS]:
                self.enterOuterAlt(localctx, 3)
                self.state = 516
                self.match(ANTLRv4Parser.PLUS)
                self.state = 518
                self._errHandler.sync(self)
                _la = self._input.LA(1)
                if _la==ANTLRv4Parser.QUESTION:
                    self.state = 517
                    self.match(ANTLRv4Parser.QUESTION)
                pass
            else:
                raise NoViableAltException(self)
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
class LexerAtomContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def getRuleIndex(self):
return ANTLRv4Parser.RULE_lexerAtom
def copyFrom(self, ctx:ParserRuleContext):
super().copyFrom(ctx)
class LexerAtomNotContext(LexerAtomContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a ANTLRv4Parser.LexerAtomContext
super().__init__(parser)
self.copyFrom(ctx)
def notSet(self):
return self.getTypedRuleContext(ANTLRv4Parser.NotSetContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterLexerAtomNot" ):
listener.enterLexerAtomNot(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitLexerAtomNot" ):
listener.exitLexerAtomNot(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitLexerAtomNot" ):
return visitor.visitLexerAtomNot(self)
else:
return visitor.visitChildren(self)
class LexerAtomRangeContext(LexerAtomContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a ANTLRv4Parser.LexerAtomContext
super().__init__(parser)
self.copyFrom(ctx)
def characterRange(self):
return self.getTypedRuleContext(ANTLRv4Parser.CharacterRangeContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterLexerAtomRange" ):
listener.enterLexerAtomRange(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitLexerAtomRange" ):
listener.exitLexerAtomRange(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitLexerAtomRange" ):
return visitor.visitLexerAtomRange(self)
else:
return visitor.visitChildren(self)
class LexerAtomCharSetContext(LexerAtomContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a ANTLRv4Parser.LexerAtomContext
super().__init__(parser)
self.value = None # Token
self.copyFrom(ctx)
def LEXER_CHAR_SET(self):
return self.getToken(ANTLRv4Parser.LEXER_CHAR_SET, 0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterLexerAtomCharSet" ):
listener.enterLexerAtomCharSet(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitLexerAtomCharSet" ):
listener.exitLexerAtomCharSet(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitLexerAtomCharSet" ):
return visitor.visitLexerAtomCharSet(self)
else:
return visitor.visitChildren(self)
class LexerAtomWildcardContext(LexerAtomContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a ANTLRv4Parser.LexerAtomContext
super().__init__(parser)
self.copyFrom(ctx)
def DOT(self):
return self.getToken(ANTLRv4Parser.DOT, 0)
def elementOptions(self):
return self.getTypedRuleContext(ANTLRv4Parser.ElementOptionsContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterLexerAtomWildcard" ):
listener.enterLexerAtomWildcard(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitLexerAtomWildcard" ):
listener.exitLexerAtomWildcard(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitLexerAtomWildcard" ):
return visitor.visitLexerAtomWildcard(self)
else:
return visitor.visitChildren(self)
class LexerAtomTerminalContext(LexerAtomContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a ANTLRv4Parser.LexerAtomContext
super().__init__(parser)
self.copyFrom(ctx)
def terminal(self):
return self.getTypedRuleContext(ANTLRv4Parser.TerminalContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterLexerAtomTerminal" ):
listener.enterLexerAtomTerminal(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitLexerAtomTerminal" ):
listener.exitLexerAtomTerminal(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitLexerAtomTerminal" ):
return visitor.visitLexerAtomTerminal(self)
else:
return visitor.visitChildren(self)
class LexerAtomDocContext(LexerAtomContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a ANTLRv4Parser.LexerAtomContext
super().__init__(parser)
self.value = None # Token
self.copyFrom(ctx)
def DOC_COMMENT(self):
return self.getToken(ANTLRv4Parser.DOC_COMMENT, 0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterLexerAtomDoc" ):
listener.enterLexerAtomDoc(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitLexerAtomDoc" ):
listener.exitLexerAtomDoc(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitLexerAtomDoc" ):
return visitor.visitLexerAtomDoc(self)
else:
return visitor.visitChildren(self)
    def lexerAtom(self):
        """Parse rule ``lexerAtom``.

        Alternatives, chosen by adaptive prediction:
          1. characterRange
          2. terminal
          3. notSet
          4. LEXER_CHAR_SET
          5. DOT elementOptions?
          6. DOC_COMMENT
        """
        localctx = ANTLRv4Parser.LexerAtomContext(self, self._ctx, self.state)
        self.enterRule(localctx, 98, self.RULE_lexerAtom)
        self._la = 0 # Token type
        try:
            self.state = 531
            self._errHandler.sync(self)
            la_ = self._interp.adaptivePredict(self._input,66,self._ctx)
            if la_ == 1:
                localctx = ANTLRv4Parser.LexerAtomRangeContext(self, localctx)
                self.enterOuterAlt(localctx, 1)
                self.state = 522
                self.characterRange()
                pass
            elif la_ == 2:
                localctx = ANTLRv4Parser.LexerAtomTerminalContext(self, localctx)
                self.enterOuterAlt(localctx, 2)
                self.state = 523
                self.terminal()
                pass
            elif la_ == 3:
                localctx = ANTLRv4Parser.LexerAtomNotContext(self, localctx)
                self.enterOuterAlt(localctx, 3)
                self.state = 524
                self.notSet()
                pass
            elif la_ == 4:
                localctx = ANTLRv4Parser.LexerAtomCharSetContext(self, localctx)
                self.enterOuterAlt(localctx, 4)
                self.state = 525
                localctx.value = self.match(ANTLRv4Parser.LEXER_CHAR_SET)
                pass
            elif la_ == 5:
                localctx = ANTLRv4Parser.LexerAtomWildcardContext(self, localctx)
                self.enterOuterAlt(localctx, 5)
                self.state = 526
                self.match(ANTLRv4Parser.DOT)
                self.state = 528
                self._errHandler.sync(self)
                _la = self._input.LA(1)
                # Optional <...> element options after the wildcard.
                if _la==ANTLRv4Parser.LT:
                    self.state = 527
                    self.elementOptions()
                pass
            elif la_ == 6:
                localctx = ANTLRv4Parser.LexerAtomDocContext(self, localctx)
                self.enterOuterAlt(localctx, 6)
                self.state = 530
                localctx.value = self.match(ANTLRv4Parser.DOC_COMMENT)
                pass
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
class AtomContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def getRuleIndex(self):
return ANTLRv4Parser.RULE_atom
def copyFrom(self, ctx:ParserRuleContext):
super().copyFrom(ctx)
class AtomTerminalContext(AtomContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a ANTLRv4Parser.AtomContext
super().__init__(parser)
self.copyFrom(ctx)
def terminal(self):
return self.getTypedRuleContext(ANTLRv4Parser.TerminalContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterAtomTerminal" ):
listener.enterAtomTerminal(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitAtomTerminal" ):
listener.exitAtomTerminal(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitAtomTerminal" ):
return visitor.visitAtomTerminal(self)
else:
return visitor.visitChildren(self)
class AtomWildcardContext(AtomContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a ANTLRv4Parser.AtomContext
super().__init__(parser)
self.copyFrom(ctx)
def DOT(self):
return self.getToken(ANTLRv4Parser.DOT, 0)
def elementOptions(self):
return self.getTypedRuleContext(ANTLRv4Parser.ElementOptionsContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterAtomWildcard" ):
listener.enterAtomWildcard(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitAtomWildcard" ):
listener.exitAtomWildcard(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitAtomWildcard" ):
return visitor.visitAtomWildcard(self)
else:
return visitor.visitChildren(self)
class AtomRuleRefContext(AtomContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a ANTLRv4Parser.AtomContext
super().__init__(parser)
self.copyFrom(ctx)
def ruleref(self):
return self.getTypedRuleContext(ANTLRv4Parser.RulerefContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterAtomRuleRef" ):
listener.enterAtomRuleRef(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitAtomRuleRef" ):
listener.exitAtomRuleRef(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitAtomRuleRef" ):
return visitor.visitAtomRuleRef(self)
else:
return visitor.visitChildren(self)
class AtomNotContext(AtomContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a ANTLRv4Parser.AtomContext
super().__init__(parser)
self.copyFrom(ctx)
def notSet(self):
return self.getTypedRuleContext(ANTLRv4Parser.NotSetContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterAtomNot" ):
listener.enterAtomNot(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitAtomNot" ):
listener.exitAtomNot(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitAtomNot" ):
return visitor.visitAtomNot(self)
else:
return visitor.visitChildren(self)
    def atom(self):
        """Parse rule ``atom``: terminal | ruleref | notSet | DOT elementOptions?."""
        localctx = ANTLRv4Parser.AtomContext(self, self._ctx, self.state)
        self.enterRule(localctx, 100, self.RULE_atom)
        self._la = 0 # Token type
        try:
            self.state = 540
            self._errHandler.sync(self)
            token = self._input.LA(1)
            if token in [ANTLRv4Parser.TOKEN_REF, ANTLRv4Parser.STRING_LITERAL]:
                localctx = ANTLRv4Parser.AtomTerminalContext(self, localctx)
                self.enterOuterAlt(localctx, 1)
                self.state = 533
                self.terminal()
                pass
            elif token in [ANTLRv4Parser.RULE_REF]:
                localctx = ANTLRv4Parser.AtomRuleRefContext(self, localctx)
                self.enterOuterAlt(localctx, 2)
                self.state = 534
                self.ruleref()
                pass
            elif token in [ANTLRv4Parser.NOT]:
                localctx = ANTLRv4Parser.AtomNotContext(self, localctx)
                self.enterOuterAlt(localctx, 3)
                self.state = 535
                self.notSet()
                pass
            elif token in [ANTLRv4Parser.DOT]:
                localctx = ANTLRv4Parser.AtomWildcardContext(self, localctx)
                self.enterOuterAlt(localctx, 4)
                self.state = 536
                self.match(ANTLRv4Parser.DOT)
                self.state = 538
                self._errHandler.sync(self)
                _la = self._input.LA(1)
                # Optional <...> element options after the wildcard.
                if _la==ANTLRv4Parser.LT:
                    self.state = 537
                    self.elementOptions()
                pass
            else:
                raise NoViableAltException(self)
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
class NotSetContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def getRuleIndex(self):
return ANTLRv4Parser.RULE_notSet
def copyFrom(self, ctx:ParserRuleContext):
super().copyFrom(ctx)
class NotBlockContext(NotSetContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a ANTLRv4Parser.NotSetContext
super().__init__(parser)
self.value = None # BlockSetContext
self.copyFrom(ctx)
def NOT(self):
return self.getToken(ANTLRv4Parser.NOT, 0)
def blockSet(self):
return self.getTypedRuleContext(ANTLRv4Parser.BlockSetContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterNotBlock" ):
listener.enterNotBlock(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitNotBlock" ):
listener.exitNotBlock(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitNotBlock" ):
return visitor.visitNotBlock(self)
else:
return visitor.visitChildren(self)
class NotElementContext(NotSetContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a ANTLRv4Parser.NotSetContext
super().__init__(parser)
self.value = None # SetElementContext
self.copyFrom(ctx)
def NOT(self):
return self.getToken(ANTLRv4Parser.NOT, 0)
def setElement(self):
return self.getTypedRuleContext(ANTLRv4Parser.SetElementContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterNotElement" ):
listener.enterNotElement(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitNotElement" ):
listener.exitNotElement(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitNotElement" ):
return visitor.visitNotElement(self)
else:
return visitor.visitChildren(self)
def notSet(self):
localctx = ANTLRv4Parser.NotSetContext(self, self._ctx, self.state)
self.enterRule(localctx, 102, self.RULE_notSet)
try:
self.state = 546
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,69,self._ctx)
if la_ == 1:
localctx = ANTLRv4Parser.NotElementContext(self, localctx)
self.enterOuterAlt(localctx, 1)
self.state = 542
self.match(ANTLRv4Parser.NOT)
self.state = 543
localctx.value = self.setElement()
pass
elif la_ == 2:
localctx = ANTLRv4Parser.NotBlockContext(self, localctx)
self.enterOuterAlt(localctx, 2)
self.state = 544
self.match(ANTLRv4Parser.NOT)
self.state = 545
localctx.value = self.blockSet()
pass
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class BlockSetContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
self._setElement = None # SetElementContext
self.elements = list() # of SetElementContexts
def LPAREN(self):
return self.getToken(ANTLRv4Parser.LPAREN, 0)
def RPAREN(self):
return self.getToken(ANTLRv4Parser.RPAREN, 0)
def setElement(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(ANTLRv4Parser.SetElementContext)
else:
return self.getTypedRuleContext(ANTLRv4Parser.SetElementContext,i)
def OR(self, i:int=None):
if i is None:
return self.getTokens(ANTLRv4Parser.OR)
else:
return self.getToken(ANTLRv4Parser.OR, i)
def getRuleIndex(self):
return ANTLRv4Parser.RULE_blockSet
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterBlockSet" ):
listener.enterBlockSet(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitBlockSet" ):
listener.exitBlockSet(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitBlockSet" ):
return visitor.visitBlockSet(self)
else:
return visitor.visitChildren(self)
def blockSet(self):
localctx = ANTLRv4Parser.BlockSetContext(self, self._ctx, self.state)
self.enterRule(localctx, 104, self.RULE_blockSet)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 548
self.match(ANTLRv4Parser.LPAREN)
self.state = 549
localctx._setElement = self.setElement()
localctx.elements.append(localctx._setElement)
self.state = 554
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la==ANTLRv4Parser.OR:
self.state = 550
self.match(ANTLRv4Parser.OR)
self.state = 551
localctx._setElement = self.setElement()
localctx.elements.append(localctx._setElement)
self.state = 556
self._errHandler.sync(self)
_la = self._input.LA(1)
self.state = 557
self.match(ANTLRv4Parser.RPAREN)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class SetElementContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def getRuleIndex(self):
return ANTLRv4Parser.RULE_setElement
def copyFrom(self, ctx:ParserRuleContext):
super().copyFrom(ctx)
class SetElementRefContext(SetElementContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a ANTLRv4Parser.SetElementContext
super().__init__(parser)
self.value = None # Token
self.copyFrom(ctx)
def TOKEN_REF(self):
return self.getToken(ANTLRv4Parser.TOKEN_REF, 0)
def elementOptions(self):
return self.getTypedRuleContext(ANTLRv4Parser.ElementOptionsContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterSetElementRef" ):
listener.enterSetElementRef(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitSetElementRef" ):
listener.exitSetElementRef(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitSetElementRef" ):
return visitor.visitSetElementRef(self)
else:
return visitor.visitChildren(self)
class SetElementRangeContext(SetElementContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a ANTLRv4Parser.SetElementContext
super().__init__(parser)
self.copyFrom(ctx)
def characterRange(self):
return self.getTypedRuleContext(ANTLRv4Parser.CharacterRangeContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterSetElementRange" ):
listener.enterSetElementRange(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitSetElementRange" ):
listener.exitSetElementRange(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitSetElementRange" ):
return visitor.visitSetElementRange(self)
else:
return visitor.visitChildren(self)
class SetElementLitContext(SetElementContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a ANTLRv4Parser.SetElementContext
super().__init__(parser)
self.value = None # Token
self.copyFrom(ctx)
def STRING_LITERAL(self):
return self.getToken(ANTLRv4Parser.STRING_LITERAL, 0)
def elementOptions(self):
return self.getTypedRuleContext(ANTLRv4Parser.ElementOptionsContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterSetElementLit" ):
listener.enterSetElementLit(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitSetElementLit" ):
listener.exitSetElementLit(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitSetElementLit" ):
return visitor.visitSetElementLit(self)
else:
return visitor.visitChildren(self)
class SetElementCharSetContext(SetElementContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a ANTLRv4Parser.SetElementContext
super().__init__(parser)
self.value = None # Token
self.copyFrom(ctx)
def LEXER_CHAR_SET(self):
return self.getToken(ANTLRv4Parser.LEXER_CHAR_SET, 0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterSetElementCharSet" ):
listener.enterSetElementCharSet(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitSetElementCharSet" ):
listener.exitSetElementCharSet(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitSetElementCharSet" ):
return visitor.visitSetElementCharSet(self)
else:
return visitor.visitChildren(self)
def setElement(self):
localctx = ANTLRv4Parser.SetElementContext(self, self._ctx, self.state)
self.enterRule(localctx, 106, self.RULE_setElement)
self._la = 0 # Token type
try:
self.state = 569
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,73,self._ctx)
if la_ == 1:
localctx = ANTLRv4Parser.SetElementRefContext(self, localctx)
self.enterOuterAlt(localctx, 1)
self.state = 559
localctx.value = self.match(ANTLRv4Parser.TOKEN_REF)
self.state = 561
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==ANTLRv4Parser.LT:
self.state = 560
self.elementOptions()
pass
elif la_ == 2:
localctx = ANTLRv4Parser.SetElementLitContext(self, localctx)
self.enterOuterAlt(localctx, 2)
self.state = 563
localctx.value = self.match(ANTLRv4Parser.STRING_LITERAL)
self.state = 565
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==ANTLRv4Parser.LT:
self.state = 564
self.elementOptions()
pass
elif la_ == 3:
localctx = ANTLRv4Parser.SetElementRangeContext(self, localctx)
self.enterOuterAlt(localctx, 3)
self.state = 567
self.characterRange()
pass
elif la_ == 4:
localctx = ANTLRv4Parser.SetElementCharSetContext(self, localctx)
self.enterOuterAlt(localctx, 4)
self.state = 568
localctx.value = self.match(ANTLRv4Parser.LEXER_CHAR_SET)
pass
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class BlockContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def LPAREN(self):
return self.getToken(ANTLRv4Parser.LPAREN, 0)
def altList(self):
return self.getTypedRuleContext(ANTLRv4Parser.AltListContext,0)
def RPAREN(self):
return self.getToken(ANTLRv4Parser.RPAREN, 0)
def COLON(self):
return self.getToken(ANTLRv4Parser.COLON, 0)
def optionsSpec(self):
return self.getTypedRuleContext(ANTLRv4Parser.OptionsSpecContext,0)
def ruleAction(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(ANTLRv4Parser.RuleActionContext)
else:
return self.getTypedRuleContext(ANTLRv4Parser.RuleActionContext,i)
def getRuleIndex(self):
return ANTLRv4Parser.RULE_block
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterBlock" ):
listener.enterBlock(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitBlock" ):
listener.exitBlock(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitBlock" ):
return visitor.visitBlock(self)
else:
return visitor.visitChildren(self)
def block(self):
localctx = ANTLRv4Parser.BlockContext(self, self._ctx, self.state)
self.enterRule(localctx, 108, self.RULE_block)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 571
self.match(ANTLRv4Parser.LPAREN)
self.state = 582
self._errHandler.sync(self)
_la = self._input.LA(1)
if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << ANTLRv4Parser.OPTIONS) | (1 << ANTLRv4Parser.COLON) | (1 << ANTLRv4Parser.AT))) != 0):
self.state = 573
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==ANTLRv4Parser.OPTIONS:
self.state = 572
self.optionsSpec()
self.state = 578
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la==ANTLRv4Parser.AT:
self.state = 575
self.ruleAction()
self.state = 580
self._errHandler.sync(self)
_la = self._input.LA(1)
self.state = 581
self.match(ANTLRv4Parser.COLON)
self.state = 584
self.altList()
self.state = 585
self.match(ANTLRv4Parser.RPAREN)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class RulerefContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
self.value = None # Token
def RULE_REF(self):
return self.getToken(ANTLRv4Parser.RULE_REF, 0)
def argActionBlock(self):
return self.getTypedRuleContext(ANTLRv4Parser.ArgActionBlockContext,0)
def elementOptions(self):
return self.getTypedRuleContext(ANTLRv4Parser.ElementOptionsContext,0)
def getRuleIndex(self):
return ANTLRv4Parser.RULE_ruleref
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterRuleref" ):
listener.enterRuleref(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitRuleref" ):
listener.exitRuleref(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitRuleref" ):
return visitor.visitRuleref(self)
else:
return visitor.visitChildren(self)
def ruleref(self):
localctx = ANTLRv4Parser.RulerefContext(self, self._ctx, self.state)
self.enterRule(localctx, 110, self.RULE_ruleref)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 587
localctx.value = self.match(ANTLRv4Parser.RULE_REF)
self.state = 589
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==ANTLRv4Parser.BEGIN_ARGUMENT:
self.state = 588
self.argActionBlock()
self.state = 592
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==ANTLRv4Parser.LT:
self.state = 591
self.elementOptions()
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class CharacterRangeContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
self.start = None # Token
self.end = None # Token
def RANGE(self):
return self.getToken(ANTLRv4Parser.RANGE, 0)
def STRING_LITERAL(self, i:int=None):
if i is None:
return self.getTokens(ANTLRv4Parser.STRING_LITERAL)
else:
return self.getToken(ANTLRv4Parser.STRING_LITERAL, i)
def getRuleIndex(self):
return ANTLRv4Parser.RULE_characterRange
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterCharacterRange" ):
listener.enterCharacterRange(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitCharacterRange" ):
listener.exitCharacterRange(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitCharacterRange" ):
return visitor.visitCharacterRange(self)
else:
return visitor.visitChildren(self)
def characterRange(self):
localctx = ANTLRv4Parser.CharacterRangeContext(self, self._ctx, self.state)
self.enterRule(localctx, 112, self.RULE_characterRange)
try:
self.enterOuterAlt(localctx, 1)
self.state = 594
localctx.start = self.match(ANTLRv4Parser.STRING_LITERAL)
self.state = 595
self.match(ANTLRv4Parser.RANGE)
self.state = 596
localctx.end = self.match(ANTLRv4Parser.STRING_LITERAL)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class TerminalContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def getRuleIndex(self):
return ANTLRv4Parser.RULE_terminal
def copyFrom(self, ctx:ParserRuleContext):
super().copyFrom(ctx)
class TerminalRefContext(TerminalContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a ANTLRv4Parser.TerminalContext
super().__init__(parser)
self.value = None # Token
self.copyFrom(ctx)
def TOKEN_REF(self):
return self.getToken(ANTLRv4Parser.TOKEN_REF, 0)
def elementOptions(self):
return self.getTypedRuleContext(ANTLRv4Parser.ElementOptionsContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterTerminalRef" ):
listener.enterTerminalRef(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitTerminalRef" ):
listener.exitTerminalRef(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitTerminalRef" ):
return visitor.visitTerminalRef(self)
else:
return visitor.visitChildren(self)
class TerminalLitContext(TerminalContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a ANTLRv4Parser.TerminalContext
super().__init__(parser)
self.value = None # Token
self.copyFrom(ctx)
def STRING_LITERAL(self):
return self.getToken(ANTLRv4Parser.STRING_LITERAL, 0)
def elementOptions(self):
return self.getTypedRuleContext(ANTLRv4Parser.ElementOptionsContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterTerminalLit" ):
listener.enterTerminalLit(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitTerminalLit" ):
listener.exitTerminalLit(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitTerminalLit" ):
return visitor.visitTerminalLit(self)
else:
return visitor.visitChildren(self)
def terminal(self):
localctx = ANTLRv4Parser.TerminalContext(self, self._ctx, self.state)
self.enterRule(localctx, 114, self.RULE_terminal)
self._la = 0 # Token type
try:
self.state = 606
self._errHandler.sync(self)
token = self._input.LA(1)
if token in [ANTLRv4Parser.TOKEN_REF]:
localctx = ANTLRv4Parser.TerminalRefContext(self, localctx)
self.enterOuterAlt(localctx, 1)
self.state = 598
localctx.value = self.match(ANTLRv4Parser.TOKEN_REF)
self.state = 600
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==ANTLRv4Parser.LT:
self.state = 599
self.elementOptions()
pass
elif token in [ANTLRv4Parser.STRING_LITERAL]:
localctx = ANTLRv4Parser.TerminalLitContext(self, localctx)
self.enterOuterAlt(localctx, 2)
self.state = 602
localctx.value = self.match(ANTLRv4Parser.STRING_LITERAL)
self.state = 604
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==ANTLRv4Parser.LT:
self.state = 603
self.elementOptions()
pass
else:
raise NoViableAltException(self)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class ElementOptionsContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def LT(self):
return self.getToken(ANTLRv4Parser.LT, 0)
def elementOption(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(ANTLRv4Parser.ElementOptionContext)
else:
return self.getTypedRuleContext(ANTLRv4Parser.ElementOptionContext,i)
def GT(self):
return self.getToken(ANTLRv4Parser.GT, 0)
def COMMA(self, i:int=None):
if i is None:
return self.getTokens(ANTLRv4Parser.COMMA)
else:
return self.getToken(ANTLRv4Parser.COMMA, i)
def getRuleIndex(self):
return ANTLRv4Parser.RULE_elementOptions
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterElementOptions" ):
listener.enterElementOptions(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitElementOptions" ):
listener.exitElementOptions(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitElementOptions" ):
return visitor.visitElementOptions(self)
else:
return visitor.visitChildren(self)
def elementOptions(self):
localctx = ANTLRv4Parser.ElementOptionsContext(self, self._ctx, self.state)
self.enterRule(localctx, 116, self.RULE_elementOptions)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 608
self.match(ANTLRv4Parser.LT)
self.state = 609
self.elementOption()
self.state = 614
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la==ANTLRv4Parser.COMMA:
self.state = 610
self.match(ANTLRv4Parser.COMMA)
self.state = 611
self.elementOption()
self.state = 616
self._errHandler.sync(self)
_la = self._input.LA(1)
self.state = 617
self.match(ANTLRv4Parser.GT)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class ElementOptionContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def identifier(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(ANTLRv4Parser.IdentifierContext)
else:
return self.getTypedRuleContext(ANTLRv4Parser.IdentifierContext,i)
def ASSIGN(self):
return self.getToken(ANTLRv4Parser.ASSIGN, 0)
def STRING_LITERAL(self):
return self.getToken(ANTLRv4Parser.STRING_LITERAL, 0)
def getRuleIndex(self):
return ANTLRv4Parser.RULE_elementOption
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterElementOption" ):
listener.enterElementOption(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitElementOption" ):
listener.exitElementOption(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitElementOption" ):
return visitor.visitElementOption(self)
else:
return visitor.visitChildren(self)
def elementOption(self):
localctx = ANTLRv4Parser.ElementOptionContext(self, self._ctx, self.state)
self.enterRule(localctx, 118, self.RULE_elementOption)
try:
self.state = 626
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,84,self._ctx)
if la_ == 1:
self.enterOuterAlt(localctx, 1)
self.state = 619
self.identifier()
pass
elif la_ == 2:
self.enterOuterAlt(localctx, 2)
self.state = 620
self.identifier()
self.state = 621
self.match(ANTLRv4Parser.ASSIGN)
self.state = 624
self._errHandler.sync(self)
token = self._input.LA(1)
if token in [ANTLRv4Parser.TOKEN_REF, ANTLRv4Parser.RULE_REF]:
self.state = 622
self.identifier()
pass
elif token in [ANTLRv4Parser.STRING_LITERAL]:
self.state = 623
self.match(ANTLRv4Parser.STRING_LITERAL)
pass
else:
raise NoViableAltException(self)
pass
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class IdentifierContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def getRuleIndex(self):
return ANTLRv4Parser.RULE_identifier
def copyFrom(self, ctx:ParserRuleContext):
super().copyFrom(ctx)
class RuleRefIdentifierContext(IdentifierContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a ANTLRv4Parser.IdentifierContext
super().__init__(parser)
self.value = None # Token
self.copyFrom(ctx)
def RULE_REF(self):
return self.getToken(ANTLRv4Parser.RULE_REF, 0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterRuleRefIdentifier" ):
listener.enterRuleRefIdentifier(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitRuleRefIdentifier" ):
listener.exitRuleRefIdentifier(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitRuleRefIdentifier" ):
return visitor.visitRuleRefIdentifier(self)
else:
return visitor.visitChildren(self)
class TokenRefIdentifierContext(IdentifierContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a ANTLRv4Parser.IdentifierContext
super().__init__(parser)
self.value = None # Token
self.copyFrom(ctx)
def TOKEN_REF(self):
return self.getToken(ANTLRv4Parser.TOKEN_REF, 0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterTokenRefIdentifier" ):
listener.enterTokenRefIdentifier(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitTokenRefIdentifier" ):
listener.exitTokenRefIdentifier(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitTokenRefIdentifier" ):
return visitor.visitTokenRefIdentifier(self)
else:
return visitor.visitChildren(self)
def identifier(self):
localctx = ANTLRv4Parser.IdentifierContext(self, self._ctx, self.state)
self.enterRule(localctx, 120, self.RULE_identifier)
try:
self.state = 630
self._errHandler.sync(self)
token = self._input.LA(1)
if token in [ANTLRv4Parser.RULE_REF]:
localctx = ANTLRv4Parser.RuleRefIdentifierContext(self, localctx)
self.enterOuterAlt(localctx, 1)
self.state = 628
localctx.value = self.match(ANTLRv4Parser.RULE_REF)
pass
elif token in [ANTLRv4Parser.TOKEN_REF]:
localctx = ANTLRv4Parser.TokenRefIdentifierContext(self, localctx)
self.enterOuterAlt(localctx, 2)
self.state = 629
localctx.value = self.match(ANTLRv4Parser.TOKEN_REF)
pass
else:
raise NoViableAltException(self)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
| [
"io.StringIO"
] | [((134, 144), 'io.StringIO', 'StringIO', ([], {}), '()\n', (142, 144), False, 'from io import StringIO\n')] |
# -*- coding: utf-8 -*-
"""Specifies static assets (CSS, JS) required by the CATMAID front-end.
This module specifies all the static files that are required by the
synapsesuggestor front-end.
"""
# NOTE: the docstring must precede the __future__ import; previously it came
# after it, which made it a no-op string expression rather than the module
# docstring (module.__doc__ was None).
from __future__ import unicode_literals

from collections import OrderedDict

# django-pipeline style bundle definitions, keyed by bundle name.
# Insertion order is preserved so bundles are emitted in declaration order.
JAVASCRIPT = OrderedDict()
JAVASCRIPT['synapsesuggestor'] = {
    'source_filenames': (
        'synapsesuggestor/js/widgets/synapse-detection-table.js',
    ),
    'output_filename': 'js/synapsesuggestor.js'
}
| [
"collections.OrderedDict"
] | [((289, 302), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (300, 302), False, 'from collections import OrderedDict\n')] |
from pathlib import Path
import click
import cligj
import geojson
import mercantile
from shapely.geometry import asShape, box
from shapely.ops import split
@click.command()
@cligj.features_in_arg
@click.option(
    '-z',
    '--min-zoom',
    type=int,
    required=True,
    help='Min zoom level to create tiles for',
)
@click.option(
    '-Z',
    '--max-zoom',
    type=int,
    required=True,
    help='Max zoom level to create tiles for (inclusive)',
)
@click.option(
    '-d',
    '--tile-dir',
    type=click.Path(file_okay=False, dir_okay=True, writable=True))
@click.option(
    '--allowed-geom-type',
    type=str,
    required=False,
    multiple=True,
    default=[],
    help='Geometry types to keep in exported GeoJSON features.')
def cut_geojson(features, min_zoom, max_zoom, tile_dir, allowed_geom_type):
    """Cut GeoJSON features into xyz tiles

    Each input feature is clipped against every xyz tile it intersects in the
    zoom range [min_zoom, max_zoom], and the clipped pieces are written as
    line-delimited GeoJSON to ``tile_dir/{z}/{x}/{y}.geojson``.

    If ``allowed_geom_type`` is non-empty, clipped pieces whose geometry type
    is not listed are skipped (a message is printed for each skip).
    """
    geometry_types = [
        'Point', 'MultiPoint', 'LineString', 'MultiLineString', 'Polygon',
        'MultiPolygon']
    if not all(t in geometry_types for t in allowed_geom_type):
        raise ValueError(f'allowed_geom_type must be one of: {geometry_types}')
    tile_dir = Path(tile_dir)
    for feature in features:
        # NOTE(review): asShape is deprecated in Shapely >= 1.8 and removed in
        # 2.0; shapely.geometry.shape is the modern equivalent -- confirm the
        # pinned shapely version before upgrading.
        geometry = asShape(feature['geometry'])
        tiles = find_tiles(geometry, min_zoom, max_zoom)
        for tile in tiles:
            # split() can yield several pieces per tile (e.g. a polygon that
            # re-enters the tile), so this is always a list.
            clipped_geometries = clip_geometry_to_tile(geometry, tile)
            new_features = []
            for clipped_geometry in clipped_geometries:
                if allowed_geom_type:
                    geom_type = clipped_geometry.type
                    if geom_type not in allowed_geom_type:
                        print(f'Skipping feature of type: {geom_type}')
                        continue
                # Original feature properties are carried through unchanged.
                new_features.append(
                    geojson.Feature(
                        geometry=clipped_geometry,
                        properties=feature['properties']))
            # Write feature to tile_dir
            this_tile_dir = (tile_dir / str(tile.z) / str(tile.x))
            this_tile_dir.mkdir(parents=True, exist_ok=True)
            # Append mode: re-running the command on the same tile_dir adds
            # duplicate features rather than overwriting -- presumably
            # intentional (streaming multiple inputs into one tile set), but
            # worth confirming.
            with open(this_tile_dir / f'{str(tile.y)}.geojson', 'a') as f:
                for new_feature in new_features:
                    # One compact GeoJSON feature per line (line-delimited GeoJSON).
                    f.write(geojson.dumps(new_feature, separators=(',', ':')))
                    f.write('\n')
def find_tiles(geometry, min_zoom, max_zoom):
    """Return every xyz tile in the zoom range that intersects *geometry*.

    Parameters
    ----------
    geometry : shapely geometry
        Geometry whose bounding box seeds the candidate tile search.
    min_zoom, max_zoom : int
        Inclusive zoom range; ``min_zoom`` must be <= ``max_zoom``.

    Returns
    -------
    list of mercantile.Tile

    Raises
    ------
    ValueError
        If ``min_zoom`` > ``max_zoom``.  This was previously an ``assert``,
        which is silently stripped when Python runs with ``-O``.
    """
    if min_zoom > max_zoom:
        raise ValueError('min zoom must be <= max zoom')
    # mercantile.tiles() enumerates every tile touching the bounding box;
    # keep only those whose footprint actually intersects the geometry
    # (the bbox can overshoot for non-rectangular shapes).
    bound_tiles = mercantile.tiles(
        *geometry.bounds, zooms=range(min_zoom, max_zoom + 1))
    return [
        tile for tile in bound_tiles
        if box(*mercantile.bounds(tile)).intersects(geometry)
    ]
def clip_geometry_to_tile(geometry, tile):
    """Split *geometry* along the boundary of *tile* and return the pieces
    that lie inside the tile.

    ``shapely.ops.split`` returns a geometry collection of fragments on both
    sides of the tile boundary; only the fragments contained by the tile's
    bounding box are kept.
    """
    tile_box = box(*mercantile.bounds(tile))
    fragments = split(geometry, tile_box)
    inside = []
    for fragment in fragments:
        if tile_box.contains(fragment):
            inside.append(fragment)
    return inside
if __name__ == '__main__':
    # Click parses sys.argv itself; options are declared on cut_geojson above.
    cut_geojson()
| [
"geojson.dumps",
"shapely.ops.split",
"pathlib.Path",
"click.option",
"shapely.geometry.asShape",
"geojson.Feature",
"click.Path",
"click.command",
"mercantile.bounds"
] | [((160, 175), 'click.command', 'click.command', ([], {}), '()\n', (173, 175), False, 'import click\n'), ((200, 305), 'click.option', 'click.option', (['"""-z"""', '"""--min-zoom"""'], {'type': 'int', 'required': '(True)', 'help': '"""Min zoom level to create tiles for"""'}), "('-z', '--min-zoom', type=int, required=True, help=\n 'Min zoom level to create tiles for')\n", (212, 305), False, 'import click\n'), ((325, 442), 'click.option', 'click.option', (['"""-Z"""', '"""--max-zoom"""'], {'type': 'int', 'required': '(True)', 'help': '"""Max zoom level to create tiles for (inclusive)"""'}), "('-Z', '--max-zoom', type=int, required=True, help=\n 'Max zoom level to create tiles for (inclusive)')\n", (337, 442), False, 'import click\n'), ((573, 726), 'click.option', 'click.option', (['"""--allowed-geom-type"""'], {'type': 'str', 'required': '(False)', 'multiple': '(True)', 'default': '[]', 'help': '"""Geometry types to keep in exported GeoJSON features."""'}), "('--allowed-geom-type', type=str, required=False, multiple=True,\n default=[], help='Geometry types to keep in exported GeoJSON features.')\n", (585, 726), False, 'import click\n'), ((1157, 1171), 'pathlib.Path', 'Path', (['tile_dir'], {}), '(tile_dir)\n', (1161, 1171), False, 'from pathlib import Path\n'), ((2886, 2912), 'shapely.ops.split', 'split', (['geometry', 'tile_geom'], {}), '(geometry, tile_geom)\n', (2891, 2912), False, 'from shapely.ops import split\n'), ((1221, 1249), 'shapely.geometry.asShape', 'asShape', (["feature['geometry']"], {}), "(feature['geometry'])\n", (1228, 1249), False, 'from shapely.geometry import asShape, box\n'), ((513, 570), 'click.Path', 'click.Path', ([], {'file_okay': '(False)', 'dir_okay': '(True)', 'writable': '(True)'}), '(file_okay=False, dir_okay=True, writable=True)\n', (523, 570), False, 'import click\n'), ((2802, 2825), 'mercantile.bounds', 'mercantile.bounds', (['tile'], {}), '(tile)\n', (2819, 2825), False, 'import mercantile\n'), ((1807, 1883), 'geojson.Feature', 
'geojson.Feature', ([], {'geometry': 'clipped_geometry', 'properties': "feature['properties']"}), "(geometry=clipped_geometry, properties=feature['properties'])\n", (1822, 1883), False, 'import geojson\n'), ((2255, 2304), 'geojson.dumps', 'geojson.dumps', (['new_feature'], {'separators': "(',', ':')"}), "(new_feature, separators=(',', ':'))\n", (2268, 2304), False, 'import geojson\n'), ((2622, 2645), 'mercantile.bounds', 'mercantile.bounds', (['tile'], {}), '(tile)\n', (2639, 2645), False, 'import mercantile\n')] |
from __future__ import annotations
from typing import List, Optional
from xrpl import CryptoAlgorithm
from xrpl.core.addresscodec import encode_account_public_key, encode_node_public_key
from xrpl.core.keypairs import derive_keypair, generate_seed
from xrpl.wallet import Wallet
from slk.config.helper_classes import Keypair, Ports
class Network:
    """A set of rippled nodes: generated validator keypairs plus one
    ``Ports`` assignment per node, numbered from ``start_cfg_index``.
    """

    def __init__(self: Network, num_nodes: int, start_cfg_index: int) -> None:
        self.num_nodes = num_nodes
        self.validator_keypairs = self._generate_node_keypairs()
        # One contiguous block of port configs, one per node.
        self.ports = [Ports(start_cfg_index + i) for i in range(self.num_nodes)]

    def _generate_node_keypairs(self: Network) -> List[Keypair]:
        """Generate one keypair per node, suitable for validator keys.

        Uses secp256k1 seeds; the derived private key is discarded because
        the seed itself is stored as the secret.
        """
        result = []
        for _ in range(self.num_nodes):  # index was unused; keypairs are independent
            seed = generate_seed(None, CryptoAlgorithm.SECP256K1)
            pub_key, _priv_key = derive_keypair(seed, True)
            result.append(
                Keypair(
                    public_key=encode_node_public_key(bytes.fromhex(pub_key)),
                    secret_key=seed,
                    account_id=None,
                )
            )
        return result
class SidechainNetwork(Network):
    """A :class:`Network` that additionally carries federator keypairs and a
    wallet used as the network's main account.
    """

    def __init__(
        self: SidechainNetwork,
        num_federators: int,
        start_cfg_index: int,
        num_nodes: Optional[int] = None,
    ) -> None:
        # Default the node count to the federator count when not given.
        super().__init__(num_nodes or num_federators, start_cfg_index)
        self.num_federators = num_federators
        self.federator_keypairs = self._generate_federator_keypairs()
        self.main_account = Wallet.create(CryptoAlgorithm.SECP256K1)

    def _generate_federator_keypairs(self: SidechainNetwork) -> List[Keypair]:
        """Generate one keypair per federator, suitable for federator keys.

        Federator keys use ed25519 (validator keys above use secp256k1).
        """
        result = []
        for _ in range(self.num_federators):  # index was unused; keypairs are independent
            wallet = Wallet.create(crypto_algorithm=CryptoAlgorithm.ED25519)
            result.append(
                Keypair(
                    public_key=encode_account_public_key(
                        bytes.fromhex(wallet.public_key)
                    ),
                    secret_key=wallet.seed,
                    account_id=wallet.classic_address,
                )
            )
        return result
| [
"slk.config.helper_classes.Ports",
"xrpl.core.keypairs.derive_keypair",
"xrpl.core.keypairs.generate_seed",
"xrpl.wallet.Wallet.create"
] | [((1596, 1636), 'xrpl.wallet.Wallet.create', 'Wallet.create', (['CryptoAlgorithm.SECP256K1'], {}), '(CryptoAlgorithm.SECP256K1)\n', (1609, 1636), False, 'from xrpl.wallet import Wallet\n'), ((553, 579), 'slk.config.helper_classes.Ports', 'Ports', (['(start_cfg_index + i)'], {}), '(start_cfg_index + i)\n', (558, 579), False, 'from slk.config.helper_classes import Keypair, Ports\n'), ((817, 863), 'xrpl.core.keypairs.generate_seed', 'generate_seed', (['None', 'CryptoAlgorithm.SECP256K1'], {}), '(None, CryptoAlgorithm.SECP256K1)\n', (830, 863), False, 'from xrpl.core.keypairs import derive_keypair, generate_seed\n'), ((896, 922), 'xrpl.core.keypairs.derive_keypair', 'derive_keypair', (['seed', '(True)'], {}), '(seed, True)\n', (910, 922), False, 'from xrpl.core.keypairs import derive_keypair, generate_seed\n'), ((1863, 1918), 'xrpl.wallet.Wallet.create', 'Wallet.create', ([], {'crypto_algorithm': 'CryptoAlgorithm.ED25519'}), '(crypto_algorithm=CryptoAlgorithm.ED25519)\n', (1876, 1918), False, 'from xrpl.wallet import Wallet\n')] |
### DEPRECATE THESE? OLD VERSIONS OF CLEANING FUNCTIONS FOR JUST EBIKES
### NO LONGER WORKING WITH THESE
import pandas as pd
import numpy as np
from shapely.geometry import Point
import geopandas as gpd
from cabi.utils import which_anc, station_anc_dict
from cabi.get_data import anc_gdf
gdf = anc_gdf()  # GeoDataFrame of ANC boundaries (ANC_ID column used below)
anc_dict = station_anc_dict()  # presumably maps station name -> ANC id; confirm in cabi.utils
station_keys = anc_dict.keys()  # view of known station names
## NEEDS WORK!! FIX GET_DATA MODULE SO THAT LOAD CLEAN DOCKLESS CAN JUST CALL FROM THERE
def load_clean_dockless():
    """Load the raw dockless-trips pickle, clean it, and drop `rideable_type`."""
    # TODO: move the raw load into the get_data module.
    raw = pd.read_pickle('../data/wip/raw_dockless.pkl')
    return clean_frame(raw).drop('rideable_type', axis=1)
def load_geo_ebikes():
    """Return the cleaned dockless rides converted to a geo frame."""
    return to_geo(load_clean_dockless())
def load_clean_full():
    """Load and clean the full April-July trips pickle.

    TODO: make the source path configurable so more months can be loaded.
    """
    raw = pd.read_pickle('../data/wip/raw_apr_to_jul_df.pkl')
    return clean_frame(raw)
def geo_longer(df):
    """Reshape a trips frame so each ride contributes two rows.

    The `started_at`/`ended_at` columns are combined into a single `time`
    index, with a `start_end` column marking which end of the trip each
    row represents. The result is sorted by time and is exactly twice as
    long as the input (a good sanity check for callers).
    """
    # Every column except the two timestamps survives the melt unchanged.
    id_cols = [c for c in df.columns if c not in ('started_at', 'ended_at')]
    renamed = df.rename(columns={'started_at': 'start', 'ended_at': 'end'})
    melted = renamed.melt(
        id_vars=id_cols,
        value_vars=['start', 'end'],
        var_name='start_end',
        value_name='time',
    )
    return melted.set_index('time').sort_index()
def load_long_geo():
    """Long-format (one row per trip endpoint) geo ebike frame."""
    return geo_longer(load_geo_ebikes())
def load_long_geo_full():
    """Long-format geo frame intended for the *full* dataset.

    NOTE(review): this body is byte-identical to ``load_long_geo`` -- it
    loads the dockless-only frame, not the full April-July data.
    Presumably it should build on ``load_clean_full`` instead; confirm
    before relying on it (module header says these are deprecated).
    """
    df = load_geo_ebikes()
    long_geo = geo_longer(df)
    return long_geo
def anc_frame(df):
    """Drop the station-name columns, keeping only ANC-level fields."""
    return df.drop(columns=['start_station_name', 'end_station_name'])
def load_long_anc():
    """Long-format frame with station-name columns dropped (ANC only)."""
    return anc_frame(load_long_geo())
# NEEDS WORK!! FIX DOCSTRING!! GENERALIZE TO ANY LOCATION COL (station etc.)
# This is likely uneccesary now that we have a more generalized long df function
def net_gain_loss_anc(ANC_name, df):
"""NEEDS DOCSTRING THIS FUNCTION RETURNS A SERIES (list? np.array?) OF 1 0 -1 VALUES
1 if RIDE ENDED IN ANC 0 IF RIDE DID NOT LEAVE OR END IN ANC -1 IF RIDE LEFT FROM ANC"""
conditions = [
(df['start_end'] == 'start') & (df['ANC_start'] == ANC_name),
(df['start_end'] == 'end') & (df['ANC_end'] == ANC_name),
(df['ANC_start'] != ANC_name) & (df['ANC_end'] != ANC_name),
(df['start_end'] == 'end') & (df['ANC_end'] != ANC_name),
(df['start_end'] == 'start') & (df['ANC_start'] != ANC_name)
]
values = [
-1,
1,
0,
0,
0
]
return np.select(conditions, values)
def plus_minus_anc_frame(df):
    """One column per ANC of net gain/loss values, indexed like ``df``.

    TODO: generalize beyond ANCs and remove the module-level ``gdf``
    dependency.
    """
    # One -1/0/+1 series per unique ANC_ID, all sharing the input's index.
    columns = {anc: net_gain_loss_anc(anc, df) for anc in gdf.ANC_ID}
    return pd.DataFrame(columns, index=df.index)
def load_plus_minus_anc():
    """Plus/minus ANC frame built from the long-format ANC data."""
    return plus_minus_anc_frame(load_long_anc())
| [
"pandas.read_pickle",
"numpy.select",
"cabi.utils.station_anc_dict",
"cabi.get_data.anc_gdf",
"pandas.DataFrame"
] | [((297, 306), 'cabi.get_data.anc_gdf', 'anc_gdf', ([], {}), '()\n', (304, 306), False, 'from cabi.get_data import anc_gdf\n'), ((318, 336), 'cabi.utils.station_anc_dict', 'station_anc_dict', ([], {}), '()\n', (334, 336), False, 'from cabi.utils import which_anc, station_anc_dict\n'), ((533, 579), 'pandas.read_pickle', 'pd.read_pickle', (['"""../data/wip/raw_dockless.pkl"""'], {}), "('../data/wip/raw_dockless.pkl')\n", (547, 579), True, 'import pandas as pd\n'), ((913, 964), 'pandas.read_pickle', 'pd.read_pickle', (['"""../data/wip/raw_apr_to_jul_df.pkl"""'], {}), "('../data/wip/raw_apr_to_jul_df.pkl')\n", (927, 964), True, 'import pandas as pd\n'), ((3279, 3308), 'numpy.select', 'np.select', (['conditions', 'values'], {}), '(conditions, values)\n', (3288, 3308), True, 'import numpy as np\n'), ((3834, 3879), 'pandas.DataFrame', 'pd.DataFrame', (['plus_minus_dict'], {'index': 'df.index'}), '(plus_minus_dict, index=df.index)\n', (3846, 3879), True, 'import pandas as pd\n')] |
"""
Copyright (C) 2022 <NAME>
Released under MIT License. See the file LICENSE for details.
This module describes 2D/3D tracks. GUTS's output is a list of instances
of these classes.
"""
import numpy as np
from filter import filter2D, filter3D
from options import Options, Filter2DParams, Filter3DParams
from position import Position, Position3D
from util import vector_dist, to_aabb, dict_copy, dict_merge, weighted_angle
from activecorners import activecorners
from world import World
# Module-wide counter used to hand out unique track ids (first id is 1).
curr_id = 0

def next_id():
    """Return the next unused track id, advancing the module counter."""
    global curr_id
    curr_id = curr_id + 1
    return curr_id

def reset_id():
    """Restart the id sequence from scratch (ids begin again at 1)."""
    global curr_id
    curr_id = 0
class Track:
    """Common bookkeeping for a tracked road user.

    Subclasses (Track2D/Track3D) own the Kalman filter in ``self.filter``;
    this base class stores the per-frame state history and timestamps,
    flags numerically dead tracks, and prunes static/invalid history in
    :meth:`finalize`.
    """
    def __init__(self, options:Options, class_name:str, world:World,
                 current_time=None, det=None):
        """Initialize shared track state.

        options: tracker configuration (thresholds, filter parameters)
        class_name: object class this track follows
        world: shared scene context
        current_time: creation time in seconds (float), or None
        det: optional initial detection (consumed by Track3D when phi is NaN)
        """
        self.options = options
        self.type = None # either "2D" or "3D", set by the subclass
        self.history = dict()  # frame number -> copy of the filter state vector
        self.times = dict() # total amount of seconds as a float, for each frame
        self.class_name = class_name
        self.world = world
        self.last_updated = current_time
        self.last_updated_frame = None
        self.id = next_id()
        self.should_die = False  # set when the filter goes numerically unstable
    def is_numerically_stable(self):
        """Return False when the filter state has exploded or contains NaN."""
        # Bad numerics can sometimes make Kalman numbers grow very large or NaN
        # We are not interested in such tracks
        c1 = np.any(np.abs(self.filter.x) > 1e8)
        c2 = np.any(np.isnan(self.filter.x))
        return (not c1) and (not c2)
    def finalize(self):
        """Trim history to valid frames; empty it for tracks that never move.

        Keeps only frames up to the last update, removes NaN states, then
        clears the whole history unless the track travelled further than
        ``options.significant_motion_distance`` from its first position for
        more than 8 consecutive frames (otherwise it is treated as noise).
        """
        if self.last_updated_frame is None:
            self.history = {}
        else:
            self.history = {key:val for (key,val) in self.history.items() if
                            key <= self.last_updated_frame}
        self.history = {key:val for (key,val) in self.history.items() if
                        not np.any(np.isnan(val))}
        # Remove the track if it never moves significantly
        has_significant_motion = False
        has_significant_motion_counter = 0
        first_pos = None
        prev_frame = None
        for frame_no, x_vec in self.history.items():
            pos = x_vec[0:2]  # (x, y) leads the state vector in both 2D and 3D
            if first_pos is None:
                first_pos = pos
            else:
                # history is insertion-ordered by frame number, so frames must increase
                assert frame_no > prev_frame
                dist = vector_dist(pos, first_pos)
                if dist > self.options.significant_motion_distance:
                    has_significant_motion_counter += 1
                    if has_significant_motion_counter > 8:
                        has_significant_motion = True
                        break
                else:
                    # distance dipped back under the threshold: restart the streak
                    has_significant_motion_counter = 0
            prev_frame = frame_no
        if not has_significant_motion:
            self.history = dict()
class Track2D(Track):
    """Image-plane (pixel) track around an axis-aligned bounding box.

    State is the AABB center and size; once the box has moved enough
    (:meth:`saom`), the track is promoted to a :class:`Track3D` via
    active corners (:meth:`to3D`).
    """
    def __init__(self, pos:Position, **kwargs):
        """Create a 2D track centered on the detection's AABB.

        pos: initial detection; its ``aabb`` seeds the filter state.
        kwargs: forwarded to ``Track.__init__`` (options, class_name, ...).
        Raises ValueError when 2D tracks are disabled in the options.
        """
        super().__init__(**kwargs)
        self.type = '2D'
        p:Filter2DParams = kwargs['options'].params2D
        x1, y1, x2, y2 = pos.aabb
        x = (x1+x2)/2
        y = (y1+y2)/2
        # Filter state: box center [x, y] and box size [w, h]
        self.filter = filter2D([x, y], [x2-x1, y2-y1],
                               P_factor=p.P_factor, Q_c=p.Q_c, Q_s=p.Q_s,
                               Q_v=p.Q_v, Q_ds=p.Q_ds, Q_a=p.Q_a, Q_cov=p.Q_cov,
                               Q_scov=p.Q_scov, R_c=p.R_c, R_s=p.R_s)
        if not self.options.tracks2D:
            raise ValueError("Tried to create a 2D track when not allowed")
    def store_history(self, frame_no:int, time:float):
        """Snapshot the current filter state under ``frame_no``.

        Raises ValueError on a duplicate frame number.
        """
        if frame_no in self.history:
            raise ValueError(f"Frame number {frame_no} already exists!!")
        self.history[frame_no] = self.filter.x.copy()
        self.times[frame_no] = time
    def predict(self):
        """Advance the filter one step; mark the track dead on bad numerics."""
        self.filter.predict()
        if not self.is_numerically_stable():
            self.should_die = True
    def get_x(self):
        """Return the current filter state vector."""
        return self.filter.x
    def update(self, det:Position, dt:float, frame_no:int, current_time:float):
        """Fuse a new detection's AABB (as center/size) into the filter."""
        x1, y1, x2, y2 = det.aabb
        w = x2-x1
        h = y2-y1
        x = (x1+x2)/2
        y = (y1+y2)/2
        z = np.array([x, y, w, h], dtype=np.float32)
        self.filter.update(z, dt)
        assert current_time > self.last_updated
        self.last_updated = current_time
        self.last_updated_frame = frame_no
    # Determine if track has sufficient amount of movement to be converted to a
    # 3D track instead
    def saom(self, current_time:float):
        """Sufficient-Amount-Of-Motion test for 2D->3D promotion.

        True when the box has travelled further than ``saom_thresh`` times
        its typical size since the first recorded frame, AND the track was
        updated within roughly the last two frames.
        """
        # 2D tracks need to have been recently updated for SAOM to trigger
        # otherwise drifting nonsense tracks become 3D tracks
        max_time = 2.01*(1.0/self.options.frame_rate)
        if self.history and (current_time-self.last_updated)<=max_time:
            first = min(self.history.keys())
            xf, yf, wf, hf = self.history[first][0:4]
            xn, yn, wn, hn = self.filter.x[0:4]
            typical_size = np.mean([wf, hf, wn, hn])
            dist = vector_dist([xf, yf], [xn, yn])
            ratio = dist/typical_size
            if ratio > self.options.saom_thresh:
                return True
        return False
    # Convert to 3D track
    def to3D(self, current_time:float):
        """Promote this track to 3D using active corners on first vs. now.

        Returns a new ``Track3D`` (with this track's id) on success, or
        ``self`` when active corners cannot produce a 3D estimate yet.
        """
        first = min(self.history.keys())
        dt = current_time - self.times[first]
        assert dt > 0
        xf, yf, wf, hf = self.history[first][0:4]
        xn, yn, wn, hn = self.filter.x[0:4]
        aabb_first = to_aabb(xf, yf, wf, hf)
        aabb_now = to_aabb(xn, yn, wn, hn)
        pos_first = Position(aabb=aabb_first, class_name=self.class_name)
        pos_now = Position(aabb=aabb_now, class_name=self.class_name)
        out = activecorners(pos1=pos_first, pos2=pos_now,
                            class_name=self.class_name,
                            world=self.world, dt=dt)
        if out is None:
            # Conversion to 3D failed, try again later
            return self
        else:
            X, Y, l, w, h, v, phi = out
            pos3D=np.array([X, Y], dtype=np.float32)
            shape=np.array([l, w, h], dtype=np.float32)
            new_track = Track3D(pos3D, shape, phi, v,
                                world=self.world, class_name=self.class_name,
                                options=self.options, current_time=current_time,
                                aabb_history=dict_copy(self.history),
                                old_times=self.times)
            # Same ID to clearly mark that this 3D track inherits from 2D track
            # Unintended side effect is that the track counter is increased
            new_track.id = self.id
            return new_track
class Track3D(Track):
    """World-coordinate track with position, shape, heading and speed.

    State vector (see :meth:`vector_for_scoring`) starts with
    ``x, y, l, w, phi``. The filter may be deferred (``self.filter is
    None``) when heading ``phi`` is not yet known; it is created on the
    first update that establishes a direction of travel.
    """
    def __init__(self, pos3D:np.ndarray, shape:np.ndarray, phi:float,
                 v:float, aabb_history:dict, old_times:dict, **kwargs):
        """Create a 3D track, possibly deferring filter creation.

        pos3D: initial (x, y) world position (extra components ignored)
        shape: (l, w, h); height is tracked separately outside the filter
        phi: heading in radians; None or NaN when unknown (see below)
        v: initial speed estimate
        aabb_history: image-plane AABB history inherited from a 2D track
        old_times: frame -> time mapping to merge into this track's times
        """
        super().__init__(**kwargs)
        self.type = '3D'
        self.tau = 1.0 / kwargs['world'].frame_rate  # seconds per frame
        self.options = kwargs['options']
        self.height = shape[-1]  # smoothed outside the filter (see update)
        self.aabb_history = aabb_history
        self.times = dict_merge(self.times, old_times)
        self.previous_detection = None
        self.old_phi = None
        if phi is None:
            # If the road user is standing still, we still want to let
            # activecorners work (or do we?)
            self.init_filter(pos3D, shape, phi, v, self.tau)
        elif np.isnan(phi):
            # If we don't have phi yet, wait to create filter until we do
            # which should happen at next update
            # For now, just store the position which we'll need to compute phi
            # This is only done in GUTS, active corners should never output NaN
            self.filter = None
            self.previous_detection = kwargs['det']
        else:
            self.init_filter(pos3D, shape, phi, v, self.tau)
    def __repr__(self):
        """Compact debug form: class, id and the spanned frame range."""
        frames = list(self.history.keys())
        if frames:
            frames.sort()
            start = frames[0]
            stop = frames[-1]
        else:
            start = '?'
            stop = '?'
        return f"Track3D {self.class_name} {self.id}, {start}-{stop}"
    def init_filter(self, pos3D, shape, phi, v, tau):
        """Instantiate the 3D Kalman filter from the configured parameters."""
        p:Filter3DParams = self.options.params3D
        self.filter = filter3D(pos3D[0:2], shape[0:2], phi, v, tau=tau,
                               kappa=p.kappa, P_factor=p.P_factor,
                               Q_c=p.Q_c, Q_s=p.Q_s, Q_phi=p.Q_phi, Q_v=p.Q_v,
                               Q_omega=p.Q_omega, Q_cov=p.Q_cov,
                               R_c=p.R_c, R_s=p.R_s, R_phi=p.R_phi,
                               min_v_for_rotate=self.options.min_v_for_rotate)
    def store_history(self, frame_no:int, time:float):
        """Snapshot the filter state; no-op while the filter is deferred.

        Raises ValueError on a duplicate frame number.
        """
        if self.filter is None:
            return
        if frame_no in self.history:
            raise ValueError(f"Frame number {frame_no} already exists!!")
        self.history[frame_no] = self.filter.x.copy()
        self.times[frame_no] = time
    def predict(self):
        """Advance the filter one step (no-op while deferred); flag bad numerics."""
        if self.filter is None:
            return
        self.filter.predict()
        if not self.is_numerically_stable():
            self.should_die = True
    def get_x(self):
        """Current state vector; synthesized (with NaN phi) while deferred."""
        if self.filter is None:
            x = np.array([*self.previous_detection.pos3D.flatten()[0:2],
                          *self.previous_detection.shape[0:2], float("nan")],
                         dtype=np.float32)
            return x
        else:
            return self.filter.x
    def vector_for_scoring(self, frame_no:int):
        """Return the scoring slice (x, y, l, w, phi) of a stored state."""
        X = self.history[frame_no]
        # Scoring vector should be x, y, l, w, phi
        return X[0:5]
    def suitable_previous_aabb_time(self, current_time:float):
        """Pick a past AABB frame to compare against for active corners.

        Returns ``(frame_no, age_in_seconds)``: the oldest frame while the
        history is short, otherwise the 5th-most-recent one.
        """
        good_number_of_frames = 5
        l = len(self.aabb_history)
        if l <= good_number_of_frames:
            frame_no = min(self.aabb_history.keys())
            return frame_no, current_time-self.times[frame_no]
        else:
            frame_nos = list(self.aabb_history.keys())
            frame_nos.sort()
            # Hopefully not too distant and also not too recent..?
            frame_no = frame_nos[-good_number_of_frames]
            return frame_no, current_time-self.times[frame_no]
    def update(self, det, dt:float, frame_no:int, current_time:float):
        """Fuse a new detection into the track.

        Two detection kinds are handled:
        - ``Position3D``: heading is derived from displacement since the
          previous detection (optionally angle-smoothed); the filter is
          created here if it was deferred. No filter update happens when
          the displacement is below ``min_dist_for_phi``.
        - ``Position`` (2D AABB): active corners against a suitable past
          AABB produce a 3D measurement; missing components fall back to
          the current filter state / smoothed height. The new AABB is
          always appended to ``aabb_history``.
        Raises ValueError for any other detection type.
        """
        assert current_time >= self.last_updated
        self.last_updated = current_time
        self.last_updated_frame = frame_no
        if isinstance(det, Position3D):
            X, Y = det.pos3D[0:2]
            x, y = self.previous_detection.pos3D[0:2]
            dist = vector_dist([X,Y], [x,y])
            if dist > self.options.min_dist_for_phi:
                phi = np.arctan2(Y-y, X-x)
                factor = self.options.phi_smoothing_factor
                if factor > 0.0 and (self.old_phi is not None):
                    phi = weighted_angle(self.old_phi, phi, factor)
                if self.filter is None:
                    # First usable heading: create the deferred filter now
                    v = dist/self.tau
                    self.init_filter(det.pos3D, det.shape, phi, v, self.tau)
                else:
                    z = np.array([*det.pos3D[0:2], *det.shape[0:2], phi],
                                 dtype=np.float32)
                    self.filter.update(z)
                self.old_phi = phi
        elif isinstance(det, Position):
            before, before_dt = self.suitable_previous_aabb_time(current_time)
            xb, yb, wb, hb = self.aabb_history[before][0:4]
            aabb_before = to_aabb(xb, yb, wb, hb)
            pos_before = Position(aabb=aabb_before, class_name=self.class_name)
            out = activecorners(pos_before, det,
                                self.class_name, self.world,
                                before_dt)
            if out is None:
                # Don't update the filter if active corners fail!
                pass
            else:
                X, Y, l, w, h, v, phi = out
                if l is None or w is None:
                    l, w = self.filter.x[2:4]
                if h is None:
                    h = self.height
                if phi is None:
                    phi = self.filter.x[4]
                z = np.array([X, Y, l, w, phi], dtype=np.float32).flatten()
                self.filter.update(z)
                # Gradually update the height
                self.height = 0.9 * self.height + 0.1 * h
            # Store new AABB in AABB history, because this isn't done elsewhere
            x1, y1, x2, y2 = det.aabb
            xn = (x1+x2)/2.0
            yn = (y1+y2)/2.0
            wn = x2-x1
            hn = y2-y1
            to_be_stored = np.array([xn, yn, wn, hn], dtype=np.float32)
            self.aabb_history[frame_no] = to_be_stored
        else:
            raise ValueError(f"Detection was of unknown type {type(det)}")
        self.previous_detection = det
| [
"numpy.mean",
"numpy.abs",
"util.dict_merge",
"util.to_aabb",
"filter.filter2D",
"util.weighted_angle",
"filter.filter3D",
"numpy.array",
"util.dict_copy",
"numpy.isnan",
"numpy.arctan2",
"activecorners.activecorners",
"position.Position",
"util.vector_dist"
] | [((3262, 3439), 'filter.filter2D', 'filter2D', (['[x, y]', '[x2 - x1, y2 - y1]'], {'P_factor': 'p.P_factor', 'Q_c': 'p.Q_c', 'Q_s': 'p.Q_s', 'Q_v': 'p.Q_v', 'Q_ds': 'p.Q_ds', 'Q_a': 'p.Q_a', 'Q_cov': 'p.Q_cov', 'Q_scov': 'p.Q_scov', 'R_c': 'p.R_c', 'R_s': 'p.R_s'}), '([x, y], [x2 - x1, y2 - y1], P_factor=p.P_factor, Q_c=p.Q_c, Q_s=p.\n Q_s, Q_v=p.Q_v, Q_ds=p.Q_ds, Q_a=p.Q_a, Q_cov=p.Q_cov, Q_scov=p.Q_scov,\n R_c=p.R_c, R_s=p.R_s)\n', (3270, 3439), False, 'from filter import filter2D, filter3D\n'), ((4304, 4344), 'numpy.array', 'np.array', (['[x, y, w, h]'], {'dtype': 'np.float32'}), '([x, y, w, h], dtype=np.float32)\n', (4312, 4344), True, 'import numpy as np\n'), ((5616, 5639), 'util.to_aabb', 'to_aabb', (['xf', 'yf', 'wf', 'hf'], {}), '(xf, yf, wf, hf)\n', (5623, 5639), False, 'from util import vector_dist, to_aabb, dict_copy, dict_merge, weighted_angle\n'), ((5659, 5682), 'util.to_aabb', 'to_aabb', (['xn', 'yn', 'wn', 'hn'], {}), '(xn, yn, wn, hn)\n', (5666, 5682), False, 'from util import vector_dist, to_aabb, dict_copy, dict_merge, weighted_angle\n'), ((5703, 5756), 'position.Position', 'Position', ([], {'aabb': 'aabb_first', 'class_name': 'self.class_name'}), '(aabb=aabb_first, class_name=self.class_name)\n', (5711, 5756), False, 'from position import Position, Position3D\n'), ((5775, 5826), 'position.Position', 'Position', ([], {'aabb': 'aabb_now', 'class_name': 'self.class_name'}), '(aabb=aabb_now, class_name=self.class_name)\n', (5783, 5826), False, 'from position import Position, Position3D\n'), ((5842, 5942), 'activecorners.activecorners', 'activecorners', ([], {'pos1': 'pos_first', 'pos2': 'pos_now', 'class_name': 'self.class_name', 'world': 'self.world', 'dt': 'dt'}), '(pos1=pos_first, pos2=pos_now, class_name=self.class_name,\n world=self.world, dt=dt)\n', (5855, 5942), False, 'from activecorners import activecorners\n'), ((7271, 7304), 'util.dict_merge', 'dict_merge', (['self.times', 'old_times'], {}), '(self.times, old_times)\n', (7281, 7304), 
False, 'from util import vector_dist, to_aabb, dict_copy, dict_merge, weighted_angle\n'), ((8492, 8758), 'filter.filter3D', 'filter3D', (['pos3D[0:2]', 'shape[0:2]', 'phi', 'v'], {'tau': 'tau', 'kappa': 'p.kappa', 'P_factor': 'p.P_factor', 'Q_c': 'p.Q_c', 'Q_s': 'p.Q_s', 'Q_phi': 'p.Q_phi', 'Q_v': 'p.Q_v', 'Q_omega': 'p.Q_omega', 'Q_cov': 'p.Q_cov', 'R_c': 'p.R_c', 'R_s': 'p.R_s', 'R_phi': 'p.R_phi', 'min_v_for_rotate': 'self.options.min_v_for_rotate'}), '(pos3D[0:2], shape[0:2], phi, v, tau=tau, kappa=p.kappa, P_factor=p\n .P_factor, Q_c=p.Q_c, Q_s=p.Q_s, Q_phi=p.Q_phi, Q_v=p.Q_v, Q_omega=p.\n Q_omega, Q_cov=p.Q_cov, R_c=p.R_c, R_s=p.R_s, R_phi=p.R_phi,\n min_v_for_rotate=self.options.min_v_for_rotate)\n', (8500, 8758), False, 'from filter import filter2D, filter3D\n'), ((1411, 1434), 'numpy.isnan', 'np.isnan', (['self.filter.x'], {}), '(self.filter.x)\n', (1419, 1434), True, 'import numpy as np\n'), ((5095, 5120), 'numpy.mean', 'np.mean', (['[wf, hf, wn, hn]'], {}), '([wf, hf, wn, hn])\n', (5102, 5120), True, 'import numpy as np\n'), ((5140, 5171), 'util.vector_dist', 'vector_dist', (['[xf, yf]', '[xn, yn]'], {}), '([xf, yf], [xn, yn])\n', (5151, 5171), False, 'from util import vector_dist, to_aabb, dict_copy, dict_merge, weighted_angle\n'), ((6184, 6218), 'numpy.array', 'np.array', (['[X, Y]'], {'dtype': 'np.float32'}), '([X, Y], dtype=np.float32)\n', (6192, 6218), True, 'import numpy as np\n'), ((6237, 6274), 'numpy.array', 'np.array', (['[l, w, h]'], {'dtype': 'np.float32'}), '([l, w, h], dtype=np.float32)\n', (6245, 6274), True, 'import numpy as np\n'), ((7598, 7611), 'numpy.isnan', 'np.isnan', (['phi'], {}), '(phi)\n', (7606, 7611), True, 'import numpy as np\n'), ((10833, 10860), 'util.vector_dist', 'vector_dist', (['[X, Y]', '[x, y]'], {}), '([X, Y], [x, y])\n', (10844, 10860), False, 'from util import vector_dist, to_aabb, dict_copy, dict_merge, weighted_angle\n'), ((1362, 1383), 'numpy.abs', 'np.abs', (['self.filter.x'], {}), '(self.filter.x)\n', (1368, 
1383), True, 'import numpy as np\n'), ((10934, 10958), 'numpy.arctan2', 'np.arctan2', (['(Y - y)', '(X - x)'], {}), '(Y - y, X - x)\n', (10944, 10958), True, 'import numpy as np\n'), ((11751, 11774), 'util.to_aabb', 'to_aabb', (['xb', 'yb', 'wb', 'hb'], {}), '(xb, yb, wb, hb)\n', (11758, 11774), False, 'from util import vector_dist, to_aabb, dict_copy, dict_merge, weighted_angle\n'), ((11800, 11854), 'position.Position', 'Position', ([], {'aabb': 'aabb_before', 'class_name': 'self.class_name'}), '(aabb=aabb_before, class_name=self.class_name)\n', (11808, 11854), False, 'from position import Position, Position3D\n'), ((11873, 11943), 'activecorners.activecorners', 'activecorners', (['pos_before', 'det', 'self.class_name', 'self.world', 'before_dt'], {}), '(pos_before, det, self.class_name, self.world, before_dt)\n', (11886, 11943), False, 'from activecorners import activecorners\n'), ((12922, 12966), 'numpy.array', 'np.array', (['[xn, yn, wn, hn]'], {'dtype': 'np.float32'}), '([xn, yn, wn, hn], dtype=np.float32)\n', (12930, 12966), True, 'import numpy as np\n'), ((2358, 2385), 'util.vector_dist', 'vector_dist', (['pos', 'first_pos'], {}), '(pos, first_pos)\n', (2369, 2385), False, 'from util import vector_dist, to_aabb, dict_copy, dict_merge, weighted_angle\n'), ((6536, 6559), 'util.dict_copy', 'dict_copy', (['self.history'], {}), '(self.history)\n', (6545, 6559), False, 'from util import vector_dist, to_aabb, dict_copy, dict_merge, weighted_angle\n'), ((11121, 11162), 'util.weighted_angle', 'weighted_angle', (['self.old_phi', 'phi', 'factor'], {}), '(self.old_phi, phi, factor)\n', (11135, 11162), False, 'from util import vector_dist, to_aabb, dict_copy, dict_merge, weighted_angle\n'), ((11365, 11432), 'numpy.array', 'np.array', (['[*det.pos3D[0:2], *det.shape[0:2], phi]'], {'dtype': 'np.float32'}), '([*det.pos3D[0:2], *det.shape[0:2], phi], dtype=np.float32)\n', (11373, 11432), True, 'import numpy as np\n'), ((1860, 1873), 'numpy.isnan', 'np.isnan', (['val'], {}), 
'(val)\n', (1868, 1873), True, 'import numpy as np\n'), ((12439, 12484), 'numpy.array', 'np.array', (['[X, Y, l, w, phi]'], {'dtype': 'np.float32'}), '([X, Y, l, w, phi], dtype=np.float32)\n', (12447, 12484), True, 'import numpy as np\n')] |
# -*- coding:utf-8 -*-
# 1. Import extensions
from flask import Flask
from flask_restful import Api
import config
from app.api.view.auth import wx_login
from app.api.view.talk import Reply
# 2. Create the Flask application instance; __name__ locates the resource root
app = Flask(__name__)
app.config.from_object(config.DevelopmentConfig)
api = Api(app)
# 3. Define global variables (none yet)
# 4. Define routes and view functions
# RESTful API endpoints backed by class-based views
app.add_url_rule('/auth/wxlogin', view_func=wx_login.as_view('wxlogin'))
app.add_url_rule('/reply', view_func=Reply.as_view('reply'))
# 5. Start the development server when run directly
if __name__ == '__main__':
    app.run(debug=True)
| [
"app.api.view.auth.wx_login.as_view",
"flask_restful.Api",
"app.api.view.talk.Reply.as_view",
"flask.Flask"
] | [((218, 233), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (223, 233), False, 'from flask import Flask\n'), ((289, 297), 'flask_restful.Api', 'Api', (['app'], {}), '(app)\n', (292, 297), False, 'from flask_restful import Api\n'), ((384, 411), 'app.api.view.auth.wx_login.as_view', 'wx_login.as_view', (['"""wxlogin"""'], {}), "('wxlogin')\n", (400, 411), False, 'from app.api.view.auth import wx_login\n'), ((450, 472), 'app.api.view.talk.Reply.as_view', 'Reply.as_view', (['"""reply"""'], {}), "('reply')\n", (463, 472), False, 'from app.api.view.talk import Reply\n')] |
import numpy.random as rand
import numpy as np
import pandas as pd
import random
import math
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.animation import FuncAnimation
from Particle import Particle
# Initialization of the plots: one shared figure reused by the animation.
fig = plt.figure(figsize=(20,10))
axes = [None, None, None]  # [function plot, MSE plot, best-particle plot]; filled in PSO.__init__
def generate_random_particle(_id, input_size, neurons):
    """Create a particle with a uniform random position and zero speed.

    The particle encodes every ANN weight plus, for each neuron, a bias
    and an activation-function selector (hence the ``2 * n_neurons``
    extra values). Positions are drawn uniformly from [-1, 1).
    """
    n_neurons = sum(neurons)
    # Weight count: products of consecutive layer sizes, input layer included.
    layer_sizes = [input_size] + list(neurons)
    n_weights = sum(a * b for a, b in zip(layer_sizes, layer_sizes[1:]))
    total_n_values = n_weights + 2 * n_neurons
    position = 2 * rand.random_sample(total_n_values) - 1
    speed = np.zeros(total_n_values)
    return Particle(_id, position, speed, n_weights, n_neurons)
class PSO:
    """Particle Swarm Optimization trainer for an ANN, with live plotting.

    Each particle's position encodes the ANN weights/biases/activation
    selectors; fitness is the mean squared error over a test set loaded
    from ``test_set_path``. Results are drawn into the module-level
    ``fig``/``axes`` via a matplotlib FuncAnimation.
    """
    def __init__(self, swarm_size, n_informants, alpha_max, alpha_min, beta, gamma, delta, epsilon, ann, max_iterations, test_set_path, input_size):
        """Build the swarm, load the test set, and set up the plot axes.

        alpha_max/alpha_min: inertia weight bounds (linearly decayed)
        beta/gamma/delta: cognitive / social (informants) / global pulls
        epsilon: position update step factor
        ann: network whose weights the particles encode
        input_size: 1 for f(x) data, otherwise f(x1, x2) data
        """
        axes[1] = fig.add_subplot(132)
        axes[2] = fig.add_subplot(133)
        self.swarm_size = swarm_size
        self.alpha_max = alpha_max
        self.alpha_min = alpha_min
        self.beta = beta
        self.gamma = gamma
        self.delta = delta
        self.epsilon = epsilon
        # NOTE(review): `id` as the comprehension variable shadows the builtin.
        self.swarm = [generate_random_particle(id, input_size, ann.neurons) for id in range(swarm_size)] # init swarm
        self.best = None
        self.best_fitness = 1000 #initialise the error to an high value
        self.ann = ann
        self.max_iterations = max_iterations
        self.input_size = input_size
        self.n_informants = n_informants
        # Setup the dataset structure to expect and the function plots based on the input size
        if input_size == 1:
            columns = ['x', 'y']
            axes[0] = fig.add_subplot(131)
        else:
            columns = ['x1', 'x2', 'y']
            axes[0] = fig.add_subplot(131, projection='3d')
        # Whitespace-separated text file; regex sep forces the python engine.
        self.test_set = pd.read_csv(test_set_path, sep='\s+|\t+|\s+\t+|\t+\s+', header=None, names=columns, engine='python')
        #init arrays used to plot the results during the execution
        self.error = []
        self.steps = []
        self.best_record = []
        #assign informants to each particle
        for p in self.swarm:
            p.select_informants(self.swarm, self.n_informants)
    def execute(self):
        """Run the optimization, animating one PSO step per frame."""
        anim = FuncAnimation(fig, self.step, frames=self.max_iterations, repeat=False)
        plt.show()
    def step(self, i):
        """Animation callback: run one PSO step, then redraw the plots."""
        self.pso_step(i+1)
        self.plot_result()
    def pso_step(self, i):
        """One PSO iteration: assess fitness, then update every particle.

        Inertia decays linearly from alpha_max to alpha_min over the run;
        each velocity component mixes the particle's own best, its
        informants' best, and the swarm best.
        """
        for particle in self.swarm:
            self.assess_fitness(particle)
            if self.best is None or particle.fitness < self.best_fitness:
                self.best = particle
                self.best_fitness = particle.fitness
                self.best_fitness_position = particle.best_fitness_position
        x_swarm = self.best_fitness_position
        for particle in self.swarm:
            new_speed = np.zeros(particle.speed.shape)
            x_fit = particle.best_fitness_position
            x_inf = particle.get_previous_fittest_of_informants()
            for l in range(len(particle.position)):
                # Linearly decaying inertia weight
                a = (self.alpha_max - self.alpha_min) * ((self.max_iterations - i) / self.max_iterations) + self.alpha_min
                b = random.uniform(0, self.beta)
                c = random.uniform(0, self.gamma)
                d = random.uniform(0, self.delta)
                new_speed[l] = a * particle.speed[l] + b * (x_fit[l] - particle.position[l]) + c * (x_inf[l] - particle.position[l]) + d * (x_swarm[l] - particle.position[l])
            particle.speed = new_speed
            particle.update_position(self.epsilon)
        self.steps.append(i)
        self.error.append(self.best_fitness)
        self.best_record.append(self.best.id)
        print("{} | Best fitness so far: {}".format(i, self.best_fitness))
    def assess_fitness(self, particle):
        """Score a particle as the MSE of its ANN over the test set.

        Also records the particle's personal best (fitness, position, and
        the predicted curve used for plotting) when it improves.
        """
        graph = []
        old_fitness = particle.best_fitness
        self.ann.set_values(particle.position)
        mse = 0
        n = len(self.test_set)
        for _, row in self.test_set.iterrows():
            if self.input_size == 1:
                x_i = [row[0]]
                d = row[1]
            else:
                x_i = [row[0], row[1]]
                d = row[2]
            u = self.ann.process(x_i)
            graph.append(u)
            mse_i = (d - u) ** 2
            mse = mse + mse_i
        particle.fitness = mse / n
        if (particle.fitness < old_fitness):
            particle.best_fitness_graph = graph
            particle.best_fitness = particle.fitness
            particle.best_fitness_position = particle.position
    def plot_result(self):
        """Redraw the three panels: fit vs. target, MSE curve, best particle id."""
        #clear the figure from previous step's results
        axes[0].clear()
        axes[1].clear()
        axes[2].clear()
        #Reconstruct the cleared plots
        axes[0].title.set_text('Functions')
        axes[1].title.set_text('MSE')
        axes[1].set_xlabel('Number of iterations')
        axes[1].set_ylabel('Mean Squared Error')
        axes[2].title.set_text('Best Particle')
        axes[2].set_xlabel('Number of iterations')
        axes[2].set_ylabel('Best Particle ID')
        #plot the results in a different manner depending on the input size
        if self.input_size == 1:
            x = self.test_set['x']
            y = self.test_set['y']
            g = self.best.best_fitness_graph
            axes[0].plot(x,g, label='Approximated Function')
            axes[0].plot(x,y, label='Desidered Function')
            axes[0].legend()
        else:
            x1 = self.test_set['x1']
            x2 = self.test_set['x2']
            y = self.test_set['y']
            g = self.best.best_fitness_graph
            axes[0].scatter(x1, x2, y, label='Desidered Function')
            axes[0].scatter(x1, x2, g, label='Approximated Function')
            axes[0].legend()
        #plot error
        axes[1].set_ylim([0, 0.1])
        axes[1].plot(self.steps, self.error)
        #plot the fittest particle
        axes[2].plot(self.steps, self.best_record)
        axes[2].set_ylim([0, self.swarm_size])
| [
"random.uniform",
"numpy.random.random_sample",
"pandas.read_csv",
"Particle.Particle",
"matplotlib.animation.FuncAnimation",
"matplotlib.pyplot.figure",
"numpy.zeros",
"matplotlib.pyplot.show"
] | [((280, 308), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(20, 10)'}), '(figsize=(20, 10))\n', (290, 308), True, 'import matplotlib.pyplot as plt\n'), ((926, 950), 'numpy.zeros', 'np.zeros', (['total_n_values'], {}), '(total_n_values)\n', (934, 950), True, 'import numpy as np\n'), ((962, 1014), 'Particle.Particle', 'Particle', (['_id', 'position', 'speed', 'n_weights', 'n_neurons'], {}), '(_id, position, speed, n_weights, n_neurons)\n', (970, 1014), False, 'from Particle import Particle\n'), ((2232, 2339), 'pandas.read_csv', 'pd.read_csv', (['test_set_path'], {'sep': '"""\\\\s+|\t+|\\\\s+\t+|\t+\\\\s+"""', 'header': 'None', 'names': 'columns', 'engine': '"""python"""'}), "(test_set_path, sep='\\\\s+|\\t+|\\\\s+\\t+|\\t+\\\\s+', header=None,\n names=columns, engine='python')\n", (2243, 2339), True, 'import pandas as pd\n'), ((2712, 2783), 'matplotlib.animation.FuncAnimation', 'FuncAnimation', (['fig', 'self.step'], {'frames': 'self.max_iterations', 'repeat': '(False)'}), '(fig, self.step, frames=self.max_iterations, repeat=False)\n', (2725, 2783), False, 'from matplotlib.animation import FuncAnimation\n'), ((2792, 2802), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2800, 2802), True, 'import matplotlib.pyplot as plt\n'), ((875, 909), 'numpy.random.random_sample', 'rand.random_sample', (['total_n_values'], {}), '(total_n_values)\n', (893, 909), True, 'import numpy.random as rand\n'), ((3544, 3574), 'numpy.zeros', 'np.zeros', (['particle.speed.shape'], {}), '(particle.speed.shape)\n', (3552, 3574), True, 'import numpy as np\n'), ((3887, 3915), 'random.uniform', 'random.uniform', (['(0)', 'self.beta'], {}), '(0, self.beta)\n', (3901, 3915), False, 'import random\n'), ((3936, 3965), 'random.uniform', 'random.uniform', (['(0)', 'self.gamma'], {}), '(0, self.gamma)\n', (3950, 3965), False, 'import random\n'), ((3986, 4015), 'random.uniform', 'random.uniform', (['(0)', 'self.delta'], {}), '(0, self.delta)\n', (4000, 4015), False, 'import 
random\n')] |
#ASAPY
import requests
__URL_GLOBAL = "https://www.urionlinejudge.com.br";
def printme(pagina):
    """Fetch a URI Online Judge problem page and print its iframe body.

    pagina: problem number as a string (e.g. "2166").
    """
    body = getCorpo(__URL_GLOBAL + "/judge/pt/problems/view/" + pagina)
    start = find_str(body, "<iframe")
    tail = body[start:]
    tag = tail[:find_str(tail, ">") + 1]
    frame_url = getAttr(tag, "src")
    print(getCorpo(__URL_GLOBAL + frame_url))
def find_str(s, char):
    """Return the index of the first occurrence of ``char`` in ``s``, or -1.

    Thin wrapper around ``str.find``, replacing the original hand-rolled
    scan. Unlike the old version, an empty needle returns 0 instead of
    raising IndexError.
    """
    return s.find(char)
# TODO - handle syntax variants (single quotes, spaces around '=')
def getAttr(tupla, atributo):
    """Extract the value of ``atributo="..."`` from an HTML tag string.

    Returns the text between the double quotes, or "ERRO" when the
    attribute name does not occur in ``tupla``. An unterminated value
    yields an empty string, matching the original behavior.
    """
    at = tupla.find(atributo)
    if at == -1:
        # BUG FIX: the old code added the offset before testing for -1,
        # so the "ERRO" branch was unreachable and garbage was returned.
        return "ERRO"
    inicio = at + len(atributo) + 2  # skip past 'atributo="'
    fim = tupla[inicio:].find('"')   # -1 when unterminated -> empty slice below
    return tupla[inicio:inicio + fim]
def getCorpo(req):
    """GET the given URL and return the raw response body as a string."""
    return str(requests.get(req).content)
# NOTE(review): module-level side effect (network fetch + print on import);
# consider moving under an `if __name__ == '__main__':` guard.
printme("2166")
#print("titulo => URI Online Judge - Problema 2166 - Raiz Quadrada de 2")
#print("autor => <NAME>, UNILA")
#print("probm => ma das formas de calcular a raiz quadrada de um n\xc3\xbamero natural") | [
"requests.get"
] | [((1048, 1065), 'requests.get', 'requests.get', (['req'], {}), '(req)\n', (1060, 1065), False, 'import requests\n')] |
from direct.directnotify import DirectNotifyGlobal
from direct.distributed.DistributedObjectAI import DistributedObjectAI
from direct.distributed.ClockDelta import *
import ButterflyGlobals
import random
class DistributedButterflyAI(DistributedObjectAI):
    """Server-side butterfly that alternates between flying and landing.

    ``generate`` kicks off the cycle: fly along a route, land for a random
    time up to ``ButterflyGlobals.MAX_LANDED_TIME``, then fly to the next
    point, rescheduling itself via ``taskMgr`` each time. The state
    (``setState``) and location (``setArea``) are replicated to clients
    through ``sendUpdate``.
    """

    notify = DirectNotifyGlobal.directNotify.newCategory("DistributedButterflyAI")

    def __init__(self, air):
        DistributedObjectAI.__init__(self, air)
        # Location of this butterfly within the world.
        self.area = 0
        self.playground = 0
        # Replicated state: FSM state plus flight endpoints and timing.
        self.stateIndex = 0
        self.curIndex = 0
        self.destIndex = 0
        self.time = 0
        self.timestamp = 0

    def generate(self):
        """Pick a first route and start the fly/land cycle."""
        ButterflyGlobals.generateIndexes(self.doId, self.playground)
        route = ButterflyGlobals.getFirstRoute(self.playground, self.area, self.doId)
        self.b_setState(ButterflyGlobals.FLYING, route[1], route[3], route[4], globalClockDelta.getRealNetworkTime())
        taskMgr.doMethodLater(route[4], self.__land, 'landButterfly%i' % self.doId, [])

    def __land(self):
        """Land for a random interval, then schedule the next flight."""
        ttl = random.uniform(0, ButterflyGlobals.MAX_LANDED_TIME)
        self.b_setState(ButterflyGlobals.LANDED, self.curIndex, self.destIndex, ttl, globalClockDelta.getRealNetworkTime())
        taskMgr.doMethodLater(ttl, self.__fly, 'flyButterfly%i' % self.doId, [])

    def __fly(self):
        """Fly to the next point, then schedule the next landing."""
        # Local renamed from `next` to avoid shadowing the builtin.
        route = ButterflyGlobals.getNextPos(ButterflyGlobals.ButterflyPoints[self.playground][self.area][self.destIndex], self.playground, self.area, self.doId)
        self.b_setState(ButterflyGlobals.FLYING, self.destIndex, route[1], route[2], globalClockDelta.getRealNetworkTime())
        taskMgr.doMethodLater(route[2], self.__land, 'landButterfly%i' % self.doId, [])

    def setArea(self, playground, area):
        self.area = area
        self.playground = playground

    def d_setArea(self, playground, area):
        self.sendUpdate('setArea', [playground, area])

    def b_setArea(self, playground, area):
        self.setArea(playground, area)
        self.d_setArea(playground, area)

    def getArea(self):
        return [self.playground, self.area]

    def setState(self, stateIndex, curIndex, destIndex, time, timestamp):
        self.stateIndex = stateIndex
        self.curIndex = curIndex
        self.destIndex = destIndex
        self.time = time
        self.timestamp = timestamp

    def d_setState(self, stateIndex, curIndex, destIndex, time, timestamp):
        self.sendUpdate('setState', [stateIndex, curIndex, destIndex, time, timestamp])

    def b_setState(self, stateIndex, curIndex, destIndex, time, timestamp):
        self.setState(stateIndex, curIndex, destIndex, time, timestamp)
        self.d_setState(stateIndex, curIndex, destIndex, time, timestamp)

    def getState(self):
        return [self.stateIndex, self.curIndex, self.destIndex, self.time, self.timestamp]

    def avatarEnter(self):
        pass
| [
"random.uniform",
"ButterflyGlobals.getNextPos",
"ButterflyGlobals.getFirstRoute",
"ButterflyGlobals.generateIndexes",
"direct.directnotify.DirectNotifyGlobal.directNotify.newCategory",
"direct.distributed.DistributedObjectAI.DistributedObjectAI.__init__"
] | [((269, 338), 'direct.directnotify.DirectNotifyGlobal.directNotify.newCategory', 'DirectNotifyGlobal.directNotify.newCategory', (['"""DistributedButterflyAI"""'], {}), "('DistributedButterflyAI')\n", (312, 338), False, 'from direct.directnotify import DirectNotifyGlobal\n'), ((377, 416), 'direct.distributed.DistributedObjectAI.DistributedObjectAI.__init__', 'DistributedObjectAI.__init__', (['self', 'air'], {}), '(self, air)\n', (405, 416), False, 'from direct.distributed.DistributedObjectAI import DistributedObjectAI\n'), ((634, 694), 'ButterflyGlobals.generateIndexes', 'ButterflyGlobals.generateIndexes', (['self.doId', 'self.playground'], {}), '(self.doId, self.playground)\n', (666, 694), False, 'import ButterflyGlobals\n'), ((708, 777), 'ButterflyGlobals.getFirstRoute', 'ButterflyGlobals.getFirstRoute', (['self.playground', 'self.area', 'self.doId'], {}), '(self.playground, self.area, self.doId)\n', (738, 777), False, 'import ButterflyGlobals\n'), ((1013, 1064), 'random.uniform', 'random.uniform', (['(0)', 'ButterflyGlobals.MAX_LANDED_TIME'], {}), '(0, ButterflyGlobals.MAX_LANDED_TIME)\n', (1027, 1064), False, 'import random\n'), ((1315, 1468), 'ButterflyGlobals.getNextPos', 'ButterflyGlobals.getNextPos', (['ButterflyGlobals.ButterflyPoints[self.playground][self.area][self.destIndex]', 'self.playground', 'self.area', 'self.doId'], {}), '(ButterflyGlobals.ButterflyPoints[self.\n playground][self.area][self.destIndex], self.playground, self.area,\n self.doId)\n', (1342, 1468), False, 'import ButterflyGlobals\n')] |
from io import IncrementalNewlineDecoder
from django.db import models
# Classe de departamento
class Departamento(models.Model):
id = models.IntegerField(primary_key=True, editable=False)
nome = models.CharField(max_length=255, blank=False)
numero_projetos = models.IntegerField(default=0) # Quantidade de projetos no departamento
numero_funcionarios = models.IntegerField(default=0) # Quantidade de funcionários do departamento
# Construtor da classe
#def __init__(self, nome):
# self.nome = nome
def __str__(self):
return self.nome
def adiciona_funcionario(self, id_funcionario):
# ToDo acrescentar linha na tabela funcionario_departamento
print(id_funcionario)
self.numero_funcionarios += 1
def remove_funcionario(self, id_funcionario):
# ToDo acrescentar linha na tabela funcionario_departamento
print(id_funcionario)
self.numero_funcionarios -= 1
def adiciona_projeto(self):
self.numero_projetos += 1
def remove_projeto(self):
self.numero_projetos -= 1
| [
"django.db.models.CharField",
"django.db.models.IntegerField"
] | [((139, 192), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'primary_key': '(True)', 'editable': '(False)'}), '(primary_key=True, editable=False)\n', (158, 192), False, 'from django.db import models\n'), ((204, 249), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)', 'blank': '(False)'}), '(max_length=255, blank=False)\n', (220, 249), False, 'from django.db import models\n'), ((272, 302), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)'}), '(default=0)\n', (291, 302), False, 'from django.db import models\n'), ((370, 400), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)'}), '(default=0)\n', (389, 400), False, 'from django.db import models\n')] |
import argparse
import os
import torch
import matplotlib.pyplot as plt
from torch.utils.data.distributed import DistributedSampler
from torch import distributed as dist
from torch import optim
from tqdm import tqdm
from torch_ema import ExponentialMovingAverage
from cifr.core.config import Config
from cifr.models.builder import build_architecture, build_optimizer, build_dataset
from cifr.models.builder import build_discriminator
from cifr.models.losses.contextual_loss import ContextualLoss, ContextualBilateralLoss
from cifr.models.losses.gradient_norm import normalize_gradient
from cifr.models.losses.gan_loss import d_logistic_loss
from cifr.models.losses.gan_loss import g_nonsaturating_loss
from tools.utils import query_all_pixels
from tools.utils import requires_grad
from tools.utils import save_pred_img
WORK_DIR = './work_dir'
def synchronize():
if not dist.is_available() or not dist.is_initialized():
return
world_size = dist.get_world_size()
if world_size == 1:
return
dist.barrier()
def get_world_size():
if not dist.is_available() or not dist.is_initialized():
return 1
return dist.get_world_size()
def reduce_loss_dict(loss_dict):
world_size = get_world_size()
if world_size < 2:
return loss_dict
with torch.no_grad():
keys = []
losses = []
for k in sorted(loss_dict.keys()):
keys.append(k)
losses.append(loss_dict[k])
losses = torch.stack(losses, 0)
dist.reduce(losses, dst=0)
if dist.get_rank() == 0:
losses /= world_size
reduced_losses = {k: v for k, v in zip(keys, losses)}
return reduced_losses
def train(args, config, device):
model = build_architecture(config.model).to(device)
model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
encoder = build_architecture(config.encoder).to(device)
encoder = torch.nn.SyncBatchNorm.convert_sync_batchnorm(encoder)
disc = build_discriminator(config.discriminator).to(device)
disc = torch.nn.SyncBatchNorm.convert_sync_batchnorm(disc)
model = torch.nn.parallel.DistributedDataParallel(
model,
device_ids=[dist.get_rank()],
output_device=dist.get_rank(),
broadcast_buffers=False
)
encoder = torch.nn.parallel.DistributedDataParallel(
encoder,
device_ids=[dist.get_rank()],
output_device=dist.get_rank(),
broadcast_buffers=False
)
disc = torch.nn.parallel.DistributedDataParallel(
disc,
device_ids=[dist.get_rank()],
output_device=dist.get_rank(),
broadcast_buffers=False
)
config.optimizer.update({'params': [
{'params': encoder.parameters()},
{'params': model.parameters()}
]})
optim_g = build_optimizer(config.optimizer)
config.optimizer.update({'params': disc.parameters()})
optim_d = build_optimizer(config.optimizer)
scheduler_g = optim.lr_scheduler.StepLR(optim_g, step_size=50, gamma=0.5)
scheduler_d = optim.lr_scheduler.StepLR(optim_d, step_size=50, gamma=0.5)
model_ema = ExponentialMovingAverage(model.parameters(), decay=0.995)
encoder_ema = ExponentialMovingAverage(encoder.parameters(), decay=0.995)
train_set_gan = build_dataset(config.train_dataset_gan)
train_set = build_dataset(config.train_dataset)
test_set = build_dataset(config.test_dataset)
train_loader_gan = torch.utils.data.DataLoader(
train_set_gan,
batch_size=config.batch_size,
num_workers=6,
drop_last=True,
sampler=DistributedSampler(train_set_gan, shuffle=True),
)
train_loader = torch.utils.data.DataLoader(
train_set,
batch_size=config.batch_size,
num_workers=6,
drop_last=True,
sampler=DistributedSampler(train_set, shuffle=True),
)
test_loader = torch.utils.data.DataLoader(
test_set,
batch_size=1,
num_workers=1
)
contextual_loss = ContextualLoss(use_vgg=True, vgg_layer="conv5_4").to(device)
loss_fn = torch.nn.L1Loss()
grad_norm_fn = normalize_gradient if config.discriminator_gradient_norm else lambda fn, x: fn(x)
config_name = os.path.splitext(os.path.basename(args.config))[0] if args.name is None else args.name
os.makedirs(f'{WORK_DIR}/{config_name}/images', exist_ok=True)
os.makedirs(f'{WORK_DIR}/{config_name}/checkpoints', exist_ok=True)
# config.dump(f'{WORK_DIR}/{config_name}/{config_name}.py')
rows = 20
cols = 3
fig = plt.figure(figsize=(15, rows*6))
total_iter = len(train_set) // config.batch_size // dist.get_world_size()
epoch_pbar = tqdm(
range(config.epoch),
total=config.epoch,
desc='Epoch',
position=0,
ncols=0,
disable=dist.get_rank()!=0
)
for epoch in epoch_pbar:
iter_pbar = tqdm(
enumerate(zip(train_loader, train_loader_gan)),
total=total_iter,
leave=False,
position=1,
ncols=0,
disable=dist.get_rank()!=0
)
for n, (batch, batch_gan) in iter_pbar:
encoder.train()
model.train()
disc.train()
lr = batch_gan['lr'].to(device)
coord = batch_gan['coord'].to(device)
cell = batch_gan['cell'].to(device)
real = batch_gan['real'].to(device)
#
# Generator Step
#
requires_grad(disc, False)
optim_g.zero_grad()
fake = query_all_pixels(encoder, model, lr, coord, cell, 1024)
fake_pred = grad_norm_fn(disc, fake)
ctx_loss = contextual_loss(fake, real)
loss_fake = g_nonsaturating_loss(fake_pred)
loss_g = ctx_loss + loss_fake
loss_g.backward()
query_inp = batch['inp'].to(device)
query_coord = batch['coord'].to(device)
query_cell = batch['cell'].to(device)
query_gt = batch['gt'].to(device)
feature = encoder(query_inp)
query_pred = model(query_inp, feature, query_coord, query_cell)
query_l1_loss = loss_fn(query_pred, query_gt)
query_l1_loss.backward()
optim_g.step()
encoder_ema.update()
model_ema.update()
#
# Discriminator Step
#
requires_grad(disc, True)
optim_d.zero_grad()
fake_pred = grad_norm_fn(disc, fake.detach())
real_pred = grad_norm_fn(disc, real)
loss_d = d_logistic_loss(real_pred, fake_pred)
loss_d.backward()
optim_d.step()
loss_dict = {
'd': loss_d,
'g': loss_g,
'g_ctx': ctx_loss,
'query_l1': query_l1_loss
}
reduced_loss = reduce_loss_dict(loss_dict)
if dist.get_rank() == 0:
loss_d = reduced_loss['d']
loss_g = reduced_loss['g']
ctx_loss = reduced_loss['g_ctx']
query_l1_loss = reduced_loss['query_l1']
loss_str = f'd: {loss_d:.4f};'
loss_str += f' g: {loss_g:.4f};'
loss_str += f' g_ctx: {ctx_loss:.4f}'
loss_str += f' query_l1: {query_l1_loss:.4f}'
iter_pbar.set_description(loss_str)
scheduler_g.step()
scheduler_d.step()
if dist.get_rank() == 0:
torch.save(
{
'encoder': encoder.module.state_dict(),
'model': model.module.state_dict(),
'encoder_ema': encoder_ema.state_dict(),
'model_ema': model_ema.state_dict(),
'discriminator': disc.module.state_dict(),
},
f'{WORK_DIR}/{config_name}/checkpoints/{epoch+1:0>6}.pth'
)
encoder_ema.store(encoder.parameters())
model_ema.store(model.parameters())
encoder_ema.copy_to(encoder.parameters())
model_ema.copy_to(model.parameters())
encoder.eval()
model.eval()
img_path = f'{WORK_DIR}/{config_name}/images/train_{epoch+1:0>6}.jpg'
save_pred_img(encoder, model, test_loader, img_path, fig, rows, cols)
encoder_ema.restore(encoder.parameters())
model_ema.restore(model.parameters())
iter_pbar.close()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--config', type=str, required=True)
parser.add_argument('--name', type=str, default=None)
args = parser.parse_args()
local_rank = int(os.environ['LOCAL_RANK'])
torch.cuda.set_device(local_rank)
torch.distributed.init_process_group(backend="nccl", init_method="env://")
device = torch.device(f'cuda:{local_rank}')
synchronize()
cfg = Config.fromfile(args.config)
train(args, cfg, device)
| [
"cifr.models.losses.contextual_loss.ContextualLoss",
"torch.nn.L1Loss",
"cifr.models.losses.gan_loss.d_logistic_loss",
"torch.utils.data.distributed.DistributedSampler",
"torch.distributed.get_rank",
"torch.distributed.is_available",
"torch.distributed.barrier",
"cifr.models.builder.build_dataset",
... | [((959, 980), 'torch.distributed.get_world_size', 'dist.get_world_size', ([], {}), '()\n', (978, 980), True, 'from torch import distributed as dist\n'), ((1024, 1038), 'torch.distributed.barrier', 'dist.barrier', ([], {}), '()\n', (1036, 1038), True, 'from torch import distributed as dist\n'), ((1151, 1172), 'torch.distributed.get_world_size', 'dist.get_world_size', ([], {}), '()\n', (1170, 1172), True, 'from torch import distributed as dist\n'), ((1801, 1853), 'torch.nn.SyncBatchNorm.convert_sync_batchnorm', 'torch.nn.SyncBatchNorm.convert_sync_batchnorm', (['model'], {}), '(model)\n', (1846, 1853), False, 'import torch\n'), ((1929, 1983), 'torch.nn.SyncBatchNorm.convert_sync_batchnorm', 'torch.nn.SyncBatchNorm.convert_sync_batchnorm', (['encoder'], {}), '(encoder)\n', (1974, 1983), False, 'import torch\n'), ((2060, 2111), 'torch.nn.SyncBatchNorm.convert_sync_batchnorm', 'torch.nn.SyncBatchNorm.convert_sync_batchnorm', (['disc'], {}), '(disc)\n', (2105, 2111), False, 'import torch\n'), ((2819, 2852), 'cifr.models.builder.build_optimizer', 'build_optimizer', (['config.optimizer'], {}), '(config.optimizer)\n', (2834, 2852), False, 'from cifr.models.builder import build_architecture, build_optimizer, build_dataset\n'), ((2926, 2959), 'cifr.models.builder.build_optimizer', 'build_optimizer', (['config.optimizer'], {}), '(config.optimizer)\n', (2941, 2959), False, 'from cifr.models.builder import build_architecture, build_optimizer, build_dataset\n'), ((2979, 3038), 'torch.optim.lr_scheduler.StepLR', 'optim.lr_scheduler.StepLR', (['optim_g'], {'step_size': '(50)', 'gamma': '(0.5)'}), '(optim_g, step_size=50, gamma=0.5)\n', (3004, 3038), False, 'from torch import optim\n'), ((3057, 3116), 'torch.optim.lr_scheduler.StepLR', 'optim.lr_scheduler.StepLR', (['optim_d'], {'step_size': '(50)', 'gamma': '(0.5)'}), '(optim_d, step_size=50, gamma=0.5)\n', (3082, 3116), False, 'from torch import optim\n'), ((3291, 3330), 'cifr.models.builder.build_dataset', 'build_dataset', 
(['config.train_dataset_gan'], {}), '(config.train_dataset_gan)\n', (3304, 3330), False, 'from cifr.models.builder import build_architecture, build_optimizer, build_dataset\n'), ((3347, 3382), 'cifr.models.builder.build_dataset', 'build_dataset', (['config.train_dataset'], {}), '(config.train_dataset)\n', (3360, 3382), False, 'from cifr.models.builder import build_architecture, build_optimizer, build_dataset\n'), ((3398, 3432), 'cifr.models.builder.build_dataset', 'build_dataset', (['config.test_dataset'], {}), '(config.test_dataset)\n', (3411, 3432), False, 'from cifr.models.builder import build_architecture, build_optimizer, build_dataset\n'), ((3903, 3969), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['test_set'], {'batch_size': '(1)', 'num_workers': '(1)'}), '(test_set, batch_size=1, num_workers=1)\n', (3930, 3969), False, 'import torch\n'), ((4097, 4114), 'torch.nn.L1Loss', 'torch.nn.L1Loss', ([], {}), '()\n', (4112, 4114), False, 'import torch\n'), ((4326, 4388), 'os.makedirs', 'os.makedirs', (['f"""{WORK_DIR}/{config_name}/images"""'], {'exist_ok': '(True)'}), "(f'{WORK_DIR}/{config_name}/images', exist_ok=True)\n", (4337, 4388), False, 'import os\n'), ((4393, 4460), 'os.makedirs', 'os.makedirs', (['f"""{WORK_DIR}/{config_name}/checkpoints"""'], {'exist_ok': '(True)'}), "(f'{WORK_DIR}/{config_name}/checkpoints', exist_ok=True)\n", (4404, 4460), False, 'import os\n'), ((4562, 4596), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(15, rows * 6)'}), '(figsize=(15, rows * 6))\n', (4572, 4596), True, 'import matplotlib.pyplot as plt\n'), ((8578, 8603), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (8601, 8603), False, 'import argparse\n'), ((8806, 8839), 'torch.cuda.set_device', 'torch.cuda.set_device', (['local_rank'], {}), '(local_rank)\n', (8827, 8839), False, 'import torch\n'), ((8844, 8918), 'torch.distributed.init_process_group', 'torch.distributed.init_process_group', ([], {'backend': '"""nccl"""', 
'init_method': '"""env://"""'}), "(backend='nccl', init_method='env://')\n", (8880, 8918), False, 'import torch\n'), ((8932, 8966), 'torch.device', 'torch.device', (['f"""cuda:{local_rank}"""'], {}), "(f'cuda:{local_rank}')\n", (8944, 8966), False, 'import torch\n'), ((8996, 9024), 'cifr.core.config.Config.fromfile', 'Config.fromfile', (['args.config'], {}), '(args.config)\n', (9011, 9024), False, 'from cifr.core.config import Config\n'), ((1300, 1315), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1313, 1315), False, 'import torch\n'), ((1484, 1506), 'torch.stack', 'torch.stack', (['losses', '(0)'], {}), '(losses, 0)\n', (1495, 1506), False, 'import torch\n'), ((1515, 1541), 'torch.distributed.reduce', 'dist.reduce', (['losses'], {'dst': '(0)'}), '(losses, dst=0)\n', (1526, 1541), True, 'from torch import distributed as dist\n'), ((4652, 4673), 'torch.distributed.get_world_size', 'dist.get_world_size', ([], {}), '()\n', (4671, 4673), True, 'from torch import distributed as dist\n'), ((877, 896), 'torch.distributed.is_available', 'dist.is_available', ([], {}), '()\n', (894, 896), True, 'from torch import distributed as dist\n'), ((904, 925), 'torch.distributed.is_initialized', 'dist.is_initialized', ([], {}), '()\n', (923, 925), True, 'from torch import distributed as dist\n'), ((1073, 1092), 'torch.distributed.is_available', 'dist.is_available', ([], {}), '()\n', (1090, 1092), True, 'from torch import distributed as dist\n'), ((1100, 1121), 'torch.distributed.is_initialized', 'dist.is_initialized', ([], {}), '()\n', (1119, 1121), True, 'from torch import distributed as dist\n'), ((1554, 1569), 'torch.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (1567, 1569), True, 'from torch import distributed as dist\n'), ((1745, 1777), 'cifr.models.builder.build_architecture', 'build_architecture', (['config.model'], {}), '(config.model)\n', (1763, 1777), False, 'from cifr.models.builder import build_architecture, build_optimizer, build_dataset\n'), ((1869, 
1903), 'cifr.models.builder.build_architecture', 'build_architecture', (['config.encoder'], {}), '(config.encoder)\n', (1887, 1903), False, 'from cifr.models.builder import build_architecture, build_optimizer, build_dataset\n'), ((1996, 2037), 'cifr.models.builder.build_discriminator', 'build_discriminator', (['config.discriminator'], {}), '(config.discriminator)\n', (2015, 2037), False, 'from cifr.models.builder import build_discriminator\n'), ((2243, 2258), 'torch.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (2256, 2258), True, 'from torch import distributed as dist\n'), ((2432, 2447), 'torch.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (2445, 2447), True, 'from torch import distributed as dist\n'), ((2615, 2630), 'torch.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (2628, 2630), True, 'from torch import distributed as dist\n'), ((3610, 3657), 'torch.utils.data.distributed.DistributedSampler', 'DistributedSampler', (['train_set_gan'], {'shuffle': '(True)'}), '(train_set_gan, shuffle=True)\n', (3628, 3657), False, 'from torch.utils.data.distributed import DistributedSampler\n'), ((3834, 3877), 'torch.utils.data.distributed.DistributedSampler', 'DistributedSampler', (['train_set'], {'shuffle': '(True)'}), '(train_set, shuffle=True)\n', (3852, 3877), False, 'from torch.utils.data.distributed import DistributedSampler\n'), ((4022, 4071), 'cifr.models.losses.contextual_loss.ContextualLoss', 'ContextualLoss', ([], {'use_vgg': '(True)', 'vgg_layer': '"""conv5_4"""'}), "(use_vgg=True, vgg_layer='conv5_4')\n", (4036, 4071), False, 'from cifr.models.losses.contextual_loss import ContextualLoss, ContextualBilateralLoss\n'), ((5506, 5532), 'tools.utils.requires_grad', 'requires_grad', (['disc', '(False)'], {}), '(disc, False)\n', (5519, 5532), False, 'from tools.utils import requires_grad\n'), ((5585, 5640), 'tools.utils.query_all_pixels', 'query_all_pixels', (['encoder', 'model', 'lr', 'coord', 'cell', '(1024)'], {}), '(encoder, model, 
lr, coord, cell, 1024)\n', (5601, 5640), False, 'from tools.utils import query_all_pixels\n'), ((5765, 5796), 'cifr.models.losses.gan_loss.g_nonsaturating_loss', 'g_nonsaturating_loss', (['fake_pred'], {}), '(fake_pred)\n', (5785, 5796), False, 'from cifr.models.losses.gan_loss import g_nonsaturating_loss\n'), ((6444, 6469), 'tools.utils.requires_grad', 'requires_grad', (['disc', '(True)'], {}), '(disc, True)\n', (6457, 6469), False, 'from tools.utils import requires_grad\n'), ((6631, 6668), 'cifr.models.losses.gan_loss.d_logistic_loss', 'd_logistic_loss', (['real_pred', 'fake_pred'], {}), '(real_pred, fake_pred)\n', (6646, 6668), False, 'from cifr.models.losses.gan_loss import d_logistic_loss\n'), ((7515, 7530), 'torch.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (7528, 7530), True, 'from torch import distributed as dist\n'), ((8333, 8402), 'tools.utils.save_pred_img', 'save_pred_img', (['encoder', 'model', 'test_loader', 'img_path', 'fig', 'rows', 'cols'], {}), '(encoder, model, test_loader, img_path, fig, rows, cols)\n', (8346, 8402), False, 'from tools.utils import save_pred_img\n'), ((2203, 2218), 'torch.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (2216, 2218), True, 'from torch import distributed as dist\n'), ((2392, 2407), 'torch.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (2405, 2407), True, 'from torch import distributed as dist\n'), ((2575, 2590), 'torch.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (2588, 2590), True, 'from torch import distributed as dist\n'), ((4252, 4281), 'os.path.basename', 'os.path.basename', (['args.config'], {}), '(args.config)\n', (4268, 4281), False, 'import os\n'), ((4829, 4844), 'torch.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (4842, 4844), True, 'from torch import distributed as dist\n'), ((6972, 6987), 'torch.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (6985, 6987), True, 'from torch import distributed as dist\n'), ((5089, 5104), 
'torch.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (5102, 5104), True, 'from torch import distributed as dist\n')] |
from __future__ import annotations
import ast
import pytest
from flake8_pie import Flake8PieCheck
from flake8_pie.pie804_no_unnecessary_dict_kwargs import PIE804
from flake8_pie.tests.utils import Error, ex, to_errors
EXAMPLES = [
ex(
code="""
foo(**{"bar": True})
""",
errors=[PIE804(lineno=2, col_offset=6)],
),
ex(
code="""
foo(**{"r2d2": True})
""",
errors=[PIE804(lineno=2, col_offset=6)],
),
ex(
code="""
Foo.objects.create(**{"bar": True})
""",
errors=[PIE804(lineno=2, col_offset=21)],
),
ex(
code="""
Foo.objects.create(**{"_id": some_id})
""",
errors=[PIE804(lineno=2, col_offset=21)],
),
ex(
code="""
Foo.objects.create(**{**bar})
""",
errors=[PIE804(lineno=2, col_offset=21)],
),
ex(
code="""
foo(**{**data, "foo": "buzz"})
foo(**buzz)
foo(**{"bar-foo": True})
foo(**{"bar foo": True})
foo(**{"1foo": True})
foo(**{buzz: True})
foo(**{"": True})
foo(**{f"buzz__{bar}": True})
""",
errors=[],
),
]
@pytest.mark.parametrize("code,errors", EXAMPLES)
def test_examples(code: str, errors: list[Error]) -> None:
expr = ast.parse(code)
assert to_errors(Flake8PieCheck(expr, filename="foo.py").run()) == errors
| [
"flake8_pie.pie804_no_unnecessary_dict_kwargs.PIE804",
"pytest.mark.parametrize",
"flake8_pie.Flake8PieCheck",
"ast.parse",
"flake8_pie.tests.utils.ex"
] | [((1060, 1108), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""code,errors"""', 'EXAMPLES'], {}), "('code,errors', EXAMPLES)\n", (1083, 1108), False, 'import pytest\n'), ((820, 1040), 'flake8_pie.tests.utils.ex', 'ex', ([], {'code': '"""\nfoo(**{**data, "foo": "buzz"})\nfoo(**buzz)\nfoo(**{"bar-foo": True})\nfoo(**{"bar foo": True})\nfoo(**{"1foo": True})\nfoo(**{buzz: True})\nfoo(**{"": True})\nfoo(**{f"buzz__{bar}": True})\n"""', 'errors': '[]'}), '(code=\n """\nfoo(**{**data, "foo": "buzz"})\nfoo(**buzz)\nfoo(**{"bar-foo": True})\nfoo(**{"bar foo": True})\nfoo(**{"1foo": True})\nfoo(**{buzz: True})\nfoo(**{"": True})\nfoo(**{f"buzz__{bar}": True})\n"""\n , errors=[])\n', (822, 1040), False, 'from flake8_pie.tests.utils import Error, ex, to_errors\n'), ((1179, 1194), 'ast.parse', 'ast.parse', (['code'], {}), '(code)\n', (1188, 1194), False, 'import ast\n'), ((302, 332), 'flake8_pie.pie804_no_unnecessary_dict_kwargs.PIE804', 'PIE804', ([], {'lineno': '(2)', 'col_offset': '(6)'}), '(lineno=2, col_offset=6)\n', (308, 332), False, 'from flake8_pie.pie804_no_unnecessary_dict_kwargs import PIE804\n'), ((410, 440), 'flake8_pie.pie804_no_unnecessary_dict_kwargs.PIE804', 'PIE804', ([], {'lineno': '(2)', 'col_offset': '(6)'}), '(lineno=2, col_offset=6)\n', (416, 440), False, 'from flake8_pie.pie804_no_unnecessary_dict_kwargs import PIE804\n'), ((532, 563), 'flake8_pie.pie804_no_unnecessary_dict_kwargs.PIE804', 'PIE804', ([], {'lineno': '(2)', 'col_offset': '(21)'}), '(lineno=2, col_offset=21)\n', (538, 563), False, 'from flake8_pie.pie804_no_unnecessary_dict_kwargs import PIE804\n'), ((658, 689), 'flake8_pie.pie804_no_unnecessary_dict_kwargs.PIE804', 'PIE804', ([], {'lineno': '(2)', 'col_offset': '(21)'}), '(lineno=2, col_offset=21)\n', (664, 689), False, 'from flake8_pie.pie804_no_unnecessary_dict_kwargs import PIE804\n'), ((775, 806), 'flake8_pie.pie804_no_unnecessary_dict_kwargs.PIE804', 'PIE804', ([], {'lineno': '(2)', 'col_offset': '(21)'}), 
'(lineno=2, col_offset=21)\n', (781, 806), False, 'from flake8_pie.pie804_no_unnecessary_dict_kwargs import PIE804\n'), ((1216, 1255), 'flake8_pie.Flake8PieCheck', 'Flake8PieCheck', (['expr'], {'filename': '"""foo.py"""'}), "(expr, filename='foo.py')\n", (1230, 1255), False, 'from flake8_pie import Flake8PieCheck\n')] |
import pyClarion.base as clb
import pyClarion.numdicts as nd
import unittest
import unittest.mock as mock
class TestProcess(unittest.TestCase):
@mock.patch.object(clb.Process, "_serves", clb.ConstructType.chunks)
def test_check_inputs_accepts_good_input_structure(self):
process = clb.Process(
expected=[clb.buffer("wm"), clb.terminus("selection")]
)
inputs = {
clb.buffer("wm"): nd.NumDict(default=0),
clb.terminus("selection"): nd.NumDict(default=0),
clb.terminus("selection2"): nd.NumDict(default=0)
}
process.check_inputs(inputs)
@mock.patch.object(clb.Process, "_serves", clb.ConstructType.chunks)
def test_check_inputs_rejects_incomplete_input(self):
process = clb.Process(
expected=[clb.chunks("in"), clb.terminus("selection")]
)
with self.assertRaises(RuntimeError):
inputs = {
# clb.buffer("wm"): nd.NumDict(default=0),
clb.terminus("selection"): nd.NumDict(default=0),
clb.terminus("selection2"): nd.NumDict(default=0)
}
process.check_inputs(inputs)
class TestWrappedProcess(unittest.TestCase):
pass
| [
"pyClarion.base.terminus",
"pyClarion.base.chunks",
"pyClarion.base.buffer",
"pyClarion.numdicts.NumDict",
"unittest.mock.patch.object"
] | [((153, 220), 'unittest.mock.patch.object', 'mock.patch.object', (['clb.Process', '"""_serves"""', 'clb.ConstructType.chunks'], {}), "(clb.Process, '_serves', clb.ConstructType.chunks)\n", (170, 220), True, 'import unittest.mock as mock\n'), ((642, 709), 'unittest.mock.patch.object', 'mock.patch.object', (['clb.Process', '"""_serves"""', 'clb.ConstructType.chunks'], {}), "(clb.Process, '_serves', clb.ConstructType.chunks)\n", (659, 709), True, 'import unittest.mock as mock\n'), ((424, 440), 'pyClarion.base.buffer', 'clb.buffer', (['"""wm"""'], {}), "('wm')\n", (434, 440), True, 'import pyClarion.base as clb\n'), ((477, 502), 'pyClarion.base.terminus', 'clb.terminus', (['"""selection"""'], {}), "('selection')\n", (489, 502), True, 'import pyClarion.base as clb\n'), ((539, 565), 'pyClarion.base.terminus', 'clb.terminus', (['"""selection2"""'], {}), "('selection2')\n", (551, 565), True, 'import pyClarion.base as clb\n'), ((442, 463), 'pyClarion.numdicts.NumDict', 'nd.NumDict', ([], {'default': '(0)'}), '(default=0)\n', (452, 463), True, 'import pyClarion.numdicts as nd\n'), ((504, 525), 'pyClarion.numdicts.NumDict', 'nd.NumDict', ([], {'default': '(0)'}), '(default=0)\n', (514, 525), True, 'import pyClarion.numdicts as nd\n'), ((567, 588), 'pyClarion.numdicts.NumDict', 'nd.NumDict', ([], {'default': '(0)'}), '(default=0)\n', (577, 588), True, 'import pyClarion.numdicts as nd\n'), ((1022, 1047), 'pyClarion.base.terminus', 'clb.terminus', (['"""selection"""'], {}), "('selection')\n", (1034, 1047), True, 'import pyClarion.base as clb\n'), ((1088, 1114), 'pyClarion.base.terminus', 'clb.terminus', (['"""selection2"""'], {}), "('selection2')\n", (1100, 1114), True, 'import pyClarion.base as clb\n'), ((1049, 1070), 'pyClarion.numdicts.NumDict', 'nd.NumDict', ([], {'default': '(0)'}), '(default=0)\n', (1059, 1070), True, 'import pyClarion.numdicts as nd\n'), ((1116, 1137), 'pyClarion.numdicts.NumDict', 'nd.NumDict', ([], {'default': '(0)'}), '(default=0)\n', (1126, 1137), 
True, 'import pyClarion.numdicts as nd\n'), ((337, 353), 'pyClarion.base.buffer', 'clb.buffer', (['"""wm"""'], {}), "('wm')\n", (347, 353), True, 'import pyClarion.base as clb\n'), ((355, 380), 'pyClarion.base.terminus', 'clb.terminus', (['"""selection"""'], {}), "('selection')\n", (367, 380), True, 'import pyClarion.base as clb\n'), ((822, 838), 'pyClarion.base.chunks', 'clb.chunks', (['"""in"""'], {}), "('in')\n", (832, 838), True, 'import pyClarion.base as clb\n'), ((840, 865), 'pyClarion.base.terminus', 'clb.terminus', (['"""selection"""'], {}), "('selection')\n", (852, 865), True, 'import pyClarion.base as clb\n')] |
import random
import requests
def clean_proxies():
proxies = []
with open('proxies', 'r') as handle:
contents = handle.read().strip()
for proxy in contents.split('\n'):
proxies.append(proxy)
proxy2 = []
print(proxies)
for proxy in proxies:
try:
response = requests.get('https://google.com', proxies={'https':'https://'+proxy}, timeout=8, verify=False)
proxy2.append(proxy)
except requests.exceptions.ConnectTimeout:
print(f'[-]\tProxy: {proxy} is taking too long to respond. Removing from the list...')
except requests.exceptions.ProxyError:
print(f'[-]\tProxy: {proxy} is dead. Removing from the list...')
proxies = proxy2
if len(proxies) == 0:
print("All proxies are dead or unavailable. We recommend you to renew the proxy list. In order to do that you need to edit the 'proxies' file.")
print("Execution Halt!")
exit(1)
with open('proxies', 'w') as handle:
for proxy in proxies:
handle.write(proxy + "\n")
def fetch_proxy():
proxies = []
with open('proxies', 'r') as handle:
contents = handle.read().strip()
for proxy in contents.split('\n'):
proxies.append(proxy)
index = random.randint(0,len(proxies)-1)
return {'https':'https://' + proxies[index],
'http':'http://' + proxies[index]}
| [
"requests.get"
] | [((275, 377), 'requests.get', 'requests.get', (['"""https://google.com"""'], {'proxies': "{'https': 'https://' + proxy}", 'timeout': '(8)', 'verify': '(False)'}), "('https://google.com', proxies={'https': 'https://' + proxy},\n timeout=8, verify=False)\n", (287, 377), False, 'import requests\n')] |
from django.shortcuts import render,redirect
from django.http import HttpResponse
from .models import *
from .forms import *
# Create your views here.
def index(request):
tasks = Task.objects.all()
form=TaskForm()
if request.method =='POST':
form = TaskForm(request.POST)
if form.is_valid():
form.save()
return redirect('/')
context = {'tasks':tasks,'form':form}
return render(request,'list.html', context)
def updateTask(request, pk):
task = Task.objects.get(id=pk)
form = TaskForm(instance=task)
if request.method == 'POST':
form = TaskForm(request.POST, instance=task)
if form.is_valid():
form.save()
return redirect('/')
context = {'form':form}
return render(request, 'update_task.html',context)
def deleteTask(request,pk):
item=Task.objects.get(id=pk)
if request.method=='POST':
item.delete()
return redirect('/')
context={'item':item}
return render(request,'delete.html',context)
| [
"django.shortcuts.render",
"django.shortcuts.redirect"
] | [((446, 483), 'django.shortcuts.render', 'render', (['request', '"""list.html"""', 'context'], {}), "(request, 'list.html', context)\n", (452, 483), False, 'from django.shortcuts import render, redirect\n'), ((767, 811), 'django.shortcuts.render', 'render', (['request', '"""update_task.html"""', 'context'], {}), "(request, 'update_task.html', context)\n", (773, 811), False, 'from django.shortcuts import render, redirect\n'), ((1004, 1043), 'django.shortcuts.render', 'render', (['request', '"""delete.html"""', 'context'], {}), "(request, 'delete.html', context)\n", (1010, 1043), False, 'from django.shortcuts import render, redirect\n'), ((375, 388), 'django.shortcuts.redirect', 'redirect', (['"""/"""'], {}), "('/')\n", (383, 388), False, 'from django.shortcuts import render, redirect\n'), ((949, 962), 'django.shortcuts.redirect', 'redirect', (['"""/"""'], {}), "('/')\n", (957, 962), False, 'from django.shortcuts import render, redirect\n'), ((714, 727), 'django.shortcuts.redirect', 'redirect', (['"""/"""'], {}), "('/')\n", (722, 727), False, 'from django.shortcuts import render, redirect\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import jinja2
import webapp2
import logging
import threading
from mandrill_email import *
from webapp2_extras import routes
from cookie import *
from settings import *
from decorators import *
from functions import *
from google.appengine.api import taskqueue
from google.appengine.datastore.datastore_query import Cursor
# HANDLERS
from application.handlers.pages.geoprocessing \
import GeoprocessingDashboardHandler
from application.handlers.pages.geoprocessing \
import GeoprocessingClassificationHandler
from application.handlers.pages.geoprocessing \
import GeoprocessingToolHandler
from application.handlers.pages.geoprocessing \
import GeoprocessingToolImagesHandler
from application.handlers.pages.geoprocessing \
import GeoprocessedPageHandler
from application.handlers.pages.geoprocessing \
import ForGeoprocessedPageHandler
from application.handlers.pages.statistics import StatisticsDashboard
from application.handlers.pages.statistics import StatisticsDashboard2
from application.handlers.pages.login import LoginHandler
from application.handlers.pages.loginoauth import LoginOauthHandler
from application.handlers.pages.verifylogincode import VerifyLoginCode
from application.handlers.pages.logoutapi import LogoutApiHandler
from application.handlers.pages.projectdashboard import ProjectDashboardHandler
from application.handlers.pages.logout import LogoutHandler
from application.handlers.pages.register import RegisterHandler
from application.handlers.pages.agencyadminregistration \
import AgencyAdminRegistrationHandler
from application.handlers.pages.dashboard import DashboardHandler
from application.handlers.pages.adminregister import AdminRegisterHandler
from application.handlers.pages.upload import UploadHandler
from application.handlers.pages.viewer import ViewerHandler
from application.handlers.pages.import_ import ImportHandler
from application.handlers.pages.invitedenvironment \
import InvitedEnvironmentHandler
from application.handlers.pages.scriptuploading import ScriptUploadingHandler
from application.handlers.pages.publicuserregistration \
import PublicUsersRegistrationHandler
from application.handlers.pages.passwordreset import PasswordResetHandler
from application.handlers.pages.verifyregister import VerifyRegisterHandler
from application.handlers.pages.sendverification import SendVerificationHandler
from application.handlers.pages.usergroups import UserGroupsHandler
from application.handlers.pages.classificationtokml \
import ClassificationToKMLHandler
from application.handlers.pages.environment import EnvironmentHandler
from application.handlers.pages.permission import PermissionHandler
from application.handlers.pages.taskqueueemails import TaskQueueEmailsHandler
from application.handlers.pages.taskcounter import TaskCounterHandler
from application.handlers.pages.taskimage import TaskImageHandler
from application.handlers.api.psgc import PSGCHandler
from application.handlers.api.redflags import RedFlagsHandler
from application.handlers.api.apiproxy import APIProxyHandler
from application.handlers.api.uacsapi import UACSAPIHandler
from application.handlers.api.uacsapiv2 import UACSAPIV2Handler
from application.handlers.api.usersapi import UsersApiHandler
from application.handlers.api.environmentsapi import EnvironmentsApiHandler
from application.handlers.api.usergroupsapi import UserGroupsApiHandler
from application.handlers.api.dataapi import DataApiHandler
from application.handlers.api.logs import LogsHandler
from application.handlers.api.classificationupload \
import ClassificationUploadHandler
from application.handlers.api.apikmldownloader import APIKMLDownloader
from application.handlers.api.dataapiupdate import DataApiUpdateHandler
from application.handlers.api.dataapipublish import DataApiPublishHandler
from application.handlers.api.dataapidetails import DataApiDetailsHandler
from application.handlers.api.kmllength import KMLLengthHandler
from application.handlers.api.program import ProgramAPIHandler
from application.handlers.pages.error import ErrorHandler
from application.handlers.pages.logexception import LogExceptionHandler
from application.handlers.pages.main_ import MainHandler
from application.handlers.pages.program import ProgramHandler
from application.handlers.pages.agency import AgencyHandler
from application.handlers.pages.workspace import WorkspaceHandler
from application.handlers.pages.new_statistics import NewStatisticsDashboard
from application.handlers.pages.generate_statistics import GenerateStatisticsHandler
from application.models.apidata import APIData
from google.appengine.ext import ndb
class TaskRePutHandler(webapp2.RequestHandler):
    """Re-puts every APIData entity in pages of 50, chaining itself through
    the task queue with a datastore cursor until the query is exhausted."""

    def post(self):
        page_size = 50

        # Resume state carried between chained task invocations.
        cursor_param = self.request.get('cursor')
        start_cursor = Cursor(urlsafe=cursor_param) if cursor_param else None
        count_param = self.request.get('count')
        running_total = int(count_param) if count_param else 0

        # Page through APIData ordered by creation time.
        query = APIData.query().order(APIData.created_time)
        page, next_cursor, _more = query.fetch_page(
            page_size, start_cursor=start_cursor)

        if page:
            # Write the batch straight back (no mutation) and track progress.
            ndb.put_multi(page)
            running_total += len(page)
            logging.debug('count: ' + str(running_total))

        # Enqueue the next batch while a full page came back and a cursor
        # is available to continue from.
        if len(page) == page_size and next_cursor:
            taskqueue.add(
                url=('/api/v1/JMKr5roUu0EQyssRVv8mvkgXsmQBt3sgNDbfoBIkwoUi59dz'
                     'zQJnvmQ5jIlNtC4c'),
                params={'cursor': next_cursor.urlsafe(),
                        'count': str(running_total)}
            )
# Thread-local scratch storage; presumably shared with the imported handlers
# (not referenced in this file) -- TODO confirm use.
this_thread = threading.local()
# Template environment for files under application/frontend/.
# autoescape guards template output against XSS; trim_blocks removes the
# newline that follows a {% block %} tag.
jinja_workspace = jinja2.Environment(
    loader=jinja2.FileSystemLoader('application/frontend/'),
    autoescape=True,
    trim_blocks=True)
# Expose the date-formatting helper (pulled in via a wildcard import above)
# to templates as the |to_date_format_only filter.
jinja_workspace.filters['to_date_format_only'] = to_date_format_only
# URL routing table. webapp2 matches routes top-down, so the more specific
# patterns must stay above the catch-all r'/<:.*>' error route at the bottom.
# NOTE(review): the re-put task URL posted by TaskRePutHandler
# ('/api/v1/JMKr5r...') has no explicit route here and therefore falls
# through to the catch-all ErrorHandler -- verify that is intended.
app = webapp2.WSGIApplication([
    routes.DomainRoute(r'<:.*>', [
        webapp2.Route('/', MainHandler),
        webapp2.Route('/dashboard', DashboardHandler),
        webapp2.Route('/dashboard/statistics', StatisticsDashboard),
        webapp2.Route('/dashboard/statistics2', StatisticsDashboard2),
        # webapp2.Route(r'/statistics/generate/<:.*>', GenerateStatisticsHandler),
        webapp2.Route('/statistics/generate', GenerateStatisticsHandler),
        webapp2.Route('/statistics', NewStatisticsDashboard),
        # Project dashboard accepts up to six wildcard path segments.
        webapp2.Route(r'/projects/<:.*>/<:.*>/<:.*>/<:.*>/<:.*>/<:.*>',
                      ProjectDashboardHandler),
        webapp2.Route(r'/projects/<:.*>/<:.*>/<:.*>/<:.*>/<:.*>',
                      ProjectDashboardHandler),
        webapp2.Route(r'/projects/<:.*>/<:.*>/<:.*>/<:.*>',
                      ProjectDashboardHandler),
        webapp2.Route(r'/projects/<:.*>/<:.*>/<:.*>', ProjectDashboardHandler),
        webapp2.Route(r'/projects/<:.*>/<:.*>', ProjectDashboardHandler),
        webapp2.Route(r'/projects/<:.*>', ProjectDashboardHandler),
        webapp2.Route(r'/upload/<:.*>/<:.*>/<:.*>/<:.*>', UploadHandler),
        webapp2.Route(r'/upload/<:.*>/<:.*>/<:.*>', UploadHandler),
        webapp2.Route(r'/upload/<:.*>/<:.*>', UploadHandler),
        webapp2.Route(r'/upload/<:.*>', UploadHandler),
        webapp2.Route('/projects', ProjectDashboardHandler),
        webapp2.Route(r'/programs/<:.*>/<:.*>', ProgramHandler),
        webapp2.Route(r'/programs/<:.*>', ProgramHandler),
        webapp2.Route('/programs', ProgramHandler),
        webapp2.Route(r'/agencies/<:.*>', AgencyHandler),
        webapp2.Route('/agencies', AgencyHandler),
        webapp2.Route('/viewer', ViewerHandler),
        webapp2.Route('/import', ImportHandler),
        webapp2.Route(r'/import/<:.*>', ImportHandler),
        webapp2.Route(r'/invite/workspace/<:.*>', InvitedEnvironmentHandler),
        webapp2.Route(r'/su/<:.*>', ScriptUploadingHandler),
        # Authentication and account lifecycle.
        webapp2.Route('/login', LoginHandler),
        webapp2.Route('/login/authorize', LoginOauthHandler),
        webapp2.Route(r'/login/verify/<:.*>', VerifyLoginCode),
        webapp2.Route('/logout', LogoutHandler),
        webapp2.Route('/api/logout', LogoutApiHandler),
        webapp2.Route('/register', RegisterHandler),
        webapp2.Route('/admin/register', AdminRegisterHandler),
        webapp2.Route('/register/verify', VerifyRegisterHandler),
        webapp2.Route('/register/verify/send', SendVerificationHandler),
        webapp2.Route('/agency/admins', AgencyAdminRegistrationHandler),
        webapp2.Route('/users/registration', PublicUsersRegistrationHandler),
        webapp2.Route('/password/reset', PasswordResetHandler),
        webapp2.Route('/groups', UserGroupsHandler),
        webapp2.Route(r'/groups/<:.*>', UserGroupsHandler),
        webapp2.Route('/workspace', WorkspaceHandler),
        webapp2.Route(r'/workspace/<:.*>', WorkspaceHandler),
        # Geoprocessing pages.
        webapp2.Route('/geoprocessing/dashboard',
                      GeoprocessingDashboardHandler),
        webapp2.Route('/geoprocessing/for_geoprocessing',
                      ForGeoprocessedPageHandler),
        webapp2.Route('/geoprocessing/geoprocessed', GeoprocessedPageHandler),
        webapp2.Route('/geoprocessing/classification',
                      GeoprocessingClassificationHandler),
        webapp2.Route('/geoprocessing/tool', GeoprocessingToolHandler),
        webapp2.Route('/geoprocessing/tool/images',
                      GeoprocessingToolImagesHandler),
        webapp2.Route('/geoprocessing/kml/download',
                      ClassificationToKMLHandler),
        # TASKQUEUE
        webapp2.Route('/tasks/email/send', TaskQueueEmailsHandler),
        webapp2.Route('/tasks/counter', TaskCounterHandler),
        webapp2.Route('/tasks/images', TaskImageHandler),
        # API ENDPOINTS
        webapp2.Route('/api/v1/length', KMLLengthHandler),
        webapp2.Route(r'/api/v1/programs/<:.*>', ProgramAPIHandler),
        webapp2.Route('/api/v1/programs', ProgramAPIHandler),
        webapp2.Route('/api/v1/psgc', PSGCHandler),
        webapp2.Route('/api/v1/redflags', RedFlagsHandler),
        webapp2.Route('/api/v1/proxy', APIProxyHandler),
        webapp2.Route('/api/v1/uacs', UACSAPIHandler),
        webapp2.Route('/api/v2/uacs', UACSAPIV2Handler),
        webapp2.Route('/api/v1/permissions', PermissionHandler),
        webapp2.Route('/api/v1/users', UsersApiHandler),
        webapp2.Route(r'/api/v1/users/<:.*>', UsersApiHandler),
        webapp2.Route('/api/v1/workspaces', EnvironmentsApiHandler),
        webapp2.Route(r'/api/v1/workspaces/<:.*>', EnvironmentsApiHandler),
        webapp2.Route('/api/v1/groups', UserGroupsApiHandler),
        webapp2.Route(r'/api/v1/groups/<:.*>', UserGroupsApiHandler),
        webapp2.Route('/api/v1/classification', ClassificationUploadHandler),
        webapp2.Route('/api/v1/KML', APIKMLDownloader),
        webapp2.Route('/api/v1/data', DataApiHandler),
        webapp2.Route(r'/api/v1/data/<:.*>/update', DataApiUpdateHandler),
        webapp2.Route(r'/api/v1/data/<:.*>/publish', DataApiPublishHandler),
        webapp2.Route(r'/api/v1/data/<:.*>', DataApiDetailsHandler),
        webapp2.Route(r'/api/v1/logs', LogsHandler),
        # Catch-all: anything unmatched above renders the error page.
        webapp2.Route(r'/<:.*>', ErrorHandler)
    ])
], debug=True)
# Route unhandled server errors (HTTP 500) through the exception logger.
app.error_handlers[500] = LogExceptionHandler.log_exception
| [
"threading.local",
"google.appengine.ext.ndb.put_multi",
"application.models.apidata.APIData.query",
"webapp2.Route",
"jinja2.FileSystemLoader"
] | [((5624, 5641), 'threading.local', 'threading.local', ([], {}), '()\n', (5639, 5641), False, 'import threading\n'), ((5691, 5739), 'jinja2.FileSystemLoader', 'jinja2.FileSystemLoader', (['"""application/frontend/"""'], {}), "('application/frontend/')\n", (5714, 5739), False, 'import jinja2\n'), ((5216, 5235), 'google.appengine.ext.ndb.put_multi', 'ndb.put_multi', (['data'], {}), '(data)\n', (5229, 5235), False, 'from google.appengine.ext import ndb\n'), ((5058, 5073), 'application.models.apidata.APIData.query', 'APIData.query', ([], {}), '()\n', (5071, 5073), False, 'from application.models.apidata import APIData\n'), ((5929, 5960), 'webapp2.Route', 'webapp2.Route', (['"""/"""', 'MainHandler'], {}), "('/', MainHandler)\n", (5942, 5960), False, 'import webapp2\n'), ((5970, 6015), 'webapp2.Route', 'webapp2.Route', (['"""/dashboard"""', 'DashboardHandler'], {}), "('/dashboard', DashboardHandler)\n", (5983, 6015), False, 'import webapp2\n'), ((6025, 6084), 'webapp2.Route', 'webapp2.Route', (['"""/dashboard/statistics"""', 'StatisticsDashboard'], {}), "('/dashboard/statistics', StatisticsDashboard)\n", (6038, 6084), False, 'import webapp2\n'), ((6094, 6155), 'webapp2.Route', 'webapp2.Route', (['"""/dashboard/statistics2"""', 'StatisticsDashboard2'], {}), "('/dashboard/statistics2', StatisticsDashboard2)\n", (6107, 6155), False, 'import webapp2\n'), ((6248, 6312), 'webapp2.Route', 'webapp2.Route', (['"""/statistics/generate"""', 'GenerateStatisticsHandler'], {}), "('/statistics/generate', GenerateStatisticsHandler)\n", (6261, 6312), False, 'import webapp2\n'), ((6322, 6374), 'webapp2.Route', 'webapp2.Route', (['"""/statistics"""', 'NewStatisticsDashboard'], {}), "('/statistics', NewStatisticsDashboard)\n", (6335, 6374), False, 'import webapp2\n'), ((6384, 6475), 'webapp2.Route', 'webapp2.Route', (['"""/projects/<:.*>/<:.*>/<:.*>/<:.*>/<:.*>/<:.*>"""', 'ProjectDashboardHandler'], {}), "('/projects/<:.*>/<:.*>/<:.*>/<:.*>/<:.*>/<:.*>',\n ProjectDashboardHandler)\n", 
(6397, 6475), False, 'import webapp2\n'), ((6504, 6589), 'webapp2.Route', 'webapp2.Route', (['"""/projects/<:.*>/<:.*>/<:.*>/<:.*>/<:.*>"""', 'ProjectDashboardHandler'], {}), "('/projects/<:.*>/<:.*>/<:.*>/<:.*>/<:.*>',\n ProjectDashboardHandler)\n", (6517, 6589), False, 'import webapp2\n'), ((6618, 6693), 'webapp2.Route', 'webapp2.Route', (['"""/projects/<:.*>/<:.*>/<:.*>/<:.*>"""', 'ProjectDashboardHandler'], {}), "('/projects/<:.*>/<:.*>/<:.*>/<:.*>', ProjectDashboardHandler)\n", (6631, 6693), False, 'import webapp2\n'), ((6726, 6795), 'webapp2.Route', 'webapp2.Route', (['"""/projects/<:.*>/<:.*>/<:.*>"""', 'ProjectDashboardHandler'], {}), "('/projects/<:.*>/<:.*>/<:.*>', ProjectDashboardHandler)\n", (6739, 6795), False, 'import webapp2\n'), ((6806, 6869), 'webapp2.Route', 'webapp2.Route', (['"""/projects/<:.*>/<:.*>"""', 'ProjectDashboardHandler'], {}), "('/projects/<:.*>/<:.*>', ProjectDashboardHandler)\n", (6819, 6869), False, 'import webapp2\n'), ((6880, 6937), 'webapp2.Route', 'webapp2.Route', (['"""/projects/<:.*>"""', 'ProjectDashboardHandler'], {}), "('/projects/<:.*>', ProjectDashboardHandler)\n", (6893, 6937), False, 'import webapp2\n'), ((6948, 7011), 'webapp2.Route', 'webapp2.Route', (['"""/upload/<:.*>/<:.*>/<:.*>/<:.*>"""', 'UploadHandler'], {}), "('/upload/<:.*>/<:.*>/<:.*>/<:.*>', UploadHandler)\n", (6961, 7011), False, 'import webapp2\n'), ((7022, 7079), 'webapp2.Route', 'webapp2.Route', (['"""/upload/<:.*>/<:.*>/<:.*>"""', 'UploadHandler'], {}), "('/upload/<:.*>/<:.*>/<:.*>', UploadHandler)\n", (7035, 7079), False, 'import webapp2\n'), ((7090, 7141), 'webapp2.Route', 'webapp2.Route', (['"""/upload/<:.*>/<:.*>"""', 'UploadHandler'], {}), "('/upload/<:.*>/<:.*>', UploadHandler)\n", (7103, 7141), False, 'import webapp2\n'), ((7152, 7197), 'webapp2.Route', 'webapp2.Route', (['"""/upload/<:.*>"""', 'UploadHandler'], {}), "('/upload/<:.*>', UploadHandler)\n", (7165, 7197), False, 'import webapp2\n'), ((7208, 7259), 'webapp2.Route', 'webapp2.Route', 
(['"""/projects"""', 'ProjectDashboardHandler'], {}), "('/projects', ProjectDashboardHandler)\n", (7221, 7259), False, 'import webapp2\n'), ((7269, 7323), 'webapp2.Route', 'webapp2.Route', (['"""/programs/<:.*>/<:.*>"""', 'ProgramHandler'], {}), "('/programs/<:.*>/<:.*>', ProgramHandler)\n", (7282, 7323), False, 'import webapp2\n'), ((7334, 7382), 'webapp2.Route', 'webapp2.Route', (['"""/programs/<:.*>"""', 'ProgramHandler'], {}), "('/programs/<:.*>', ProgramHandler)\n", (7347, 7382), False, 'import webapp2\n'), ((7393, 7435), 'webapp2.Route', 'webapp2.Route', (['"""/programs"""', 'ProgramHandler'], {}), "('/programs', ProgramHandler)\n", (7406, 7435), False, 'import webapp2\n'), ((7445, 7492), 'webapp2.Route', 'webapp2.Route', (['"""/agencies/<:.*>"""', 'AgencyHandler'], {}), "('/agencies/<:.*>', AgencyHandler)\n", (7458, 7492), False, 'import webapp2\n'), ((7503, 7544), 'webapp2.Route', 'webapp2.Route', (['"""/agencies"""', 'AgencyHandler'], {}), "('/agencies', AgencyHandler)\n", (7516, 7544), False, 'import webapp2\n'), ((7554, 7593), 'webapp2.Route', 'webapp2.Route', (['"""/viewer"""', 'ViewerHandler'], {}), "('/viewer', ViewerHandler)\n", (7567, 7593), False, 'import webapp2\n'), ((7603, 7642), 'webapp2.Route', 'webapp2.Route', (['"""/import"""', 'ImportHandler'], {}), "('/import', ImportHandler)\n", (7616, 7642), False, 'import webapp2\n'), ((7652, 7697), 'webapp2.Route', 'webapp2.Route', (['"""/import/<:.*>"""', 'ImportHandler'], {}), "('/import/<:.*>', ImportHandler)\n", (7665, 7697), False, 'import webapp2\n'), ((7708, 7775), 'webapp2.Route', 'webapp2.Route', (['"""/invite/workspace/<:.*>"""', 'InvitedEnvironmentHandler'], {}), "('/invite/workspace/<:.*>', InvitedEnvironmentHandler)\n", (7721, 7775), False, 'import webapp2\n'), ((7786, 7836), 'webapp2.Route', 'webapp2.Route', (['"""/su/<:.*>"""', 'ScriptUploadingHandler'], {}), "('/su/<:.*>', ScriptUploadingHandler)\n", (7799, 7836), False, 'import webapp2\n'), ((7847, 7884), 'webapp2.Route', 
'webapp2.Route', (['"""/login"""', 'LoginHandler'], {}), "('/login', LoginHandler)\n", (7860, 7884), False, 'import webapp2\n'), ((7894, 7946), 'webapp2.Route', 'webapp2.Route', (['"""/login/authorize"""', 'LoginOauthHandler'], {}), "('/login/authorize', LoginOauthHandler)\n", (7907, 7946), False, 'import webapp2\n'), ((7956, 8009), 'webapp2.Route', 'webapp2.Route', (['"""/login/verify/<:.*>"""', 'VerifyLoginCode'], {}), "('/login/verify/<:.*>', VerifyLoginCode)\n", (7969, 8009), False, 'import webapp2\n'), ((8020, 8059), 'webapp2.Route', 'webapp2.Route', (['"""/logout"""', 'LogoutHandler'], {}), "('/logout', LogoutHandler)\n", (8033, 8059), False, 'import webapp2\n'), ((8069, 8115), 'webapp2.Route', 'webapp2.Route', (['"""/api/logout"""', 'LogoutApiHandler'], {}), "('/api/logout', LogoutApiHandler)\n", (8082, 8115), False, 'import webapp2\n'), ((8125, 8168), 'webapp2.Route', 'webapp2.Route', (['"""/register"""', 'RegisterHandler'], {}), "('/register', RegisterHandler)\n", (8138, 8168), False, 'import webapp2\n'), ((8178, 8232), 'webapp2.Route', 'webapp2.Route', (['"""/admin/register"""', 'AdminRegisterHandler'], {}), "('/admin/register', AdminRegisterHandler)\n", (8191, 8232), False, 'import webapp2\n'), ((8242, 8298), 'webapp2.Route', 'webapp2.Route', (['"""/register/verify"""', 'VerifyRegisterHandler'], {}), "('/register/verify', VerifyRegisterHandler)\n", (8255, 8298), False, 'import webapp2\n'), ((8308, 8371), 'webapp2.Route', 'webapp2.Route', (['"""/register/verify/send"""', 'SendVerificationHandler'], {}), "('/register/verify/send', SendVerificationHandler)\n", (8321, 8371), False, 'import webapp2\n'), ((8381, 8444), 'webapp2.Route', 'webapp2.Route', (['"""/agency/admins"""', 'AgencyAdminRegistrationHandler'], {}), "('/agency/admins', AgencyAdminRegistrationHandler)\n", (8394, 8444), False, 'import webapp2\n'), ((8454, 8522), 'webapp2.Route', 'webapp2.Route', (['"""/users/registration"""', 'PublicUsersRegistrationHandler'], {}), "('/users/registration', 
PublicUsersRegistrationHandler)\n", (8467, 8522), False, 'import webapp2\n'), ((8532, 8586), 'webapp2.Route', 'webapp2.Route', (['"""/password/reset"""', 'PasswordResetHandler'], {}), "('/password/reset', PasswordResetHandler)\n", (8545, 8586), False, 'import webapp2\n'), ((8596, 8639), 'webapp2.Route', 'webapp2.Route', (['"""/groups"""', 'UserGroupsHandler'], {}), "('/groups', UserGroupsHandler)\n", (8609, 8639), False, 'import webapp2\n'), ((8649, 8698), 'webapp2.Route', 'webapp2.Route', (['"""/groups/<:.*>"""', 'UserGroupsHandler'], {}), "('/groups/<:.*>', UserGroupsHandler)\n", (8662, 8698), False, 'import webapp2\n'), ((8709, 8754), 'webapp2.Route', 'webapp2.Route', (['"""/workspace"""', 'WorkspaceHandler'], {}), "('/workspace', WorkspaceHandler)\n", (8722, 8754), False, 'import webapp2\n'), ((8764, 8815), 'webapp2.Route', 'webapp2.Route', (['"""/workspace/<:.*>"""', 'WorkspaceHandler'], {}), "('/workspace/<:.*>', WorkspaceHandler)\n", (8777, 8815), False, 'import webapp2\n'), ((8826, 8898), 'webapp2.Route', 'webapp2.Route', (['"""/geoprocessing/dashboard"""', 'GeoprocessingDashboardHandler'], {}), "('/geoprocessing/dashboard', GeoprocessingDashboardHandler)\n", (8839, 8898), False, 'import webapp2\n'), ((8930, 9007), 'webapp2.Route', 'webapp2.Route', (['"""/geoprocessing/for_geoprocessing"""', 'ForGeoprocessedPageHandler'], {}), "('/geoprocessing/for_geoprocessing', ForGeoprocessedPageHandler)\n", (8943, 9007), False, 'import webapp2\n'), ((9039, 9108), 'webapp2.Route', 'webapp2.Route', (['"""/geoprocessing/geoprocessed"""', 'GeoprocessedPageHandler'], {}), "('/geoprocessing/geoprocessed', GeoprocessedPageHandler)\n", (9052, 9108), False, 'import webapp2\n'), ((9118, 9204), 'webapp2.Route', 'webapp2.Route', (['"""/geoprocessing/classification"""', 'GeoprocessingClassificationHandler'], {}), "('/geoprocessing/classification',\n GeoprocessingClassificationHandler)\n", (9131, 9204), False, 'import webapp2\n'), ((9232, 9294), 'webapp2.Route', 'webapp2.Route', 
(['"""/geoprocessing/tool"""', 'GeoprocessingToolHandler'], {}), "('/geoprocessing/tool', GeoprocessingToolHandler)\n", (9245, 9294), False, 'import webapp2\n'), ((9304, 9379), 'webapp2.Route', 'webapp2.Route', (['"""/geoprocessing/tool/images"""', 'GeoprocessingToolImagesHandler'], {}), "('/geoprocessing/tool/images', GeoprocessingToolImagesHandler)\n", (9317, 9379), False, 'import webapp2\n'), ((9411, 9483), 'webapp2.Route', 'webapp2.Route', (['"""/geoprocessing/kml/download"""', 'ClassificationToKMLHandler'], {}), "('/geoprocessing/kml/download', ClassificationToKMLHandler)\n", (9424, 9483), False, 'import webapp2\n'), ((9535, 9593), 'webapp2.Route', 'webapp2.Route', (['"""/tasks/email/send"""', 'TaskQueueEmailsHandler'], {}), "('/tasks/email/send', TaskQueueEmailsHandler)\n", (9548, 9593), False, 'import webapp2\n'), ((9603, 9654), 'webapp2.Route', 'webapp2.Route', (['"""/tasks/counter"""', 'TaskCounterHandler'], {}), "('/tasks/counter', TaskCounterHandler)\n", (9616, 9654), False, 'import webapp2\n'), ((9664, 9712), 'webapp2.Route', 'webapp2.Route', (['"""/tasks/images"""', 'TaskImageHandler'], {}), "('/tasks/images', TaskImageHandler)\n", (9677, 9712), False, 'import webapp2\n'), ((9746, 9795), 'webapp2.Route', 'webapp2.Route', (['"""/api/v1/length"""', 'KMLLengthHandler'], {}), "('/api/v1/length', KMLLengthHandler)\n", (9759, 9795), False, 'import webapp2\n'), ((9805, 9863), 'webapp2.Route', 'webapp2.Route', (['"""/api/v1/programs/<:.*>"""', 'ProgramAPIHandler'], {}), "('/api/v1/programs/<:.*>', ProgramAPIHandler)\n", (9818, 9863), False, 'import webapp2\n'), ((9874, 9926), 'webapp2.Route', 'webapp2.Route', (['"""/api/v1/programs"""', 'ProgramAPIHandler'], {}), "('/api/v1/programs', ProgramAPIHandler)\n", (9887, 9926), False, 'import webapp2\n'), ((9936, 9978), 'webapp2.Route', 'webapp2.Route', (['"""/api/v1/psgc"""', 'PSGCHandler'], {}), "('/api/v1/psgc', PSGCHandler)\n", (9949, 9978), False, 'import webapp2\n'), ((9988, 10038), 'webapp2.Route', 
'webapp2.Route', (['"""/api/v1/redflags"""', 'RedFlagsHandler'], {}), "('/api/v1/redflags', RedFlagsHandler)\n", (10001, 10038), False, 'import webapp2\n'), ((10048, 10095), 'webapp2.Route', 'webapp2.Route', (['"""/api/v1/proxy"""', 'APIProxyHandler'], {}), "('/api/v1/proxy', APIProxyHandler)\n", (10061, 10095), False, 'import webapp2\n'), ((10105, 10150), 'webapp2.Route', 'webapp2.Route', (['"""/api/v1/uacs"""', 'UACSAPIHandler'], {}), "('/api/v1/uacs', UACSAPIHandler)\n", (10118, 10150), False, 'import webapp2\n'), ((10160, 10207), 'webapp2.Route', 'webapp2.Route', (['"""/api/v2/uacs"""', 'UACSAPIV2Handler'], {}), "('/api/v2/uacs', UACSAPIV2Handler)\n", (10173, 10207), False, 'import webapp2\n'), ((10217, 10272), 'webapp2.Route', 'webapp2.Route', (['"""/api/v1/permissions"""', 'PermissionHandler'], {}), "('/api/v1/permissions', PermissionHandler)\n", (10230, 10272), False, 'import webapp2\n'), ((10282, 10329), 'webapp2.Route', 'webapp2.Route', (['"""/api/v1/users"""', 'UsersApiHandler'], {}), "('/api/v1/users', UsersApiHandler)\n", (10295, 10329), False, 'import webapp2\n'), ((10339, 10392), 'webapp2.Route', 'webapp2.Route', (['"""/api/v1/users/<:.*>"""', 'UsersApiHandler'], {}), "('/api/v1/users/<:.*>', UsersApiHandler)\n", (10352, 10392), False, 'import webapp2\n'), ((10403, 10462), 'webapp2.Route', 'webapp2.Route', (['"""/api/v1/workspaces"""', 'EnvironmentsApiHandler'], {}), "('/api/v1/workspaces', EnvironmentsApiHandler)\n", (10416, 10462), False, 'import webapp2\n'), ((10472, 10537), 'webapp2.Route', 'webapp2.Route', (['"""/api/v1/workspaces/<:.*>"""', 'EnvironmentsApiHandler'], {}), "('/api/v1/workspaces/<:.*>', EnvironmentsApiHandler)\n", (10485, 10537), False, 'import webapp2\n'), ((10548, 10601), 'webapp2.Route', 'webapp2.Route', (['"""/api/v1/groups"""', 'UserGroupsApiHandler'], {}), "('/api/v1/groups', UserGroupsApiHandler)\n", (10561, 10601), False, 'import webapp2\n'), ((10611, 10670), 'webapp2.Route', 'webapp2.Route', 
(['"""/api/v1/groups/<:.*>"""', 'UserGroupsApiHandler'], {}), "('/api/v1/groups/<:.*>', UserGroupsApiHandler)\n", (10624, 10670), False, 'import webapp2\n'), ((10681, 10749), 'webapp2.Route', 'webapp2.Route', (['"""/api/v1/classification"""', 'ClassificationUploadHandler'], {}), "('/api/v1/classification', ClassificationUploadHandler)\n", (10694, 10749), False, 'import webapp2\n'), ((10759, 10805), 'webapp2.Route', 'webapp2.Route', (['"""/api/v1/KML"""', 'APIKMLDownloader'], {}), "('/api/v1/KML', APIKMLDownloader)\n", (10772, 10805), False, 'import webapp2\n'), ((10815, 10860), 'webapp2.Route', 'webapp2.Route', (['"""/api/v1/data"""', 'DataApiHandler'], {}), "('/api/v1/data', DataApiHandler)\n", (10828, 10860), False, 'import webapp2\n'), ((10870, 10934), 'webapp2.Route', 'webapp2.Route', (['"""/api/v1/data/<:.*>/update"""', 'DataApiUpdateHandler'], {}), "('/api/v1/data/<:.*>/update', DataApiUpdateHandler)\n", (10883, 10934), False, 'import webapp2\n'), ((10945, 11011), 'webapp2.Route', 'webapp2.Route', (['"""/api/v1/data/<:.*>/publish"""', 'DataApiPublishHandler'], {}), "('/api/v1/data/<:.*>/publish', DataApiPublishHandler)\n", (10958, 11011), False, 'import webapp2\n'), ((11022, 11080), 'webapp2.Route', 'webapp2.Route', (['"""/api/v1/data/<:.*>"""', 'DataApiDetailsHandler'], {}), "('/api/v1/data/<:.*>', DataApiDetailsHandler)\n", (11035, 11080), False, 'import webapp2\n'), ((11092, 11134), 'webapp2.Route', 'webapp2.Route', (['"""/api/v1/logs"""', 'LogsHandler'], {}), "('/api/v1/logs', LogsHandler)\n", (11105, 11134), False, 'import webapp2\n'), ((11146, 11183), 'webapp2.Route', 'webapp2.Route', (['"""/<:.*>"""', 'ErrorHandler'], {}), "('/<:.*>', ErrorHandler)\n", (11159, 11183), False, 'import webapp2\n')] |
from railrl.data_management.simple_replay_pool import SimpleReplayPool
from railrl.predictors.dynamics_model import FullyConnectedEncoder, InverseModel, ForwardModel
import tensorflow as tf
import time
import numpy as np
from sandbox.rocky.tf.optimizers.penalty_lbfgs_optimizer import PenaltyLbfgsOptimizer
from railrl.misc.pyhelper_fns.vis_utils import MyAnimationMulti
def planner_info(arm_loss, box_loss, forward_models_outputs):
	"""Bundle planner diagnostics into a single info dict."""
	info = dict()
	info['arm_loss'] = arm_loss
	info['box_loss'] = box_loss
	info['forward_models_outputs'] = forward_models_outputs
	return info
def gather_cols(params, indices, name=None):
	"""Gather columns of a 2D tensor.

	Args:
		params: A 2D tensor.
		indices: A 1D tensor. Must be one of the following types: ``int32``, ``int64``.
		name: A name for the operation (optional).

	Returns:
		A 2D Tensor. Has the same type as ``params``.
	"""
	# NOTE(review): tf.op_scope is the TF<=1.x spelling (newer TF uses
	# tf.name_scope(name, "gather_cols", [params, indices])); kept as-is
	# to match the TF version this project targets.
	with tf.op_scope([params, indices], name, "gather_cols") as scope:
		# Validate ranks up front so errors point at the caller's mistake.
		params = tf.convert_to_tensor(params, name="params")
		indices = tf.convert_to_tensor(indices, name="indices")
		try:
			params.get_shape().assert_has_rank(2)
		except ValueError:
			raise ValueError('\'params\' must be 2D.')
		try:
			indices.get_shape().assert_has_rank(1)
		except ValueError:
			# Bug fix: this branch previously raised "'params' must be 1D.",
			# misreporting which argument failed validation.
			raise ValueError('\'indices\' must be 1D.')

		# Flatten params and address element (row, col) as row * n_cols + col,
		# so one tf.gather pulls the selected columns from every row.
		p_shape = tf.shape(params)
		p_flat = tf.reshape(params, [-1])
		i_flat = tf.reshape(tf.reshape(tf.range(0, p_shape[0]) * p_shape[1],
									 [-1, 1]) + indices, [-1])
		return tf.reshape(tf.gather(p_flat, i_flat),
						  [p_shape[0], -1])
"""
Planner takes two states (S_init and S_goal) and output an action.
Fine Tune is out of the scope of Planner
"""
class Planner(object):
	"""Base class for planners mapping (S_init, S_goal) -> action.

	Fine-tuning of the underlying models is out of scope for a Planner;
	subclasses override ``get_action``.
	"""

	def __init__(self, dynamic_model, encoder, sess):
		# Models are assumed to be initialized by the caller.
		self.encoder = encoder
		self.dynamic_model = dynamic_model
		self.sess = sess

	def get_action(self, S_init, S_goal):
		"""Return an action driving S_init toward S_goal.

		Base implementation returns None. Bug fix: the original signature
		omitted ``self``, so calling the method on an instance raised
		TypeError instead of returning None.
		"""
		return None
"""
Inverde_model planner should be easy, just return the action
"""
class InverseModelPlanner(object):
	"""Plan with an inverse model: a single feed-forward pass predicts the
	action that moves the agent from S_init to S_goal (no search)."""

	def __init__(self, dynamic_model, env, encoder, sess=None):
		self.sess = sess if sess is not None else tf.get_default_session()

		# Placeholders for the current and the desired observation.
		obs_shape = list(env.observation_space.shape)
		self.S_init_ph = tf.placeholder(tf.float32, obs_shape)
		self.S_goal_ph = tf.placeholder(tf.float32, obs_shape)

		# Weight-tied encoder copies embed both observations.
		init_features = encoder.get_weight_tied_copy(
			observation_input=self.S_init_ph)
		goal_features = encoder.get_weight_tied_copy(
			observation_input=self.S_goal_ph)

		# Inverse model maps (phi(S_init), phi(S_goal)) -> action.
		self.inverse_model = dynamic_model.get_weight_tied_copy(
			feature_input1=init_features.output,
			feature_input2=goal_features.output)

	def get_action(self, S_init, S_goal):
		"""Run the inverse model and return its predicted action."""
		feeds = {self.S_init_ph: S_init, self.S_goal_ph: S_goal}
		return self.sess.run(self.inverse_model.output, feed_dict=feeds)
"""
ForwardModel planner, optimize action according to this objective:
min_{a} (S_next - S_goal)^2
"""
class CEMPlanner_arm_coord():
	def __init__(
			self,
			dynamic_model,
			encoder,
			env,
			sess = None,
			max_length = 15,
			sample_batch_size = 2000,
			top_k = 200,
			action_penalty=False,
			accumulated_loss = False):
		"""Build the CEM planning graph.

		Unrolls ``dynamic_model`` for ``max_length`` steps from the encoded
		initial observation and attaches, per step, an objective on the
		squared distance between predicted and goal box positions.

		Args:
			dynamic_model: forward model exposing get_weight_tied_copy.
			encoder: observation encoder exposing get_weight_tied_copy.
			env: environment; stored on the instance.
			sess: TF session; defaults to the current default session.
			max_length: number of unrolled forward-model steps.
			sample_batch_size: CEM population size per iteration.
			top_k: number of elite samples kept per CEM iteration.
			action_penalty: if True, add 0.5 * sum(a^2) to each objective.
			accumulated_loss: if True, sum per-step losses over time
				instead of using only each step's own loss.
		"""
		self.sample_batch_size = sample_batch_size
		self.top_k = top_k
		self.env = env
		if sess == None:
			sess =tf.get_default_session()
		self.sess = sess
		self.max_length = max_length
		# Action sequence placeholder: [time, batch, action_dim=4].
		self.action_ph = tf.placeholder(tf.float32, [max_length, None, 4])
		self.forward_model_list = []
		# Build the forward model unrolled recurrently to max_length steps.
		# Observations are 24-D vectors (see placeholder shapes below).
		self.S_init_ph = tf.placeholder(tf.float32, [None, 24])
		self.S_goal_ph = tf.placeholder(tf.float32, [None, 24])
		#only two feature encoders
		self.encoder1 = encoder.get_weight_tied_copy(observation_input=self.S_init_ph)
		self.encoder2 = encoder.get_weight_tied_copy(observation_input=self.S_goal_ph)
		# Step 1 consumes the encoded initial observation ...
		forward_model = dynamic_model.get_weight_tied_copy(feature_input=self.encoder1.output,
									action_input=self.action_ph[0])
		self.forward_model_list.append(forward_model)
		self.forward_model_output_list = [forward_model.output] #for debug purpose only
		# ... and steps 2..max_length each consume the previous prediction.
		for i in range(1,max_length):
			forward_model = dynamic_model.get_weight_tied_copy(feature_input = forward_model.output,\
								action_input = self.action_ph[i])
			self.forward_model_list.append(forward_model)
			self.forward_model_output_list.append(forward_model.output)
		## objective
		# Box position in global coordinates: columns 4,5 hold the arm-to-box
		# offset (scaled by 10) and columns 21,22 the reference position --
		# presumably the arm position; TODO confirm against the env's
		# observation layout.
		def transfer_box_global_tf(obs):
			arm2box = gather_cols(obs, [4,5])/10.0
			return gather_cols(obs, [21,22]) + arm2box
		self.objective_list = []
		self.arm_loss_list = []
		self.box_loss_list = []
		self.objective_topk_index_list = []
		current_objective = 0
		# One objective per unrolled step; element k scores an action
		# sequence truncated after k+1 steps.
		for forward_model in self.forward_model_list:
			if accumulated_loss:
				current_objective += tf.reduce_sum(tf.square(transfer_box_global_tf(forward_model.output)-\
						transfer_box_global_tf(self.encoder2.output)), axis = 1)
			else:
				current_objective = tf.reduce_sum(tf.square(transfer_box_global_tf(forward_model.output)-\
						transfer_box_global_tf(self.encoder2.output)), axis = 1)
			self.objective_list.append(current_objective)
			# Diagnostics: first 4 feature dims vs. goal (arm), and the box
			# distance scaled by 100; batch element 0 only.
			self.arm_loss_list.append(tf.reduce_sum(tf.square(forward_model.output[0][:4] - self.encoder2.output[0][:4])))
			self.box_loss_list.append(tf.reduce_sum(tf.square(transfer_box_global_tf(forward_model.output)-\
					transfer_box_global_tf(self.encoder2.output)))*100)
		if action_penalty:
			# Penalize squared action magnitude summed over time and action dims.
			for i in range(len(self.objective_list)):
				self.objective_list[i] += tf.reduce_sum(tf.square(self.action_ph),axis = [0,2])*0.5
def get_action(self, S_init, S_goal, steps = 1, plot_loss = False, debug = False, stop_variance = 0.2, stop_itr = 3, init_batch_size = 50000):
assert(steps <= self.max_length)
#fit a multivariable Gaussian
mean_list = None
cov_matrix = None
batch_S_init = np.dot(np.ones([init_batch_size, 1]), S_init.reshape(1,-1))
batch_S_goal = np.dot(np.ones([init_batch_size, 1]), S_goal.reshape(1,-1))
#CEM
actions = np.random.rand(self.max_length, init_batch_size, 4)*2 - 1
objective_list = self.sess.run(self.objective_list[steps-1], feed_dict = {self.action_ph:actions, \
self.S_init_ph:batch_S_init, self.S_goal_ph:batch_S_goal})
sorted_index = np.argsort(objective_list)[:self.top_k]
# debug
# action_pen, objective_debug = self.sess.run([tf.reduce_sum(tf.square(self.action_ph),axis = [0,2])*0.3, self.objective_list[14]], feed_dict = {self.action_ph:actions, \
# self.S_init_ph:batch_S_init, self.S_goal_ph:batch_S_goal})
# import pdb; pdb.set_trace()
best_actions = actions[:,sorted_index, :]
trans_best_actions = np.moveaxis(best_actions, 0, 1).reshape(self.top_k, -1)
cov_matrix = np.cov(trans_best_actions.T)
mean_list = np.mean(trans_best_actions.T, axis = 1)
batch_S_init = np.dot(np.ones([self.sample_batch_size, 1]), S_init.reshape(1,-1))
batch_S_goal = np.dot(np.ones([self.sample_batch_size, 1]), S_goal.reshape(1,-1))
for i in range(stop_itr-1):
actions = np.random.multivariate_normal(mean_list, cov_matrix, self.sample_batch_size).reshape(self.sample_batch_size, self.max_length, 4)
actions = np.moveaxis(actions, 0,1)
objective_list = self.sess.run(self.objective_list[steps-1], feed_dict = {self.action_ph:actions, \
self.S_init_ph:batch_S_init, self.S_goal_ph:batch_S_goal})
sorted_index = np.argsort(objective_list)[:self.top_k]
best_actions = actions[:,sorted_index, :]
trans_best_actions = np.moveaxis(best_actions, 0, 1).reshape(self.top_k, -1)
cov_matrix = np.cov(trans_best_actions.T)
mean_list = np.mean(trans_best_actions.T, axis = 1)
# import pdb; pdb.set_trace()
#if debug, visualize all forward model's output
best_action = best_actions[:,0,:]
arm_loss, box_loss,forward_models_outputs, final_objective = self.sess.run([self.arm_loss_list[0], self.box_loss_list[0], \
self.forward_model_output_list, self.objective_list[steps-1]], \
{self.action_ph: best_action.reshape(15,1,4), \
self.S_init_ph:[S_init], self.S_goal_ph:[S_goal]})
print("final objective")
print(final_objective)
# import pdb; pdb.set_trace()
return best_actions[0,0], {'arm_loss':arm_loss, 'box_loss':box_loss, 'forward_models_outputs':forward_models_outputs[:steps]}
class CEMPlanner():
	"""Cross-entropy-method planner over a learned forward model.

	Builds a weight-tied rollout of `dynamic_model` for `max_length` steps
	starting from the encoded initial observation, and one objective per
	horizon length measuring distance to the encoded goal observation.
	`get_action` then optimizes the action sequence with CEM.
	"""

	def __init__(
			self,
			dynamic_model,
			encoder,
			env,
			sess=None,
			pos_only=True,
			max_length=15,
			sample_batch_size=2000,
			top_k=200,
			action_penalty=False,
			accumulated_loss=False):
		"""Construct the rollout graph and per-horizon objectives.

		Args:
			dynamic_model: model exposing get_weight_tied_copy(feature_input,
				action_input) -> object with .output.
			encoder: model exposing get_weight_tied_copy(observation_input).
			env: environment providing observation_space.shape.
			sess: TF session; defaults to the current default session.
			pos_only: if True, score only observation dims 4:7 (box position);
				otherwise score the full first-row state difference.
			max_length: maximum planning horizon (rollout depth).
			sample_batch_size: CEM refit sample count.
			top_k: number of elite sequences kept per CEM iteration.
			action_penalty: add 0.5 * sum-of-squares action penalty.
			accumulated_loss: if True, objectives accumulate over the rollout
				instead of using only the final-step loss.
		"""
		self.sample_batch_size = sample_batch_size
		self.top_k = top_k
		self.env = env
		if sess is None:  # PEP 8: compare to None with `is`
			sess = tf.get_default_session()
		self.sess = sess
		self.max_length = max_length
		# Whole action sequence placeholder: (T, batch, action_dim=4).
		self.action_ph = tf.placeholder(tf.float32, [max_length, None, 4])
		self.forward_model_list = []
		# Build the recurrent model w.r.t. the max length.
		self.S_init_ph = tf.placeholder(tf.float32, [None] + list(env.observation_space.shape))
		self.S_goal_ph = tf.placeholder(tf.float32, [None] + list(env.observation_space.shape))
		# Only two feature encoders (weight tied).
		self.encoder1 = encoder.get_weight_tied_copy(observation_input=self.S_init_ph)
		self.encoder2 = encoder.get_weight_tied_copy(observation_input=self.S_goal_ph)
		forward_model = dynamic_model.get_weight_tied_copy(
			feature_input=self.encoder1.output, action_input=self.action_ph[0])
		self.forward_model_list.append(forward_model)
		self.forward_model_output_list = [forward_model.output]  # for debug purposes only
		for i in range(1, max_length):
			forward_model = dynamic_model.get_weight_tied_copy(
				feature_input=forward_model.output, action_input=self.action_ph[i])
			self.forward_model_list.append(forward_model)
			self.forward_model_output_list.append(forward_model.output)
		# One objective per horizon length.
		self.objective_list = []
		self.arm_loss_list = []
		self.box_loss_list = []
		self.objective_topk_index_list = []
		current_objective = 0
		if pos_only:
			for forward_model in self.forward_model_list:
				if accumulated_loss:
					current_objective += tf.reduce_sum(
						tf.square(gather_cols(forward_model.output, [4, 5, 6])
							- gather_cols(self.encoder2.output, [4, 5, 6])), axis=1)
				else:
					current_objective = tf.reduce_sum(
						tf.square(gather_cols(forward_model.output, list(range(4, 7)))
							- gather_cols(self.encoder2.output, list(range(4, 7)))), axis=1)
				self.objective_list.append(current_objective)
				self.arm_loss_list.append(tf.reduce_sum(
					tf.square(forward_model.output[0][:4] - self.encoder2.output[0][:4])))
				self.box_loss_list.append(tf.reduce_sum(
					tf.square(forward_model.output[0][4:6] - self.encoder2.output[0][4:6])))
		else:
			for forward_model in self.forward_model_list:
				self.objective_list.append(tf.reduce_sum(
					tf.square(forward_model.output[0] - self.encoder2.output[0])))
				self.arm_loss_list.append(tf.reduce_sum(
					tf.square(forward_model.output[0][:4] - self.encoder2.output[0][:4])))
				self.box_loss_list.append(tf.reduce_sum(
					tf.square(forward_model.output[0][4:6] - self.encoder2.output[0][4:6])))
		if action_penalty:
			for i in range(len(self.objective_list)):
				self.objective_list[i] += tf.reduce_sum(tf.square(self.action_ph), axis=[0, 2]) * 0.5

	def get_action(self, S_init, S_goal, steps=1, plot_loss=False, debug=False, stop_variance=0.2, stop_itr=3, init_batch_size=50000):
		"""Run CEM and return (first_action, diagnostics dict).

		See class docstring; `steps` selects the planning horizon and must be
		<= self.max_length. `plot_loss`, `debug`, `stop_variance` are unused
		and kept for interface compatibility.
		"""
		assert steps <= self.max_length
		# Round 1: uniform action samples in [-1, 1].
		batch_S_init = np.dot(np.ones([init_batch_size, 1]), S_init.reshape(1, -1))
		batch_S_goal = np.dot(np.ones([init_batch_size, 1]), S_goal.reshape(1, -1))
		actions = np.random.rand(self.max_length, init_batch_size, 4) * 2 - 1
		objective_list = self.sess.run(
			self.objective_list[steps - 1],
			feed_dict={self.action_ph: actions,
					self.S_init_ph: batch_S_init,
					self.S_goal_ph: batch_S_goal})
		sorted_index = np.argsort(objective_list)[:self.top_k]
		best_actions = actions[:, sorted_index, :]
		# Fit a multivariate Gaussian over the flattened elites.
		trans_best_actions = np.moveaxis(best_actions, 0, 1).reshape(self.top_k, -1)
		cov_matrix = np.cov(trans_best_actions.T)
		mean_list = np.mean(trans_best_actions.T, axis=1)
		batch_S_init = np.dot(np.ones([self.sample_batch_size, 1]), S_init.reshape(1, -1))
		batch_S_goal = np.dot(np.ones([self.sample_batch_size, 1]), S_goal.reshape(1, -1))
		for i in range(stop_itr - 1):
			actions = np.random.multivariate_normal(
				mean_list, cov_matrix, self.sample_batch_size).reshape(
				self.sample_batch_size, self.max_length, 4)
			actions = np.moveaxis(actions, 0, 1)
			objective_list = self.sess.run(
				self.objective_list[steps - 1],
				feed_dict={self.action_ph: actions,
						self.S_init_ph: batch_S_init,
						self.S_goal_ph: batch_S_goal})
			sorted_index = np.argsort(objective_list)[:self.top_k]
			best_actions = actions[:, sorted_index, :]
			trans_best_actions = np.moveaxis(best_actions, 0, 1).reshape(self.top_k, -1)
			cov_matrix = np.cov(trans_best_actions.T)
			mean_list = np.mean(trans_best_actions.T, axis=1)
		# Diagnostics for the single best sequence.
		best_action = best_actions[:, 0, :]
		# BUG FIX: reshape used a hard-coded 15; use self.max_length so a
		# planner built with a different horizon still works.
		arm_loss, box_loss, forward_models_outputs, final_objective = self.sess.run(
			[self.arm_loss_list[0], self.box_loss_list[0],
			self.forward_model_output_list, self.objective_list[steps - 1]],
			{self.action_ph: best_action.reshape(self.max_length, 1, 4),
			self.S_init_ph: [S_init], self.S_goal_ph: [S_goal]})
		print("final objective")
		print(final_objective)
		arm_obj = np.sum(np.square(forward_models_outputs[steps - 1][0][:4] - S_goal[:4]))
		box_obj = np.sum(np.square(forward_models_outputs[steps - 1][0][4:7] - S_goal[4:7]))
		print('arm objective is {}, box objective is {}'.format(arm_obj, box_obj))
		return best_actions[0, 0], {'arm_loss': arm_loss, 'box_loss': box_loss,
			'forward_models_outputs': forward_models_outputs[:steps]}
class FastClippedSgdShootingForwardModelPlanner_cumulated_obj(object):
	"""Shooting planner optimizing a discount-accumulated objective by SGD.

	Unrolls the forward model for `max_length` steps (batch size 1) and
	builds, per horizon, a geometrically discounted (factor 0.4 per step)
	sum of squared distances to the encoded goal. Actions are optimized by
	clipped gradient descent in `get_action`.
	"""

	def __init__(
			self,
			dynamic_model,
			encoder,
			env,
			init_lr=0.5,
			sess=None,
			pos_only=False,
			):
		"""Build the rollout graph, objectives and their action gradients.

		Args:
			dynamic_model / encoder: provide get_weight_tied_copy(...).
			env: environment providing observation_space.shape.
			init_lr: gradient-descent step size.
			sess: TF session; defaults to the current default session.
			pos_only: if True, score only state dims :6 and also build the
				arm/box diagnostic losses. NOTE(review): with pos_only=False
				the arm/box loss lists stay empty, so get_action's final run
				would fail — confirm intended usage is pos_only=True.
		"""
		if sess is None:  # PEP 8: compare to None with `is`
			sess = tf.get_default_session()
		self.sess = sess
		self.init_lr = init_lr
		self.max_length = max_length = max_length if False else max_length  # placeholder removed below
class FastClippedSgdShootingForwardModelPlanner(object):
	"""Shooting planner optimizing the per-horizon objective by clipped SGD.

	Unrolls the forward model for `max_length` steps (batch size 1); each
	horizon's objective is the squared distance between the predicted state
	and the encoded goal. `get_action` runs gradient descent on the action
	sequence with gradient-norm clipping and a decaying step size.
	"""

	def __init__(
			self,
			dynamic_model,
			encoder,
			env,
			init_lr=0.5,
			sess=None,
			pos_only=False,
			):
		"""Build the rollout graph, objectives and their action gradients.

		Args:
			dynamic_model / encoder: provide get_weight_tied_copy(...).
			env: environment providing observation_space.shape.
			init_lr: initial gradient-descent step size.
			sess: TF session; defaults to the current default session.
			pos_only: if True, score only state dims :6 and build arm/box
				diagnostic losses. NOTE(review): with pos_only=False the
				arm/box loss lists stay empty, so get_action's use of
				box_loss_list[0] would fail — confirm pos_only=True is the
				supported mode.
		"""
		self.env = env
		if sess is None:  # PEP 8: compare to None with `is`
			sess = tf.get_default_session()
		self.sess = sess
		self.init_lr = init_lr
		self.max_length = max_length
		self.action_ph = tf.placeholder(tf.float32, [max_length, 1, 4])
		self.forward_model_list = []
		# Build the recurrent model w.r.t. the max length.
		self.S_init_ph = tf.placeholder(tf.float32, list(env.observation_space.shape))
		self.S_goal_ph = tf.placeholder(tf.float32, list(env.observation_space.shape))
		# Only two feature encoders (weight tied).
		self.encoder1 = encoder.get_weight_tied_copy(observation_input=[self.S_init_ph])
		self.encoder2 = encoder.get_weight_tied_copy(observation_input=[self.S_goal_ph])
		forward_model = dynamic_model.get_weight_tied_copy(
			feature_input=self.encoder1.output, action_input=self.action_ph[0])
		self.forward_model_list.append(forward_model)
		self.forward_model_output_list = [forward_model.output]
		for i in range(1, max_length):
			forward_model = dynamic_model.get_weight_tied_copy(
				feature_input=forward_model.output, action_input=self.action_ph[i])
			self.forward_model_list.append(forward_model)
			self.forward_model_output_list.append(forward_model.output)
		# One objective per horizon length.
		self.objective_list = []
		self.arm_loss_list = []
		self.box_loss_list = []
		if pos_only:
			for forward_model in self.forward_model_list:
				self.objective_list.append(tf.reduce_sum(
					tf.square(forward_model.output[0][:6] - self.encoder2.output[0][:6])))
				self.arm_loss_list.append(tf.reduce_sum(
					tf.square(forward_model.output[0][:4] - self.encoder2.output[0][:4])))
				self.box_loss_list.append(tf.reduce_sum(
					tf.square(forward_model.output[0][4:6] - self.encoder2.output[0][4:6])))
		else:
			for forward_model in self.forward_model_list:
				self.objective_list.append(tf.reduce_sum(
					tf.square(forward_model.output[0] - self.encoder2.output[0])))
		self.action_grad_list = []
		for obj in self.objective_list:
			# Tail terms of action_ph beyond the horizon receive 0 gradient.
			self.action_grad_list.append(tf.gradients(obj, self.action_ph))
		self.vis_tool = MyAnimationMulti(None, numPlots=2, isIm=[0, 0],
			axTitles=['(S1-S_goal)^2', '(S_n-S_goal)^2'])

	def get_action(self, S_init, S_goal, steps=None, plot_loss=False):
		"""Optimize the action sequence by 51 clipped-SGD steps.

		Args:
			S_init / S_goal: current and goal observations.
			steps: planning horizon (defaults to 1, i.e. greedy).
			plot_loss: if True, live-plot first/last-step box losses.

		Returns:
			(first_action, planner_info(arm_loss, box_loss, rollout[:steps])).
		"""
		if steps is None:  # PEP 8: compare to None with `is`
			steps = 1  # greedy planner
		else:
			assert steps <= self.max_length
		action = np.zeros([self.max_length, 1, 4])
		action_grad = self.action_grad_list[steps - 1]
		# TODO: Find a good stop criteria
		S1_loss_list = []
		Sn_loss_list = []
		for i in range(0, 51):
			feed_dict = {self.S_init_ph: S_init, self.S_goal_ph: S_goal,
				self.action_ph: action}
			S1_loss, Sn_loss = self.sess.run(
				[self.box_loss_list[0], self.box_loss_list[steps - 1]],
				feed_dict=feed_dict)
			S1_loss_list.append(S1_loss)
			Sn_loss_list.append(Sn_loss)
			if plot_loss and i % 1 == 0:
				self.vis_tool._display([[range(i + 1), S1_loss_list],
					[range(i + 1), Sn_loss_list]])
			gradient = np.array(self.sess.run(action_grad, feed_dict=feed_dict)[0])
			if np.isnan(gradient).any():
				# NaN gradient: restart from a random action sequence and halt
				# for interactive inspection (deliberate debug behavior).
				action = np.random.rand(self.max_length, 1, 4) - 0.5
				print('nan gradient step{}'.format(i))
				import pdb; pdb.set_trace()
			else:
				# Clip gradient norm to 4 * steps, then take a decaying step.
				if np.linalg.norm(gradient) > steps * 4:
					gradient = gradient / np.linalg.norm(gradient) * 4 * steps
				action -= gradient / (1. + i * 0.05) * self.init_lr
				action = np.clip(action, -1, 1)
		arm_loss, box_loss, forward_models_outputs = \
			self.sess.run([self.arm_loss_list[0], self.box_loss_list[0],
				self.forward_model_output_list], feed_dict)
		return action[0][0], planner_info(arm_loss, box_loss,
			forward_models_outputs[:steps])
class FastClippedSgdForwardModelPlanner(object):
	"""One-step planner: optimize a single action by plain gradient descent.

	Builds a single forward-model step (via a placeholder action, so no TF
	optimizer variables are needed) and descends the action directly in
	NumPy using tf.gradients, with a 1/(1+0.2*i) decaying step size.
	"""

	def __init__(
			self,
			dynamic_model,
			encoder,
			env,
			action_initializer=None,
			init_lr=1,
			sess=None,
			pos_only=False,
			):
		"""Build the one-step graph and the action gradient.

		Args:
			dynamic_model / encoder: provide get_weight_tied_copy(...).
			env: environment providing observation_space.shape.
			action_initializer: unused, kept for interface compatibility.
			init_lr: unused here (step size is hard-coded in get_action),
				kept for interface compatibility.
			sess: TF session; defaults to the current default session.
			pos_only: if True, score only state dims :6.
		"""
		if sess is None:  # PEP 8: compare to None with `is`
			sess = tf.get_default_session()
		self.sess = sess
		# Action enters as a placeholder, not a variable, so no optimizer
		# state has to be created or re-initialized between calls.
		self.action_ph = tf.placeholder(tf.float32, [None, 4])
		self.S_init_ph = tf.placeholder(tf.float32, list(env.observation_space.shape))
		self.S_goal_ph = tf.placeholder(tf.float32, list(env.observation_space.shape))
		self.encoder1 = encoder.get_weight_tied_copy(observation_input=[self.S_init_ph])
		self.encoder2 = encoder.get_weight_tied_copy(observation_input=[self.S_goal_ph])
		self.forward_model = dynamic_model.get_weight_tied_copy(
			feature_input=self.encoder1.output, action_input=self.action_ph)
		# Objective: squared distance between prediction and encoded goal.
		if pos_only:
			self.objective = tf.reduce_sum(
				tf.square(self.forward_model.output[0][:6] - self.encoder2.output[0][:6]))
		else:
			self.objective = tf.reduce_sum(
				tf.square(self.forward_model.output - self.encoder2.output))
		self.arm_loss = tf.reduce_sum(
			tf.square(self.forward_model.output[0][:4] - self.encoder2.output[0][:4]))
		self.box_loss = tf.reduce_sum(
			tf.square(self.forward_model.output[0][4:6] - self.encoder2.output[0][4:6]))
		self.action_grad = tf.gradients(self.objective, self.action_ph)

	def get_action(self, S_init, S_goal):
		"""Gradient-descend a random initial action for 151 steps.

		Returns:
			(action, [arm_loss, box_loss]) evaluated at the final action.
		"""
		action = np.random.rand(4) - 0.5
		# TODO: Find a good stop criteria
		now = time.time()
		for i in range(0, 151):
			feed_dict = {self.S_init_ph: S_init, self.S_goal_ph: S_goal,
				self.action_ph: [action]}
			gradient = self.sess.run([self.action_grad], feed_dict=feed_dict)[0][0][0]
			# Decaying step size, then project back into the valid box.
			action -= gradient / (1. + i * 0.2) * 0.5
			action = np.clip(action, -1, 1)
			if i % 50 == 0:
				print("#########Optimizing action#########")
				action_loss = self.sess.run(self.objective, feed_dict=feed_dict)
				print("action_loss(sum_square_error(S_goal, S_next)) is {}".format(action_loss))
				print("current_action is {}".format(action))
				print("{} sec elapsed for 50 gradient steps".format(time.time() - now))
				now = time.time()
		return action, self.sess.run([self.arm_loss, self.box_loss], feed_dict=feed_dict)
class SgdForwardModelPlanner(object):
	"""One-step planner optimizing a TF action variable with Adam.

	The action is a TF variable under the 'action_optimizer' scope; the
	optimizer state is re-initialized at every get_action call.
	"""

	def __init__(
			self,
			dynamic_model,
			encoder,
			env,
			action_initializer=None,
			init_lr=1e-1,
			sess=None,
			pos_only=False,
			):
		"""Rebuild the one-step model around a trainable action variable.

		Args:
			dynamic_model / encoder: provide get_weight_tied_copy(...).
			env: environment providing action/observation space shapes.
			action_initializer: initializer for the action variable; random
				uniform in [-0.1, 0.1] by default.
			init_lr: Adam learning rate.
			sess: TF session; defaults to the current default session.
			pos_only: if True, score only state dims :6.
		"""
		if sess is None:  # PEP 8: compare to None with `is`
			sess = tf.get_default_session()
		self.sess = sess
		# Re-construct the model around the trainable action.
		if action_initializer is None:
			action_initializer = tf.random_uniform_initializer(minval=-0.1, maxval=0.1)
		with tf.variable_scope('action_optimizer'):
			self.action = tf.get_variable('planner_action',
				[1] + list(env.action_space.shape), initializer=action_initializer)
		self.clipped_action = tf.clip_by_value(self.action, -1, 1)
		self.S_init_ph = tf.placeholder(tf.float32, list(env.observation_space.shape))
		self.S_goal_ph = tf.placeholder(tf.float32, list(env.observation_space.shape))
		self.encoder1 = encoder.get_weight_tied_copy(observation_input=[self.S_init_ph])
		self.encoder2 = encoder.get_weight_tied_copy(observation_input=[self.S_goal_ph])
		self.forward_model = dynamic_model.get_weight_tied_copy(
			feature_input=self.encoder1.output, action_input=self.action)
		# Objective: squared distance between prediction and encoded goal.
		if pos_only:
			self.objective = tf.reduce_sum(
				tf.square(self.forward_model.output[0][:6] - self.encoder2.output[0][:6]))
		else:
			self.objective = tf.reduce_sum(
				tf.square(self.forward_model.output - self.encoder2.output))
		# Adam has its own variables; keep them under the same name scope so
		# they can be re-initialized per call.
		with tf.variable_scope('action_optimizer'):
			# NOTE(review): minimizing w.r.t. clipped_action (a tf.clip_by_value
			# tensor, not a variable) is known to raise NotImplementedError —
			# see ClippedSgdForwardModelPlanner for the working variant.
			self.action_opt = tf.train.AdamOptimizer(init_lr).minimize(
				self.objective, var_list=[self.clipped_action])

	def get_action(self, S_init, S_goal):
		"""Re-initialize optimizer state, run 150 Adam steps, return result.

		Returns:
			[action, objective] evaluated at the final action.
		"""
		# First re-initialize every variable under "action_optimizer".
		variables = tf.get_collection(tf.GraphKeys.VARIABLES, scope='action_optimizer')
		self.sess.run(tf.initialize_variables(variables))
		feed_dict = {self.S_init_ph: S_init, self.S_goal_ph: S_goal}
		# TODO: Find a good stop criteria
		now = time.time()
		for i in range(0, 150):
			gradient = self.sess.run([self.action_opt], feed_dict=feed_dict)
			if i % 50 == 0:
				print("#########Optimizing action#########")
				action_loss = self.sess.run(self.objective, feed_dict=feed_dict)
				print("action_loss(sum_square_error(S_goal, S_next)) is {}".format(action_loss))
				print("current_action is {}".format(self.sess.run(self.action)))
				print("{} sec elapsed for 50 gradient steps".format(time.time() - now))
				now = time.time()
		return self.sess.run([self.action, self.objective], feed_dict=feed_dict)

	# Debug API.
	def predict_next_state(self, current_state, action, goal_state):
		"""Evaluate the forward model for an arbitrary action without
		disturbing the stored planner action (it is restored afterwards)."""
		feed_dict = {self.S_init_ph: current_state, self.S_goal_ph: goal_state}
		old_action = self.sess.run(self.action)
		# Temporarily assign the requested action.
		self.sess.run(self.action.assign([action]))
		next_state, S_init, S_goal, loss = self.sess.run(
			[self.forward_model.output,
			self.encoder1.output,
			self.encoder2.output,
			self.objective], feed_dict=feed_dict)
		# Restore the previous action.
		self.sess.run(self.action.assign(old_action))
		return next_state, S_init, S_goal, loss
class ClippedSgdForwardModelPlanner(object):
	"""One-step planner optimizing a TF action variable with Adam.

	Unlike SgdForwardModelPlanner, Adam minimizes w.r.t. the action
	variable itself (valid); `clipped_action` is kept for reporting.
	"""

	def __init__(
			self,
			dynamic_model,
			encoder,
			env,
			action_initializer=None,
			init_lr=1e-1,
			sess=None,
			pos_only=False,
			):
		"""Rebuild the one-step model around a trainable action variable.

		Args:
			dynamic_model / encoder: provide get_weight_tied_copy(...).
			env: environment providing action/observation space shapes.
			action_initializer: initializer for the action variable; random
				uniform in [-0.1, 0.1] by default.
			init_lr: Adam learning rate.
			sess: TF session; defaults to the current default session.
			pos_only: if True, score only state dims :6.
		"""
		if sess is None:  # PEP 8: compare to None with `is`
			sess = tf.get_default_session()
		self.sess = sess
		# Re-construct the model around the trainable action.
		if action_initializer is None:
			action_initializer = tf.random_uniform_initializer(minval=-0.1, maxval=0.1)
		with tf.variable_scope('action_optimizer'):
			self.action = tf.get_variable('planner_action',
				[1] + list(env.action_space.shape), initializer=action_initializer)
		self.clipped_action = tf.clip_by_value(self.action, -1, 1)
		self.S_init_ph = tf.placeholder(tf.float32, list(env.observation_space.shape))
		self.S_goal_ph = tf.placeholder(tf.float32, list(env.observation_space.shape))
		self.encoder1 = encoder.get_weight_tied_copy(observation_input=[self.S_init_ph])
		self.encoder2 = encoder.get_weight_tied_copy(observation_input=[self.S_goal_ph])
		self.forward_model = dynamic_model.get_weight_tied_copy(
			feature_input=self.encoder1.output, action_input=self.action)
		# Objective: squared distance between prediction and encoded goal.
		if pos_only:
			self.objective = tf.reduce_sum(
				tf.square(self.forward_model.output[0][:6] - self.encoder2.output[0][:6]))
		else:
			self.objective = tf.reduce_sum(
				tf.square(self.forward_model.output - self.encoder2.output))
		# Adam has its own variables; keep them under the same name scope so
		# they can be re-initialized per call.
		with tf.variable_scope('action_optimizer'):
			self.action_opt = tf.train.AdamOptimizer(init_lr).minimize(
				self.objective, var_list=[self.action])
			self.action_gradient = tf.train.AdamOptimizer(init_lr).compute_gradients(
				self.objective, var_list=[self.action])

	def get_action(self, S_init, S_goal):
		"""Re-initialize optimizer state, run 150 Adam steps, return result.

		Returns:
			[action, objective] evaluated at the final action.
		"""
		# First re-initialize every variable under "action_optimizer".
		variables = tf.get_collection(tf.GraphKeys.VARIABLES, scope='action_optimizer')
		self.sess.run(tf.initialize_variables(variables))
		feed_dict = {self.S_init_ph: S_init, self.S_goal_ph: S_goal}
		# TODO: Find a good stop criteria
		now = time.time()
		for i in range(0, 150):
			self.sess.run([self.action_opt], feed_dict=feed_dict)
			if i % 50 == 0:
				print("#########Optimizing action#########")
				action_loss = self.sess.run(self.objective, feed_dict=feed_dict)
				print("action_loss(sum_square_error(S_goal, S_next)) is {}".format(action_loss))
				print("current_action is {}".format(self.sess.run(self.clipped_action)))
				print("{} sec elapsed for 100 gradient steps".format(time.time() - now))
				now = time.time()
		return self.sess.run([self.action, self.objective], feed_dict=feed_dict)

	# Debug API.
	def predict_next_state(self, current_state, action, goal_state):
		"""Evaluate the forward model for an arbitrary action without
		disturbing the stored planner action (it is restored afterwards)."""
		feed_dict = {self.S_init_ph: current_state, self.S_goal_ph: goal_state}
		old_action = self.sess.run(self.action)
		# Temporarily assign the requested action.
		self.sess.run(self.action.assign([action]))
		next_state, S_init, S_goal, loss = self.sess.run(
			[self.forward_model.output,
			self.encoder1.output,
			self.encoder2.output,
			self.objective], feed_dict=feed_dict)
		# Restore the previous action.
		self.sess.run(self.action.assign(old_action))
		return next_state, S_init, S_goal, loss
from sandbox.rocky.tf.core.parameterized import Parameterized
class ParameterizedAction(Parameterized):
	"""A single action held as a TF variable, exposed as a Parameterized."""

	def __init__(self, env, sess, action_initializer=None):
		"""Create the action variable under the 'action_optimizer' scope.

		Args:
			env: environment providing action_space.shape.
			sess: TF session used for all reads/initialization.
			action_initializer: initializer for the action variable; random
				uniform in [-0.1, 0.1] by default.
		"""
		Parameterized.__init__(self)
		if action_initializer is None:
			action_initializer = tf.random_uniform_initializer(minval=-0.1, maxval=0.1)
		with tf.variable_scope('action_optimizer'):
			self.action = tf.get_variable('planner_action',
				[1] + list(env.action_space.shape), initializer=action_initializer)
		self.sess = sess
		self.env = env

	def get_action(self):
		"""Return the current value of the action variable."""
		return self.sess.run(self.action)

	def initalize_action(self):
		"""Re-initialize the action variable.

		(Name keeps the original typo for interface compatibility.)
		"""
		# BUG FIX: tf.initialize_variables expects a LIST of variables;
		# passing the bare variable raised at runtime.
		self.sess.run(tf.initialize_variables([self.action]))
		return
class ConstrainedForwardModelPlanner(object):
	"""One-step planner using SciPy SLSQP with box constraints on the action.

	The action bound |a_i| <= 1 is expressed as inequalities
	1 - a_i^2 >= 0 and enforced by tf.contrib.opt.ScipyOptimizerInterface.
	"""

	def __init__(
			self,
			dynamic_model,
			encoder,
			env,
			sess=None,
			pos_only=False,
			action_initializer=None,
			optimizer=tf.contrib.opt.ScipyOptimizerInterface,
			):
		"""Build the one-step model, loss and action-box inequalities.

		Args:
			dynamic_model / encoder: provide get_weight_tied_copy(...).
			env: environment providing observation_space.shape.
			sess: TF session; defaults to the current default session.
			pos_only: if True, score only state dims :6.
			action_initializer: initializer for the action variable; random
				uniform in [-0.1, 0.1] by default.
			optimizer: unused (a fresh ScipyOptimizerInterface is built per
				get_action call); kept for interface compatibility.
		"""
		if sess is None:  # PEP 8: compare to None with `is`
			sess = tf.get_default_session()
		self.sess = sess
		if action_initializer is None:
			action_initializer = tf.random_uniform_initializer(minval=-0.1, maxval=0.1)
		with tf.variable_scope('action_optimizer'):
			self.action = tf.get_variable('planner_action', [1, 4],
				initializer=action_initializer)
		# Rebuild the dynamic model around the action variable.
		self.S_init_ph = tf.placeholder(tf.float32, list(env.observation_space.shape))
		self.S_goal_ph = tf.placeholder(tf.float32, list(env.observation_space.shape))
		self.encoder1 = encoder.get_weight_tied_copy(observation_input=[self.S_init_ph])
		self.encoder2 = encoder.get_weight_tied_copy(observation_input=[self.S_goal_ph])
		self.forward_model = dynamic_model.get_weight_tied_copy(
			feature_input=self.encoder1.output, action_input=self.action)
		# Objective: squared distance between prediction and encoded goal.
		if pos_only:
			self.objective = tf.reduce_sum(
				tf.square(self.forward_model.output[0][:6] - self.encoder2.output[0][:6]))
		else:
			self.objective = tf.reduce_sum(
				tf.square(self.forward_model.output - self.encoder2.output))
		self.loss = self.objective
		# Box constraint |a_i| <= 1 written as 1 - a_i^2 >= 0.
		self.inequalities = []
		for i in range(4):
			self.inequalities.append(1 - tf.square(self.action[0][i]))
		# The default SciPy algorithm, L-BFGS-B, does not support general
		# constraints, so SLSQP is used instead (see get_action).

	def get_action(self, S_init, S_goal):
		"""Re-initialize the action and solve the constrained problem.

		Returns:
			[action, loss] evaluated at the optimized action.
		"""
		# First re-initialize the action variable.
		self.sess.run(tf.initialize_variables([self.action]))
		feed_dict = {self.S_init_ph: S_init, self.S_goal_ph: S_goal}
		# The optimizer must be re-created every call, otherwise it optimizes
		# the action without enforcing the constraints.
		optimizer = tf.contrib.opt.ScipyOptimizerInterface(
			self.loss, var_list=[self.action],
			inequalities=self.inequalities, method='SLSQP')
		now = time.time()
		optimizer.minimize(self.sess, feed_dict=feed_dict)
		print("it takes {} to optimize the action".format(time.time() - now))
		return self.sess.run([self.action, self.loss], feed_dict=feed_dict)
"numpy.clip",
"tensorflow.shape",
"numpy.random.rand",
"tensorflow.get_variable",
"tensorflow.get_default_session",
"tensorflow.gradients",
"numpy.argsort",
"tensorflow.contrib.opt.ScipyOptimizerInterface",
"numpy.moveaxis",
"numpy.linalg.norm",
"numpy.cov",
"railrl.misc.pyhelper_fns.vis_utils... | [((886, 937), 'tensorflow.op_scope', 'tf.op_scope', (['[params, indices]', 'name', '"""gather_cols"""'], {}), "([params, indices], name, 'gather_cols')\n", (897, 937), True, 'import tensorflow as tf\n'), ((987, 1030), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['params'], {'name': '"""params"""'}), "(params, name='params')\n", (1007, 1030), True, 'import tensorflow as tf\n'), ((1049, 1094), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['indices'], {'name': '"""indices"""'}), "(indices, name='indices')\n", (1069, 1094), True, 'import tensorflow as tf\n'), ((1425, 1441), 'tensorflow.shape', 'tf.shape', (['params'], {}), '(params)\n', (1433, 1441), True, 'import tensorflow as tf\n'), ((1459, 1483), 'tensorflow.reshape', 'tf.reshape', (['params', '[-1]'], {}), '(params, [-1])\n', (1469, 1483), True, 'import tensorflow as tf\n'), ((3592, 3641), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[max_length, None, 4]'], {}), '(tf.float32, [max_length, None, 4])\n', (3606, 3641), True, 'import tensorflow as tf\n'), ((3741, 3779), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, 24]'], {}), '(tf.float32, [None, 24])\n', (3755, 3779), True, 'import tensorflow as tf\n'), ((3799, 3837), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, 24]'], {}), '(tf.float32, [None, 24])\n', (3813, 3837), True, 'import tensorflow as tf\n'), ((6940, 6968), 'numpy.cov', 'np.cov', (['trans_best_actions.T'], {}), '(trans_best_actions.T)\n', (6946, 6968), True, 'import numpy as np\n'), ((6983, 7020), 'numpy.mean', 'np.mean', (['trans_best_actions.T'], {'axis': '(1)'}), '(trans_best_actions.T, axis=1)\n', (6990, 7020), True, 'import numpy as np\n'), ((8988, 9037), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[max_length, None, 4]'], {}), '(tf.float32, [max_length, None, 4])\n', (9002, 9037), True, 'import tensorflow as tf\n'), ((12640, 12668), 'numpy.cov', 'np.cov', 
(['trans_best_actions.T'], {}), '(trans_best_actions.T)\n', (12646, 12668), True, 'import numpy as np\n'), ((12683, 12720), 'numpy.mean', 'np.mean', (['trans_best_actions.T'], {'axis': '(1)'}), '(trans_best_actions.T, axis=1)\n', (12690, 12720), True, 'import numpy as np\n'), ((14843, 14889), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[max_length, 1, 4]'], {}), '(tf.float32, [max_length, 1, 4])\n', (14857, 14889), True, 'import tensorflow as tf\n'), ((16912, 17012), 'railrl.misc.pyhelper_fns.vis_utils.MyAnimationMulti', 'MyAnimationMulti', (['None'], {'numPlots': '(2)', 'isIm': '[0, 0]', 'axTitles': "['(S1-S_goal)^2', 'sum(S_i-S_goal)^2']"}), "(None, numPlots=2, isIm=[0, 0], axTitles=['(S1-S_goal)^2',\n 'sum(S_i-S_goal)^2'])\n", (16928, 17012), False, 'from railrl.misc.pyhelper_fns.vis_utils import MyAnimationMulti\n'), ((17187, 17220), 'numpy.zeros', 'np.zeros', (['[self.max_length, 1, 4]'], {}), '([self.max_length, 1, 4])\n', (17195, 17220), True, 'import numpy as np\n'), ((17314, 17325), 'time.time', 'time.time', ([], {}), '()\n', (17323, 17325), False, 'import time\n'), ((19509, 19555), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[max_length, 1, 4]'], {}), '(tf.float32, [max_length, 1, 4])\n', (19523, 19555), True, 'import tensorflow as tf\n'), ((21420, 21517), 'railrl.misc.pyhelper_fns.vis_utils.MyAnimationMulti', 'MyAnimationMulti', (['None'], {'numPlots': '(2)', 'isIm': '[0, 0]', 'axTitles': "['(S1-S_goal)^2', '(S_n-S_goal)^2']"}), "(None, numPlots=2, isIm=[0, 0], axTitles=['(S1-S_goal)^2',\n '(S_n-S_goal)^2'])\n", (21436, 21517), False, 'from railrl.misc.pyhelper_fns.vis_utils import MyAnimationMulti\n'), ((21691, 21724), 'numpy.zeros', 'np.zeros', (['[self.max_length, 1, 4]'], {}), '([self.max_length, 1, 4])\n', (21699, 21724), True, 'import numpy as np\n'), ((21818, 21829), 'time.time', 'time.time', ([], {}), '()\n', (21827, 21829), False, 'import time\n'), ((23449, 23486), 'tensorflow.placeholder', 'tf.placeholder', 
(['tf.float32', '[None, 4]'], {}), '(tf.float32, [None, 4])\n', (23463, 23486), True, 'import tensorflow as tf\n'), ((24520, 24564), 'tensorflow.gradients', 'tf.gradients', (['self.objective', 'self.action_ph'], {}), '(self.objective, self.action_ph)\n', (24532, 24564), True, 'import tensorflow as tf\n'), ((25165, 25176), 'time.time', 'time.time', ([], {}), '()\n', (25174, 25176), False, 'import time\n'), ((26815, 26851), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['self.action', '(-1)', '(1)'], {}), '(self.action, -1, 1)\n', (26831, 26851), True, 'import tensorflow as tf\n'), ((28070, 28137), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.VARIABLES'], {'scope': '"""action_optimizer"""'}), "(tf.GraphKeys.VARIABLES, scope='action_optimizer')\n", (28087, 28137), True, 'import tensorflow as tf\n'), ((28301, 28312), 'time.time', 'time.time', ([], {}), '()\n', (28310, 28312), False, 'import time\n'), ((30362, 30398), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['self.action', '(-1)', '(1)'], {}), '(self.action, -1, 1)\n', (30378, 30398), True, 'import tensorflow as tf\n'), ((31582, 31649), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.VARIABLES'], {'scope': '"""action_optimizer"""'}), "(tf.GraphKeys.VARIABLES, scope='action_optimizer')\n", (31599, 31649), True, 'import tensorflow as tf\n'), ((31810, 31821), 'time.time', 'time.time', ([], {}), '()\n', (31819, 31821), False, 'import time\n'), ((33453, 33481), 'sandbox.rocky.tf.core.parameterized.Parameterized.__init__', 'Parameterized.__init__', (['self'], {}), '(self)\n', (33475, 33481), False, 'from sandbox.rocky.tf.core.parameterized import Parameterized\n'), ((35902, 36027), 'tensorflow.contrib.opt.ScipyOptimizerInterface', 'tf.contrib.opt.ScipyOptimizerInterface', (['self.loss'], {'var_list': '[self.action]', 'inequalities': 'self.inequalities', 'method': '"""SLSQP"""'}), "(self.loss, var_list=[self.action],\n inequalities=self.inequalities, method='SLSQP')\n", 
(35940, 36027), True, 'import tensorflow as tf\n'), ((36038, 36049), 'time.time', 'time.time', ([], {}), '()\n', (36047, 36049), False, 'import time\n'), ((1652, 1677), 'tensorflow.gather', 'tf.gather', (['p_flat', 'i_flat'], {}), '(p_flat, i_flat)\n', (1661, 1677), True, 'import tensorflow as tf\n'), ((2319, 2343), 'tensorflow.get_default_session', 'tf.get_default_session', ([], {}), '()\n', (2341, 2343), True, 'import tensorflow as tf\n'), ((3495, 3519), 'tensorflow.get_default_session', 'tf.get_default_session', ([], {}), '()\n', (3517, 3519), True, 'import tensorflow as tf\n'), ((6071, 6100), 'numpy.ones', 'np.ones', (['[init_batch_size, 1]'], {}), '([init_batch_size, 1])\n', (6078, 6100), True, 'import numpy as np\n'), ((6148, 6177), 'numpy.ones', 'np.ones', (['[init_batch_size, 1]'], {}), '([init_batch_size, 1])\n', (6155, 6177), True, 'import numpy as np\n'), ((6468, 6494), 'numpy.argsort', 'np.argsort', (['objective_list'], {}), '(objective_list)\n', (6478, 6494), True, 'import numpy as np\n'), ((7047, 7083), 'numpy.ones', 'np.ones', (['[self.sample_batch_size, 1]'], {}), '([self.sample_batch_size, 1])\n', (7054, 7083), True, 'import numpy as np\n'), ((7131, 7167), 'numpy.ones', 'np.ones', (['[self.sample_batch_size, 1]'], {}), '([self.sample_batch_size, 1])\n', (7138, 7167), True, 'import numpy as np\n'), ((7376, 7402), 'numpy.moveaxis', 'np.moveaxis', (['actions', '(0)', '(1)'], {}), '(actions, 0, 1)\n', (7387, 7402), True, 'import numpy as np\n'), ((7779, 7807), 'numpy.cov', 'np.cov', (['trans_best_actions.T'], {}), '(trans_best_actions.T)\n', (7785, 7807), True, 'import numpy as np\n'), ((7823, 7860), 'numpy.mean', 'np.mean', (['trans_best_actions.T'], {'axis': '(1)'}), '(trans_best_actions.T, axis=1)\n', (7830, 7860), True, 'import numpy as np\n'), ((8891, 8915), 'tensorflow.get_default_session', 'tf.get_default_session', ([], {}), '()\n', (8913, 8915), True, 'import tensorflow as tf\n'), ((11772, 11801), 'numpy.ones', 'np.ones', (['[init_batch_size, 
1]'], {}), '([init_batch_size, 1])\n', (11779, 11801), True, 'import numpy as np\n'), ((11849, 11878), 'numpy.ones', 'np.ones', (['[init_batch_size, 1]'], {}), '([init_batch_size, 1])\n', (11856, 11878), True, 'import numpy as np\n'), ((12169, 12195), 'numpy.argsort', 'np.argsort', (['objective_list'], {}), '(objective_list)\n', (12179, 12195), True, 'import numpy as np\n'), ((12747, 12783), 'numpy.ones', 'np.ones', (['[self.sample_batch_size, 1]'], {}), '([self.sample_batch_size, 1])\n', (12754, 12783), True, 'import numpy as np\n'), ((12831, 12867), 'numpy.ones', 'np.ones', (['[self.sample_batch_size, 1]'], {}), '([self.sample_batch_size, 1])\n', (12838, 12867), True, 'import numpy as np\n'), ((13076, 13102), 'numpy.moveaxis', 'np.moveaxis', (['actions', '(0)', '(1)'], {}), '(actions, 0, 1)\n', (13087, 13102), True, 'import numpy as np\n'), ((13479, 13507), 'numpy.cov', 'np.cov', (['trans_best_actions.T'], {}), '(trans_best_actions.T)\n', (13485, 13507), True, 'import numpy as np\n'), ((13523, 13560), 'numpy.mean', 'np.mean', (['trans_best_actions.T'], {'axis': '(1)'}), '(trans_best_actions.T, axis=1)\n', (13530, 13560), True, 'import numpy as np\n'), ((14081, 14145), 'numpy.square', 'np.square', (['(forward_models_outputs[steps - 1][0][:4] - S_goal[:4])'], {}), '(forward_models_outputs[steps - 1][0][:4] - S_goal[:4])\n', (14090, 14145), True, 'import numpy as np\n'), ((14164, 14230), 'numpy.square', 'np.square', (['(forward_models_outputs[steps - 1][0][4:7] - S_goal[4:7])'], {}), '(forward_models_outputs[steps - 1][0][4:7] - S_goal[4:7])\n', (14173, 14230), True, 'import numpy as np\n'), ((14721, 14745), 'tensorflow.get_default_session', 'tf.get_default_session', ([], {}), '()\n', (14743, 14745), True, 'import tensorflow as tf\n'), ((19387, 19411), 'tensorflow.get_default_session', 'tf.get_default_session', ([], {}), '()\n', (19409, 19411), True, 'import tensorflow as tf\n'), ((23208, 23232), 'tensorflow.get_default_session', 'tf.get_default_session', ([], {}), 
'()\n', (23230, 23232), True, 'import tensorflow as tf\n'), ((24251, 24324), 'tensorflow.square', 'tf.square', (['(self.forward_model.output[0][:4] - self.encoder2.output[0][:4])'], {}), '(self.forward_model.output[0][:4] - self.encoder2.output[0][:4])\n', (24260, 24324), True, 'import tensorflow as tf\n'), ((24358, 24433), 'tensorflow.square', 'tf.square', (['(self.forward_model.output[0][4:6] - self.encoder2.output[0][4:6])'], {}), '(self.forward_model.output[0][4:6] - self.encoder2.output[0][4:6])\n', (24367, 24433), True, 'import tensorflow as tf\n'), ((25096, 25113), 'numpy.random.rand', 'np.random.rand', (['(4)'], {}), '(4)\n', (25110, 25113), True, 'import numpy as np\n'), ((25619, 25641), 'numpy.clip', 'np.clip', (['action', '(-1)', '(1)'], {}), '(action, -1, 1)\n', (25626, 25641), True, 'import numpy as np\n'), ((26437, 26461), 'tensorflow.get_default_session', 'tf.get_default_session', ([], {}), '()\n', (26459, 26461), True, 'import tensorflow as tf\n'), ((26568, 26622), 'tensorflow.random_uniform_initializer', 'tf.random_uniform_initializer', ([], {'minval': '(-0.1)', 'maxval': '(0.1)'}), '(minval=-0.1, maxval=0.1)\n', (26597, 26622), True, 'import tensorflow as tf\n'), ((26633, 26670), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""action_optimizer"""'], {}), "('action_optimizer')\n", (26650, 26670), True, 'import tensorflow as tf\n'), ((27685, 27722), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""action_optimizer"""'], {}), "('action_optimizer')\n", (27702, 27722), True, 'import tensorflow as tf\n'), ((28154, 28188), 'tensorflow.initialize_variables', 'tf.initialize_variables', (['variables'], {}), '(variables)\n', (28177, 28188), True, 'import tensorflow as tf\n'), ((29984, 30008), 'tensorflow.get_default_session', 'tf.get_default_session', ([], {}), '()\n', (30006, 30008), True, 'import tensorflow as tf\n'), ((30115, 30169), 'tensorflow.random_uniform_initializer', 'tf.random_uniform_initializer', ([], {'minval': '(-0.1)', 
'maxval': '(0.1)'}), '(minval=-0.1, maxval=0.1)\n', (30144, 30169), True, 'import tensorflow as tf\n'), ((30180, 30217), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""action_optimizer"""'], {}), "('action_optimizer')\n", (30197, 30217), True, 'import tensorflow as tf\n'), ((31200, 31237), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""action_optimizer"""'], {}), "('action_optimizer')\n", (31217, 31237), True, 'import tensorflow as tf\n'), ((31666, 31700), 'tensorflow.initialize_variables', 'tf.initialize_variables', (['variables'], {}), '(variables)\n', (31689, 31700), True, 'import tensorflow as tf\n'), ((33542, 33596), 'tensorflow.random_uniform_initializer', 'tf.random_uniform_initializer', ([], {'minval': '(-0.1)', 'maxval': '(0.1)'}), '(minval=-0.1, maxval=0.1)\n', (33571, 33596), True, 'import tensorflow as tf\n'), ((33608, 33645), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""action_optimizer"""'], {}), "('action_optimizer')\n", (33625, 33645), True, 'import tensorflow as tf\n'), ((33906, 33942), 'tensorflow.initialize_variables', 'tf.initialize_variables', (['self.action'], {}), '(self.action)\n', (33929, 33942), True, 'import tensorflow as tf\n'), ((34231, 34255), 'tensorflow.get_default_session', 'tf.get_default_session', ([], {}), '()\n', (34253, 34255), True, 'import tensorflow as tf\n'), ((34332, 34386), 'tensorflow.random_uniform_initializer', 'tf.random_uniform_initializer', ([], {'minval': '(-0.1)', 'maxval': '(0.1)'}), '(minval=-0.1, maxval=0.1)\n', (34361, 34386), True, 'import tensorflow as tf\n'), ((34397, 34434), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""action_optimizer"""'], {}), "('action_optimizer')\n", (34414, 34434), True, 'import tensorflow as tf\n'), ((34453, 34526), 'tensorflow.get_variable', 'tf.get_variable', (['"""planner_action"""', '[1, 4]'], {'initializer': 'action_initializer'}), "('planner_action', [1, 4], initializer=action_initializer)\n", (34468, 34526), True, 'import tensorflow as 
tf\n'), ((35665, 35703), 'tensorflow.initialize_variables', 'tf.initialize_variables', (['[self.action]'], {}), '([self.action])\n', (35688, 35703), True, 'import tensorflow as tf\n'), ((6220, 6271), 'numpy.random.rand', 'np.random.rand', (['self.max_length', 'init_batch_size', '(4)'], {}), '(self.max_length, init_batch_size, 4)\n', (6234, 6271), True, 'import numpy as np\n'), ((6869, 6900), 'numpy.moveaxis', 'np.moveaxis', (['best_actions', '(0)', '(1)'], {}), '(best_actions, 0, 1)\n', (6880, 6900), True, 'import numpy as np\n'), ((7598, 7624), 'numpy.argsort', 'np.argsort', (['objective_list'], {}), '(objective_list)\n', (7608, 7624), True, 'import numpy as np\n'), ((11921, 11972), 'numpy.random.rand', 'np.random.rand', (['self.max_length', 'init_batch_size', '(4)'], {}), '(self.max_length, init_batch_size, 4)\n', (11935, 11972), True, 'import numpy as np\n'), ((12569, 12600), 'numpy.moveaxis', 'np.moveaxis', (['best_actions', '(0)', '(1)'], {}), '(best_actions, 0, 1)\n', (12580, 12600), True, 'import numpy as np\n'), ((13298, 13324), 'numpy.argsort', 'np.argsort', (['objective_list'], {}), '(objective_list)\n', (13308, 13324), True, 'import numpy as np\n'), ((16859, 16892), 'tensorflow.gradients', 'tf.gradients', (['obj', 'self.action_ph'], {}), '(obj, self.action_ph)\n', (16871, 16892), True, 'import tensorflow as tf\n'), ((17997, 18012), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (18010, 18012), False, 'import pdb\n'), ((18182, 18204), 'numpy.clip', 'np.clip', (['action', '(-1)', '(1)'], {}), '(action, -1, 1)\n', (18189, 18204), True, 'import numpy as np\n'), ((21367, 21400), 'tensorflow.gradients', 'tf.gradients', (['obj', 'self.action_ph'], {}), '(obj, self.action_ph)\n', (21379, 21400), True, 'import tensorflow as tf\n'), ((22499, 22514), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (22512, 22514), False, 'import pdb\n'), ((22692, 22714), 'numpy.clip', 'np.clip', (['action', '(-1)', '(1)'], {}), '(action, -1, 1)\n', (22699, 22714), True, 
'import numpy as np\n'), ((24037, 24110), 'tensorflow.square', 'tf.square', (['(self.forward_model.output[0][:6] - self.encoder2.output[0][:6])'], {}), '(self.forward_model.output[0][:6] - self.encoder2.output[0][:6])\n', (24046, 24110), True, 'import tensorflow as tf\n'), ((24154, 24213), 'tensorflow.square', 'tf.square', (['(self.forward_model.output - self.encoder2.output)'], {}), '(self.forward_model.output - self.encoder2.output)\n', (24163, 24213), True, 'import tensorflow as tf\n'), ((26108, 26119), 'time.time', 'time.time', ([], {}), '()\n', (26117, 26119), False, 'import time\n'), ((27431, 27504), 'tensorflow.square', 'tf.square', (['(self.forward_model.output[0][:6] - self.encoder2.output[0][:6])'], {}), '(self.forward_model.output[0][:6] - self.encoder2.output[0][:6])\n', (27440, 27504), True, 'import tensorflow as tf\n'), ((27548, 27607), 'tensorflow.square', 'tf.square', (['(self.forward_model.output - self.encoder2.output)'], {}), '(self.forward_model.output - self.encoder2.output)\n', (27557, 27607), True, 'import tensorflow as tf\n'), ((29064, 29075), 'time.time', 'time.time', ([], {}), '()\n', (29073, 29075), False, 'import time\n'), ((30946, 31019), 'tensorflow.square', 'tf.square', (['(self.forward_model.output[0][:6] - self.encoder2.output[0][:6])'], {}), '(self.forward_model.output[0][:6] - self.encoder2.output[0][:6])\n', (30955, 31019), True, 'import tensorflow as tf\n'), ((31063, 31122), 'tensorflow.square', 'tf.square', (['(self.forward_model.output - self.encoder2.output)'], {}), '(self.forward_model.output - self.encoder2.output)\n', (31072, 31122), True, 'import tensorflow as tf\n'), ((32604, 32615), 'time.time', 'time.time', ([], {}), '()\n', (32613, 32615), False, 'import time\n'), ((35104, 35177), 'tensorflow.square', 'tf.square', (['(self.forward_model.output[0][:6] - self.encoder2.output[0][:6])'], {}), '(self.forward_model.output[0][:6] - self.encoder2.output[0][:6])\n', (35113, 35177), True, 'import tensorflow as tf\n'), ((35221, 
35280), 'tensorflow.square', 'tf.square', (['(self.forward_model.output - self.encoder2.output)'], {}), '(self.forward_model.output - self.encoder2.output)\n', (35230, 35280), True, 'import tensorflow as tf\n'), ((5399, 5467), 'tensorflow.square', 'tf.square', (['(forward_model.output[0][:4] - self.encoder2.output[0][:4])'], {}), '(forward_model.output[0][:4] - self.encoder2.output[0][:4])\n', (5408, 5467), True, 'import tensorflow as tf\n'), ((7234, 7310), 'numpy.random.multivariate_normal', 'np.random.multivariate_normal', (['mean_list', 'cov_matrix', 'self.sample_batch_size'], {}), '(mean_list, cov_matrix, self.sample_batch_size)\n', (7263, 7310), True, 'import numpy as np\n'), ((7707, 7738), 'numpy.moveaxis', 'np.moveaxis', (['best_actions', '(0)', '(1)'], {}), '(best_actions, 0, 1)\n', (7718, 7738), True, 'import numpy as np\n'), ((12934, 13010), 'numpy.random.multivariate_normal', 'np.random.multivariate_normal', (['mean_list', 'cov_matrix', 'self.sample_batch_size'], {}), '(mean_list, cov_matrix, self.sample_batch_size)\n', (12963, 13010), True, 'import numpy as np\n'), ((13407, 13438), 'numpy.moveaxis', 'np.moveaxis', (['best_actions', '(0)', '(1)'], {}), '(best_actions, 0, 1)\n', (13418, 13438), True, 'import numpy as np\n'), ((16599, 16659), 'tensorflow.square', 'tf.square', (['(forward_model.output[0] - self.encoder2.output[0])'], {}), '(forward_model.output[0] - self.encoder2.output[0])\n', (16608, 16659), True, 'import tensorflow as tf\n'), ((17857, 17875), 'numpy.isnan', 'np.isnan', (['gradient'], {}), '(gradient)\n', (17865, 17875), True, 'import numpy as np\n'), ((17896, 17933), 'numpy.random.rand', 'np.random.rand', (['self.max_length', '(1)', '(4)'], {}), '(self.max_length, 1, 4)\n', (17910, 17933), True, 'import numpy as np\n'), ((18029, 18053), 'numpy.linalg.norm', 'np.linalg.norm', (['gradient'], {}), '(gradient)\n', (18043, 18053), True, 'import numpy as np\n'), ((22359, 22377), 'numpy.isnan', 'np.isnan', (['gradient'], {}), '(gradient)\n', 
(22367, 22377), True, 'import numpy as np\n'), ((22398, 22435), 'numpy.random.rand', 'np.random.rand', (['self.max_length', '(1)', '(4)'], {}), '(self.max_length, 1, 4)\n', (22412, 22435), True, 'import numpy as np\n'), ((22531, 22555), 'numpy.linalg.norm', 'np.linalg.norm', (['gradient'], {}), '(gradient)\n', (22545, 22555), True, 'import numpy as np\n'), ((27745, 27776), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['init_lr'], {}), '(init_lr)\n', (27767, 27776), True, 'import tensorflow as tf\n'), ((31260, 31291), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['init_lr'], {}), '(init_lr)\n', (31282, 31291), True, 'import tensorflow as tf\n'), ((31369, 31400), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['init_lr'], {}), '(init_lr)\n', (31391, 31400), True, 'import tensorflow as tf\n'), ((35392, 35420), 'tensorflow.square', 'tf.square', (['self.action[0][i]'], {}), '(self.action[0][i])\n', (35401, 35420), True, 'import tensorflow as tf\n'), ((36162, 36173), 'time.time', 'time.time', ([], {}), '()\n', (36171, 36173), False, 'import time\n'), ((1523, 1546), 'tensorflow.range', 'tf.range', (['(0)', 'p_shape[0]'], {}), '(0, p_shape[0])\n', (1531, 1546), True, 'import tensorflow as tf\n'), ((5743, 5768), 'tensorflow.square', 'tf.square', (['self.action_ph'], {}), '(self.action_ph)\n', (5752, 5768), True, 'import tensorflow as tf\n'), ((10749, 10817), 'tensorflow.square', 'tf.square', (['(forward_model.output[0][:4] - self.encoder2.output[0][:4])'], {}), '(forward_model.output[0][:4] - self.encoder2.output[0][:4])\n', (10758, 10817), True, 'import tensorflow as tf\n'), ((10864, 10934), 'tensorflow.square', 'tf.square', (['(forward_model.output[0][4:6] - self.encoder2.output[0][4:6])'], {}), '(forward_model.output[0][4:6] - self.encoder2.output[0][4:6])\n', (10873, 10934), True, 'import tensorflow as tf\n'), ((11039, 11099), 'tensorflow.square', 'tf.square', (['(forward_model.output[0] - self.encoder2.output[0])'], {}), 
'(forward_model.output[0] - self.encoder2.output[0])\n', (11048, 11099), True, 'import tensorflow as tf\n'), ((11146, 11214), 'tensorflow.square', 'tf.square', (['(forward_model.output[0][:4] - self.encoder2.output[0][:4])'], {}), '(forward_model.output[0][:4] - self.encoder2.output[0][:4])\n', (11155, 11214), True, 'import tensorflow as tf\n'), ((11261, 11331), 'tensorflow.square', 'tf.square', (['(forward_model.output[0][4:6] - self.encoder2.output[0][4:6])'], {}), '(forward_model.output[0][4:6] - self.encoder2.output[0][4:6])\n', (11270, 11331), True, 'import tensorflow as tf\n'), ((11444, 11469), 'tensorflow.square', 'tf.square', (['self.action_ph'], {}), '(self.action_ph)\n', (11453, 11469), True, 'import tensorflow as tf\n'), ((16053, 16121), 'tensorflow.square', 'tf.square', (['(forward_model.output[0][:6] - self.encoder2.output[0][:6])'], {}), '(forward_model.output[0][:6] - self.encoder2.output[0][:6])\n', (16062, 16121), True, 'import tensorflow as tf\n'), ((16167, 16235), 'tensorflow.square', 'tf.square', (['(forward_model.output[0][:6] - self.encoder2.output[0][:6])'], {}), '(forward_model.output[0][:6] - self.encoder2.output[0][:6])\n', (16176, 16235), True, 'import tensorflow as tf\n'), ((16323, 16391), 'tensorflow.square', 'tf.square', (['(forward_model.output[0][:4] - self.encoder2.output[0][:4])'], {}), '(forward_model.output[0][:4] - self.encoder2.output[0][:4])\n', (16332, 16391), True, 'import tensorflow as tf\n'), ((16438, 16508), 'tensorflow.square', 'tf.square', (['(forward_model.output[0][4:6] - self.encoder2.output[0][4:6])'], {}), '(forward_model.output[0][4:6] - self.encoder2.output[0][4:6])\n', (16447, 16508), True, 'import tensorflow as tf\n'), ((20743, 20811), 'tensorflow.square', 'tf.square', (['(forward_model.output[0][:6] - self.encoder2.output[0][:6])'], {}), '(forward_model.output[0][:6] - self.encoder2.output[0][:6])\n', (20752, 20811), True, 'import tensorflow as tf\n'), ((20858, 20926), 'tensorflow.square', 'tf.square', 
(['(forward_model.output[0][:4] - self.encoder2.output[0][:4])'], {}), '(forward_model.output[0][:4] - self.encoder2.output[0][:4])\n', (20867, 20926), True, 'import tensorflow as tf\n'), ((20973, 21043), 'tensorflow.square', 'tf.square', (['(forward_model.output[0][4:6] - self.encoder2.output[0][4:6])'], {}), '(forward_model.output[0][4:6] - self.encoder2.output[0][4:6])\n', (20982, 21043), True, 'import tensorflow as tf\n'), ((21148, 21208), 'tensorflow.square', 'tf.square', (['(forward_model.output[0] - self.encoder2.output[0])'], {}), '(forward_model.output[0] - self.encoder2.output[0])\n', (21157, 21208), True, 'import tensorflow as tf\n'), ((26078, 26089), 'time.time', 'time.time', ([], {}), '()\n', (26087, 26089), False, 'import time\n'), ((29034, 29045), 'time.time', 'time.time', ([], {}), '()\n', (29043, 29045), False, 'import time\n'), ((32574, 32585), 'time.time', 'time.time', ([], {}), '()\n', (32583, 32585), False, 'import time\n'), ((18090, 18114), 'numpy.linalg.norm', 'np.linalg.norm', (['gradient'], {}), '(gradient)\n', (18104, 18114), True, 'import numpy as np\n'), ((22592, 22616), 'numpy.linalg.norm', 'np.linalg.norm', (['gradient'], {}), '(gradient)\n', (22606, 22616), True, 'import numpy as np\n')] |
# coding: utf-8
"""
Thingsboard REST API
For instructions how to authorize requests please visit <a href='http://thingsboard.io/docs/reference/rest-api/'>REST API documentation page</a>.
OpenAPI spec version: 2.0
Contact: <EMAIL>
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import swagger_client
from swagger_client.rest import ApiException
from swagger_client.apis.auth_controller_api import AuthControllerApi
class TestAuthControllerApi(unittest.TestCase):
""" AuthControllerApi unit test stubs """
def setUp(self):
self.api = swagger_client.apis.auth_controller_api.AuthControllerApi()
def tearDown(self):
pass
def test_activate_user_using_post(self):
"""
Test case for activate_user_using_post
activateUser
"""
pass
def test_change_password_using_post(self):
"""
Test case for change_password_using_post
changePassword
"""
pass
def test_check_activate_token_using_get(self):
"""
Test case for check_activate_token_using_get
checkActivateToken
"""
pass
def test_check_reset_token_using_get(self):
"""
Test case for check_reset_token_using_get
checkResetToken
"""
pass
def test_get_user_using_get(self):
"""
Test case for get_user_using_get
getUser
"""
pass
def test_request_reset_password_by_email_using_post(self):
"""
Test case for request_reset_password_by_email_using_post
requestResetPasswordByEmail
"""
pass
def test_reset_password_using_post(self):
"""
Test case for reset_password_using_post
resetPassword
"""
pass
# Run the stubs with the standard unittest runner when executed directly.
if __name__ == '__main__':
    unittest.main()
| [
"unittest.main",
"swagger_client.apis.auth_controller_api.AuthControllerApi"
] | [((1941, 1956), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1954, 1956), False, 'import unittest\n'), ((676, 735), 'swagger_client.apis.auth_controller_api.AuthControllerApi', 'swagger_client.apis.auth_controller_api.AuthControllerApi', ([], {}), '()\n', (733, 735), False, 'import swagger_client\n')] |
import numpy as np
import matplotlib.pyplot as plt
from docx import Document
from docx.shared import Cm
import math
def split_file(file):
    """Split a result/assessment file into one sublist per query.

    Blank lines separate queries; only lines starting with "VOM" are
    collected.  Returns a list of (up to 16) sublists.
    """
    sections = [[]]
    blank_count = 0
    for line in file:
        if line == "\n":
            blank_count += 1
            # Only 15 extra sublists are needed beyond the initial one;
            # the 16th blank line does not open a new section.
            if blank_count != 16:
                sections.append([])
        elif line.startswith("VOM"):
            sections[blank_count].append(line)
    return sections
def calculate(retrieve, order, relevant_number):
    """Compute (recall, precision), each rounded to 4 decimal places.

    retrieve -- number of relevant documents retrieved so far
    order -- 1-based rank of the current document
    relevant_number -- total number of relevant documents
    """
    return (round(retrieve / relevant_number, 4),
            round(retrieve / order, 4))
def recall_interval(recall):
    """Return the recall interval (0, 10, ..., 100) that *recall* falls in.

    *recall* is a fraction in [0, 1]; the result is its 10%-wide bucket
    floor, e.g. 0.35 -> 30 and 1.0 -> 100.

    Fixed: the original ``((recall*10)//1)*10`` returned a float (e.g.
    ``30.0``) that was then used to index an int-keyed dict; ``int(...)``
    yields the same value as a proper int (numerically equal, so existing
    callers are unaffected).
    """
    return int(recall * 10) * 10
def compare_interpolate(recall_area):
    """Interpolate precision across the 11 recall intervals (100 down to 0).

    For each interval the maximum precision (first element after sorting)
    is taken; when an interval is empty, or its best precision is lower
    than the neighbouring higher interval, the higher interval's value is
    carried down.  Returns (precisions, recalls) as percentages.
    """
    recalls = []
    precisions = []
    for level in range(100, -1, -10):
        recalls.append(level)
        bucket = recall_area[level]
        if bucket:
            candidate = bucket[0][0] * 100
            # Carry the higher interval's precision down when it dominates.
            if level != 100 and candidate < precisions[-1]:
                precisions.append(precisions[-1])
            else:
                precisions.append(candidate)
        else:
            # Empty interval: reuse the precision of the larger interval.
            precisions.append(precisions[-1])
    return precisions, recalls
def mean_average_precision(answer_set, relevant_set):
    """Return the mean average precision of *answer_set* w.r.t. *relevant_set*.

    Documents match when their first 21 characters agree.  Precision at
    each relevant hit is rounded to 4 decimals (as in ``calculate``) and
    the sum is divided by the number of relevant documents.

    Cleanups: the original also computed an unused recall value, shadowed
    the builtin ``sum`` and carried dead commented-out code.
    """
    relevant_number = len(relevant_set)
    retrieve = 0
    total = 0.0
    for order, answer in enumerate(answer_set, start=1):
        for relevant in relevant_set:
            if answer[:21] == relevant[:21]:
                retrieve += 1
                total += round(retrieve / order, 4)
        # Preserved original early-exit condition.
        if retrieve > relevant_number:
            break
    return total / relevant_number
def interpolate(answer_set, relevant_set):
    """Return interpolated (recalls, precisions) for one query.

    Documents match when their first 21 characters agree.  Each relevant
    hit's (precision, recall) pair is bucketed into a 10%-wide recall
    interval, the buckets are sorted so their maximum precision comes
    first, and ``compare_interpolate`` produces the interpolated curve.

    Fixed: the original called ``compare_interpolate`` and then discarded
    its result, re-running the identical interpolation loop inline; it
    also built unused ``r``/``p`` lists.  One computation now remains.
    """
    retrieve = 0
    recall_area = {level: [] for level in range(0, 101, 10)}
    relevant_number = len(relevant_set)
    for order, answer in enumerate(answer_set, start=1):
        for relevant in relevant_set:
            if answer[:21] == relevant[:21]:
                retrieve += 1
                recall, precision = calculate(retrieve, order, relevant_number)
                recall_area[recall_interval(recall)].append((precision, recall))
        # Preserved original early-exit condition.
        if retrieve > relevant_number:
            break
    # Sort each interval so the maximum precision is first.
    for bucket in recall_area.values():
        bucket.sort(reverse=True)
    final_p, final_r = compare_interpolate(recall_area)
    return final_r, final_p
# Load the retrieved results and the relevance judgements, one sublist
# per query (16 queries total).
with open('HW1_ResultsTrainSet.txt', 'r') as answer_set:
    answer = split_file(answer_set)
with open('HW1_AssessmentTrainSet.txt', 'r') as relevant_set:
    relevance = split_file(relevant_set)
# Collect, per recall interval, the interpolated precisions of all queries.
total_precision = {x:[] for x in range(100, -1, -10)}
for i in range(16):
    r, p = interpolate(answer[i], relevance[i])
    # NOTE(review): the inner loop reuses the name `i`; harmless here
    # because the outer `i` is not read again in this iteration.
    for i in r:
        total_precision[i].append(p[r.index(i)])
# Average the precisions over the 16 queries and re-interpolate so the
# curve stays monotonically non-increasing with recall.
final_precision = []
final_recall = [x for x in range(100, -1, -10)]
for i in total_precision.values():
    # NOTE(review): `sum` shadows the builtin of the same name.
    sum = 0
    for j in i:
        sum += j
    result = sum/16
    if final_precision != [] and result < final_precision[-1]:
        # interpolate if the max precision is smaller than the larger interval
        final_precision.append(final_precision[-1])
    else:
        final_precision.append(result)
# Plot and save the averaged interpolated recall-precision curve.
plt.plot(final_recall, final_precision, marker = ".")
plt.xlabel("Recall")
plt.ylabel("Precision")
plt.title("Interpolated Recall-Precision Curve", loc = 'center')
#set x, y axis to fixed range
plt.axis([0,100,0,100])
plt.savefig("interpolate.png")
# NOTE(review): plt.close() expects a figure (or its label/number); a
# filename string only matches a figure labelled with that string — confirm.
plt.close("interpolate.png")
plt.clf()
plt.cla()
# calculate map
# Average the per-query average precisions over all 16 queries.
total_ap = 0
for i in range(16):
    mAP = mean_average_precision(answer[i], relevance[i])
    total_ap += mAP
total_map = round(total_ap/16, 8)
map_answer = "mAP = " + str(total_map)
print(map_answer)
# calculate dcg
# score range is 0-3 and equally distributed among the relevant set
def assign_score(relevant_set):
    """Assign descending relevance scores (3, 2, 1) over *relevant_set*.

    The list is split into three roughly equal thirds: the first third
    gets score 3, the second 2, and everything remaining gets 1 (the last
    group absorbs the remainder).

    Bug fixed: the original ignored its parameter and read the module
    global ``relevance[0]`` instead; its only call site passed
    ``relevance[0]``, so behaviour there is unchanged.
    """
    section = len(relevant_set) // 3
    score = []
    for s in (3, 2, 1):
        if s == 1:
            # Last group takes whatever is left (handles the remainder).
            score.extend([s] * (len(relevant_set) - len(score)))
        else:
            score.extend([s] * section)
    return score
def gain(answer_set, relevant_set, score_list):
    """Map each retrieved document to its relevance score.

    answer_set -- retrieved documents; a document is looked up by its
        first 21 characters plus a trailing newline
    relevant_set -- relevant documents, parallel to *score_list*
    score_list -- score of each entry in *relevant_set*

    Returns one score per retrieved document (0 when not relevant).

    Cleanups: removed the dead ``order_list`` and the unused
    ``c = rank_list.count(0)`` from the original.
    """
    rank_list = []
    for entry in answer_set:
        key = entry[:21] + "\n"
        if key in relevant_set:
            rank_list.append(score_list[relevant_set.index(key)])
        else:
            rank_list.append(0)
    return rank_list
def cumulative_gain(rank_list):
    """Return the running (cumulative) sum of *rank_list*.

    Fixed: the original unconditionally read ``rank_list[0]`` and raised
    IndexError on an empty list; an empty list now yields an empty result.
    """
    cumulative_set = []
    running_total = 0
    for gain_value in rank_list:
        running_total += gain_value
        cumulative_set.append(running_total)
    return cumulative_set
def discounted_cumulative_gain(rank_list):
    """Return the DCG prefix list of *rank_list*.

    The first item contributes its raw score; item at 0-based position
    ``i`` (i >= 1) contributes ``score / log2(i + 1)``.

    Fixed: the original unconditionally read ``rank_list[0]`` and raised
    IndexError on an empty list; an empty list now yields an empty result.
    """
    discounted_cg = []
    for position, gain_value in enumerate(rank_list):
        if position == 0:
            discounted_cg.append(gain_value)
        else:
            discounted_cg.append(
                discounted_cg[-1] + gain_value / math.log2(position + 1))
    return discounted_cg
def ideal_dcg(score, answer_set_number):
    """Return the ideal DCG for a result list of *answer_set_number* items.

    The ideal ranking is the (already descending) *score* list padded with
    zeros up to the answer-set length; when the answer set is shorter than
    the score list, no padding is added (matching the original behaviour).

    Bug fixed: the original aliased the argument (``ideal_set = score``)
    and extended it in place, so every call mutated the caller's score
    list — visible in ``normalized_dcg``, which reuses the same list
    across queries.  A copy is taken instead.
    """
    ideal_set = list(score)
    padding = answer_set_number - len(ideal_set)
    if padding > 0:
        ideal_set.extend([0] * padding)
    idgc = discounted_cumulative_gain(ideal_set)
    return idgc
def normalized_dcg(query_number, answer_set, relevant_set, score, rank_list):
    """Calculate normalized discounted cumulative gain of various queries and return a list.

    For each of the *query_number* queries the DCG of *rank_list* and the
    ideal DCG of *score* (padded to that query's answer-set length) are
    collected; the final curve is the running ratio of summed DCG to
    summed ideal DCG across queries and positions.

    NOTE(review): the same *rank_list* is passed for every query, so
    ``total_dcg`` contains identical rows — confirm whether per-query
    rank lists were intended.  *relevant_set* is accepted but never used.
    NOTE(review): ``ideal_dcg`` extends *score* in place, so later
    iterations see an already-padded list — verify this is intentional.
    """
    total_dcg = []
    total_idcg = []
    for i in range(query_number):
        dcg = discounted_cumulative_gain(rank_list)
        total_dcg.append(dcg)
        idcg = ideal_dcg(score, len(answer_set[i]))
        total_idcg.append(idcg)
    # Running (never reset) sums accumulate across positions as well as
    # queries, yielding a cumulative ratio curve.
    final_idcg = 0
    final_dcg = 0
    total_ndcg = []
    for i in range(len(answer_set[0])):
        for j in range(query_number):
            final_idcg += total_idcg[j][i]
            final_dcg += total_dcg[j][i]
        ndcg = final_dcg / final_idcg
        total_ndcg.append(ndcg)
    return total_ndcg
# Score the first query's relevant set, map the retrieved documents to
# those scores and compute the gain curves.
score = assign_score(relevance[0])
rank = gain(answer[0], relevance[0], score)
cg = cumulative_gain(rank)
# NOTE(review): this call's return value is discarded; it also has no
# side effect on `rank`, so the line appears to be dead — confirm.
discounted_cumulative_gain(rank)
ndcg = normalized_dcg(16, answer, relevance, score, rank)
# Plot and save the NDCG curve.
plt.plot(ndcg, 'g')
plt.xlabel("Answer Set")
plt.title("Normalized Discounted Cumulated Gain", loc = 'center')
plt.axis([0, 2500, 0 , 1])
plt.savefig("NDCG.png")
# combine graph and answer into one document
document = Document()
document.add_heading('Information Retrieval HW1', 0)
p1 = document.add_paragraph('Interpolated precision recall curve', style = 'List Number')
document.add_picture('interpolate.png', width=Cm(12))
p2 = document.add_paragraph('Mean average precision\n', style = 'List Number')
p2.add_run(map_answer)
p3 = document.add_paragraph('Normalized discounted cumulated gain', style = 'List Number')
document.add_picture('NDCG.png', width=Cm(12))
document.save('90899703Y_HW1.docx')
| [
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.clf",
"docx.shared.Cm",
"math.log2",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.close",
"matplotlib.pyplot.title",
"matplotlib.pyplot.cla",
"docx.Document"
] | [((4774, 4825), 'matplotlib.pyplot.plot', 'plt.plot', (['final_recall', 'final_precision'], {'marker': '"""."""'}), "(final_recall, final_precision, marker='.')\n", (4782, 4825), True, 'import matplotlib.pyplot as plt\n'), ((4829, 4849), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Recall"""'], {}), "('Recall')\n", (4839, 4849), True, 'import matplotlib.pyplot as plt\n'), ((4850, 4873), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Precision"""'], {}), "('Precision')\n", (4860, 4873), True, 'import matplotlib.pyplot as plt\n'), ((4874, 4936), 'matplotlib.pyplot.title', 'plt.title', (['"""Interpolated Recall-Precision Curve"""'], {'loc': '"""center"""'}), "('Interpolated Recall-Precision Curve', loc='center')\n", (4883, 4936), True, 'import matplotlib.pyplot as plt\n'), ((4969, 4995), 'matplotlib.pyplot.axis', 'plt.axis', (['[0, 100, 0, 100]'], {}), '([0, 100, 0, 100])\n', (4977, 4995), True, 'import matplotlib.pyplot as plt\n'), ((4993, 5023), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""interpolate.png"""'], {}), "('interpolate.png')\n", (5004, 5023), True, 'import matplotlib.pyplot as plt\n'), ((5024, 5052), 'matplotlib.pyplot.close', 'plt.close', (['"""interpolate.png"""'], {}), "('interpolate.png')\n", (5033, 5052), True, 'import matplotlib.pyplot as plt\n'), ((5053, 5062), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (5060, 5062), True, 'import matplotlib.pyplot as plt\n'), ((5063, 5072), 'matplotlib.pyplot.cla', 'plt.cla', ([], {}), '()\n', (5070, 5072), True, 'import matplotlib.pyplot as plt\n'), ((8239, 8258), 'matplotlib.pyplot.plot', 'plt.plot', (['ndcg', '"""g"""'], {}), "(ndcg, 'g')\n", (8247, 8258), True, 'import matplotlib.pyplot as plt\n'), ((8259, 8283), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Answer Set"""'], {}), "('Answer Set')\n", (8269, 8283), True, 'import matplotlib.pyplot as plt\n'), ((8284, 8347), 'matplotlib.pyplot.title', 'plt.title', (['"""Normalized Discounted Cumulated Gain"""'], {'loc': '"""center"""'}), 
"('Normalized Discounted Cumulated Gain', loc='center')\n", (8293, 8347), True, 'import matplotlib.pyplot as plt\n'), ((8350, 8375), 'matplotlib.pyplot.axis', 'plt.axis', (['[0, 2500, 0, 1]'], {}), '([0, 2500, 0, 1])\n', (8358, 8375), True, 'import matplotlib.pyplot as plt\n'), ((8377, 8400), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""NDCG.png"""'], {}), "('NDCG.png')\n", (8388, 8400), True, 'import matplotlib.pyplot as plt\n'), ((8458, 8468), 'docx.Document', 'Document', ([], {}), '()\n', (8466, 8468), False, 'from docx import Document\n'), ((8660, 8666), 'docx.shared.Cm', 'Cm', (['(12)'], {}), '(12)\n', (8662, 8666), False, 'from docx.shared import Cm\n'), ((8902, 8908), 'docx.shared.Cm', 'Cm', (['(12)'], {}), '(12)\n', (8904, 8908), False, 'from docx.shared import Cm\n'), ((6884, 6900), 'math.log2', 'math.log2', (['(i + 1)'], {}), '(i + 1)\n', (6893, 6900), False, 'import math\n')] |
import csv
import re
import netifaces as ni
from twisted.internet import defer
from twisted.names import client
from pygear.logging import log
from pygear.core.six.moves.urllib.parse import urlparse, urljoin
from .interfaces import ITaskStorage
csv.register_dialect('pipes', delimiter='|')

# URL schemes accepted for client callback endpoints.
client_callback_schemes = ['http', 'https']
default_scheme = 'http'

# Matches URLs that already carry one of the accepted schemes.
client_scheme_re = re.compile(r'^(%s)' % '|'.join(client_callback_schemes))
# Matches a bare dotted-quad IPv4 address.
ip_re = re.compile(r"^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$")
# Matches "scheme://IPv4" and captures both scheme and address.
ip_scheme_re = re.compile(r"^(%s)://(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})" % '|'.join(client_callback_schemes))


def get_tasks_db(config, app):
    """Return the task-storage component registered on *app*."""
    return app.getComponent(ITaskStorage)


def prepare_url(url):
    """Ensure *url* carries a scheme; prepend "http://" when it has none.

    Bug fix: previously only the bare scheme name was prepended (yielding
    e.g. "httpexample.com"), which left urlparse() without a netloc for
    hostname resolution.
    """
    if not client_scheme_re.match(url):
        url = default_scheme + '://' + url
    return url
def get_interface_ip(eth):
    """Return the first IPv4 address configured on network interface *eth*."""
    addresses = ni.ifaddresses(eth)
    # Key 2 corresponds to the AF_INET (IPv4) family; use its first entry.
    return addresses[2][0]['addr']
@defer.inlineCallbacks
def parse_clients_list(file_path):
    """Read the trusted-clients whitelist at *file_path* and resolve it.

    Each line is a pipe-separated "host|user|pass" record.  Hostnames are
    resolved to IP addresses via twisted's resolver, so the Deferred result
    maps IP -> {'host': ..., 'user': ..., 'pass': ...}.  A missing file
    yields an empty mapping.
    """
    # @TODO create a service to read trusted clients from DB
    trusted_clients = None
    try:
        # Use a context manager so the file handle is always closed
        # (the previous open().readlines() leaked the descriptor).
        with open(file_path, 'r') as clients_file:
            trusted_clients = [line.replace('\n', '') for line in clients_file]
    except IOError:
        log.warn("Trusted clients list not found.")

    clients_list = {}
    if trusted_clients:
        for row in csv.reader(trusted_clients, dialect='pipes', quotechar='!'):
            _host, _user, _pass = row
            if ip_re.match(_host):
                # Already an IP address; no DNS resolution needed.
                _ip = _host
            else:
                _host = prepare_url(_host)
                parsed_url = urlparse(_host)
                _ip = yield client.getHostByName(parsed_url.netloc)
            clients_list[_ip] = {'host': _host, 'user': _user, 'pass': _pass}
    defer.returnValue(clients_list)
def get_callback_auth_details(url, trusted_clients):
    """Return the (user, pass) pair for *url* if it maps to a trusted client.

    *trusted_clients* maps an IP address to {'host', 'user', 'pass'} records
    as produced by parse_clients_list().  Returns None when no entry matches.
    """
    match = ip_scheme_re.match(url)
    if not match or len(match.groups()) < 2:
        # NOTE(review): client.getHostByName returns a Deferred, not a plain
        # string, so the equality test below can never match in this branch --
        # confirm whether this path should wait on / yield the resolution.
        ip = client.getHostByName(url)
    else:
        scheme, ip = match.groups()
    # NOTE(review): iteritems() is Python 2 only; this module appears to
    # target py2 (see pygear.core.six import at file top).
    for client_ip, details in trusted_clients.iteritems():
        if ip == client_ip:
            return details['user'], details['pass']
    return None
def get_serve_uri(config):
    """Return the absolute base URL under which static files are served."""
    rest_port = config.getint('rest_port', 4000)
    eth = config.get('eth', 'eth1')  # interface carrying the private IP
    rest_host = 'http://%s' % get_interface_ip(eth)
    files_static_serve_path = config.get('static_serve_path', 'files')
    # Normalise: host without a trailing slash, serve path with exactly one.
    if rest_host.endswith('/'):
        rest_host = rest_host[:-1]
    if not files_static_serve_path.endswith('/'):
        files_static_serve_path += '/'
    base_url = '%s:%s/' % (rest_host, rest_port)
    return urljoin(base_url, files_static_serve_path)
def get_file_path(filename, base_path):
pass | [
"csv.register_dialect",
"pygear.logging.log.warn",
"re.compile",
"twisted.names.client.getHostByName",
"twisted.internet.defer.returnValue",
"netifaces.ifaddresses",
"pygear.core.six.moves.urllib.parse.urljoin",
"pygear.core.six.moves.urllib.parse.urlparse",
"csv.reader"
] | [((249, 293), 'csv.register_dialect', 'csv.register_dialect', (['"""pipes"""'], {'delimiter': '"""|"""'}), "('pipes', delimiter='|')\n", (269, 293), False, 'import csv\n'), ((447, 504), 're.compile', 're.compile', (['"""^\\\\d{1,3}\\\\.\\\\d{1,3}\\\\.\\\\d{1,3}\\\\.\\\\d{1,3}$"""'], {}), "('^\\\\d{1,3}\\\\.\\\\d{1,3}\\\\.\\\\d{1,3}\\\\.\\\\d{1,3}$')\n", (457, 504), False, 'import re\n'), ((838, 857), 'netifaces.ifaddresses', 'ni.ifaddresses', (['eth'], {}), '(eth)\n', (852, 857), True, 'import netifaces as ni\n'), ((2652, 2694), 'pygear.core.six.moves.urllib.parse.urljoin', 'urljoin', (['base_url', 'files_static_serve_path'], {}), '(base_url, files_static_serve_path)\n', (2659, 2694), False, 'from pygear.core.six.moves.urllib.parse import urlparse, urljoin\n'), ((1341, 1400), 'csv.reader', 'csv.reader', (['trusted_clients'], {'dialect': '"""pipes"""', 'quotechar': '"""!"""'}), "(trusted_clients, dialect='pipes', quotechar='!')\n", (1351, 1400), False, 'import csv\n'), ((1764, 1795), 'twisted.internet.defer.returnValue', 'defer.returnValue', (['clients_list'], {}), '(clients_list)\n', (1781, 1795), False, 'from twisted.internet import defer\n'), ((1945, 1970), 'twisted.names.client.getHostByName', 'client.getHostByName', (['url'], {}), '(url)\n', (1965, 1970), False, 'from twisted.names import client\n'), ((1231, 1274), 'pygear.logging.log.warn', 'log.warn', (['"""Trusted clinets list not found."""'], {}), "('Trusted clinets list not found.')\n", (1239, 1274), False, 'from pygear.logging import log\n'), ((1593, 1608), 'pygear.core.six.moves.urllib.parse.urlparse', 'urlparse', (['_host'], {}), '(_host)\n', (1601, 1608), False, 'from pygear.core.six.moves.urllib.parse import urlparse, urljoin\n'), ((1637, 1676), 'twisted.names.client.getHostByName', 'client.getHostByName', (['parsed_url.netloc'], {}), '(parsed_url.netloc)\n', (1657, 1676), False, 'from twisted.names import client\n')] |
import os
import json
import math
from neuralparticles.tensorflow.tools.hyper_parameter import HyperParameter, ValueType, SearchType
from neuralparticles.tensorflow.tools.hyper_search import HyperSearch
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import keras
from neuralparticles.tensorflow.models.PUNet import PUNet
from neuralparticles.tools.param_helpers import *
from neuralparticles.tools.data_helpers import load_patches_from_file, PatchExtractor, get_data_pair, extract_particles
from neuralparticles.tensorflow.tools.eval_helpers import EvalCallback, EvalCompleteCallback
import numpy as np
# Command-line parameters (read via the project helper `getParam`).
data_path = getParam("data", "data/")
config_path = getParam("config", "config/version_00.txt")
verbose = int(getParam("verbose", 0)) != 0
gpu = getParam("gpu", "")
epochs = int(getParam("epochs", 0))
eval_cnt = int(getParam("eval_cnt", 5))

# Optional comma-separated evaluation selections, e.g.:
#   eval_d='18,18,18,19,19'  eval_t='5,5,6,6,7'
#   eval_v='0,0,0,0,0'       eval_i='11,77,16,21,45'
eval_dataset = getParam("eval_d", [])
eval_t = getParam("eval_t", [])
eval_var = getParam("eval_v", [])
eval_patch_idx = getParam("eval_i", [])

if len(eval_dataset) > 0:
    eval_dataset = [int(v) for v in eval_dataset.split(',')]
if len(eval_t) > 0:
    eval_t = [int(v) for v in eval_t.split(',')]
if len(eval_var) > 0:
    eval_var = [int(v) for v in eval_var.split(',')]
if len(eval_patch_idx) > 0:
    eval_patch_idx = [float(v) for v in eval_patch_idx.split(',')]

# Collect numbered hyper-parameter specs (hyper0, hyper1, ...) until a gap.
hyper_teams = []
hyper_idx = 0
while True:
    raw_param = getParam("hyper%d" % hyper_idx, None)
    hyper_idx += 1
    if raw_param is None:
        break
    hyper_teams.append(HyperParameter.parse(raw_param))

checkUnusedParams()
# Derived data locations.
src_path = data_path + "patches/source/"
ref_path = data_path + "patches/reference/"
model_path = data_path + "models/"

if not os.path.exists(model_path):
    os.mkdir(model_path)

# Snapshot the sources next to the data and prepare per-run output folders.
tmp_folder = backupSources(data_path)
tmp_model_path = tmp_folder + "models/"
os.mkdir(tmp_model_path)
tmp_eval_path = tmp_folder + "eval/"
os.mkdir(tmp_eval_path)

# Restrict visible GPUs when one was requested on the command line.
# (was `if not gpu is ""` -- identity comparison with a string literal is
# unreliable; compare by value instead)
if gpu != "":
    os.environ["CUDA_VISIBLE_DEVICES"] = gpu

# Load the main config plus the data/preprocess/train configs it references
# (all paths are relative to the main config file's directory).
with open(config_path, 'r') as f:
    config = json.load(f)

with open(os.path.dirname(config_path) + '/' + config['data'], 'r') as f:
    data_config = json.load(f)

with open(os.path.dirname(config_path) + '/' + config['preprocess'], 'r') as f:
    pre_config = json.load(f)

with open(os.path.dirname(config_path) + '/' + config['train'], 'r') as f:
    train_config = json.load(f)

if verbose:
    print("Config Loaded:")
    print(config)
    print(data_config)
    print(pre_config)
    print(train_config)

# copy config files into tmp
# Seed numpy so the random frame/dataset selections below are reproducible.
np.random.seed(data_config['seed'])
#tf.set_random_seed(data_config['seed'])

# A command-line epoch count of 0 means "use the value from the train config".
if epochs == 0:
    epochs = train_config['epochs']

# Merge all config sections into one kwargs dict for the network and search.
config_dict = {**data_config, **pre_config, **train_config}
punet = PUNet(**config_dict)

# Pad the evaluation selections with random picks up to eval_cnt entries.
# Datasets are drawn from the hold-out range beyond the train split.
if len(eval_dataset) < eval_cnt:
    eval_dataset.extend(np.random.randint(int(data_config['data_count'] * train_config['train_split']), data_config['data_count'], eval_cnt-len(eval_dataset)))
if len(eval_t) < eval_cnt:
    t_start = min(train_config['t_start'], data_config['frame_count']-1)
    t_end = min(train_config['t_end'], data_config['frame_count'])
    eval_t.extend(np.random.randint(t_start, t_end, eval_cnt-len(eval_t)))
if len(eval_var) < eval_cnt:
    eval_var.extend([0]*(eval_cnt-len(eval_var)))
if len(eval_patch_idx) < eval_cnt:
    eval_patch_idx.extend(np.random.random(eval_cnt-len(eval_patch_idx)))

# Output locations: model prefix plus patch-file path templates with
# printf-style placeholders for dataset id, variant, patch-variant and frame.
tmp_model_path = '%s%s_%s' % (tmp_model_path, data_config['prefix'], config['id'])
fig_path = '%s_loss' % tmp_model_path

src_path = "%s%s_%s-%s" % (src_path, data_config['prefix'], data_config['id'], pre_config['id']) + "_d%03d_var%02d_pvar%02d_%03d"
ref_path = "%s%s_%s-%s" % (ref_path, data_config['prefix'], data_config['id'], pre_config['id']) + "_d%03d_var%02d_pvar%02d_%03d"

print(src_path)
print(ref_path)

print("Load Training Data")
# Load all training patches and shuffle source/reference pairs in unison.
src_data, ref_data = load_patches_from_file(data_path, config_path)

idx = np.arange(src_data[0].shape[0])
np.random.shuffle(idx)

src_data = [s[idx] for s in src_data]
ref_data = ref_data[idx]

print("Load Eval Data")
# Upscaling factor per spatial dimension and the resulting patch sizes.
factor_d = math.pow(pre_config['factor'], 1/data_config['dim'])

patch_size = pre_config['patch_size'] * data_config['res'] / factor_d
patch_size_ref = pre_config['patch_size_ref'] * data_config['res']

eval_patch_extractors = []
eval_ref_datas = []
eval_src_patches = []
eval_ref_patches = []

# Prepare one fixed evaluation patch per requested (dataset, time, variant).
for i in range(len(eval_dataset)):
    (eval_src_data, eval_sdf_data, eval_par_aux), (eval_ref_data, eval_ref_sdf_data) = get_data_pair(data_path, config_path, eval_dataset[i], eval_t[i], eval_var[i])
    eval_ref_datas.append(eval_ref_data)
    # Fixed seed before extraction -- presumably so the patch positions are
    # identical on every run; TODO confirm against PatchExtractor internals.
    np.random.seed(100)
    eval_patch_extractors.append(PatchExtractor(eval_src_data, eval_sdf_data, patch_size, pre_config['par_cnt'], pre_config['surf'], pre_config['stride'], aux_data=eval_par_aux, features=train_config['features'], pad_val=pre_config['pad_val'], bnd=data_config['bnd']/factor_d))
    # eval_patch_idx holds fractions in [0,1); map them onto the position list.
    p_idx = int(eval_patch_idx[i] * len(eval_patch_extractors[i].positions))
    eval_src_patches.append(eval_patch_extractors[i].get_patch(p_idx,False))
    eval_ref_patches.append(extract_particles(eval_ref_data, eval_patch_extractors[i].positions[p_idx] * factor_d, pre_config['par_cnt_ref'], patch_size_ref/2, pre_config['pad_val'])[0])

    print("Eval with dataset %d, timestep %d, var %d, patch idx %d" % (eval_dataset[i], eval_t[i], eval_var[i], p_idx))
    print("Eval trunc src: %d" % (np.count_nonzero(eval_src_patches[i][0][:,:,:1] != pre_config['pad_val'])))
    print("Eval trunc ref: %d" % (np.count_nonzero(eval_ref_patches[i][:,:1] != pre_config['pad_val'])))

config_dict['src'] = src_data
config_dict['ref'] = ref_data

# Callbacks that render patch-level and full-frame previews during training.
config_dict['callbacks'] = [(EvalCallback(tmp_eval_path + "eval_patch", eval_src_patches, eval_ref_patches,
                                          train_config['features'], multiple_runs=True, z=None if data_config['dim'] == 2 else 0, verbose=1)),
                            (EvalCompleteCallback(tmp_eval_path + "eval", eval_patch_extractors, eval_ref_datas,
                                                  factor_d, data_config['res'], multiple_runs=True, z=None if data_config['dim'] == 2 else data_config['res']//2, verbose=1))]

hs = HyperSearch(punet, hyper_teams, output_folder=tmp_folder)

# `epochs` is passed explicitly to search(); drop it from the kwargs dict.
del config_dict['epochs']
history = hs.search(epochs, **config_dict)

keras.utils.plot_model(punet.model, tmp_model_path + '.pdf')
| [
"neuralparticles.tools.data_helpers.extract_particles",
"numpy.count_nonzero",
"neuralparticles.tensorflow.tools.eval_helpers.EvalCallback",
"numpy.arange",
"os.path.exists",
"keras.utils.plot_model",
"neuralparticles.tensorflow.models.PUNet.PUNet",
"neuralparticles.tools.data_helpers.load_patches_fro... | [((224, 245), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (238, 245), False, 'import matplotlib\n'), ((1881, 1905), 'os.mkdir', 'os.mkdir', (['tmp_model_path'], {}), '(tmp_model_path)\n', (1889, 1905), False, 'import os\n'), ((1943, 1966), 'os.mkdir', 'os.mkdir', (['tmp_eval_path'], {}), '(tmp_eval_path)\n', (1951, 1966), False, 'import os\n'), ((2608, 2643), 'numpy.random.seed', 'np.random.seed', (["data_config['seed']"], {}), "(data_config['seed'])\n", (2622, 2643), True, 'import numpy as np\n'), ((2807, 2827), 'neuralparticles.tensorflow.models.PUNet.PUNet', 'PUNet', ([], {}), '(**config_dict)\n', (2812, 2827), False, 'from neuralparticles.tensorflow.models.PUNet import PUNet\n'), ((3926, 3972), 'neuralparticles.tools.data_helpers.load_patches_from_file', 'load_patches_from_file', (['data_path', 'config_path'], {}), '(data_path, config_path)\n', (3948, 3972), False, 'from neuralparticles.tools.data_helpers import load_patches_from_file, PatchExtractor, get_data_pair, extract_particles\n'), ((3980, 4011), 'numpy.arange', 'np.arange', (['src_data[0].shape[0]'], {}), '(src_data[0].shape[0])\n', (3989, 4011), True, 'import numpy as np\n'), ((4012, 4034), 'numpy.random.shuffle', 'np.random.shuffle', (['idx'], {}), '(idx)\n', (4029, 4034), True, 'import numpy as np\n'), ((4135, 4189), 'math.pow', 'math.pow', (["pre_config['factor']", "(1 / data_config['dim'])"], {}), "(pre_config['factor'], 1 / data_config['dim'])\n", (4143, 4189), False, 'import math\n'), ((6245, 6302), 'neuralparticles.tensorflow.tools.hyper_search.HyperSearch', 'HyperSearch', (['punet', 'hyper_teams'], {'output_folder': 'tmp_folder'}), '(punet, hyper_teams, output_folder=tmp_folder)\n', (6256, 6302), False, 'from neuralparticles.tensorflow.tools.hyper_search import HyperSearch\n'), ((6373, 6433), 'keras.utils.plot_model', 'keras.utils.plot_model', (['punet.model', "(tmp_model_path + '.pdf')"], {}), "(punet.model, 
tmp_model_path + '.pdf')\n", (6395, 6433), False, 'import keras\n'), ((1752, 1778), 'os.path.exists', 'os.path.exists', (['model_path'], {}), '(model_path)\n', (1766, 1778), False, 'import os\n'), ((1781, 1801), 'os.mkdir', 'os.mkdir', (['model_path'], {}), '(model_path)\n', (1789, 1801), False, 'import os\n'), ((4540, 4618), 'neuralparticles.tools.data_helpers.get_data_pair', 'get_data_pair', (['data_path', 'config_path', 'eval_dataset[i]', 'eval_t[i]', 'eval_var[i]'], {}), '(data_path, config_path, eval_dataset[i], eval_t[i], eval_var[i])\n', (4553, 4618), False, 'from neuralparticles.tools.data_helpers import load_patches_from_file, PatchExtractor, get_data_pair, extract_particles\n'), ((4665, 4684), 'numpy.random.seed', 'np.random.seed', (['(100)'], {}), '(100)\n', (4679, 4684), True, 'import numpy as np\n'), ((5730, 5915), 'neuralparticles.tensorflow.tools.eval_helpers.EvalCallback', 'EvalCallback', (["(tmp_eval_path + 'eval_patch')", 'eval_src_patches', 'eval_ref_patches', "train_config['features']"], {'multiple_runs': '(True)', 'z': "(None if data_config['dim'] == 2 else 0)", 'verbose': '(1)'}), "(tmp_eval_path + 'eval_patch', eval_src_patches,\n eval_ref_patches, train_config['features'], multiple_runs=True, z=None if\n data_config['dim'] == 2 else 0, verbose=1)\n", (5742, 5915), False, 'from neuralparticles.tensorflow.tools.eval_helpers import EvalCallback, EvalCompleteCallback\n'), ((5981, 6198), 'neuralparticles.tensorflow.tools.eval_helpers.EvalCompleteCallback', 'EvalCompleteCallback', (["(tmp_eval_path + 'eval')", 'eval_patch_extractors', 'eval_ref_datas', 'factor_d', "data_config['res']"], {'multiple_runs': '(True)', 'z': "(None if data_config['dim'] == 2 else data_config['res'] // 2)", 'verbose': '(1)'}), "(tmp_eval_path + 'eval', eval_patch_extractors,\n eval_ref_datas, factor_d, data_config['res'], multiple_runs=True, z=\n None if data_config['dim'] == 2 else data_config['res'] // 2, verbose=1)\n", (6001, 6198), False, 'from 
neuralparticles.tensorflow.tools.eval_helpers import EvalCallback, EvalCompleteCallback\n'), ((4718, 4978), 'neuralparticles.tools.data_helpers.PatchExtractor', 'PatchExtractor', (['eval_src_data', 'eval_sdf_data', 'patch_size', "pre_config['par_cnt']", "pre_config['surf']", "pre_config['stride']"], {'aux_data': 'eval_par_aux', 'features': "train_config['features']", 'pad_val': "pre_config['pad_val']", 'bnd': "(data_config['bnd'] / factor_d)"}), "(eval_src_data, eval_sdf_data, patch_size, pre_config[\n 'par_cnt'], pre_config['surf'], pre_config['stride'], aux_data=\n eval_par_aux, features=train_config['features'], pad_val=pre_config[\n 'pad_val'], bnd=data_config['bnd'] / factor_d)\n", (4732, 4978), False, 'from neuralparticles.tools.data_helpers import load_patches_from_file, PatchExtractor, get_data_pair, extract_particles\n'), ((1569, 1600), 'neuralparticles.tensorflow.tools.hyper_parameter.HyperParameter.parse', 'HyperParameter.parse', (['hyper_par'], {}), '(hyper_par)\n', (1589, 1600), False, 'from neuralparticles.tensorflow.tools.hyper_parameter import HyperParameter, ValueType, SearchType\n'), ((5145, 5310), 'neuralparticles.tools.data_helpers.extract_particles', 'extract_particles', (['eval_ref_data', '(eval_patch_extractors[i].positions[p_idx] * factor_d)', "pre_config['par_cnt_ref']", '(patch_size_ref / 2)', "pre_config['pad_val']"], {}), "(eval_ref_data, eval_patch_extractors[i].positions[p_idx] *\n factor_d, pre_config['par_cnt_ref'], patch_size_ref / 2, pre_config[\n 'pad_val'])\n", (5162, 5310), False, 'from neuralparticles.tools.data_helpers import load_patches_from_file, PatchExtractor, get_data_pair, extract_particles\n'), ((5459, 5534), 'numpy.count_nonzero', 'np.count_nonzero', (["(eval_src_patches[i][0][:, :, :1] != pre_config['pad_val'])"], {}), "(eval_src_patches[i][0][:, :, :1] != pre_config['pad_val'])\n", (5475, 5534), True, 'import numpy as np\n'), ((5569, 5638), 'numpy.count_nonzero', 'np.count_nonzero', (["(eval_ref_patches[i][:, :1] != 
pre_config['pad_val'])"], {}), "(eval_ref_patches[i][:, :1] != pre_config['pad_val'])\n", (5585, 5638), True, 'import numpy as np\n'), ((2111, 2139), 'os.path.dirname', 'os.path.dirname', (['config_path'], {}), '(config_path)\n', (2126, 2139), False, 'import os\n'), ((2225, 2253), 'os.path.dirname', 'os.path.dirname', (['config_path'], {}), '(config_path)\n', (2240, 2253), False, 'import os\n'), ((2344, 2372), 'os.path.dirname', 'os.path.dirname', (['config_path'], {}), '(config_path)\n', (2359, 2372), False, 'import os\n')] |
from collections import defaultdict
from copy import copy
import re
import json
from django.conf import settings
from django.test import override_settings, TestCase
import responses
from prison.models import PrisonerLocation
from prison.tests.utils import (
load_prisoner_locations_from_dev_prison_api,
random_prisoner_number,
random_prisoner_name,
random_prisoner_dob,
)
class LoadPrisonerLocationsFromDevPrisonAPITestCase(TestCase):
    """Tests for load_prisoner_locations_from_dev_prison_api().

    The HMPPS auth and Prison APIs are mocked with `responses`; the helper
    under test is expected to fetch live rolls per prison and create one
    PrisonerLocation per selected prisoner.
    """

    fixtures = [
        'initial_types.json',
        'initial_groups.json',
        'dev_prison_api_prisons.json',
    ]

    def setUp(self):
        # Build fake live rolls and offender records for three prisons.
        self.prison_ids = [
            'BWI',  # HMP Berwyn
            'NMI',  # HMP Nottingham
            'WLI',  # HMP Wayland
        ]
        prisoners_per_prison = {
            'BWI': 1,
            'NMI': 5,
            'WLI': 2,
        }
        # Dictionaries with data returned by mocked API
        self.api_live_roll = defaultdict(list)
        self.api_offenders_info = {}
        # Location of each test prisoner
        self.prisoner_location = {}
        for prison_id, n_prisoners in prisoners_per_prison.items():
            for _ in range(n_prisoners):
                prisoner_id = random_prisoner_number()
                prisoner_info = self.random_prisoner()
                self.api_live_roll[prison_id].append(prisoner_id)
                self.api_offenders_info[prisoner_id] = prisoner_info
                self.prisoner_location[prisoner_id] = prison_id
        self.n_prisoners_desired = 5
        # Expected pick when asking for 5 prisoners:
        # 1 prisoner from BWI
        self.expected_prisoner_ids = self.api_live_roll['BWI']
        # first 2 prisoners from NMI
        # (sorted copy -- presumably mirroring the ordering applied by the
        # loader; confirm against its implementation)
        prisoners = copy(self.api_live_roll['NMI'])
        prisoners.sort()
        self.expected_prisoner_ids = self.expected_prisoner_ids + prisoners[:2]
        # another 2 prisoners from WLI
        self.expected_prisoner_ids = self.expected_prisoner_ids + self.api_live_roll['WLI']

    def random_prisoner(self):
        # Offender info shaped like an HMPPS Prison API response.
        full_name = random_prisoner_name()
        first_name = full_name.split(' ')[0]
        last_name = full_name.split(' ')[1]
        return {
            'given_name': first_name,
            'middle_names': '',
            'surname': last_name,
            'date_of_birth': str(random_prisoner_dob()),
            # HMPPS Prison API returns more information not included here
        }

    def get_live_roll_callback(self, request):
        # Mock for `GET prison/PRISON_ID/live_roll`
        # Second-to-last path component is the prison id.
        prison_id = request.path_url.split('/')[-2]
        live_roll = {
            'noms_ids': self.api_live_roll[prison_id],
        }
        return (200, {}, json.dumps(live_roll))

    def get_offender_info_callback(self, request):
        # Mock for `GET offenders/PRISONER_ID`
        # Last path component is the prisoner number.
        prisoner_id = request.path_url.split('/')[-1]
        prisoner_info = self.api_offenders_info[prisoner_id]
        return (200, {}, json.dumps(prisoner_info))

    @override_settings(
        HMPPS_CLIENT_SECRET='test-secret',
        HMPPS_AUTH_BASE_URL='https://sign-in-dev.hmpps.local/auth/',
        HMPPS_PRISON_API_BASE_URL='https://api-dev.prison.local/',
    )
    def test_load_prisoner_locations_from_dev_prison_api(self):
        """Creates exactly n_prisoners_desired locations matching the mocks."""
        n_prisoner_locations_before = PrisonerLocation.objects.count()
        with responses.RequestsMock() as rsps:
            # OAuth token endpoint used by the API client.
            rsps.add(
                responses.POST,
                f'{settings.HMPPS_AUTH_BASE_URL}oauth/token',
                json={
                    'access_token': '<PASSWORD>',
                    'expires_in': 3600,
                },
            )
            rsps.add_callback(
                responses.GET,
                re.compile(f'{settings.HMPPS_PRISON_API_BASE_URL}api/v1/prison/[A-Z]+/live_roll'),
                callback=self.get_live_roll_callback,
            )
            rsps.add_callback(
                responses.GET,
                re.compile(f'{settings.HMPPS_PRISON_API_BASE_URL}api/v1/offenders/*'),
                callback=self.get_offender_info_callback,
            )
            load_prisoner_locations_from_dev_prison_api(self.n_prisoners_desired)
        n_prisoner_locations_after = PrisonerLocation.objects.count()
        n_prisoner_locations_created = n_prisoner_locations_after - n_prisoner_locations_before
        self.assertEqual(self.n_prisoners_desired, n_prisoner_locations_created)
        # Each expected prisoner got a location with matching name and DOB.
        for prisoner_id in self.expected_prisoner_ids:
            prisoner_info = self.api_offenders_info[prisoner_id]
            prison_id = self.prisoner_location[prisoner_id]
            location = PrisonerLocation.objects.filter(
                prisoner_number=prisoner_id,
                prison_id=prison_id,
            )
            self.assertTrue(location.exists())
            location = location.first()
            self.assertEqual(location.prisoner_number, prisoner_id)
            expected_name = prisoner_info['given_name'] + ' ' + prisoner_info['surname']
            self.assertEqual(location.prisoner_name, expected_name)
            self.assertEqual(str(location.prisoner_dob), prisoner_info['date_of_birth'])
| [
"prison.tests.utils.random_prisoner_dob",
"prison.tests.utils.random_prisoner_number",
"re.compile",
"json.dumps",
"django.test.override_settings",
"collections.defaultdict",
"prison.tests.utils.random_prisoner_name",
"responses.RequestsMock",
"copy.copy",
"prison.models.PrisonerLocation.objects.f... | [((2932, 3114), 'django.test.override_settings', 'override_settings', ([], {'HMPPS_CLIENT_SECRET': '"""test-secret"""', 'HMPPS_AUTH_BASE_URL': '"""https://sign-in-dev.hmpps.local/auth/"""', 'HMPPS_PRISON_API_BASE_URL': '"""https://api-dev.prison.local/"""'}), "(HMPPS_CLIENT_SECRET='test-secret', HMPPS_AUTH_BASE_URL=\n 'https://sign-in-dev.hmpps.local/auth/', HMPPS_PRISON_API_BASE_URL=\n 'https://api-dev.prison.local/')\n", (2949, 3114), False, 'from django.test import override_settings, TestCase\n'), ((937, 954), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (948, 954), False, 'from collections import defaultdict\n'), ((1677, 1708), 'copy.copy', 'copy', (["self.api_live_roll['NMI']"], {}), "(self.api_live_roll['NMI'])\n", (1681, 1708), False, 'from copy import copy\n'), ((1997, 2019), 'prison.tests.utils.random_prisoner_name', 'random_prisoner_name', ([], {}), '()\n', (2017, 2019), False, 'from prison.tests.utils import load_prisoner_locations_from_dev_prison_api, random_prisoner_number, random_prisoner_name, random_prisoner_dob\n'), ((3238, 3270), 'prison.models.PrisonerLocation.objects.count', 'PrisonerLocation.objects.count', ([], {}), '()\n', (3268, 3270), False, 'from prison.models import PrisonerLocation\n'), ((4152, 4184), 'prison.models.PrisonerLocation.objects.count', 'PrisonerLocation.objects.count', ([], {}), '()\n', (4182, 4184), False, 'from prison.models import PrisonerLocation\n'), ((2636, 2657), 'json.dumps', 'json.dumps', (['live_roll'], {}), '(live_roll)\n', (2646, 2657), False, 'import json\n'), ((2899, 2924), 'json.dumps', 'json.dumps', (['prisoner_info'], {}), '(prisoner_info)\n', (2909, 2924), False, 'import json\n'), ((3285, 3309), 'responses.RequestsMock', 'responses.RequestsMock', ([], {}), '()\n', (3307, 3309), False, 'import responses\n'), ((4044, 4113), 'prison.tests.utils.load_prisoner_locations_from_dev_prison_api', 'load_prisoner_locations_from_dev_prison_api', 
(['self.n_prisoners_desired'], {}), '(self.n_prisoners_desired)\n', (4087, 4113), False, 'from prison.tests.utils import load_prisoner_locations_from_dev_prison_api, random_prisoner_number, random_prisoner_name, random_prisoner_dob\n'), ((4568, 4654), 'prison.models.PrisonerLocation.objects.filter', 'PrisonerLocation.objects.filter', ([], {'prisoner_number': 'prisoner_id', 'prison_id': 'prison_id'}), '(prisoner_number=prisoner_id, prison_id=\n prison_id)\n', (4599, 4654), False, 'from prison.models import PrisonerLocation\n'), ((1209, 1233), 'prison.tests.utils.random_prisoner_number', 'random_prisoner_number', ([], {}), '()\n', (1231, 1233), False, 'from prison.tests.utils import load_prisoner_locations_from_dev_prison_api, random_prisoner_number, random_prisoner_name, random_prisoner_dob\n'), ((2263, 2284), 'prison.tests.utils.random_prisoner_dob', 'random_prisoner_dob', ([], {}), '()\n', (2282, 2284), False, 'from prison.tests.utils import load_prisoner_locations_from_dev_prison_api, random_prisoner_number, random_prisoner_name, random_prisoner_dob\n'), ((3659, 3745), 're.compile', 're.compile', (['f"""{settings.HMPPS_PRISON_API_BASE_URL}api/v1/prison/[A-Z]+/live_roll"""'], {}), "(\n f'{settings.HMPPS_PRISON_API_BASE_URL}api/v1/prison/[A-Z]+/live_roll')\n", (3669, 3745), False, 'import re\n'), ((3888, 3957), 're.compile', 're.compile', (['f"""{settings.HMPPS_PRISON_API_BASE_URL}api/v1/offenders/*"""'], {}), "(f'{settings.HMPPS_PRISON_API_BASE_URL}api/v1/offenders/*')\n", (3898, 3957), False, 'import re\n')] |
from BridgePython import Bridge
# Create a Bridge client authenticated with the account API key.
bridge = Bridge(api_key='myapikey')
# Handler object whose methods are exposed to remote Bridge clients.
class PongHandler(object):
    def pong(self):
        # Invoked remotely by peers that looked up the "pong" service.
        print ("PONG!")
# Publish the handler under the service name "pong".
bridge.store_service("pong", PongHandler())
# Look up the remote "ping" service and invoke its ping() operation.
bridge.get_service("ping").ping()
# Start the client's connection / event loop.
bridge.connect()
| [
"BridgePython.Bridge"
] | [((41, 67), 'BridgePython.Bridge', 'Bridge', ([], {'api_key': '"""myapikey"""'}), "(api_key='myapikey')\n", (47, 67), False, 'from BridgePython import Bridge\n')] |
import torch.nn as nn
class FcNet(nn.Module):
    """Configurable stack of fully-connected blocks with a linear head.

    `config` supplies "num_layers" plus per-layer keys such as
    "num_units_<i>"; `nr_labels` sets the output width.
    """

    def __init__(self, config, input_features, nr_labels):
        super(FcNet, self).__init__()
        self.config = config
        # Hidden stack followed by the classification head.
        self.layers = self._make_block(self.config["num_layers"], input_features)
        last_width = self.config["num_units_%i" % self.config["num_layers"]]
        self.fc_layer = nn.Linear(last_width, int(nr_labels))
        # Initialise every batch-norm to the identity transform.
        for module in self.modules():
            if isinstance(module, nn.BatchNorm1d):
                module.weight.data.fill_(1)
                module.bias.data.zero_()

    def forward(self, x):
        return self.fc_layer(self.layers(x))

    def _make_block(self, nr_layers, input_features):
        # widths[k] is the input width of block k+1 (block numbering is 1-based).
        widths = [input_features]
        widths += [self.config["num_units_%i" % i] for i in range(1, nr_layers)]
        blocks = [BasicBlock(width, self.config, k + 1) for k, width in enumerate(widths)]
        return nn.Sequential(*blocks)
class BasicBlock(nn.Module):
    """Linear -> ReLU -> [Dropout] -> [BatchNorm1d] building block.

    Dropout and batch-norm are only created when the config enables them
    ('activate_dropout' / 'activate_batch_norm' set to 'Yes').
    """

    def __init__(self, in_features, config, block_nr):
        super(BasicBlock, self).__init__()
        self.dropout_activated = config['activate_dropout'] == 'Yes'
        self.batch_norm_activated = config['activate_batch_norm'] == 'Yes'
        self.training = True
        out_features = config['num_units_%i' % block_nr]
        self.linear = nn.Linear(in_features, out_features)
        self.relu = nn.ReLU(inplace=True)
        if self.dropout_activated:
            self.dropout = nn.Dropout(p=config['dropout_%i' % block_nr])
        if self.batch_norm_activated:
            self.batch_norm = nn.BatchNorm1d(out_features)

    def forward(self, x):
        out = self.relu(self.linear(x))
        if self.dropout_activated:
            out = self.dropout(out)
        if self.batch_norm_activated:
            out = self.batch_norm(out)
        return out
| [
"torch.nn.ReLU",
"torch.nn.Dropout",
"torch.nn.Sequential",
"torch.nn.BatchNorm1d",
"torch.nn.Linear"
] | [((951, 973), 'torch.nn.Sequential', 'nn.Sequential', (['*blocks'], {}), '(*blocks)\n', (964, 973), True, 'import torch.nn as nn\n'), ((1338, 1395), 'torch.nn.Linear', 'nn.Linear', (['in_features', "config['num_units_%i' % block_nr]"], {}), "(in_features, config['num_units_%i' % block_nr])\n", (1347, 1395), True, 'import torch.nn as nn\n'), ((1416, 1437), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (1423, 1437), True, 'import torch.nn as nn\n'), ((1500, 1545), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': "config['dropout_%i' % block_nr]"}), "(p=config['dropout_%i' % block_nr])\n", (1510, 1545), True, 'import torch.nn as nn\n'), ((1614, 1663), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (["config['num_units_%i' % block_nr]"], {}), "(config['num_units_%i' % block_nr])\n", (1628, 1663), True, 'import torch.nn as nn\n')] |
from __future__ import annotations
import os
import string
import random
import logging
import vapoursynth as vs
from pathlib import Path
from requests import Session
from functools import partial
from requests_toolbelt import MultipartEncoder, MultipartEncoderMonitor
from typing import Any, Mapping, Callable, Dict, Final, List, NamedTuple, Optional, Set, cast
from PyQt5.QtGui import QImage
from PyQt5.QtCore import QObject, QThread, pyqtSignal
from PyQt5.QtWidgets import QHBoxLayout, QLabel, QLineEdit, QCheckBox, QPushButton, QComboBox, QProgressBar
from ...utils import set_qobject_names
from ...widgets import ComboBox, FrameEdit
from ...models import PictureTypes, VideoOutputs
from ...core import AbstractMainWindow, AbstractToolbar, PictureType, try_load, main_window
from .settings import CompSettings
_MAX_ATTEMPTS_PER_PICTURE_TYPE: Final[int] = 50
def select_frames(clip: vs.VideoNode, indices: List[int]) -> vs.VideoNode:
    """Return a clip consisting of *clip*'s frames at *indices*, in order."""
    placeholder = clip.std.BlankClip(length=len(indices))
    return placeholder.std.FrameEval(lambda n: clip[indices[n]])
class WorkerConfiguration(NamedTuple):
    """Immutable settings consumed by Worker.run() for one slow.pics upload."""

    outputs: VideoOutputs          # video outputs whose frames get extracted
    collection_name: str           # title of the slow.pics collection
    public: bool                   # sent as slow.pics 'public' flag
    nsfw: bool                     # sent as the slow.pics 'hentai' flag
    optimise: bool                 # sent as slow.pics 'optimizeImages' flag
    remove_after: Optional[int]    # sent as 'removeAfter' when set; None omits it
    frames: List[int]              # frame numbers extracted from every output
    compression: int               # quality argument passed to QImage.save()
    path: Path                     # base directory for the extracted PNGs
class Worker(QObject):
    """Background worker that extracts PNGs for every output and uploads
    them to slow.pics as a comparison collection.

    Signals:
        finished:        emitted (without arguments) when the run ends,
                         whether it completed or was cancelled.
        progress_bar:    0-100 percentage for the current long operation.
        progress_status: phase name (or final comparison URL) plus counters.
    """

    finished = pyqtSignal()
    progress_bar = pyqtSignal(int)
    progress_status = pyqtSignal(str, int, int)

    outputs: VideoOutputs
    is_finished = False

    def _progress_update_func(self, value: int, endvalue: int) -> None:
        """Forward absolute progress as a 0-100 percentage."""
        if value == 0:
            self.progress_bar.emit(0)
        else:
            self.progress_bar.emit(int(100 * value / endvalue))

    def run(self, conf: WorkerConfiguration) -> None:
        """Extract the configured frames of every output, then upload them.

        Emits `progress_status` with the comparison URL on success; always
        emits `finished` at the end.  Setting `is_finished` from another
        thread aborts at the next checkpoint.
        """
        self.conf = conf
        all_images: List[List[Path]] = []
        try:
            for i, output in enumerate(conf.outputs):
                if self.is_finished:
                    raise StopIteration
                self.progress_status.emit('extract', i + 1, len(conf.outputs))

                path_name = conf.path / output.name
                path_name.mkdir(parents=True)

                max_num = max(conf.frames)
                # Zero-pad frame numbers so the files sort naturally.
                path_images = [
                    path_name / (f'{output.name}_' + f'{f}'.zfill(len("%i" % max_num)) + '.png')
                    for f in conf.frames
                ]

                def _save(n: int, f: vs.VideoFrame) -> vs.VideoFrame:
                    if self.is_finished:
                        raise StopIteration
                    QImage(cast(bytes, f[0]), f.width, f.height, QImage.Format_RGB32).save(
                        str(path_images[n]), 'PNG', conf.compression
                    )
                    return f

                decimated = select_frames(output.prepared.clip, conf.frames)
                clip = decimated.std.ModifyFrame(decimated, _save)

                # Render the clip purely for _save's side effect of writing PNGs.
                with open(os.devnull, 'wb') as devnull:
                    clip.output(devnull, y4m=False, progress_update=self._progress_update_func)

                if self.is_finished:
                    raise StopIteration

                all_images.append(sorted(path_images))
        except StopIteration:
            # Bug fix: `finished` is declared without arguments, so emitting
            # with '' raised TypeError on every cancellation path.
            return self.finished.emit()

        fields: Dict[str, Any] = {}

        for i, (output, images) in enumerate(zip(conf.outputs, all_images)):
            if self.is_finished:
                return self.finished.emit()
            for j, (image, frame) in enumerate(zip(images, conf.frames)):
                if self.is_finished:
                    return self.finished.emit()
                fields[f'comparisons[{j}].name'] = str(frame)
                fields[f'comparisons[{j}].images[{i}].name'] = output.name
                fields[f'comparisons[{j}].images[{i}].file'] = (image.name, image.read_bytes(), 'image/png')

        self.progress_status.emit('upload', 0, 0)

        with Session() as sess:
            # Initial GET sets the XSRF-TOKEN cookie required by the POST below.
            sess.get('https://slow.pics/api/comparison')

            if self.is_finished:
                return self.finished.emit()

            head_conf = {
                'collectionName': conf.collection_name,
                'public': str(conf.public).lower(),
                'optimizeImages': str(conf.optimise).lower(),
                'hentai': str(conf.nsfw).lower(),
            }
            if conf.remove_after is not None:
                head_conf |= {'removeAfter': str(conf.remove_after)}

            def _monitor_cb(monitor: MultipartEncoderMonitor) -> None:
                self._progress_update_func(monitor.bytes_read, monitor.len)

            files = MultipartEncoder(head_conf | fields)
            monitor = MultipartEncoderMonitor(files, _monitor_cb)

            response = sess.post(
                'https://slow.pics/api/comparison',
                monitor.to_string(),  # type: ignore
                headers={
                    "Accept": "*/*",
                    "Accept-Encoding": "gzip, deflate, br",
                    "Accept-Language": "en-US,en;q=0.5",
                    "Content-Length": str(files.len),
                    "Content-Type": files.content_type,
                    "Origin": "https://slow.pics/",
                    "Referer": "https://slow.pics/comparison",
                    "Sec-Fetch-Mode": "cors",
                    "Sec-Fetch-Site": "same-origin",
                    "User-Agent": (
                        "Mozilla/5.0 (Windows NT 10.0; Win64; x64) "
                        "AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36"
                    ),
                    "X-XSRF-TOKEN": sess.cookies.get_dict()["XSRF-TOKEN"]  # noqa
                }
            )

        self.progress_status.emit(f'https://slow.pics/c/{response.text}', 0, 0)
        self.finished.emit()
# Toolbar driving slow.pics comparison uploads: frame-selection controls,
# start/stop buttons and progress reporting, all wired to a background
# Worker running on its own QThread.
class CompToolbar(AbstractToolbar):
# Attributes persisted through __getstate__/__setstate__.
_storable_attrs = ('settings', 'visibility')
# True while an upload QThread is alive; guards against double-starts.
_thread_running = False
__slots__ = (
*_storable_attrs, 'random_frames_control', 'manual_frames_lineedit',
'current_frame_checkbox', 'is_public_checkbox', 'is_nsfw_checkbox',
'output_url_lineedit', 'output_url_copy_button', 'start_upload_button', 'stop_upload_button',
'upload_progressbar', 'upload_status_label', 'upload_status_elements'
)
def __init__(self, main: AbstractMainWindow) -> None:
super().__init__(main, CompSettings())
self.setup_ui()
set_qobject_names(self)
# Build the toolbar widgets left-to-right and connect their signals.
def setup_ui(self) -> None:
layout = QHBoxLayout(self)
layout.setContentsMargins(0, 0, 0, 0)
random_frames_label = QLabel('Num Random Frames:', self)
layout.addWidget(random_frames_label)
self.random_frames_control = FrameEdit(self)
layout.addWidget(self.random_frames_control)
manual_frames_label = QLabel('Additional Frames:', self)
layout.addWidget(manual_frames_label)
self.manual_frames_lineedit = QLineEdit(self)
self.manual_frames_lineedit.setPlaceholderText('frame,frame,frame')
layout.addWidget(self.manual_frames_lineedit)
self.current_frame_checkbox = QCheckBox('Current Frame', self)
self.current_frame_checkbox.setChecked(True)
layout.addWidget(self.current_frame_checkbox)
layout.addWidget(self.get_separator())
picture_type_label = QLabel('Filter per Picture Type:', self)
layout.addWidget(picture_type_label)
self.pic_type_combox = ComboBox[PictureType](self)
self.pic_type_combox.setModel(PictureTypes())
self.pic_type_combox.setEditable(True)
self.pic_type_combox.setInsertPolicy(QComboBox.InsertAtCurrent)
self.pic_type_combox.setDuplicatesEnabled(True)
self.pic_type_combox.setSizeAdjustPolicy(QComboBox.AdjustToContents)
self.pic_type_combox.view().setMinimumWidth(self.pic_type_combox.minimumSizeHint().width())
# Pad the combobox ~10% wider than its minimum hint so edited text fits.
temp_width = self.pic_type_combox.minimumSizeHint().width()
self.pic_type_combox.setMinimumWidth(temp_width + temp_width // 10)
self.pic_type_combox.setCurrentIndex(0)
layout.addWidget(self.pic_type_combox)
layout.addWidget(self.get_separator())
self.is_public_checkbox = QCheckBox('Public', self)
self.is_public_checkbox.setChecked(True)
layout.addWidget(self.is_public_checkbox)
self.is_nsfw_checkbox = QCheckBox('NSFW', self)
self.is_nsfw_checkbox.setChecked(False)
layout.addWidget(self.is_nsfw_checkbox)
layout.addWidget(self.get_separator())
# Read-only line edit that receives the final comparison URL.
self.output_url_lineedit = QLineEdit('https://slow.pics/c/', self)
self.output_url_lineedit.setEnabled(False)
layout.addWidget(self.output_url_lineedit)
self.output_url_copy_button = QPushButton(self)
self.output_url_copy_button.clicked.connect(self.on_copy_output_url_clicked)
self.output_url_copy_button.setText('⎘')
layout.addWidget(self.output_url_copy_button)
self.start_upload_button = QPushButton('Upload to slow.pics', self)
self.start_upload_button.clicked.connect(self.on_start_upload)
layout.addWidget(self.start_upload_button)
self.stop_upload_button = QPushButton('Stop Uploading', self)
self.stop_upload_button.clicked.connect(self.on_stop_upload)
self.stop_upload_button.setVisible(False)
layout.addWidget(self.stop_upload_button)
upload_separator = self.get_separator()
layout.addWidget(upload_separator)
self.upload_progressbar = QProgressBar(self)
self.upload_progressbar.setGeometry(200, 80, 250, 20)
self.upload_progressbar.setValue(0)
layout.addWidget(self.upload_progressbar)
self.upload_status_label = QLabel(self)
layout.addWidget(self.upload_status_label)
self.update_status_label('extract')
# Widgets that are only shown while an upload is in flight.
self.upload_status_elements = (
upload_separator, self.upload_progressbar,
self.upload_status_label
)
self.update_upload_status_visibility(False)
layout.addStretch()
layout.addStretch()
# Copy the generated slow.pics URL to the system clipboard.
def on_copy_output_url_clicked(self, checked: bool | None = None) -> None:
self.main.clipboard.setText(self.output_url_lineedit.text())
self.main.show_message('Slow.pics URL copied to clipboard')
# Show or hide the progress widgets as a group.
def update_upload_status_visibility(self, visible: bool) -> None:
for element in self.upload_status_elements:
element.setVisible(visible)
# Swap start->stop buttons and kick off the background upload.
def on_start_upload(self) -> None:
if self._thread_running:
return
self.start_upload_button.setVisible(False)
self.stop_upload_button.setVisible(True)
self.upload_to_slowpics()
# Restore the buttons once the worker thread finishes (or is cancelled).
def on_end_upload(self, forced: bool = False) -> None:
self.start_upload_button.setVisible(True)
self.stop_upload_button.setVisible(False)
self._thread_running = False
self.upload_thread.deleteLater()
if forced:
self.upload_status_label.setText("Stopped!")
else:
self.upload_status_label.setText("Finished!")
# Cooperative cancel: the worker polls is_finished between frames.
def on_stop_upload(self) -> None:
self.upload_worker.is_finished = True
self.on_end_upload(forced=True)
# Show phase progress for known kinds; any other `kind` is treated as the
# final URL and written into the URL line edit instead.
def update_status_label(self, kind: str, curr: int | None = None, total: int | None = None) -> None:
message = ''
moreinfo = f" {curr or '?'}/{total or '?'} " if curr or total else ''
if kind == 'extract':
message = 'Extracting'
elif kind == 'upload':
message = 'Uploading'
elif kind == 'search':
message = 'Searching'
else:
return self.output_url_lineedit.setText(kind)
self.upload_status_label.setText(f'{message}{moreinfo}...')
# Draw random numbers until one not already in `checked` comes up.
def _rand_num_frames(self, checked: Set[int], rand_func: Callable[[], int]) -> int:
rnum = rand_func()
while rnum in checked:
rnum = rand_func()
return rnum
# Randomly sample `k` frame numbers whose '_PictType' frame prop matches
# `picture_type` in all outputs; raises when the clips cannot supply them.
def _select_samples_ptypes(self, num_frames: int, k: int, picture_type: PictureType) -> List[int]:
samples: Set[int] = set()
_max_attempts = 0
_rnum_checked: Set[int] = set()
while len(samples) < k:
_attempts = 0
while True:
self.update_status_label('search', _attempts, _MAX_ATTEMPTS_PER_PICTURE_TYPE)
if len(_rnum_checked) >= num_frames:
raise ValueError(f'There aren\'t enough of {picture_type} in these clips')
rnum = self._rand_num_frames(_rnum_checked, partial(random.randrange, start=0, stop=num_frames))
_rnum_checked.add(rnum)
# Accept the frame only if every output reports the wanted picture
# type (first letter of the '_PictType' prop, e.g. I/P/B).
if all(
f.props['_PictType'].decode('utf-8') == str(picture_type)[0]
for f in vs.core.std.Splice(
[select_frames(out.prepared.clip, [rnum]) for out in self.main.outputs], True
).frames()
):
break
_attempts += 1
_max_attempts += 1
if _attempts > _MAX_ATTEMPTS_PER_PICTURE_TYPE:
logging.warning(
f'{_MAX_ATTEMPTS_PER_PICTURE_TYPE} attempts were made for sample {len(samples)} '
f'and no match found for {picture_type}; stopping iteration...')
break
if _max_attempts > (curr_max_att := _MAX_ATTEMPTS_PER_PICTURE_TYPE * k):
raise RecursionError(f'Comp: attempts max of {curr_max_att} has been reached!')
if _attempts < _MAX_ATTEMPTS_PER_PICTURE_TYPE:
samples.add(rnum)
self.upload_progressbar.setValue(int())
self.upload_progressbar.setValue(int(100 * len(samples) / k))
return list(samples)
# Collect the UI settings, pick the sample frames and prepare the on-disk
# working directory for the Worker.
def get_slowpics_conf(self) -> WorkerConfiguration:
self.update_upload_status_visibility(True)
clips: Dict[str, vs.VideoNode]
num = int(self.random_frames_control.value())
frames: List[int] = list(
map(int, filter(None, [x.strip() for x in self.manual_frames_lineedit.text().split(',')]))
)
picture_type = self.pic_type_combox.currentData()
lens = set(out.prepared.clip.num_frames for out in self.main.outputs)
if len(lens) != 1:
logging.warning('Outputted clips don\'t all have the same length!')
# Use the shortest clip so every sampled frame exists in every output.
lens_n = min(lens)
# Random 16-char working directory under the app config dir.
path = Path(main_window().config_dir) / ''.join(random.choices(string.ascii_uppercase + string.digits, k=16))
path.mkdir(parents=True)
if num:
if picture_type is PictureType.UNSET:
samples = random.sample(range(lens_n), num)
else:
logging.info('Making samples according to specified picture types...')
samples = self._select_samples_ptypes(lens_n, num, picture_type)
else:
samples = []
if len(frames):
samples.extend(frames)
if self.current_frame_checkbox.isChecked():
samples.append(int(self.main.current_frame))
return WorkerConfiguration(
self.main.outputs, 'Function Test',
self.is_public_checkbox.isChecked(), self.is_nsfw_checkbox.isChecked(),
True, None, sorted(set(samples)), -1, path
)
# Spin up a QThread + Worker pair and connect lifecycle/progress signals.
def upload_to_slowpics(self) -> None:
self.upload_thread = QThread()
self.upload_worker = Worker()
self.upload_worker.moveToThread(self.upload_thread)
self.upload_thread.started.connect(
partial(self.upload_worker.run, conf=self.get_slowpics_conf())
)
self.upload_worker.finished.connect(self.upload_thread.quit)
self.upload_worker.finished.connect(self.upload_worker.deleteLater)
self.upload_thread.finished.connect(self.on_end_upload)
self.upload_worker.progress_bar.connect(self.upload_progressbar.setValue)
self.upload_worker.progress_status.connect(self.update_status_label)
self.upload_thread.start()
self._thread_running = True
# Pickle support: persist only the attributes listed in _storable_attrs.
def __getstate__(self) -> Mapping[str, Any]:
return {
attr_name: getattr(self, attr_name)
for attr_name in self._storable_attrs
}
def __setstate__(self, state: Mapping[str, Any]) -> None:
try_load(state, 'visibility', bool, self.on_toggle)
try_load(state, 'settings', CompSettings, self.__setattr__)
| [
"PyQt5.QtCore.pyqtSignal",
"requests.Session",
"requests_toolbelt.MultipartEncoderMonitor",
"logging.warning",
"PyQt5.QtWidgets.QHBoxLayout",
"PyQt5.QtWidgets.QProgressBar",
"typing.cast",
"random.choices",
"PyQt5.QtWidgets.QLabel",
"functools.partial",
"PyQt5.QtWidgets.QPushButton",
"PyQt5.Qt... | [((1311, 1323), 'PyQt5.QtCore.pyqtSignal', 'pyqtSignal', ([], {}), '()\n', (1321, 1323), False, 'from PyQt5.QtCore import QObject, QThread, pyqtSignal\n'), ((1343, 1358), 'PyQt5.QtCore.pyqtSignal', 'pyqtSignal', (['int'], {}), '(int)\n', (1353, 1358), False, 'from PyQt5.QtCore import QObject, QThread, pyqtSignal\n'), ((1381, 1406), 'PyQt5.QtCore.pyqtSignal', 'pyqtSignal', (['str', 'int', 'int'], {}), '(str, int, int)\n', (1391, 1406), False, 'from PyQt5.QtCore import QObject, QThread, pyqtSignal\n'), ((6486, 6503), 'PyQt5.QtWidgets.QHBoxLayout', 'QHBoxLayout', (['self'], {}), '(self)\n', (6497, 6503), False, 'from PyQt5.QtWidgets import QHBoxLayout, QLabel, QLineEdit, QCheckBox, QPushButton, QComboBox, QProgressBar\n'), ((6581, 6615), 'PyQt5.QtWidgets.QLabel', 'QLabel', (['"""Num Random Frames:"""', 'self'], {}), "('Num Random Frames:', self)\n", (6587, 6615), False, 'from PyQt5.QtWidgets import QHBoxLayout, QLabel, QLineEdit, QCheckBox, QPushButton, QComboBox, QProgressBar\n'), ((6800, 6834), 'PyQt5.QtWidgets.QLabel', 'QLabel', (['"""Additional Frames:"""', 'self'], {}), "('Additional Frames:', self)\n", (6806, 6834), False, 'from PyQt5.QtWidgets import QHBoxLayout, QLabel, QLineEdit, QCheckBox, QPushButton, QComboBox, QProgressBar\n'), ((6920, 6935), 'PyQt5.QtWidgets.QLineEdit', 'QLineEdit', (['self'], {}), '(self)\n', (6929, 6935), False, 'from PyQt5.QtWidgets import QHBoxLayout, QLabel, QLineEdit, QCheckBox, QPushButton, QComboBox, QProgressBar\n'), ((7105, 7137), 'PyQt5.QtWidgets.QCheckBox', 'QCheckBox', (['"""Current Frame"""', 'self'], {}), "('Current Frame', self)\n", (7114, 7137), False, 'from PyQt5.QtWidgets import QHBoxLayout, QLabel, QLineEdit, QCheckBox, QPushButton, QComboBox, QProgressBar\n'), ((7323, 7363), 'PyQt5.QtWidgets.QLabel', 'QLabel', (['"""Filter per Picture Type:"""', 'self'], {}), "('Filter per Picture Type:', self)\n", (7329, 7363), False, 'from PyQt5.QtWidgets import QHBoxLayout, QLabel, QLineEdit, QCheckBox, QPushButton, 
QComboBox, QProgressBar\n'), ((8197, 8222), 'PyQt5.QtWidgets.QCheckBox', 'QCheckBox', (['"""Public"""', 'self'], {}), "('Public', self)\n", (8206, 8222), False, 'from PyQt5.QtWidgets import QHBoxLayout, QLabel, QLineEdit, QCheckBox, QPushButton, QComboBox, QProgressBar\n'), ((8355, 8378), 'PyQt5.QtWidgets.QCheckBox', 'QCheckBox', (['"""NSFW"""', 'self'], {}), "('NSFW', self)\n", (8364, 8378), False, 'from PyQt5.QtWidgets import QHBoxLayout, QLabel, QLineEdit, QCheckBox, QPushButton, QComboBox, QProgressBar\n'), ((8559, 8598), 'PyQt5.QtWidgets.QLineEdit', 'QLineEdit', (['"""https://slow.pics/c/"""', 'self'], {}), "('https://slow.pics/c/', self)\n", (8568, 8598), False, 'from PyQt5.QtWidgets import QHBoxLayout, QLabel, QLineEdit, QCheckBox, QPushButton, QComboBox, QProgressBar\n'), ((8740, 8757), 'PyQt5.QtWidgets.QPushButton', 'QPushButton', (['self'], {}), '(self)\n', (8751, 8757), False, 'from PyQt5.QtWidgets import QHBoxLayout, QLabel, QLineEdit, QCheckBox, QPushButton, QComboBox, QProgressBar\n'), ((8982, 9022), 'PyQt5.QtWidgets.QPushButton', 'QPushButton', (['"""Upload to slow.pics"""', 'self'], {}), "('Upload to slow.pics', self)\n", (8993, 9022), False, 'from PyQt5.QtWidgets import QHBoxLayout, QLabel, QLineEdit, QCheckBox, QPushButton, QComboBox, QProgressBar\n'), ((9180, 9215), 'PyQt5.QtWidgets.QPushButton', 'QPushButton', (['"""Stop Uploading"""', 'self'], {}), "('Stop Uploading', self)\n", (9191, 9215), False, 'from PyQt5.QtWidgets import QHBoxLayout, QLabel, QLineEdit, QCheckBox, QPushButton, QComboBox, QProgressBar\n'), ((9513, 9531), 'PyQt5.QtWidgets.QProgressBar', 'QProgressBar', (['self'], {}), '(self)\n', (9525, 9531), False, 'from PyQt5.QtWidgets import QHBoxLayout, QLabel, QLineEdit, QCheckBox, QPushButton, QComboBox, QProgressBar\n'), ((9724, 9736), 'PyQt5.QtWidgets.QLabel', 'QLabel', (['self'], {}), '(self)\n', (9730, 9736), False, 'from PyQt5.QtWidgets import QHBoxLayout, QLabel, QLineEdit, QCheckBox, QPushButton, QComboBox, QProgressBar\n'), 
((15392, 15401), 'PyQt5.QtCore.QThread', 'QThread', ([], {}), '()\n', (15399, 15401), False, 'from PyQt5.QtCore import QObject, QThread, pyqtSignal\n'), ((3901, 3910), 'requests.Session', 'Session', ([], {}), '()\n', (3908, 3910), False, 'from requests import Session\n'), ((4600, 4636), 'requests_toolbelt.MultipartEncoder', 'MultipartEncoder', (['(head_conf | fields)'], {}), '(head_conf | fields)\n', (4616, 4636), False, 'from requests_toolbelt import MultipartEncoder, MultipartEncoderMonitor\n'), ((4659, 4702), 'requests_toolbelt.MultipartEncoderMonitor', 'MultipartEncoderMonitor', (['files', '_monitor_cb'], {}), '(files, _monitor_cb)\n', (4682, 4702), False, 'from requests_toolbelt import MultipartEncoder, MultipartEncoderMonitor\n'), ((14315, 14381), 'logging.warning', 'logging.warning', (['"""Outputted clips don\'t all have the same length!"""'], {}), '("Outputted clips don\'t all have the same length!")\n', (14330, 14381), False, 'import logging\n'), ((14468, 14528), 'random.choices', 'random.choices', (['(string.ascii_uppercase + string.digits)'], {'k': '(16)'}), '(string.ascii_uppercase + string.digits, k=16)\n', (14482, 14528), False, 'import random\n'), ((14725, 14795), 'logging.info', 'logging.info', (['"""Making samples according to specified picture types..."""'], {}), "('Making samples according to specified picture types...')\n", (14737, 14795), False, 'import logging\n'), ((12534, 12585), 'functools.partial', 'partial', (['random.randrange'], {'start': '(0)', 'stop': 'num_frames'}), '(random.randrange, start=0, stop=num_frames)\n', (12541, 12585), False, 'from functools import partial\n'), ((2530, 2547), 'typing.cast', 'cast', (['bytes', 'f[0]'], {}), '(bytes, f[0])\n', (2534, 2547), False, 'from typing import Any, Mapping, Callable, Dict, Final, List, NamedTuple, Optional, Set, cast\n')] |
# see https://www.spinningbytes.com/resources/germansentiment/ and https://github.com/aritter/twitter_download for obtaining the data.
import os
from pathlib import Path
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from conversion import convert_examples_to_features, convert_text_to_examples
def load_datasets(data_dir, num_categories, test_size):
    """Load the downloaded tweet TSV and split it into train/test sets.

    Sentiment labels are mapped to integers (negative=0, positive=1,
    neutral=2); with ``num_categories == 2`` neutral tweets are dropped.

    Parameters
    ----------
    data_dir : str
        Directory containing ``downloaded.tsv``.
    num_categories : int
        2 for binary (positive/negative) classification, otherwise 3-way.
    test_size : float
        Fraction of the data reserved for the test split.

    Returns
    -------
    tuple
        ``(X_train, y_train, X_test, y_test)`` pandas Series.
    """
    data = pd.read_csv(
        os.path.join(data_dir, "downloaded.tsv"),
        sep="\t",
        na_values="Not Available",
        names=["id", "sentiment", "tweet_id", "?", "text"],
        index_col='id',
    )
    data = data.dropna(how='any')[['sentiment', 'text']]
    # Use .loc instead of chained indexing (data['sentiment'][mask] = v):
    # chained assignment writes to a temporary copy and is a no-op under
    # pandas copy-on-write, leaving the labels unmapped.
    data.loc[data['sentiment'] == 'neutral', 'sentiment'] = 2
    data.loc[data['sentiment'] == 'negative', 'sentiment'] = 0
    data.loc[data['sentiment'] == 'positive', 'sentiment'] = 1
    if num_categories == 2:
        data = data[np.logical_not(data.sentiment == 2)]
    X = data['text']
    y = data['sentiment']
    # Fixed random_state keeps the split reproducible across runs.
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=test_size, random_state=0)
    return (X_train, y_train, X_test, y_test)
def get_tweets_data(data_dir, subtask, num_categories, tokenizer, max_seq_length, test_size):
    """Return BERT-style input features for the tweet task, cached on disk.

    Converted features are stored in an ``.npz`` file keyed by subtask,
    category count and sequence length; later calls load that cache instead
    of re-reading and re-tokenizing the raw data.
    """
    cache_file = os.path.join(
        data_dir,
        "data_" + subtask + "_" + str(num_categories) + "cat_" + str(max_seq_length) + ".npz")
    if Path(cache_file).is_file():
        # Cache hit: read the previously converted arrays back.
        with np.load(cache_file) as archive:
            train_input_ids = archive['train_input_ids']
            train_input_masks = archive['train_input_masks']
            train_segment_ids = archive['train_segment_ids']
            train_labels = archive['train_labels']
            test_input_ids = archive['test_input_ids']
            test_input_masks = archive['test_input_masks']
            test_segment_ids = archive['test_segment_ids']
            test_labels = archive['test_labels']
    else:
        X_train, y_train, X_test, y_test = load_datasets(data_dir, num_categories, test_size)

        def _clip_words(texts):
            # Keep at most max_seq_length whitespace-separated words per text
            # (bounds memory before tokenization) and shape the result as a
            # column vector of Python objects.
            clipped = [" ".join(t.split()[:max_seq_length]) for t in texts]
            return np.array(clipped, dtype=object)[:, np.newaxis]

        train_text = _clip_words(X_train.to_list())
        train_label = y_train.tolist()
        test_text = _clip_words(X_test.tolist())
        test_label = y_test.tolist()

        # Wrap the raw text/label pairs as InputExamples, then tokenize.
        train_examples = convert_text_to_examples(train_text, train_label)
        test_examples = convert_text_to_examples(test_text, test_label)
        (train_input_ids, train_input_masks, train_segment_ids, train_labels) = \
            convert_examples_to_features(
                tokenizer, train_examples, max_seq_length=max_seq_length)
        (test_input_ids, test_input_masks, test_segment_ids, test_labels) = \
            convert_examples_to_features(
                tokenizer, test_examples, max_seq_length=max_seq_length)

        # Persist everything so the next call can skip the conversion.
        np.savez(
            cache_file,
            train_input_ids=train_input_ids,
            train_input_masks=train_input_masks,
            train_segment_ids=train_segment_ids,
            train_labels=train_labels,
            test_input_ids=test_input_ids,
            test_input_masks=test_input_masks,
            test_segment_ids=test_segment_ids,
            test_labels=test_labels)

    return (
        train_input_ids,
        train_input_masks,
        train_segment_ids,
        train_labels,
        test_input_ids,
        test_input_masks,
        test_segment_ids,
        test_labels
    )
| [
"numpy.savez",
"pathlib.Path",
"sklearn.model_selection.train_test_split",
"conversion.convert_text_to_examples",
"numpy.logical_not",
"os.path.join",
"numpy.array",
"conversion.convert_examples_to_features",
"numpy.load"
] | [((990, 1049), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': 'test_size', 'random_state': '(0)'}), '(X, y, test_size=test_size, random_state=0)\n', (1006, 1049), False, 'from sklearn.model_selection import train_test_split\n'), ((421, 461), 'os.path.join', 'os.path.join', (['data_dir', '"""downloaded.tsv"""'], {}), "(data_dir, 'downloaded.tsv')\n", (433, 461), False, 'import os\n'), ((1345, 1356), 'numpy.load', 'np.load', (['fn'], {}), '(fn)\n', (1352, 1356), True, 'import numpy as np\n'), ((2460, 2509), 'conversion.convert_text_to_examples', 'convert_text_to_examples', (['train_text', 'train_label'], {}), '(train_text, train_label)\n', (2484, 2509), False, 'from conversion import convert_examples_to_features, convert_text_to_examples\n'), ((2534, 2581), 'conversion.convert_text_to_examples', 'convert_text_to_examples', (['test_text', 'test_label'], {}), '(test_text, test_label)\n', (2558, 2581), False, 'from conversion import convert_examples_to_features, convert_text_to_examples\n'), ((2756, 2847), 'conversion.convert_examples_to_features', 'convert_examples_to_features', (['tokenizer', 'train_examples'], {'max_seq_length': 'max_seq_length'}), '(tokenizer, train_examples, max_seq_length=\n max_seq_length)\n', (2784, 2847), False, 'from conversion import convert_examples_to_features, convert_text_to_examples\n'), ((3000, 3090), 'conversion.convert_examples_to_features', 'convert_examples_to_features', (['tokenizer', 'test_examples'], {'max_seq_length': 'max_seq_length'}), '(tokenizer, test_examples, max_seq_length=\n max_seq_length)\n', (3028, 3090), False, 'from conversion import convert_examples_to_features, convert_text_to_examples\n'), ((3125, 3417), 'numpy.savez', 'np.savez', (['fn'], {'train_input_ids': 'train_input_ids', 'train_input_masks': 'train_input_masks', 'train_segment_ids': 'train_segment_ids', 'train_labels': 'train_labels', 'test_input_ids': 'test_input_ids', 'test_input_masks': 'test_input_masks', 
'test_segment_ids': 'test_segment_ids', 'test_labels': 'test_labels'}), '(fn, train_input_ids=train_input_ids, train_input_masks=\n train_input_masks, train_segment_ids=train_segment_ids, train_labels=\n train_labels, test_input_ids=test_input_ids, test_input_masks=\n test_input_masks, test_segment_ids=test_segment_ids, test_labels=\n test_labels)\n', (3133, 3417), True, 'import numpy as np\n'), ((868, 903), 'numpy.logical_not', 'np.logical_not', (['(data.sentiment == 2)'], {}), '(data.sentiment == 2)\n', (882, 903), True, 'import numpy as np\n'), ((1314, 1322), 'pathlib.Path', 'Path', (['fn'], {}), '(fn)\n', (1318, 1322), False, 'from pathlib import Path\n'), ((2069, 2103), 'numpy.array', 'np.array', (['train_text'], {'dtype': 'object'}), '(train_text, dtype=object)\n', (2077, 2103), True, 'import numpy as np\n'), ((2298, 2331), 'numpy.array', 'np.array', (['test_text'], {'dtype': 'object'}), '(test_text, dtype=object)\n', (2306, 2331), True, 'import numpy as np\n')] |
#
# -*- coding: utf-8 -*-
#
# Copyright (c) 2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
import numpy as np
from argparse import ArgumentParser
import tensorflow as tf
# from lpot.adaptor.tf_utils.util import write_graph
from nets_factory import TFSlimNetsFactory
import copy
# Silence TensorFlow's info/warning log spam and force graph (v1) execution
# mode, which the slim checkpoint loading below relies on.
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
tf.compat.v1.disable_eager_execution()
from inception_v4 import inception_v4, inception_v4_arg_scope
def save(model, path):
    """Serialize *model*'s GraphDef to *path*.

    Failures are reported on stdout instead of raising, so a failed save does
    not abort the surrounding tuning run.
    """
    from tensorflow.python.platform import gfile
    try:
        # Context manager guarantees the handle is closed even when
        # serialization fails; the original leaked it on the error paths.
        with gfile.GFile(path, 'wb') as f:
            f.write(model.as_graph_def().SerializeToString())
    except AttributeError as no_model:
        print("None of the quantized models fits the accuracy criteria: {0}".format(no_model))
    except Exception as exc:
        print("Unexpected error while saving the model: {0}".format(exc))
# CLI entry point: optionally quantize ("tune") a TF-Slim model with LPOT
# and/or benchmark it.  `--input-graph` may be either a slim .ckpt (converted
# to a frozen graph here) or an already-frozen graph path.
def main(_):
arg_parser = ArgumentParser(description='Parse args')
arg_parser.add_argument("--input-graph",
help='Specify the slim model',
dest='input_graph')
arg_parser.add_argument("--output-graph",
help='Specify tune result model save dir',
dest='output_graph')
arg_parser.add_argument("--config", default=None, help="tuning config")
arg_parser.add_argument('--benchmark', dest='benchmark', action='store_true', help='run benchmark')
arg_parser.add_argument('--tune', dest='tune', action='store_true', help='use lpot to tune.')
args = arg_parser.parse_args()
factory = TFSlimNetsFactory()
# user specific model can register to slim net factory
input_shape = [None, 299, 299, 3]
factory.register('inception_v4', inception_v4, input_shape, inception_v4_arg_scope)
if args.input_graph.endswith('.ckpt'):
# directly get the topology name from input_graph
topology = args.input_graph.rsplit('/', 1)[-1].split('.', 1)[0]
# get the model func from net factory
assert topology in factory.default_slim_models, \
'only support topology {}'.format(factory.default_slim_models)
net = copy.deepcopy(factory.networks_map[topology])
model_func = net.pop('model')
arg_scope = net.pop('arg_scope')()
inputs_shape = net.pop('input_shape')
kwargs = net
# The input placeholder must exist before get_slim_graph builds the
# frozen graph around it.
images = tf.compat.v1.placeholder(name='input', dtype=tf.float32, \
shape=inputs_shape)
from lpot.adaptor.tf_utils.util import get_slim_graph
model = get_slim_graph(args.input_graph, model_func, arg_scope, images, **kwargs)
else:
# Not a checkpoint: assume an already-usable graph path/object.
model = args.input_graph
if args.tune:
from lpot import Quantization
quantizer = Quantization(args.config)
q_model = quantizer(model)
save(q_model, args.output_graph)
if args.benchmark:
from lpot import Benchmark
evaluator = Benchmark(args.config)
results = evaluator(model=model)
for mode, result in results.items():
acc, batch_size, result_list = result
# Mean per-run latency normalised by batch size (seconds per image).
latency = np.array(result_list).mean() / batch_size
print('\n{} mode benchmark result:'.format(mode))
print('Accuracy is {:.3f}'.format(acc))
print('Batch size = {}'.format(batch_size))
print('Latency: {:.3f} ms'.format(latency * 1000))
print('Throughput: {:.3f} images/sec'.format(1./ latency))
if __name__ == '__main__':
# tf.compat.v1.app.run parses flags and then invokes main(_) above.
tf.compat.v1.app.run()
| [
"tensorflow.compat.v1.placeholder",
"lpot.Benchmark",
"lpot.adaptor.tf_utils.util.get_slim_graph",
"argparse.ArgumentParser",
"tensorflow.compat.v1.logging.set_verbosity",
"tensorflow.compat.v1.app.run",
"tensorflow.compat.v1.disable_eager_execution",
"lpot.Quantization",
"numpy.array",
"copy.deep... | [((805, 867), 'tensorflow.compat.v1.logging.set_verbosity', 'tf.compat.v1.logging.set_verbosity', (['tf.compat.v1.logging.ERROR'], {}), '(tf.compat.v1.logging.ERROR)\n', (839, 867), True, 'import tensorflow as tf\n'), ((868, 906), 'tensorflow.compat.v1.disable_eager_execution', 'tf.compat.v1.disable_eager_execution', ([], {}), '()\n', (904, 906), True, 'import tensorflow as tf\n'), ((1050, 1073), 'tensorflow.python.platform.gfile.GFile', 'gfile.GFile', (['path', '"""wb"""'], {}), "(path, 'wb')\n", (1061, 1073), False, 'from tensorflow.python.platform import gfile\n'), ((1424, 1464), 'argparse.ArgumentParser', 'ArgumentParser', ([], {'description': '"""Parse args"""'}), "(description='Parse args')\n", (1438, 1464), False, 'from argparse import ArgumentParser\n'), ((2095, 2114), 'nets_factory.TFSlimNetsFactory', 'TFSlimNetsFactory', ([], {}), '()\n', (2112, 2114), False, 'from nets_factory import TFSlimNetsFactory\n'), ((3936, 3958), 'tensorflow.compat.v1.app.run', 'tf.compat.v1.app.run', ([], {}), '()\n', (3956, 3958), True, 'import tensorflow as tf\n'), ((2648, 2693), 'copy.deepcopy', 'copy.deepcopy', (['factory.networks_map[topology]'], {}), '(factory.networks_map[topology])\n', (2661, 2693), False, 'import copy\n'), ((2849, 2925), 'tensorflow.compat.v1.placeholder', 'tf.compat.v1.placeholder', ([], {'name': '"""input"""', 'dtype': 'tf.float32', 'shape': 'inputs_shape'}), "(name='input', dtype=tf.float32, shape=inputs_shape)\n", (2873, 2925), True, 'import tensorflow as tf\n'), ((3038, 3111), 'lpot.adaptor.tf_utils.util.get_slim_graph', 'get_slim_graph', (['args.input_graph', 'model_func', 'arg_scope', 'images'], {}), '(args.input_graph, model_func, arg_scope, images, **kwargs)\n', (3052, 3111), False, 'from lpot.adaptor.tf_utils.util import get_slim_graph\n'), ((3223, 3248), 'lpot.Quantization', 'Quantization', (['args.config'], {}), '(args.config)\n', (3235, 3248), False, 'from lpot import Quantization\n'), ((3394, 3416), 'lpot.Benchmark', 
'Benchmark', (['args.config'], {}), '(args.config)\n', (3403, 3416), False, 'from lpot import Benchmark\n'), ((3567, 3588), 'numpy.array', 'np.array', (['result_list'], {}), '(result_list)\n', (3575, 3588), True, 'import numpy as np\n')] |
import unittest
from musket_core import projects
from musket_core import parralel
import os
# Absolute directory containing this test module; used to locate the
# bundled "project" fixture next to it.
fl = os.path.dirname(__file__)
class TestCoders(unittest.TestCase):
    """Smoke test: a basic project experiment trains and yields a float score."""

    def test_basic_network(self):
        project = projects.Project(os.path.join(fl, "project"))
        experiment = project.byName("exp01")
        # Run the training tasks on a single-worker executor.
        fit_tasks = experiment.fit()
        executor = parralel.get_executor(1, 1)
        executor.execute(fit_tasks)
        score = experiment.result()
        self.assertGreaterEqual(score, 0, "Result should be greater then zero")
        self.assertTrue(isinstance(score, float), "result should be float")
        print(score)
pass | [
"os.path.dirname",
"os.path.join",
"musket_core.parralel.get_executor"
] | [((109, 128), 'os.path.dirname', 'os.path.dirname', (['fl'], {}), '(fl)\n', (124, 128), False, 'import os\n'), ((338, 365), 'musket_core.parralel.get_executor', 'parralel.get_executor', (['(1)', '(1)'], {}), '(1, 1)\n', (359, 365), False, 'from musket_core import parralel\n'), ((231, 258), 'os.path.join', 'os.path.join', (['fl', '"""project"""'], {}), "(fl, 'project')\n", (243, 258), False, 'import os\n')] |
#!/usr/bin/env python3
"""
Based on template: https://github.com/FedericoStra/cython-package-example
"""
from setuptools import setup
with open("requirements.txt") as fp:
install_requires = fp.read().strip().split("\n")
with open("requirements_dev.txt") as fp:
dev_requires = fp.read().strip().split("\n")
setup(
install_requires=install_requires,
extras_require={
"dev": dev_requires,
"docs": ["sphinx", "sphinx-rtd-theme"]
}
)
| [
"setuptools.setup"
] | [((319, 441), 'setuptools.setup', 'setup', ([], {'install_requires': 'install_requires', 'extras_require': "{'dev': dev_requires, 'docs': ['sphinx', 'sphinx-rtd-theme']}"}), "(install_requires=install_requires, extras_require={'dev':\n dev_requires, 'docs': ['sphinx', 'sphinx-rtd-theme']})\n", (324, 441), False, 'from setuptools import setup\n')] |
import sys, re
# When run directly (not imported as part of the package), make the parent
# directory importable so the absolute `body.*` imports below resolve.
if __name__=='__main__':
sys.path.append(sys.path[0]+'\\..')
from body.bone import NetP
from body.soul import Karma
from body.body_motor import Motor
from body.body_pool import Pool
from body.body_brain import Brain
from body.body_debugger import Debugger
from tools import tools_sl, tools_basic
from PyQt5.QtWidgets import QTextEdit, QApplication, QMessageBox, QFontDialog
from PyQt5.QtCore import Qt
from PyQt5.QtGui import QTextCursor, QFont
# import matlab.engine
class Editor(QTextEdit):
# Construct the editor widget and reset all collaborator slots; the real
# wiring happens later in initialize().
# NOTE(review): the `name` parameter is never used in this constructor —
# confirm whether callers rely on it.
def __init__(self,name):
super().__init__()
self.m_self=None
self.m_pool=None
self.m_motor=None
self.m_debugger=None
self.m_screen=None
self.m_plainText=None
self.m_readPtr=None
self.m_currentFile=''
self.m_changed=False
# Track document modifications (drives the dirty flag).
self.textChanged.connect(self.changed)
# Marker separating the system section in the on-disk file format
# (11 dashes; the legacy markers in takeParts_oldFasion use 10).
self.m_systemMark='\n-----------系统-----------\n'
# Wire the editor to its point graph: resolve/create the named sub-points,
# adjust their relations, then construct the collaborator objects around
# them.  Order matters: modifyPtStruct must run before the collaborators
# are built on top of the points.
def initialize(self,point):
if point==None:
point=NetP('editor')
self.m_self=point
point.m_dev=self
point.m_permission=0
pt_text=tools_basic.getPoint(point,'m_plainText','text')
pt_pool=tools_basic.getPoint(point,'m_pool','pool')
pt_motor=tools_basic.getPoint(point,'m_motor','compiler')
pt_debugger=tools_basic.getPoint(point,'m_debugger','debugger')
pt_screen=tools_basic.getPoint(point,'m_screen','screen')
self.modifyPtStruct(pt_debugger,pt_motor,pt_pool)
self.m_plainText=pt_text
self.setReadPtr(pt_text)
self.m_pool=Pool(pt_pool)
self.m_motor=Motor(pt_motor)
self.m_debugger=Debugger(pt_debugger)
self.m_screen=Brain(pt_screen)
# Screen and debugger subscribe to pool updates.
self.m_pool.register(self.m_screen.m_self)
self.m_pool.register(self.m_debugger.m_self)
self.updateByPts()
self.setFont(QFont('宋体'))
self.setStyleSheet('font: 20px;')
self.show()
# Link the freshly resolved points together: debugger -> motor, motor ->
# pool (as its source), and the pool's library back to the motor.
def modifyPtStruct(self,pt_debugger,pt_motor,pt_pool):
tools_basic.setPoint(pt_debugger,'m_motor',pt_motor)
tools_basic.setPoint(pt_motor,'m_source',pt_pool)
pt_lib=tools_basic.getPoint(pt_pool,'m_lib')
tools_basic.setPoint(pt_lib,'m_motor',pt_motor)
# Re-lay-out the system points for the new geometry, then let QTextEdit
# handle the resize as usual.
def resizeEvent(self, QResizeEvent):
self.updateSysPts()
return super().resizeEvent(QResizeEvent)
def keyPressEvent(self, QKeyEvent):
    """Editor shortcuts: Ctrl+S save, Ctrl+R run, Ctrl+T debug, Ctrl+Q
    reset the read pointer; everything else is left to QTextEdit."""
    if QApplication.keyboardModifiers() == Qt.ControlModifier:
        pressed = QKeyEvent.key()
        if pressed == Qt.Key_S:
            self.saveAsFile()
        elif pressed == Qt.Key_R:
            self.runCode()
        elif pressed == Qt.Key_T:
            self.debugCode()
        elif pressed == Qt.Key_Q:
            self.setReadPtr(self.m_plainText)
    return super().keyPressEvent(QKeyEvent)
def openFile(self,fileName):
[text1,text2]=self.readFile(fileName)
if text1==None and text2==None:
return False
self.m_currentFile=fileName
self.loadText(text1,text2)
self.m_changed=False
self.updateState()
return True
def readFile(self,fileName):
try:
f=open(fileName,encoding='gbk')
except:
print("The file, "+fileName+", doesn't exist.")
return [None,None]
try:
textGbk=f.read()
except:
textGbk=None
f.close()
f=open(fileName,encoding='utf-8')
try:
textUtf=f.read()
except:
textUtf=None
f.close()
return [textGbk,textUtf]
def loadText(self,text1,text2):
head=None
if text1==None:
code,ni=self.fixFormat(text2)
elif text2==None:
code,ni=self.fixFormat(text1)
else:
code1,n1=self.fixFormat(text1)
code2,n2=self.fixFormat(text2)
if n1==-1:
code=code2
else:
code=code1
list_pt=tools_basic.buildPoints_tokener(code)
# for point in list_pt:
# point.m_permission=0
# if point.m_db[0]!=None or point.m_db[1]!=None:
# continue
# for con in point.m_con:
# if con.m_db[1]==point:
# break
# head=point
head=list_pt[0]
self.initialize(head)
# for point in list_pt:
# if point.m_name=='in':
# print(point.info(),point.m_permission)
def fixFormat(self,text):
ni=text.find(self.m_systemMark)
# old fashion
if ni!=0:
# code='editor(,);m_plainText(editor,text);text\"'+code+'\"(,);'
code=self.transferCode(text)
# new fashion
else:
code=text[len(self.m_systemMark):]
return code,ni
def transferCode(self,text):
plainText,sysPt,nrmPt=self.takeParts_oldFasion(text)
code='editor(,);m_plainText(editor,text);text\"'+plainText\
+'\"(,);m_pool(editor,pool);pool(,);m_contain(pool,points);'+\
'points\"'+nrmPt+'\"(,);'
return code
def takeParts_oldFasion(self,wholeText):
normalMark='\n----------普通----------\n'
systemMark='\n----------系统----------\n'
n=wholeText.rfind(normalMark)
if n==-1:
return [wholeText,'','']
s=wholeText.rfind(systemMark,0,n)
if s==-1:
return [wholeText,'','']
return [wholeText[0:s],wholeText[s+len(systemMark):n],wholeText[n+len(normalMark):]]
def saveAsFile(self,fileName=None):
if fileName==None:
fileName=self.m_currentFile
if fileName=='':
QMessageBox.Warning(self,"Save failed!","Warning: the file name can't be empty")
text=self.m_systemMark+self.saveText()
f=open(fileName,'+w')
f.write(text)
f.close()
self.m_currentFile=fileName
self.m_changed=False
self.updateState()
def saveText(self):
list_pt=tools_basic.getAllSystemPt(self.m_self)
return tools_basic.writeStdCode([],list_pt)
def updateState(self):
title=''
if self.m_changed==True:
title='*'
i=self.m_currentFile.rfind('\\')
if i+1==len(self.m_currentFile):
i=-1
title+=self.m_currentFile[i+1:]
if self.m_readPtr!=self.m_plainText:
title+=': '+self.m_readPtr.info(1)
self.setWindowTitle(title)
def changed(self):
self.m_changed=True
self.updateState()
if self.m_self!=None:
# pt_text=tools_basic.getPoint(self.m_self,'m_plainText')
# pt_text.m_text=self.toPlainText()
self.m_readPtr.m_text=self.toPlainText()
def runCode(self):
# complete the selection area
text=self.toPlainText()
cursor=self.textCursor()
s=cursor.selectionStart()
e=cursor.selectionEnd()
ns=text.rfind('\n',0,s)+1
ne=text.find('\n',e,-1)
cursor=self.selectText(ns,ne)
code=cursor.selectedText().replace("\u2029",'\n')
# operate code
operation_pool=self.m_motor.m_inputs
if self.m_self not in operation_pool:
operation_pool.append(self.m_self)
outputs=self.m_motor.runCode(code)
operation_pool.remove(self.m_self)
self.m_pool.input(outputs)
def debugCode(self):
# complete the selection area
text=self.toPlainText()
cursor=self.textCursor()
s=cursor.selectionStart()
e=cursor.selectionEnd()
ns=text.rfind('\n',0,s)+1
ne=text.find('\n',e,-1)
cursor=self.selectText(ns,ne)
code=cursor.selectedText().replace("\u2029",'\n')
#debug
if self.m_debugger.isVisible()==False:
self.m_debugger.setVisible(True)
self.m_debugger.reset(code)
def setReadPtr(self,pt_text):
self.m_readPtr=pt_text
self.setPlainText(pt_text.m_text)
def selectText(self,start,end):
cursor=self.textCursor()
cursor.movePosition(QTextCursor.Start)
cursor.movePosition(QTextCursor.Right,QTextCursor.MoveAnchor,start)
if end==-1:
cursor.movePosition(QTextCursor.End,QTextCursor.KeepAnchor)
else:
cursor.movePosition(QTextCursor.Right,QTextCursor.KeepAnchor,end-start)
self.setTextCursor(cursor)
return cursor
######## functions interact with points
def updateSysPts(self):
pt_x=tools_basic.getPoint(self.m_self,'m_x')
pt_y=tools_basic.getPoint(self.m_self,'m_y')
pt_height=tools_basic.getPoint(self.m_self,'m_height')
pt_width=tools_basic.getPoint(self.m_self,'m_width')
pt_x.m_name=str(self.geometry().x())
pt_y.m_name=str(self.geometry().y())
pt_width.m_name=str(self.geometry().width())
pt_height.m_name=str(self.geometry().height())
def updateByPts(self):
pt_x=tools_basic.getPoint(self.m_self,'m_x','300')
pt_y=tools_basic.getPoint(self.m_self,'m_y','300')
pt_height=tools_basic.getPoint(self.m_self,'m_height','600')
pt_width=tools_basic.getPoint(self.m_self,'m_width','300')
x=int(pt_x.m_name)
y=int(pt_y.m_name)
width=int(pt_width.m_name)
height=int(pt_height.m_name)
self.setGeometry(x,y,width,height)
if __name__ == "__main__":
    # Start the Qt application and open the file given on the command line.
    app = QApplication(sys.argv)
    editor = Editor("editor")
    if len(sys.argv) >= 2:
        print(sys.argv[1])
        editor.openFile(sys.argv[1])
    else:
        print("Invalid file name!")
sys.exit(app.exec_()) | [
"body.body_pool.Pool",
"body.bone.NetP",
"PyQt5.QtGui.QFont",
"tools.tools_basic.setPoint",
"tools.tools_basic.buildPoints_tokener",
"body.body_brain.Brain",
"tools.tools_basic.getAllSystemPt",
"PyQt5.QtWidgets.QMessageBox.Warning",
"PyQt5.QtWidgets.QApplication",
"body.body_debugger.Debugger",
... | [((46, 83), 'sys.path.append', 'sys.path.append', (["(sys.path[0] + '\\\\..')"], {}), "(sys.path[0] + '\\\\..')\n", (61, 83), False, 'import sys, re\n'), ((9798, 9820), 'PyQt5.QtWidgets.QApplication', 'QApplication', (['sys.argv'], {}), '(sys.argv)\n', (9810, 9820), False, 'from PyQt5.QtWidgets import QTextEdit, QApplication, QMessageBox, QFontDialog\n'), ((1170, 1220), 'tools.tools_basic.getPoint', 'tools_basic.getPoint', (['point', '"""m_plainText"""', '"""text"""'], {}), "(point, 'm_plainText', 'text')\n", (1190, 1220), False, 'from tools import tools_sl, tools_basic\n'), ((1236, 1281), 'tools.tools_basic.getPoint', 'tools_basic.getPoint', (['point', '"""m_pool"""', '"""pool"""'], {}), "(point, 'm_pool', 'pool')\n", (1256, 1281), False, 'from tools import tools_sl, tools_basic\n'), ((1298, 1348), 'tools.tools_basic.getPoint', 'tools_basic.getPoint', (['point', '"""m_motor"""', '"""compiler"""'], {}), "(point, 'm_motor', 'compiler')\n", (1318, 1348), False, 'from tools import tools_sl, tools_basic\n'), ((1368, 1421), 'tools.tools_basic.getPoint', 'tools_basic.getPoint', (['point', '"""m_debugger"""', '"""debugger"""'], {}), "(point, 'm_debugger', 'debugger')\n", (1388, 1421), False, 'from tools import tools_sl, tools_basic\n'), ((1439, 1488), 'tools.tools_basic.getPoint', 'tools_basic.getPoint', (['point', '"""m_screen"""', '"""screen"""'], {}), "(point, 'm_screen', 'screen')\n", (1459, 1488), False, 'from tools import tools_sl, tools_basic\n'), ((1639, 1652), 'body.body_pool.Pool', 'Pool', (['pt_pool'], {}), '(pt_pool)\n', (1643, 1652), False, 'from body.body_pool import Pool\n'), ((1675, 1690), 'body.body_motor.Motor', 'Motor', (['pt_motor'], {}), '(pt_motor)\n', (1680, 1690), False, 'from body.body_motor import Motor\n'), ((1716, 1737), 'body.body_debugger.Debugger', 'Debugger', (['pt_debugger'], {}), '(pt_debugger)\n', (1724, 1737), False, 'from body.body_debugger import Debugger\n'), ((1761, 1777), 'body.body_brain.Brain', 'Brain', (['pt_screen'], {}), 
'(pt_screen)\n', (1766, 1777), False, 'from body.body_brain import Brain\n'), ((2084, 2138), 'tools.tools_basic.setPoint', 'tools_basic.setPoint', (['pt_debugger', '"""m_motor"""', 'pt_motor'], {}), "(pt_debugger, 'm_motor', pt_motor)\n", (2104, 2138), False, 'from tools import tools_sl, tools_basic\n'), ((2146, 2197), 'tools.tools_basic.setPoint', 'tools_basic.setPoint', (['pt_motor', '"""m_source"""', 'pt_pool'], {}), "(pt_motor, 'm_source', pt_pool)\n", (2166, 2197), False, 'from tools import tools_sl, tools_basic\n'), ((2212, 2250), 'tools.tools_basic.getPoint', 'tools_basic.getPoint', (['pt_pool', '"""m_lib"""'], {}), "(pt_pool, 'm_lib')\n", (2232, 2250), False, 'from tools import tools_sl, tools_basic\n'), ((2259, 2308), 'tools.tools_basic.setPoint', 'tools_basic.setPoint', (['pt_lib', '"""m_motor"""', 'pt_motor'], {}), "(pt_lib, 'm_motor', pt_motor)\n", (2279, 2308), False, 'from tools import tools_sl, tools_basic\n'), ((2491, 2523), 'PyQt5.QtWidgets.QApplication.keyboardModifiers', 'QApplication.keyboardModifiers', ([], {}), '()\n', (2521, 2523), False, 'from PyQt5.QtWidgets import QTextEdit, QApplication, QMessageBox, QFontDialog\n'), ((4160, 4197), 'tools.tools_basic.buildPoints_tokener', 'tools_basic.buildPoints_tokener', (['code'], {}), '(code)\n', (4191, 4197), False, 'from tools import tools_sl, tools_basic\n'), ((6265, 6304), 'tools.tools_basic.getAllSystemPt', 'tools_basic.getAllSystemPt', (['self.m_self'], {}), '(self.m_self)\n', (6291, 6304), False, 'from tools import tools_sl, tools_basic\n'), ((6321, 6358), 'tools.tools_basic.writeStdCode', 'tools_basic.writeStdCode', (['[]', 'list_pt'], {}), '([], list_pt)\n', (6345, 6358), False, 'from tools import tools_sl, tools_basic\n'), ((8857, 8897), 'tools.tools_basic.getPoint', 'tools_basic.getPoint', (['self.m_self', '"""m_x"""'], {}), "(self.m_self, 'm_x')\n", (8877, 8897), False, 'from tools import tools_sl, tools_basic\n'), ((8911, 8951), 'tools.tools_basic.getPoint', 'tools_basic.getPoint', 
(['self.m_self', '"""m_y"""'], {}), "(self.m_self, 'm_y')\n", (8931, 8951), False, 'from tools import tools_sl, tools_basic\n'), ((8970, 9015), 'tools.tools_basic.getPoint', 'tools_basic.getPoint', (['self.m_self', '"""m_height"""'], {}), "(self.m_self, 'm_height')\n", (8990, 9015), False, 'from tools import tools_sl, tools_basic\n'), ((9033, 9077), 'tools.tools_basic.getPoint', 'tools_basic.getPoint', (['self.m_self', '"""m_width"""'], {}), "(self.m_self, 'm_width')\n", (9053, 9077), False, 'from tools import tools_sl, tools_basic\n'), ((9327, 9374), 'tools.tools_basic.getPoint', 'tools_basic.getPoint', (['self.m_self', '"""m_x"""', '"""300"""'], {}), "(self.m_self, 'm_x', '300')\n", (9347, 9374), False, 'from tools import tools_sl, tools_basic\n'), ((9387, 9434), 'tools.tools_basic.getPoint', 'tools_basic.getPoint', (['self.m_self', '"""m_y"""', '"""300"""'], {}), "(self.m_self, 'm_y', '300')\n", (9407, 9434), False, 'from tools import tools_sl, tools_basic\n'), ((9452, 9504), 'tools.tools_basic.getPoint', 'tools_basic.getPoint', (['self.m_self', '"""m_height"""', '"""600"""'], {}), "(self.m_self, 'm_height', '600')\n", (9472, 9504), False, 'from tools import tools_sl, tools_basic\n'), ((9521, 9572), 'tools.tools_basic.getPoint', 'tools_basic.getPoint', (['self.m_self', '"""m_width"""', '"""300"""'], {}), "(self.m_self, 'm_width', '300')\n", (9541, 9572), False, 'from tools import tools_sl, tools_basic\n'), ((1053, 1067), 'body.bone.NetP', 'NetP', (['"""editor"""'], {}), "('editor')\n", (1057, 1067), False, 'from body.bone import NetP\n'), ((1936, 1947), 'PyQt5.QtGui.QFont', 'QFont', (['"""宋体"""'], {}), "('宋体')\n", (1941, 1947), False, 'from PyQt5.QtGui import QTextCursor, QFont\n'), ((5924, 6010), 'PyQt5.QtWidgets.QMessageBox.Warning', 'QMessageBox.Warning', (['self', '"""Save failed!"""', '"""Warning: the file name can\'t be empty"""'], {}), '(self, \'Save failed!\',\n "Warning: the file name can\'t be empty")\n', (5943, 6010), False, 'from PyQt5.QtWidgets 
import QTextEdit, QApplication, QMessageBox, QFontDialog\n')] |
from django.views.generic import DeleteView
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib import messages
from django.urls import reverse
from django.http import HttpResponseRedirect
from django.shortcuts import get_object_or_404
from schedule.models import Calendar
from schedule.views import CreateEventView, EditEventView, EventMixin
from apps.events.forms import CustomEventForm
class CustomCreateEventView(CreateEventView):
    """Create a calendar event and redirect back to the calendar detail page."""
    form_class = CustomEventForm
    template_name = 'event/edit.html'

    def get_context_data(self, **kwargs):
        """Add the calendar identified by the URL slug to the template context.

        Raises Http404 when no calendar matches ``calendar_slug``.
        """
        context = super(CustomCreateEventView, self).get_context_data(**kwargs)
        calendar = get_object_or_404(Calendar, slug=self.kwargs.get('calendar_slug'))
        extra_context = {
            "calendar": calendar,
        }
        context.update(extra_context)
        return context

    def form_valid(self, form):
        """Save the event, flash a success message and redirect to the calendar."""
        super(CustomCreateEventView, self).form_valid(form)
        # Bug fix: this is a success notification, so use messages.success;
        # messages.error tagged it with the "error" level/CSS class.
        messages.success(self.request, 'Event created successfully.')
        return HttpResponseRedirect(
            reverse('calendar_details', kwargs={'calendar_slug': self.kwargs.get('calendar_slug')})
        )
class CustomUpdateEventView(EditEventView):
    """Edit a calendar event and redirect back to the calendar detail page."""
    form_class = CustomEventForm
    template_name = 'event/edit.html'

    def get_context_data(self, **kwargs):
        """Add the calendar identified by the URL slug to the template context.

        Raises Http404 when no calendar matches ``calendar_slug``.
        """
        context = super(CustomUpdateEventView, self).get_context_data(**kwargs)
        calendar = get_object_or_404(Calendar, slug=self.kwargs.get('calendar_slug'))
        extra_context = {
            "calendar": calendar,
        }
        context.update(extra_context)
        return context

    def form_valid(self, form):
        """Save the edited event, flash a success message and redirect."""
        super(CustomUpdateEventView, self).form_valid(form)
        # Bug fix: this is a success notification, so use messages.success;
        # messages.error tagged it with the "error" level/CSS class.
        messages.success(self.request, 'Event edited successfully.')
        return HttpResponseRedirect(
            reverse('calendar_details', kwargs={'calendar_slug': self.kwargs.get('calendar_slug')})
        )
class CustomDeleteEventView(LoginRequiredMixin, EventMixin, DeleteView):
    """Confirm and delete an event, then return to its calendar."""
    template_name = 'event/delete.html'

    def get_success_url(self):
        # After deletion, go back to the calendar the event belonged to.
        slug = self.kwargs.get('calendar_slug')
        return reverse('calendar_details', args=[slug])

    def get_context_data(self, **kwargs):
        """Expose both the event being deleted and its calendar to the template."""
        context = super(CustomDeleteEventView, self).get_context_data(**kwargs)
        slug = self.kwargs.get('calendar_slug')
        calendar = get_object_or_404(Calendar, slug=slug)
        context.update({'event': self.object, 'calendar': calendar})
        return context
| [
"django.contrib.messages.error"
] | [((975, 1034), 'django.contrib.messages.error', 'messages.error', (['self.request', '"""Event created successfully."""'], {}), "(self.request, 'Event created successfully.')\n", (989, 1034), False, 'from django.contrib import messages\n'), ((1740, 1798), 'django.contrib.messages.error', 'messages.error', (['self.request', '"""Event edited successfully."""'], {}), "(self.request, 'Event edited successfully.')\n", (1754, 1798), False, 'from django.contrib import messages\n')] |
"""Defines the application configuration for the product application"""
from __future__ import unicode_literals
from django.apps import AppConfig
class ProductConfig(AppConfig):
    """Configuration for the product application."""
    name = 'product'
    label = 'product'
    verbose_name = 'Product'

    def ready(self):
        """Register the product implementations with other applications."""
        # Imported lazily so the Django app registry is fully populated first.
        from job.configuration.data.data_file import DATA_FILE_STORE
        from product.configuration.product_data_file import ProductDataFileStore

        # Make product files available through the shared data file store.
        store = ProductDataFileStore()
        DATA_FILE_STORE['DATA_FILE_STORE'] = store
| [
"product.configuration.product_data_file.ProductDataFileStore"
] | [((658, 680), 'product.configuration.product_data_file.ProductDataFileStore', 'ProductDataFileStore', ([], {}), '()\n', (678, 680), False, 'from product.configuration.product_data_file import ProductDataFileStore\n')] |
# -*- coding: utf-8 -*-
from dateutil.parser import isoparse
class ISODateTime(object):
    """Data descriptor holding a datetime parsed from an ISO-8601 string."""

    def __init__(self, initval=None):
        # Backing value: None, the initial value, or whatever isoparse()
        # produced on the last assignment.
        self.val = initval

    def __get__(self, obj, obj_type):
        """Return the stored value (None when unset)."""
        return self.val

    def __set__(self, obj, string_date):
        """Parse *string_date* (e.g. '2016-01-30T23:50+00:00'); None clears it."""
        self.val = None if string_date is None else isoparse(string_date)
| [
"dateutil.parser.isoparse"
] | [((394, 415), 'dateutil.parser.isoparse', 'isoparse', (['string_date'], {}), '(string_date)\n', (402, 415), False, 'from dateutil.parser import isoparse\n')] |
# Copyright 2018 Cognibit Solutions LLP.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
Generates classification report for the trained XGBoost models
"""
import itertools
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix as cm
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import average_precision_score
from sklearn.metrics import classification_report as report
def preprocessing(results, truth):
    """Attach normalization labels to the prediction and truth frames.

    A token needs normalization when its 'before' form differs from its
    'after' form.  Writes a 'truth' column into *results* and a 'class'
    column into *truth* (both mutated in place) and returns both frames.
    """
    unchanged = truth['before'] == truth['after']
    changed = truth['before'] != truth['after']
    # Ground-truth label stored next to the model's prediction.
    results.loc[unchanged, 'truth'] = 'RemainSelf'
    results.loc[changed, 'truth'] = 'ToBeNormalized'
    # The same labelling mirrored onto the truth frame.
    truth['class'] = ''
    truth.loc[changed, 'class'] = 'ToBeNormalized'
    truth.loc[unchanged, 'class'] = 'RemainSelf'
    return results, truth
def f1_scores(results, truth):
    """Print the precision/recall/F1 report for predicted vs. true classes."""
    y_true = truth['class'].tolist()
    y_pred = results['class'].tolist()
    print(report(y_true, y_pred))
def confusion_matrix(results, truth, lang):
    """Compute and plot the confusion matrix for one language."""
    y_true = truth['class'].tolist()
    y_pred = results['class'].tolist()
    plot_confusion_matrix(
        cm(y_true, y_pred),
        classes=['ToBeNormalized', 'RemainSelf'],
        title='XGBoost Confusion Matrix [{}]'.format(lang),
    )
def pr_curve(results, truth, lang):
    """Plot the precision-recall curve and report average precision.

    Mutates the 'class' columns of both frames, binarizing them to
    1 (ToBeNormalized, the positive class) / 0 (RemainSelf).
    """
    for frame in (truth, results):
        frame.loc[frame['class'] == 'ToBeNormalized', 'class'] = 1
        frame.loc[frame['class'] == 'RemainSelf', 'class'] = 0
    y_true = truth['class'].tolist()
    y_pred = results['class'].tolist()
    average_precision = average_precision_score(y_true, y_pred)
    precision, recall, threshold = precision_recall_curve(y_true, y_pred)
    plt.step(recall, precision, color='b', alpha=0.2, where='post')
    plt.fill_between(recall, precision, alpha=0.2, color='b')
    plt.xlabel('Recall')
    plt.ylabel('Precision')
    plt.ylim([0.0, 1.05])
    plt.xlim([0.0, 1.0])
    plt.title('Precision-Recall Curve: AP={0:0.2f} [{1}]'.format(average_precision, lang))
    plt.show()
def plot_confusion_matrix(cm, classes,
                          title='Confusion matrix',
                          cmap=plt.cm.Blues):
    """
    This function prints and plots the confusion matrix.

    Note: the `cm` parameter (the matrix) shadows the module-level
    sklearn `confusion_matrix` alias of the same name.
    """
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    ticks = np.arange(len(classes))
    plt.xticks(ticks, classes, rotation=45)
    plt.yticks(ticks, classes)

    # Annotate every cell; white text on dark cells for contrast.
    threshold = cm.max() / 2.
    n_rows, n_cols = cm.shape
    for row in range(n_rows):
        for col in range(n_cols):
            cell_color = "white" if cm[row, col] > threshold else "black"
            plt.text(col, row, format(cm[row, col], 'd'),
                     horizontalalignment="center",
                     color=cell_color)

    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    plt.tight_layout()
| [
"matplotlib.pyplot.imshow",
"sklearn.metrics.confusion_matrix.max",
"matplotlib.pyplot.title",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.colorbar",
"matplotlib.pyplot.fill_between",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.tight_la... | [((2206, 2269), 'matplotlib.pyplot.step', 'plt.step', (['recall', 'precision'], {'color': '"""b"""', 'alpha': '(0.2)', 'where': '"""post"""'}), "(recall, precision, color='b', alpha=0.2, where='post')\n", (2214, 2269), True, 'import matplotlib.pyplot as plt\n'), ((2271, 2328), 'matplotlib.pyplot.fill_between', 'plt.fill_between', (['recall', 'precision'], {'alpha': '(0.2)', 'color': '"""b"""'}), "(recall, precision, alpha=0.2, color='b')\n", (2287, 2328), True, 'import matplotlib.pyplot as plt\n'), ((2330, 2350), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Recall"""'], {}), "('Recall')\n", (2340, 2350), True, 'import matplotlib.pyplot as plt\n'), ((2352, 2375), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Precision"""'], {}), "('Precision')\n", (2362, 2375), True, 'import matplotlib.pyplot as plt\n'), ((2377, 2398), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0.0, 1.05]'], {}), '([0.0, 1.05])\n', (2385, 2398), True, 'import matplotlib.pyplot as plt\n'), ((2400, 2420), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[0.0, 1.0]'], {}), '([0.0, 1.0])\n', (2408, 2420), True, 'import matplotlib.pyplot as plt\n'), ((2510, 2520), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2518, 2520), True, 'import matplotlib.pyplot as plt\n'), ((2736, 2786), 'matplotlib.pyplot.imshow', 'plt.imshow', (['cm'], {'interpolation': '"""nearest"""', 'cmap': 'cmap'}), "(cm, interpolation='nearest', cmap=cmap)\n", (2746, 2786), True, 'import matplotlib.pyplot as plt\n'), ((2791, 2807), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (2800, 2807), True, 'import matplotlib.pyplot as plt\n'), ((2812, 2826), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (2824, 2826), True, 'import matplotlib.pyplot as plt\n'), ((2872, 2916), 'matplotlib.pyplot.xticks', 'plt.xticks', (['tick_marks', 'classes'], {'rotation': '(45)'}), '(tick_marks, classes, rotation=45)\n', (2882, 2916), True, 'import matplotlib.pyplot as plt\n'), 
((2921, 2952), 'matplotlib.pyplot.yticks', 'plt.yticks', (['tick_marks', 'classes'], {}), '(tick_marks, classes)\n', (2931, 2952), True, 'import matplotlib.pyplot as plt\n'), ((3234, 3258), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""True label"""'], {}), "('True label')\n", (3244, 3258), True, 'import matplotlib.pyplot as plt\n'), ((3263, 3292), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Predicted label"""'], {}), "('Predicted label')\n", (3273, 3292), True, 'import matplotlib.pyplot as plt\n'), ((3297, 3315), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (3313, 3315), True, 'import matplotlib.pyplot as plt\n'), ((2981, 2989), 'sklearn.metrics.confusion_matrix.max', 'cm.max', ([], {}), '()\n', (2987, 2989), True, 'from sklearn.metrics import confusion_matrix as cm\n')] |
import numpy as np
from models import *
from datasets import *
from util import parse_funct_arguments
import pickle
import itertools
def mse(y_true, y_mdl):
    """Return the mean squared error between the true and model outputs."""
    err = y_true - y_mdl
    return np.mean(err ** 2)
def train(mdl, dset):
    """Fit *mdl* on the dataset's training split and return the fitted model."""
    u_train, y_train = dset.get_train()
    # Build the one-step-ahead regression problem from the I/O sequences.
    X, z = construct_linear_system(u_train, y_train, dset.nus, dset.nys)
    return mdl.fit(X, z)
def evaluate(mdl, dset):
    """Evaluate a fitted one-step-ahead model *mdl* on *dset*.

    Returns (metrics, pred_train, pred_test): *metrics* is a dict of
    one-step-ahead prediction and free-run simulation MSEs on the train
    and test splits; the other two hold the corresponding signals.

    Assumes y arrays are indexed (time, n_seq, n_out) -- inferred from
    the shape[1]/shape[2] keyword arguments below.
    """
    # Get train and test splits
    u_train, y_train = dset.get_train()
    u_test, y_test = dset.get_test()
    X_train, z_train = construct_linear_system(u_train, y_train, dset.nus, dset.nys)
    X_test, z_test = construct_linear_system(u_test, y_test, dset.nus, dset.nys)
    # One-step-ahead prediction
    y_pred_train = back_to_original_shape(mdl.predict(X_train), n_seq=y_train.shape[1], n_out=y_train.shape[2])
    y_pred_test = back_to_original_shape(mdl.predict(X_test), n_seq=y_test.shape[1], n_out=y_test.shape[2])
    # Free run simulation (sd_v=0, sd_w=0 -- presumably noise disabled).
    simulate = DynamicalSystem(dset.nys, dset.nus, mdl.predict, sd_v=0, sd_w=0)
    # The first `simulate.order` samples are dropped everywhere below --
    # presumably the initial transient without full regressor history.
    y_sim_train = simulate(u_train)[simulate.order:, ...]
    y_sim_test = simulate(u_test)[simulate.order:, ...]
    d = {'mdl': repr(mdl), 'dset': repr(dset),
         'mse_pred_train': mse(y_train[simulate.order:, ...], y_pred_train),
         'mse_pred_test': mse(y_test[simulate.order:, ...], y_pred_test),
         'mse_sim_train': mse(y_train[simulate.order:, ...], y_sim_train),
         'mse_sim_test': mse(y_test[simulate.order:, ...], y_sim_test)
         }
    # Optionally record the parameter norm when the model exposes one.
    if hasattr(mdl, 'param_norm'):
        d['param_norm'] = mdl.param_norm
    pred_train = {'z_pred_train': y_pred_train, 'z_sim_train': y_sim_train}
    pred_test = {'z_pred_test': y_pred_test, 'z_sim_test': y_sim_test}
    return d, pred_train, pred_test
# ---- Main script ----
if __name__ == "__main__":
    from tqdm import tqdm
    import pandas as pd
    import argparse
    import os

    # ---- Command-line interface -------------------------------------
    parser = argparse.ArgumentParser(description='Estimate NARX model for different n features / n samples rate.')
    parser.add_argument('-r', '--repetitions', default=1, type=int,
                        help='number of repetitions')
    parser.add_argument('-o', '--output', default='./performance.csv',
                        help='output csv file.')
    # Bug fix: the -d/-m help strings were copy-pasted from -r
    # ("number of repetitions"); describe the options correctly.
    parser.add_argument('-d', '--dset', type=str, default='ChenDSet',
                        help='dataset class to use.')
    parser.add_argument('-m', '--nonlinear_model', default='RBFSampler',
                        help='nonlinear model class to use.')
    parser.add_argument('-n', '--num_points', default=60, type=int,
                        help='number of points')
    parser.add_argument('-l', '--lower_proportion', default=-1, type=float,
                        help='the lowest value for the proportion (n features / n samples) is 10^l.')
    parser.add_argument('-u', '--upper_proportion', default=2, type=float,
                        help='the upper value for the proportion (n features / n samples) is 10^u.')
    parser.add_argument('-s', '--save_models', nargs='?', default='', const='./models',
                        help='save intermediary models.')
    parser.add_argument('-w', '--reuse_weights', action='store_true',
                        help='use weights from previous model (with less features) when estimate the next one.')
    args, unk = parser.parse_known_args()

    # ---- Optional model checkpointing -------------------------------
    if args.save_models:
        if not os.path.isdir(args.save_models):
            os.mkdir(args.save_models)

        def save_mdl(mdl):
            # Pickle the fitted model under a name derived from repr(mdl).
            fname = os.path.join(args.save_models, repr(mdl)+'.pkl')
            with open(fname, 'wb') as f:
                pickle.dump(mdl, f)
    else:
        def save_mdl(_mdl):
            # Saving disabled: no-op.
            pass

    # ---- Resolve model / dataset classes from the command line ------
    # NOTE: eval() on a CLI string is acceptable for a research script,
    # but never expose it to untrusted input.
    ModelTmp = eval(args.nonlinear_model)
    Model, _, unk = parse_funct_arguments(ModelTmp, unk, free_arguments=['n_features', 'random_state'])
    DatasetTmp = eval(args.dset)
    Dataset, _, unk = parse_funct_arguments(DatasetTmp, unk)
    dset = Dataset()

    # ---- Baseline: linear model -------------------------------------
    tqdm.write("Estimating baseline performance...")
    baseline_mdl = Linear()
    baseline_list = []
    for seed in tqdm(range(args.repetitions)):
        np.random.seed(seed)
        d, pred_train, pred_test = evaluate(train(baseline_mdl, dset), dset)
        d['seed'] = seed
        d['proportion'] = 0  # To signal it is the baseline (n features being a constant)
        baseline_list.append(d)
        # Save model
        save_mdl(baseline_mdl)
    df = pd.DataFrame(baseline_list)
    df.to_csv(args.output, index=False)
    tqdm.write("Done")

    # ---- Sweep over n_features / n_samples proportions --------------
    tqdm.write("Estimating performance as a function of proportion...")
    # Log-spaced proportions below and above 1 (under-/over-parameterized).
    underp = np.logspace(args.lower_proportion, 0, args.num_points // 2)
    overp = np.logspace(0.00001, args.upper_proportion, args.num_points - args.num_points // 2)
    proportions = np.concatenate((underp, overp))
    run_instances = list(itertools.product(range(args.repetitions), proportions))
    prev_mdl = None  # used only if reuse_weights is True
    num_samples = dset.effective_num_train_samples
    for seed, proportion in tqdm(run_instances):
        n_features = int(proportion * num_samples)
        mdl = Model(n_features=n_features, random_state=seed)
        if args.reuse_weights and hasattr(mdl, 'reuse_weights_from_mdl'):
            # Warm-start from the previous (smaller) model when supported.
            if prev_mdl is not None:
                mdl.reuse_weights_from_mdl(prev_mdl)
            prev_mdl = mdl
        d, pred_train, pred_test = evaluate(train(mdl, dset), dset)
        d['proportion'] = proportion
        d['seed'] = seed
        # Bug fix: DataFrame.append was deprecated in pandas 1.4 and
        # removed in 2.0 -- concatenate a one-row frame instead.
        df = pd.concat([df, pd.DataFrame([d])], ignore_index=True)
        df.to_csv(args.output, index=False)
        # Save model
        save_mdl(mdl)
    tqdm.write("Done")
| [
"numpy.mean",
"pickle.dump",
"argparse.ArgumentParser",
"tqdm.tqdm.write",
"tqdm.tqdm",
"os.path.isdir",
"util.parse_funct_arguments",
"numpy.concatenate",
"numpy.random.seed",
"pandas.DataFrame",
"os.mkdir",
"numpy.logspace"
] | [((170, 200), 'numpy.mean', 'np.mean', (['((y_true - y_mdl) ** 2)'], {}), '((y_true - y_mdl) ** 2)\n', (177, 200), True, 'import numpy as np\n'), ((1948, 2054), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Estimate NARX model for different n features / n samples rate."""'}), "(description=\n 'Estimate NARX model for different n features / n samples rate.')\n", (1971, 2054), False, 'import argparse\n'), ((3860, 3947), 'util.parse_funct_arguments', 'parse_funct_arguments', (['ModelTmp', 'unk'], {'free_arguments': "['n_features', 'random_state']"}), "(ModelTmp, unk, free_arguments=['n_features',\n 'random_state'])\n", (3881, 3947), False, 'from util import parse_funct_arguments\n'), ((4042, 4080), 'util.parse_funct_arguments', 'parse_funct_arguments', (['DatasetTmp', 'unk'], {}), '(DatasetTmp, unk)\n', (4063, 4080), False, 'from util import parse_funct_arguments\n'), ((4107, 4155), 'tqdm.tqdm.write', 'tqdm.write', (['"""Estimating baseline performance..."""'], {}), "('Estimating baseline performance...')\n", (4117, 4155), False, 'from tqdm import tqdm\n'), ((4568, 4595), 'pandas.DataFrame', 'pd.DataFrame', (['baseline_list'], {}), '(baseline_list)\n', (4580, 4595), True, 'import pandas as pd\n'), ((4640, 4658), 'tqdm.tqdm.write', 'tqdm.write', (['"""Done"""'], {}), "('Done')\n", (4650, 4658), False, 'from tqdm import tqdm\n'), ((4664, 4731), 'tqdm.tqdm.write', 'tqdm.write', (['"""Estimating performance as a function of proportion..."""'], {}), "('Estimating performance as a function of proportion...')\n", (4674, 4731), False, 'from tqdm import tqdm\n'), ((4764, 4823), 'numpy.logspace', 'np.logspace', (['args.lower_proportion', '(0)', '(args.num_points // 2)'], {}), '(args.lower_proportion, 0, args.num_points // 2)\n', (4775, 4823), True, 'import numpy as np\n'), ((4836, 4921), 'numpy.logspace', 'np.logspace', (['(1e-05)', 'args.upper_proportion', '(args.num_points - args.num_points // 2)'], {}), '(1e-05, args.upper_proportion, 
args.num_points - args.num_points //\n 2)\n', (4847, 4921), True, 'import numpy as np\n'), ((4938, 4969), 'numpy.concatenate', 'np.concatenate', (['(underp, overp)'], {}), '((underp, overp))\n', (4952, 4969), True, 'import numpy as np\n'), ((5189, 5208), 'tqdm.tqdm', 'tqdm', (['run_instances'], {}), '(run_instances)\n', (5193, 5208), False, 'from tqdm import tqdm\n'), ((5780, 5798), 'tqdm.tqdm.write', 'tqdm.write', (['"""Done"""'], {}), "('Done')\n", (5790, 5798), False, 'from tqdm import tqdm\n'), ((4262, 4282), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (4276, 4282), True, 'import numpy as np\n'), ((3460, 3491), 'os.path.isdir', 'os.path.isdir', (['args.save_models'], {}), '(args.save_models)\n', (3473, 3491), False, 'import os\n'), ((3505, 3531), 'os.mkdir', 'os.mkdir', (['args.save_models'], {}), '(args.save_models)\n', (3513, 3531), False, 'import os\n'), ((3686, 3705), 'pickle.dump', 'pickle.dump', (['mdl', 'f'], {}), '(mdl, f)\n', (3697, 3705), False, 'import pickle\n')] |
from pymodm import MongoModel, fields
from models.target import Target
from models.user import User
class Dive(MongoModel):
    """A recorded dive on a *Target* by a *diver*, with optional
    location-correction details reported by the diver."""
    # Field declaration order matters: create() below passes these
    # positionally to Dive(...) in exactly this order.
    diver = fields.ReferenceField(User)
    target = fields.ReferenceField(Target)
    created_at = fields.DateTimeField()
    location_correct = fields.BooleanField()
    new_x_coordinate = fields.CharField(blank=True)
    new_y_coordinate = fields.CharField(blank=True)
    new_location_explanation = fields.CharField(blank=True)
    change_text = fields.CharField(blank=True)
    miscellaneous = fields.CharField(blank=True)

    class Meta:
        connection_alias = 'app'
        final = True

    @staticmethod
    def create(
        diver,
        target,
        location_correct,
        created_at,
        new_x_coordinate=None,
        new_y_coordinate=None,
        new_location_explanation=None,
        change_text=None,
        miscellaneous=None
    ):
        """Create, persist and return a new Dive.

        NOTE(review): the signature takes (..., location_correct,
        created_at, ...) but the Dive(...) call below passes created_at
        *before* location_correct, matching the field declaration order
        above -- the mismatch is easy to trip over; verify callers.
        """
        dive = Dive(
            diver,
            target,
            created_at,
            location_correct,
            new_x_coordinate,
            new_y_coordinate,
            new_location_explanation,
            change_text,
            miscellaneous
        )
        dive.save()
        return dive

    def to_json(self):
        """Serialize this dive to a JSON-friendly dict (id fields stringified)."""
        return {
            # NOTE(review): str(...) is never None, so `or None` only fires
            # when str(self._id) == '' -- was
            # `None if self._id is None else str(self._id)` intended?
            'id': str(self._id) or None,
            'diver': self.diver.to_json(),
            'target': self.target.to_json(),
            'location_correct': self.location_correct,
            'created_at': str(self.created_at),
            # NOTE(review): 'miscellanious' is misspelled but may already be
            # consumed by clients -- renaming it would break the API.
            'miscellanious': self.miscellaneous,
            'change_text': self.change_text,
            'new_x_coordinate': self.new_x_coordinate,
            'new_y_coordinate': self.new_y_coordinate,
            'new_location_explanation': self.new_location_explanation,
        }
| [
"pymodm.fields.ReferenceField",
"pymodm.fields.BooleanField",
"pymodm.fields.CharField",
"pymodm.fields.DateTimeField"
] | [((138, 165), 'pymodm.fields.ReferenceField', 'fields.ReferenceField', (['User'], {}), '(User)\n', (159, 165), False, 'from pymodm import MongoModel, fields\n'), ((179, 208), 'pymodm.fields.ReferenceField', 'fields.ReferenceField', (['Target'], {}), '(Target)\n', (200, 208), False, 'from pymodm import MongoModel, fields\n'), ((226, 248), 'pymodm.fields.DateTimeField', 'fields.DateTimeField', ([], {}), '()\n', (246, 248), False, 'from pymodm import MongoModel, fields\n'), ((272, 293), 'pymodm.fields.BooleanField', 'fields.BooleanField', ([], {}), '()\n', (291, 293), False, 'from pymodm import MongoModel, fields\n'), ((317, 345), 'pymodm.fields.CharField', 'fields.CharField', ([], {'blank': '(True)'}), '(blank=True)\n', (333, 345), False, 'from pymodm import MongoModel, fields\n'), ((369, 397), 'pymodm.fields.CharField', 'fields.CharField', ([], {'blank': '(True)'}), '(blank=True)\n', (385, 397), False, 'from pymodm import MongoModel, fields\n'), ((429, 457), 'pymodm.fields.CharField', 'fields.CharField', ([], {'blank': '(True)'}), '(blank=True)\n', (445, 457), False, 'from pymodm import MongoModel, fields\n'), ((476, 504), 'pymodm.fields.CharField', 'fields.CharField', ([], {'blank': '(True)'}), '(blank=True)\n', (492, 504), False, 'from pymodm import MongoModel, fields\n'), ((525, 553), 'pymodm.fields.CharField', 'fields.CharField', ([], {'blank': '(True)'}), '(blank=True)\n', (541, 553), False, 'from pymodm import MongoModel, fields\n')] |
from tkinter import *
import os
from datetime import datetime
import webbrowser
from tkinter import messagebox
from tkinter import ttk
import tkinter.filedialog
import tkinter as tk
import openpyxl
from REPORTE import *
datos = [] # rows loaded from the delivery-report workbook (Reporte objects)
precios = [] # medicine (MED) price-list entries (Datos objects)
preciosmq=[] # surgical/medical (MQ) price-list entries (Datos objects)
subtotales = [] # computed per-item subtotals (Subtotal objects)
def CREAR_INTERFAZ():
    """Build and run the Tkinter GUI for loading price/report workbooks."""
    def DIALOGO():
        # Show a file picker and load the medicine (MED) price list into
        # the module-level `precios` list; status is reported through `rut`.
        fd= tkinter.Tk()
        fd.withdraw()  # hide the throwaway root window created for the dialog
        ruta=tkinter.filedialog.askopenfilename(
            initialdir="C:",
            filetypes=(
                ("Libro de Excel", "*.xlsx"),
                ("Libro de Excel 97 a Excel 2003", "*.xls"),
                ("Todos los Archivos de Excel","*.*")
            ),
            title = "ABRIR ARCHIVO"
        )
        if ruta=="":
            messagebox.showinfo(message="Debe cargar un archivo", title="ERROR")
        else:
            try:
                print("------> "+ ruta)
                rut.set("CARGA EXITOSA")
                # data_only=True reads cached cell values instead of formulas
                book2 = openpyxl.load_workbook(ruta, data_only=True)
                celdas2 = book2.active
                # Row 1 is the header; data rows run 2..max_row inclusive
                for row in range(2,celdas2.max_row +1):
                    if(celdas2.cell(row,1).value is not None):
                        precios.append(Datos(celdas2.cell(row,1).value,celdas2.cell(row,2).value, celdas2.cell(row,3).value))
            finally:
                print(" ************************** ")
                print("       SUCCESSFULLY           ")
                print(" ************************** ")
    def DIALOGO2():
        # Near-duplicate of DIALOGO: loads the surgical/medical (MQ) price
        # list into `preciosmq`; status is reported through `zm1`.
        fd= tkinter.Tk()
        fd.withdraw()  # hide the throwaway root window created for the dialog
        ruta=tkinter.filedialog.askopenfilename(
            initialdir="C:",
            filetypes=(
                ("Libro de Excel", "*.xlsx"),
                ("Libro de Excel 97 a Excel 2003", "*.xls"),
                ("Todos los Archivos de Excel","*.*")
            ),
            title = "ABRIR ARCHIVO"
        )
        if ruta=="":
            messagebox.showinfo(message="Debe cargar un archivo", title="ERROR")
        else:
            try:
                print("------> "+ ruta)
                zm1.set("CARGA EXITOSA")
                # data_only=True reads cached cell values instead of formulas
                book2 = openpyxl.load_workbook(ruta, data_only=True)
                celdas2 = book2.active
                # Row 1 is the header; data rows run 2..max_row inclusive
                for row in range(2,celdas2.max_row +1):
                    if(celdas2.cell(row,1).value is not None):
                        preciosmq.append(Datos(celdas2.cell(row,1).value,celdas2.cell(row,2).value, celdas2.cell(row,3).value))
            finally:
                print(" ************************** ")
                print("       SUCCESSFULLY           ")
                print(" ************************** ")
def DIALOGO_REPORTE():
TP=TIPO.get()
fd= tkinter.Tk()
fd.withdraw()
ruta=tkinter.filedialog.askopenfilename(
initialdir="C:",
filetypes=(
("Libro de Excel", "*.xlsx"),
("Libro de Excel 97 a Excel 2003", "*.xls"),
("Todos los Archivos de Excel","*.*")
),
title = "ABRIR ARCHIVO"
)
if ruta=="":
messagebox.showinfo(message="Debe cargar un archivo", title="ERROR")
else:
try:
print("------> "+ ruta)
rut.set("CARGA EXITOSA")
book = openpyxl.load_workbook(ruta, data_only=True)
celdas = book.active
for row in range(2,celdas.max_row):
if(celdas.cell(row,1).value is not None):
datos.append(Reporte(celdas.cell(row,1).value, celdas.cell(row,2).value, celdas.cell(row,3).value))
if TP=="MQ":
print("--------------IMPRIMIENDO SUBTOTALES-------------")
x=0
contador=0
while x<len(datos):
for i in preciosmq:
if datos[x].nombre.upper().replace(" ", "")==i.nombre.upper().replace(" ", ""):
contador+=1
subtotal=datos[x].entregado_usuario*i.precio
print(str(contador)+ ")" +datos[x].nombre +"="+ str(subtotal))
subtotales.append(Subtotal(contador,datos[x].codigo,datos[x].nombre,datos[x].entregado_usuario,subtotal))
break
x+=1
print("----------------------------------------")
TOTAL=0
for i in subtotales:
TOTAL+=i.subtotal
print("TOTAL = Q"+ str(TOTAL))
else:
print("--------------IMPRIMIENDO SUBTOTALES-------------")
x=0
contador=0
while x<len(datos):
for i in precios:
if datos[x].nombre.upper().replace(" ", "")==i.nombre.upper().replace(" ", ""):
contador+=1
subtotal=datos[x].entregado_usuario*i.precio
print(str(contador)+ ")" +datos[x].nombre +"="+ str(subtotal))
subtotales.append(Subtotal(contador,datos[x].codigo,datos[x].nombre,datos[x].entregado_usuario,subtotal))
break
x+=1
print("----------------------------------------")
TOTAL=0
for i in subtotales:
TOTAL+=i.subtotal
print("TOTAL = Q"+ str(TOTAL))
finally:
print(" ************************** ")
print(" SUCCESSFULLY ")
print(" ************************** ")
def VER_REPORTE():
#obteniendo datos de inputs
A=año.get()
MO=Mes_inicial.get()
M=Mes_final.get()
DEPA=dpto.get()
AR=area.get()
MUN=municipio.get()
TIPS=t_servicio.get()
SERV=servicio.get()
DIST=distrito.get()
f = open('REPORTE.html','w', encoding="utf-8")
f.write("""
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<meta http-equiv="X-UA-Compatible" content="IE=edge">
<meta name="viewport" content="width=device-width, initial-scale=1, shrink-to-fit=no">
<meta name="description" content="">
<meta name="author" content="">
<title><NAME></title>
<link href="img/icono.ico" rel="icon">
<!-- Custom fonts for this template-->
<link href="vendor/fontawesome-free/css/all.min.css" rel="stylesheet" type="text/css">
<link
href="https://fonts.googleapis.com/css?family=Nunito:200,200i,300,300i,400,400i,600,600i,700,700i,800,800i,900,900i"
rel="stylesheet">
<!-- Custom styles for this template-->
<link href="css/sb-admin-2.min.css" rel="stylesheet">
<link href="vendor/datatables/dataTables.bootstrap4.min.css" rel="stylesheet">
</head>
<body id="page-top">
<!-- Page Wrapper -->
<div id="wrapper">
<!-- Sidebar -->
<ul class="navbar-nav bg-gradient-primary sidebar sidebar-dark accordion" id="accordionSidebar">
<!-- Sidebar - Brand -->
<a class="sidebar-brand d-flex align-items-center justify-content-center" href="REPORTE.html">
<div class="sidebar-brand-icon rotate-n-15">
<i class="fas fa-laugh-wink"></i>
</div>
<div class="sidebar-brand-text mx-3">ANALISIS</div>
</a>
<!-- Divider -->
<hr class="sidebar-divider my-0">
<!-- Nav Item - Dashboard -->
<li class="nav-item active">
<a class="nav-link" href="REPORTE.html">
<i class="fas fa-bars"></i>
<span>REPORTE</span></a>
</li>
<!-- Divider -->
<hr class="sidebar-divider">
<!-- Heading -->
<div class="sidebar-heading">
OTROS
</div>
<!-- Nav Item - Utilities Collapse Menu -->
<li class="nav-item">
<a class="nav-link collapsed" href="#" data-toggle="collapse" data-target="#collapseUtilities"
aria-expanded="true" aria-controls="collapseUtilities">
<i class="fas fa-fw fa-2x"></i>
<span>BRESS</span>
</a>
</li>
<!-- Divider -->
<hr class="sidebar-divider d-none d-md-block">
<!-- Sidebar Toggler (Sidebar) -->
<div class="text-center d-none d-md-inline">
<button class="rounded-circle border-0" id="sidebarToggle"></button>
</div>
</ul>
<!-- End of Sidebar -->
<!-- Content Wrapper -->
<div id="content-wrapper" class="d-flex flex-column">
<!-- Main Content -->
<div id="content">
<!-- Topbar -->
<nav class="navbar navbar-expand navbar-light bg-white topbar mb-4 static-top shadow">
<!-- Sidebar Toggle (Topbar) -->
<button id="sidebarToggleTop" class="btn btn-link d-md-none rounded-circle mr-3">
<i class="fa fa-bars"></i>
</button>
<!-- Topbar Navbar -->
<ul class="navbar-nav ml-auto">
<!-- Nav Item - Search Dropdown (Visible Only XS) -->
<li class="nav-item dropdown no-arrow d-sm-none">
<a class="nav-link dropdown-toggle" href="#" id="searchDropdown" role="button"
data-toggle="dropdown" aria-haspopup="true" aria-expanded="false">
<i class="fas fa-search fa-fw"></i>
</a>
</li>
<div class="topbar-divider d-none d-sm-block"></div>
<!-- Nav Item - User Information -->
<li class="nav-item dropdown no-arrow">
<a class="nav-link dropdown-toggle" href="#" id="userDropdown" role="button"
data-toggle="dropdown" aria-haspopup="true" aria-expanded="false">
<span class="mr-2 d-none d-lg-inline text-gray-600 small">Administrador</span>
<img class="img-profile rounded-circle"
src="img/undraw_profile.svg">
</a>
</li>
</ul>
</nav>
<!-- End of Topbar -->
<!-- Begin Page Content -->
<div class="container-fluid">
<!-- Page Heading -->
<div class="d-sm-flex align-items-center justify-content-between mb-4">
<h1 class="h3 mb-0 text-gray-800">ÁREA DE SALUD DE CHIMALTENANGO</h1>
<a href="#" class="d-none d-sm-inline-block btn btn-sm btn-primary shadow-sm"><i
class="fas fa-download fa-sm text-white-50"></i> Descargar Reporte</a>
</div>
<!-- Content Row -->
<div class="row">
<!-- Earnings (Monthly) Card Example -->
<div class="col-xl-3 col-md-6 mb-4">
<div class="card border-left-primary shadow h-100 py-2">
<div class="card-body">
<div class="row no-gutters align-items-center">
<div class="col mr-2">
<div class="text-xs font-weight-bold text-primary text-uppercase mb-1">
Departamento</div>
<div class="h5 mb-0 font-weight-bold text-gray-800">
""")
f.write(DEPA) #DEPARTAMENTO
f.write("""
</div>
</div>
<div class="col-auto">
<i class="fas fa-fw"></i>
</div>
</div>
</div>
</div>
</div>
<!-- Earnings (Monthly) Card Example -->
<div class="col-xl-3 col-md-6 mb-4">
<div class="card border-left-success shadow h-100 py-2">
<div class="card-body">
<div class="row no-gutters align-items-center">
<div class="col mr-2">
<div class="text-xs font-weight-bold text-success text-uppercase mb-1">
Distrito</div>
<div class="h5 mb-0 font-weight-bold text-gray-800">
""")
f.write(DIST) #DISTRITO
f.write("""
</div>
</div>
<div class="col-auto">
<i class="fas fa-fw"></i>
</div>
</div>
</div>
</div>
</div>
<!-- Earnings (Monthly) Card Example -->
<div class="col-xl-3 col-md-6 mb-4">
<div class="card border-left-info shadow h-100 py-2">
<div class="card-body">
<div class="row no-gutters align-items-center">
<div class="col mr-2">
<div class="text-xs font-weight-bold text-info text-uppercase mb-1">Del Mes
</div>
<div class="row no-gutters align-items-center">
<div class="col-auto">
<div class="h5 mb-0 mr-3 font-weight-bold text-gray-800">
""")
f.write(MO) #MES INICIAL
f.write("""
</div>
</div>
</div>
</div>
<div class="col-auto">
<i class="fas fa-calendar fa-2x text-gray-300"></i>
</div>
</div>
</div>
</div>
</div>
<!-- Pending Requests Card Example -->
<div class="col-xl-3 col-md-6 mb-4">
<div class="card border-left-warning shadow h-100 py-2">
<div class="card-body">
<div class="row no-gutters align-items-center">
<div class="col mr-2">
<div class="text-xs font-weight-bold text-warning text-uppercase mb-1">
Al mes</div>
<div class="h5 mb-0 font-weight-bold text-gray-800">
""")
f.write(M) #MES FINAL
f.write("""
</div>
</div>
<div class="col-auto">
<i class="fas fa-calendar fa-2x text-gray-300"></i>
</div>
</div>
</div>
</div>
</div>
</div>
<!-- Content Row -->
<div class="row">
<!-- TABLA RESUMEN-->
<h1 class="h3 mb-2 text-gray-800">
""")
f.write(MUN) # MUNICIPIO
f.write("""
</h1>
<p class="mb-4">Reporte de Balance, Requisición y Envío de Suministros</p>
<!-- TABLA DE MEDICAMENTOS Y MÉDIDO QUIRURGICO -->
<div class="card shadow mb-4">
<div class="card-header py-3">
<h6 class="m-0 font-weight-bold text-primary">
""")
f.write(TIPS) #TIPO DE SERVICIO
f.write("""
</h6>
</div>
<div class="card-body">
<div class="table-responsive">
<table class="table table-bordered" id="dataTable" width="100%" cellspacing="0">
<thead>
<tr>
<th>Número de orden</th>
<th>Código</th>
<th>Descripción de Articulo/Producto</th>
<th>Unidad de Medida</th>
<th>Cantidad Autorizada</th>
<th>Cantidad despachada</th>
<th>Subtotal</th>
</tr>
</thead>
<tfoot>
<th>Número de orden</th>
<th>Código</th>
<th>Descripción de Articulo/Producto</th>
<th>Unidad de Medida</th>
<th>Cantidad Autorizada</th>
<th>Cantidad despachada</th>
<th>Subtotal </th>
</tfoot>
<tbody>
""")
for i in subtotales:
p="{0:.2f}".format(float(i.subtotal))
f.write("<tr>")
f.write(" <td><center>"+str(i.id)+"</center></td>"
+"<td><center>"+str(i.codigo)+"</center></td>"
+"<td><center>"+str(i.nombre)+"</center></td>"
+"<td><center>"+"x"+"</center></td>"
+"<td><center>"+str(i.entregado)+"</center></td>"
+"<td><center>"+str(i.entregado)+"</center></td>"
+"<td><center>"+ "Q"+str(p)+"</center></td>"
)
f.write("<t/r>")
f.write("""
</tbody>
</table>
</div>
</div>
</div>
<!-- Content Row -->
<div class="row">
<!-- Content Column -->
<div class="col-auto">
</div>
</div>
</div>
<!-- /.container-fluid -->
</div>
<!-- End of Main Content -->
<!-- Footer -->
<footer class="sticky-footer bg-white">
<div class="container my-auto">
<div class="copyright text-center my-auto">
<span>© Facultad de Ingeniería 2021</span>
</div>
</div>
</footer>
<!-- End of Footer -->
</div>
<!-- End of Content Wrapper -->
</div>
<!-- End of Page Wrapper -->
<!-- Scroll to Top Button-->
<a class="scroll-to-top rounded" href="#page-top">
<i class="fas fa-angle-up"></i>
</a>
<!-- Logout Modal-->
<div class="modal fade" id="logoutModal" tabindex="-1" role="dialog" aria-labelledby="exampleModalLabel"
aria-hidden="true">
<div class="modal-dialog" role="document">
<div class="modal-content">
<div class="modal-header">
<h5 class="modal-title" id="exampleModalLabel">Ready to Leave?</h5>
<button class="close" type="button" data-dismiss="modal" aria-label="Close">
<span aria-hidden="true">×</span>
</button>
</div>
<div class="modal-body">Select "Logout" below if you are ready to end your current session.</div>
<div class="modal-footer">
<button class="btn btn-secondary" type="button" data-dismiss="modal">Cancel</button>
<a class="btn btn-primary" href="login.html">Logout</a>
</div>
</div>
</div>
</div>
<!-- Bootstrap core JavaScript-->
<script src="vendor/jquery/jquery.min.js"></script>
<script src="vendor/bootstrap/js/bootstrap.bundle.min.js"></script>
<!-- Core plugin JavaScript-->
<script src="vendor/jquery-easing/jquery.easing.min.js"></script>
<!-- Custom scripts for all pages-->
<script src="js/sb-admin-2.min.js"></script>
<!-- Page level plugins -->
<script src="vendor/chart.js/Chart.min.js"></script>
<!-- Page level custom scripts -->
<script src="js/demo/chart-area-demo.js"></script>
<script src="js/demo/chart-pie-demo.js"></script>
<!-- Page level plugins -->
<script src="vendor/datatables/jquery.dataTables.min.js"></script>
<script src="vendor/datatables/dataTables.bootstrap4.min.js"></script>
<!-- Page level custom scripts -->
<script src="js/demo/datatables-demo.js"></script>
</body>
</html>
""")
f.close()
webbrowser.open_new_tab('REPORTE.html')
#--------------CREANDO VENTANA PRINCIPAL--------------
root=Tk()
root.title("VOLUNTARIADO")
root.iconbitmap('img\icono.ico')
rut=StringVar()
zm1=StringVar()
nt=ttk.Notebook(root)
nt.pack(fill="both",expand="yes")
s = ttk.Style()
# Create style used by default for all Frames
s.configure('TFrame', background='#1F618D')
#--------------FRAME INICIO--------------
s.configure('Frame1.TFrame', background='#1F618D')
V1 = ttk.Frame(nt, style='Frame1.TFrame')
nt.add(V1, text="INICIO")
#--------------FRAME CARGAR ARCHIVOS--------------
s.configure('Frame2.TFrame', background='#1F618D')
V2 = ttk.Frame(nt, style='Frame2.TFrame')
nt.add(V2, text="PRECIOS")
Label(V2,textvariable=rut,font="Helvetica 16",bg="#1F618D").place(x=100,y=280)
rut.set("NO SE HA CARGADO NADA")
Button(V2,text="SELECCIONAR ARCHIVO",command=DIALOGO,font="Helvetica 12",height=5,width=25).place(x=120, y=110)
Label(V2,textvariable=zm1,font="Helvetica 16",bg="#1F618D").place(x=560,y=280)
zm1.set("NO SE HA CARGADO NADA")
Button(V2,text="SELECCIONAR ARCHIVO",command=DIALOGO2,font="Helvetica 12",height=5,width=25).place(x=520, y=110)
L1=StringVar()
l2=StringVar()
l3=StringVar()
xo=IntVar()
yo=IntVar()
Label(V2,textvariable=L1,font="Helvetica 16",bg="#1F618D").place(x=30,y=30)
L1.set("CARGAR ARCHIVO DE PRECIOS (MED)")
Label(V2,textvariable=l2,font="Helvetica 16",bg="#1F618D").place(x=500,y=30)
l2.set("CARGAR ARCHIVO DE PRECIOS (MQ)")
#--------------FRAME REPORTES--------------
s.configure('Frame3.TFrame', background='#1F618D')
V3 = ttk.Frame(nt, style='Frame3.TFrame')
nt.add(V3, text=" VISUALIZAR REPORTE")
icodoct=PhotoImage(file="img\doct.png")
icodoct.subsample(1,1)
#Button(V3,image=icodoct,font="Helvetica 14",width=300,height=300).place(x=100, y=130)
Label(V3,textvariable=rut,font="Helvetica 16",bg="#1F618D").place(x=150,y=400)
rut.set("NO SE HA CARGADO NADA")
Button(V3,text="SELECCIONAR ARCHIVO",command=DIALOGO_REPORTE,font="Helvetica 12").place(x=250, y=350)
Button(V3,text="VER REPORTE",command=VER_REPORTE,height=5,width=25,font="Helvetica 12").place(x=650, y=350)
L6=StringVar()
año=StringVar()
dpto=StringVar()
area=StringVar()
distrito=StringVar()
municipio=StringVar()
t_servicio=StringVar()
servicio=StringVar()
l9=StringVar()
l8=StringVar()
l7=StringVar()
l6=StringVar()
l5=StringVar()
l4=StringVar()
a=StringVar()
b=StringVar()
c=StringVar()
Label(V3,textvariable=L6,font="Helvetica 16",bg="#1F618D").place(x=70,y=30)
L6.set("DATOS PARA EL REPORTE")
Label(V3,textvariable=l9,font="Helvetica 12",bg="#1F618D",fg="white").place(x=75,y=140)
l9.set("Departamento")
"""Label(V3,textvariable=l8,font="Helvetica 12",bg="#1F618D",fg="white").place(x=75,y=180)
l8.set("Area")
Label(V3,textvariable=l7,font="Helvetica 12",bg="#1F618D",fg="white").place(x=75,y=220)
l7.set("Distrito")"""
Label(V3,textvariable=l6,font="Helvetica 12",bg="#1F618D",fg="white").place(x=75,y=180)
l6.set("Municipio")
Label(V3,textvariable=l5,font="Helvetica 12",bg="#1F618D",fg="white").place(x=475,y=180)
"""l5.set("Tipo de Servicio")
Label(V3,textvariable=l4,font="Helvetica 12",bg="#1F618D",fg="white").place(x=475,y=220)
l4.set("Servicio")"""
Label(V3,textvariable=a,font="Helvetica 12",bg="#1F618D",fg="white").place(x=450,y=40)
a.set("Año")
Label(V3,textvariable=b,font="Helvetica 12",bg="#1F618D",fg="white").place(x=570,y=40)
b.set("Del Mes")
Label(V3,textvariable=c,font="Helvetica 12",bg="#1F618D",fg="white").place(x=760,y=40)
c.set("Al mes")
Entry(V3,textvariable=año,font="Helvetica 11",width=5).place(x=500,y=40)
#Entry(V3,textvariable=Mes_inicial,font="Helvetica 11",width=10).place(x=650,y=40)
#Entry(V3,textvariable=Mes_final,font="Helvetica 11",width=10).place(x=820,y=40)
Mes_inicial=ttk.Combobox(V3,width=10,font="Helvetica 11",state="readonly")
Mes_inicial.place(x=650,y=40)
Mes_inicial['values']=('Enero','Febrero','Marzo ','Abril','Mayo','Junio','Julio','Agosto','Septiembre','Octubre','Noviembre','Diciembre')
Mes_final=ttk.Combobox(V3,width=10,font="Helvetica 11",state="readonly")
Mes_final.place(x=820,y=40)
Mes_final['values']=('Enero','Febrero','Marzo ','Abril','Mayo','Junio','Julio','Agosto','Septiembre','Octubre','Noviembre','Diciembre')
TIPO=ttk.Combobox(V3,width=10,font="Helvetica 14",state="readonly")
TIPO.place(x=100,y=350)
TIPO['values']=('MED','MQ')
Entry(V3,textvariable=dpto,font="Helvetica 12").place(x=200,y=140)
#Entry(V3,textvariable=distrito,font="Helvetica 12").place(x=200,y=180)
#Entry(V3,textvariable=t_servicio,font="Helvetica 12").place(x=200,y=220)
Entry(V3,textvariable=distrito,font="Helvetica 12").place(x=200,y=180)
#Entry(V3,textvariable=municipio,font="Helvetica 12").place(x=600,y=180)
#Entry(V3,textvariable=servicio,font="Helvetica 12").place(x=600,y=220)
root.geometry("950x550")
root.mainloop()
CREAR_INTERFAZ() | [
"tkinter.ttk.Style",
"openpyxl.load_workbook",
"tkinter.ttk.Frame",
"tkinter.ttk.Combobox",
"webbrowser.open_new_tab",
"tkinter.messagebox.showinfo",
"tkinter.ttk.Notebook"
] | [((24549, 24567), 'tkinter.ttk.Notebook', 'ttk.Notebook', (['root'], {}), '(root)\n', (24561, 24567), False, 'from tkinter import ttk\n'), ((24615, 24626), 'tkinter.ttk.Style', 'ttk.Style', ([], {}), '()\n', (24624, 24626), False, 'from tkinter import ttk\n'), ((24836, 24872), 'tkinter.ttk.Frame', 'ttk.Frame', (['nt'], {'style': '"""Frame1.TFrame"""'}), "(nt, style='Frame1.TFrame')\n", (24845, 24872), False, 'from tkinter import ttk\n'), ((25023, 25059), 'tkinter.ttk.Frame', 'ttk.Frame', (['nt'], {'style': '"""Frame2.TFrame"""'}), "(nt, style='Frame2.TFrame')\n", (25032, 25059), False, 'from tkinter import ttk\n'), ((26040, 26076), 'tkinter.ttk.Frame', 'ttk.Frame', (['nt'], {'style': '"""Frame3.TFrame"""'}), "(nt, style='Frame3.TFrame')\n", (26049, 26076), False, 'from tkinter import ttk\n'), ((28429, 28494), 'tkinter.ttk.Combobox', 'ttk.Combobox', (['V3'], {'width': '(10)', 'font': '"""Helvetica 11"""', 'state': '"""readonly"""'}), "(V3, width=10, font='Helvetica 11', state='readonly')\n", (28441, 28494), False, 'from tkinter import ttk\n'), ((28683, 28748), 'tkinter.ttk.Combobox', 'ttk.Combobox', (['V3'], {'width': '(10)', 'font': '"""Helvetica 11"""', 'state': '"""readonly"""'}), "(V3, width=10, font='Helvetica 11', state='readonly')\n", (28695, 28748), False, 'from tkinter import ttk\n'), ((28929, 28994), 'tkinter.ttk.Combobox', 'ttk.Combobox', (['V3'], {'width': '(10)', 'font': '"""Helvetica 14"""', 'state': '"""readonly"""'}), "(V3, width=10, font='Helvetica 14', state='readonly')\n", (28941, 28994), False, 'from tkinter import ttk\n'), ((24308, 24347), 'webbrowser.open_new_tab', 'webbrowser.open_new_tab', (['"""REPORTE.html"""'], {}), "('REPORTE.html')\n", (24331, 24347), False, 'import webbrowser\n'), ((752, 820), 'tkinter.messagebox.showinfo', 'messagebox.showinfo', ([], {'message': '"""Debe cargar un archivo"""', 'title': '"""ERROR"""'}), "(message='Debe cargar un archivo', title='ERROR')\n", (771, 820), False, 'from tkinter import messagebox\n'), 
((1948, 2016), 'tkinter.messagebox.showinfo', 'messagebox.showinfo', ([], {'message': '"""Debe cargar un archivo"""', 'title': '"""ERROR"""'}), "(message='Debe cargar un archivo', title='ERROR')\n", (1967, 2016), False, 'from tkinter import messagebox\n'), ((3176, 3244), 'tkinter.messagebox.showinfo', 'messagebox.showinfo', ([], {'message': '"""Debe cargar un archivo"""', 'title': '"""ERROR"""'}), "(message='Debe cargar un archivo', title='ERROR')\n", (3195, 3244), False, 'from tkinter import messagebox\n'), ((957, 1001), 'openpyxl.load_workbook', 'openpyxl.load_workbook', (['ruta'], {'data_only': '(True)'}), '(ruta, data_only=True)\n', (979, 1001), False, 'import openpyxl\n'), ((2153, 2197), 'openpyxl.load_workbook', 'openpyxl.load_workbook', (['ruta'], {'data_only': '(True)'}), '(ruta, data_only=True)\n', (2175, 2197), False, 'import openpyxl\n'), ((3380, 3424), 'openpyxl.load_workbook', 'openpyxl.load_workbook', (['ruta'], {'data_only': '(True)'}), '(ruta, data_only=True)\n', (3402, 3424), False, 'import openpyxl\n')] |
""" Plot data split by compartments
Classes:
* :py:class:`CompartmentPlot`: compartment plotting tool
"""
# Standard lib
from typing import Tuple, Optional, Dict
# 3rd party imports
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
# Our own imports
from .styling import set_plot_style
from .utils import bootstrap_ci, get_histogram
# Classes
class CompartmentPlot(object):
""" Plot data split by multiple compartments
:param int n_compartments:
How many compartments to split the data into
:param int topk:
How many samples to take from each compartment
"""
    def __init__(self,
                 n_compartments: int,
                 topk: Optional[int] = None,
                 figsize: Tuple[int] = (8, 8),
                 plot_style: str = 'dark',
                 suffix: str = '.png'):
        """ Initialize the compartment plotter

        :param int n_compartments:
            How many compartments to split the data into
        :param int topk:
            How many samples to take from each compartment (if None, an
            even share is computed later in :py:meth:`calc_indices`)
        :param tuple[int] figsize:
            Figure size passed to matplotlib when creating plots
        :param str plot_style:
            Style name handed to ``set_plot_style`` for every plot
        :param str suffix:
            File suffix used when saving plots
        """
        self.n_compartments = n_compartments
        self.topk = topk
        self.figsize = figsize
        self.plot_style = plot_style
        self.suffix = suffix
        # Color palettes for the different compartments
        # (at most 6 compartments are supported by this fixed color list)
        self.colors = (['blue', 'orange', 'green', 'red', 'purple', 'grey'])[:n_compartments]
        self.palletes = [sns.color_palette(c.capitalize()+'s', n_colors=10)
                         for c in self.colors]
        # Calculated values, filled in by calc_indices/split_comparison
        self._bin_indices = None  # list of sorted index arrays, one per compartment
        self._bin_values = None  # list of value arrays, one per compartment
        self._xdata = None  # shared x (time) vector
        self._xcolumn = None  # name of the x column
        self._ycolumn = None  # name of the binned y column
        self._plotdata = None  # dict of per-bin statistics
        self._distdata = None  # dict describing the histogram split
        self._total_count = None  # non-NaN sample count per timepoint
def calc_indices(self, values: np.ndarray):
""" Calculate the indicies for each bin
:param ndarray values:
The values to use to generate the bins
"""
if self.topk is None:
self.topk = values.shape[0] // self.n_compartments
if values.shape[0] < self.topk * self.n_compartments:
err = 'Got too few values for {} samples of {} compartments: {}'
err = err.format(self.topk, self.n_compartments, values.shape[0])
raise ValueError(err)
print(f'Spliting into {self.n_compartments} compartments of {self.topk} samples each')
# Sort all the indices
indices = np.argsort(values)
# Split into even bins of size topk
bin_start = np.floor(np.linspace(0, indices.shape[0]-self.topk, self.n_compartments))
bin_start[bin_start < 0] = 0
bin_end = bin_start + self.topk
bin_end[bin_end > indices.shape[0]] = indices.shape[0]
# Extract the sorted bins for each compartment
self._bin_indices = [indices[int(s):int(e)] for s, e in zip(bin_start, bin_end)]
def calc_bin(self,
bin_value: np.ndarray,
label: str,
total_count: int) -> Dict[str, float]:
""" Calculate all the stats for a single bin
:param ndarray bin_value:
The 2D array of n timepoints x k samples
:param str label:
The label for this category
:param int total_count:
The total number of samples in this bin
:returns:
A dictionary of bin stats for plotting
"""
bin_mean = np.nanmean(bin_value, axis=1)
bin_std = np.nanstd(bin_value, axis=1)
bin5, bin25, bin50, bin75, bin95 = np.nanpercentile(bin_value, [5, 25, 50, 75, 95], axis=1)
bin_mean_ci0, bin_mean_ci1 = bootstrap_ci(bin_value, func=np.nanmean, axis=1)
assert bin_mean_ci0.shape == bin_mean.shape
assert bin_mean_ci1.shape == bin_mean.shape
bin_median_ci0, bin_median_ci1 = bootstrap_ci(bin_value, func=np.nanmedian, axis=1)
assert bin_median_ci0.shape == bin50.shape
assert bin_median_ci0.shape == bin50.shape
# Work out how many samples/bin we have in each timepoint
bin_count = np.sum(~np.isnan(bin_value), axis=1)
bin_support = bin_count / total_count
bin_support[~np.isfinite(bin_support)] = 0
# Stash all the values for later
return {
'mean' + label: bin_mean,
'mean ci low' + label: bin_mean_ci0,
'mean ci high' + label: bin_mean_ci1,
'std' + label: bin_std,
'p5' + label: bin5,
'p25' + label: bin25,
'p50' + label: bin50,
'p50 ci low' + label: bin_median_ci0,
'p50 ci high' + label: bin_median_ci1,
'p75' + label: bin75,
'p95' + label: bin95,
'count' + label: bin_count,
'support' + label: bin_support,
}
def split_comparison(self,
data: Dict[str, np.ndarray],
xcolumn: str,
ycolumn: str,
integrate_values: bool = False):
""" Split the comparison by the bins
:param dict[str, Any] data:
A dictionary containing the xcolumn and ycolumn data
:param str xcolumn:
The column containing the shared time vector to plot along
:param str ycolumn:
The column containing the values to bin along
:param bool integrate_values:
If True, integrate the resulting statistics over the xdata range
"""
xdata = data[xcolumn]
plotdata = {
xcolumn: xdata,
}
values = np.stack(data[ycolumn], axis=1)
total_count = np.sum(~np.isnan(values), axis=1)
if values.shape[0] != xdata.shape[0]:
raise ValueError('Expected {} with shape {}, got {}'.format(ycolumn, xdata.shape[0], values.shape[0]))
bin_values = []
# Add a set for all the values
plotdata.update(self.calc_bin(values, f' {ycolumn} all', total_count))
for i, indices in enumerate(self._bin_indices):
bin_value = values[:, indices]
bin_values.append(bin_value)
label = f' {ycolumn} bin{i+1}'
plotdata.update(self.calc_bin(bin_value, label, total_count))
self._plotdata = plotdata
self._xdata = xdata
self._xcolumn = xcolumn
self._ycolumn = ycolumn
self._bin_values = bin_values
self._total_count = total_count
def calc_envelope(self, label: str, envelope: str = 'std') -> Tuple[float]:
""" Calculate the envelope (high/low) stats for a label
:param str label:
The label to calculate the envelope for
:param str envelope:
Which stats to calculate the envelope with
:returns:
A tuple of low, high values
"""
plotdata = self._plotdata
if envelope == 'std':
value_mean = plotdata['mean' + label]
value_std = plotdata['std' + label]
value_st = value_mean - value_std
value_ed = value_mean + value_std
elif envelope == 'mean ci':
value_st = plotdata['mean ci low' + label]
value_ed = plotdata['mean ci high' + label]
elif envelope == 'median ci':
value_st = plotdata['p50 ci low' + label]
value_ed = plotdata['p50 ci high' + label]
elif envelope == 'iqr':
value_st = plotdata['p25' + label]
value_ed = plotdata['p75' + label]
else:
raise ValueError('Unknown envelope function "{}"'.format(envelope))
return value_st, value_ed
    def plot_raw_tracks(self, outfile=None, xlabel=None, ylabel=None):
        """ Plot individual raw tracks, colored by compartment

        :param Path outfile:
            If not None, the file to write the plot to
        :param str xlabel:
            If not None, the label for the x-axis
        :param str ylabel:
            If not None, the label for the y-axis
        """
        with set_plot_style(self.plot_style) as style:
            fig, ax = plt.subplots(1, 1, figsize=self.figsize)
            for i, bin_value in enumerate(self._bin_values):
                # One color family per compartment
                ax.set_prop_cycle(color=self.palletes[i])
                ax.plot(self._xdata, bin_value, '-')
            if xlabel is not None:
                ax.set_xlabel(xlabel)
            if ylabel is not None:
                ax.set_ylabel(ylabel)
            style.show(outfile=outfile, fig=fig)
    def plot_mean_tracks(self, outfile=None, xlabel=None, ylabel=None, envelope='std', mode='split'):
        """ Mean and deviation envelope

        :param Path outfile:
            If not None, the file to write out
        :param str xlabel:
            Label for the x-axis (time)
        :param str ylabel:
            Label for the y-axis (category)
        :param str envelope:
            Which envelope to shade around the mean ('std', 'mean ci',
            'median ci' or 'iqr' - see :py:meth:`calc_envelope`)
        :param str mode:
            'split' draws one mean curve per compartment, 'all' draws a
            single curve over all samples
        """
        plotdata = self._plotdata
        with set_plot_style(self.plot_style) as style:
            fig, ax = plt.subplots(1, 1, figsize=self.figsize)
            if mode == 'split':
                for i in range(self.n_compartments):
                    label = ' {} bin{}'.format(self._ycolumn, i+1)
                    value_mean = plotdata['mean' + label]
                    value_st, value_ed = self.calc_envelope(label, envelope)
                    # Shaded envelope first, mean line drawn on top
                    ax.fill_between(self._xdata, value_st, value_ed,
                                    facecolor=self.colors[i], alpha=0.5)
                    ax.plot(self._xdata, value_mean, '-', color=self.colors[i], linewidth=2)
            elif mode == 'all':
                label = ' {} all'.format(self._ycolumn)
                value_mean = plotdata['mean' + label]
                value_st, value_ed = self.calc_envelope(label, envelope)
                ax.fill_between(self._xdata, value_st, value_ed,
                                facecolor='b', alpha=0.5)
                ax.plot(self._xdata, value_mean, '-', color='b', linewidth=2)
            else:
                raise ValueError('Unknown mode {}'.format(mode))
            if xlabel is not None:
                ax.set_xlabel(xlabel)
            if ylabel is not None:
                ax.set_ylabel(ylabel)
            style.show(outfile=outfile, fig=fig)
    def plot_median_tracks(self, outfile=None, xlabel=None, ylabel=None, envelope='iqr', mode='split'):
        """ Median and 25/75% envelope

        :param Path outfile:
            If not None, the file to write out
        :param str xlabel:
            Label for the x-axis (time)
        :param str ylabel:
            Label for the y-axis (category)
        :param str envelope:
            Which envelope to shade around the median (default 'iqr' -
            see :py:meth:`calc_envelope`)
        :param str mode:
            'split' draws one median curve per compartment, 'all' draws a
            single curve over all samples
        """
        plotdata = self._plotdata
        with set_plot_style(self.plot_style) as style:
            fig, ax = plt.subplots(1, 1, figsize=self.figsize)
            if mode == 'split':
                for i in range(self.n_compartments):
                    label = ' {} bin{}'.format(self._ycolumn, i+1)
                    value_mid = plotdata['p50' + label]
                    value_st, value_ed = self.calc_envelope(label, envelope)
                    # Shaded envelope first, median line drawn on top
                    ax.fill_between(self._xdata, value_st, value_ed,
                                    facecolor=self.colors[i], alpha=0.5)
                    ax.plot(self._xdata, value_mid, '-', color=self.colors[i], linewidth=2)
            elif mode == 'all':
                label = ' {} all'.format(self._ycolumn)
                value_mean = plotdata['p50' + label]
                value_st, value_ed = self.calc_envelope(label, envelope)
                ax.fill_between(self._xdata, value_st, value_ed,
                                facecolor='b', alpha=0.5)
                ax.plot(self._xdata, value_mean, '-', color='b', linewidth=2)
            else:
                raise ValueError('Unknown mode {}'.format(mode))
            if xlabel is not None:
                ax.set_xlabel(xlabel)
            if ylabel is not None:
                ax.set_ylabel(ylabel)
            style.show(outfile=outfile, fig=fig)
    def plot_track_support(self, outfile=None, xlabel=None, ylabel=None):
        """ Plot how many tracks are in a given bin at a given time

        Left axis: absolute counts (black line is the overall total).
        Right axis: each bin's share as a percent of the total
        (black line marks 100%).

        :param Path outfile:
            If not None, the file to write the plot to
        :param str xlabel:
            If not None, the label for both x-axes
        :param str ylabel:
            Unused; kept for signature consistency with the other plot
            methods
        """
        plotdata = self._plotdata
        with set_plot_style(self.plot_style) as style:
            fig_x, fig_y = self.figsize
            fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(fig_x*2, fig_y))
            ax1.plot(self._xdata, self._total_count, '-k', linewidth=2)
            ax2.hlines([100], np.min(self._xdata), np.max(self._xdata), colors=['k'], linewidth=2)
            for i in range(self.n_compartments):
                label = ' {} bin{}'.format(self._ycolumn, i+1)
                count = plotdata['count' + label]
                support = plotdata['support' + label]
                ax1.plot(self._xdata, count, '-', color=self.colors[i], linewidth=2)
                ax2.plot(self._xdata, support*100, '-', color=self.colors[i], linewidth=2)
            if xlabel is not None:
                ax1.set_xlabel(xlabel)
                ax2.set_xlabel(xlabel)
            ax1.set_ylabel('Num Tracks')
            ax2.set_ylabel('Percent Total Tracks')
            # Small headroom above the largest count
            ax1.set_ylim([0, np.max(self._total_count)*1.02])
            ax2.set_ylim([0, 102])
            style.show(outfile=outfile, fig=fig)
def plot_dist_histogram(self, values, outfile=None, xlabel=None, ylabel=None):
""" Plot where on the histogram each value occurs
:param ndarray values:
The values to generate a histogram for
:param Path outfile:
If not None, the path to save the plot to
"""
# Histogram the distribution and which compartments are being labeled
_, _, kernel_x, kernel_y = get_histogram(values, bins=10, kernel_smoothing=True)
compartment_values = [values[indices] for indices in self._bin_indices]
distdata = {
'compartment': [],
'value': [],
'density': [],
}
# Now, plot each compartment on the total histogram
with set_plot_style(self.plot_style) as style:
fig, ax = plt.subplots(1, 1, figsize=self.figsize)
ax.plot(kernel_x, kernel_y, '-', color='gray')
distdata['compartment'].extend(0 for _ in kernel_x)
distdata['value'].extend(kernel_x)
distdata['density'].extend(kernel_y)
for i, compartment_value in enumerate(compartment_values):
compartment_min = np.min(compartment_value)
compartment_max = np.max(compartment_value)
kernel_mask = np.logical_and(kernel_x >= compartment_min,
kernel_x <= compartment_max)
compartment_x = kernel_x[kernel_mask]
compartment_y = kernel_y[kernel_mask]
distdata['compartment'].extend(i+1 for _ in compartment_x)
distdata['value'].extend(compartment_x)
distdata['density'].extend(compartment_y)
ax.fill_between(compartment_x, 0, compartment_y,
facecolor=self.colors[i], alpha=0.5)
ax.plot(compartment_x, compartment_y, '-',
color=self.colors[i], linewidth=2)
if xlabel is not None:
ax.set_xlabel(xlabel)
if ylabel is not None:
ax.set_ylabel(ylabel)
style.show(outfile=outfile, fig=fig)
self._distdata = distdata
def save_plotdata(self, outfile, suffix='.csv'):
""" Save the plot data """
if self._plotdata is None:
raise ValueError('No distribution data, call split_comparison first')
outfile = outfile.parent / (outfile.stem + suffix)
print('Writing distribution data to {}'.format(outfile))
plotdata = pd.DataFrame(self._plotdata)
if suffix == '.csv':
plotdata.to_csv(str(outfile), header=True, index=False)
elif suffix == '.xlsx':
plotdata.to_excel(str(outfile), header=True, index=False)
else:
raise KeyError('Unknown plot data output file type: {}'.format(outfile))
def save_distdata(self, outfile, suffix='.csv'):
""" Save the distribution data """
if self._distdata is None:
raise ValueError('No distribution data, call plot_dist_histogram first')
outfile = outfile.parent / (outfile.stem + suffix)
print('Writing distribution data to {}'.format(outfile))
distdata = pd.DataFrame(self._distdata)
if suffix == '.csv':
distdata.to_csv(str(outfile), header=True, index=False)
elif suffix == '.xlsx':
distdata.to_excel(str(outfile), header=True, index=False)
else:
raise KeyError('Unknown dist data output file type: {}'.format(outfile))
| [
"numpy.nanstd",
"numpy.nanpercentile",
"numpy.logical_and",
"numpy.max",
"numpy.argsort",
"numpy.nanmean",
"numpy.stack",
"numpy.linspace",
"numpy.isnan",
"numpy.isfinite",
"numpy.min",
"pandas.DataFrame",
"matplotlib.pyplot.subplots"
] | [((2280, 2298), 'numpy.argsort', 'np.argsort', (['values'], {}), '(values)\n', (2290, 2298), True, 'import numpy as np\n'), ((3264, 3293), 'numpy.nanmean', 'np.nanmean', (['bin_value'], {'axis': '(1)'}), '(bin_value, axis=1)\n', (3274, 3293), True, 'import numpy as np\n'), ((3312, 3340), 'numpy.nanstd', 'np.nanstd', (['bin_value'], {'axis': '(1)'}), '(bin_value, axis=1)\n', (3321, 3340), True, 'import numpy as np\n'), ((3385, 3441), 'numpy.nanpercentile', 'np.nanpercentile', (['bin_value', '[5, 25, 50, 75, 95]'], {'axis': '(1)'}), '(bin_value, [5, 25, 50, 75, 95], axis=1)\n', (3401, 3441), True, 'import numpy as np\n'), ((5430, 5461), 'numpy.stack', 'np.stack', (['data[ycolumn]'], {'axis': '(1)'}), '(data[ycolumn], axis=1)\n', (5438, 5461), True, 'import numpy as np\n'), ((15139, 15167), 'pandas.DataFrame', 'pd.DataFrame', (['self._plotdata'], {}), '(self._plotdata)\n', (15151, 15167), True, 'import pandas as pd\n'), ((15828, 15856), 'pandas.DataFrame', 'pd.DataFrame', (['self._distdata'], {}), '(self._distdata)\n', (15840, 15856), True, 'import pandas as pd\n'), ((2373, 2438), 'numpy.linspace', 'np.linspace', (['(0)', '(indices.shape[0] - self.topk)', 'self.n_compartments'], {}), '(0, indices.shape[0] - self.topk, self.n_compartments)\n', (2384, 2438), True, 'import numpy as np\n'), ((7661, 7701), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': 'self.figsize'}), '(1, 1, figsize=self.figsize)\n', (7673, 7701), True, 'import matplotlib.pyplot as plt\n'), ((8551, 8591), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': 'self.figsize'}), '(1, 1, figsize=self.figsize)\n', (8563, 8591), True, 'import matplotlib.pyplot as plt\n'), ((10069, 10109), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': 'self.figsize'}), '(1, 1, figsize=self.figsize)\n', (10081, 10109), True, 'import matplotlib.pyplot as plt\n'), ((11630, 11676), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], 
{'figsize': '(fig_x * 2, fig_y)'}), '(1, 2, figsize=(fig_x * 2, fig_y))\n', (11642, 11676), True, 'import matplotlib.pyplot as plt\n'), ((13410, 13450), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': 'self.figsize'}), '(1, 1, figsize=self.figsize)\n', (13422, 13450), True, 'import matplotlib.pyplot as plt\n'), ((3922, 3941), 'numpy.isnan', 'np.isnan', (['bin_value'], {}), '(bin_value)\n', (3930, 3941), True, 'import numpy as np\n'), ((4018, 4042), 'numpy.isfinite', 'np.isfinite', (['bin_support'], {}), '(bin_support)\n', (4029, 4042), True, 'import numpy as np\n'), ((5492, 5508), 'numpy.isnan', 'np.isnan', (['values'], {}), '(values)\n', (5500, 5508), True, 'import numpy as np\n'), ((11777, 11796), 'numpy.min', 'np.min', (['self._xdata'], {}), '(self._xdata)\n', (11783, 11796), True, 'import numpy as np\n'), ((11798, 11817), 'numpy.max', 'np.max', (['self._xdata'], {}), '(self._xdata)\n', (11804, 11817), True, 'import numpy as np\n'), ((13775, 13800), 'numpy.min', 'np.min', (['compartment_value'], {}), '(compartment_value)\n', (13781, 13800), True, 'import numpy as np\n'), ((13835, 13860), 'numpy.max', 'np.max', (['compartment_value'], {}), '(compartment_value)\n', (13841, 13860), True, 'import numpy as np\n'), ((13891, 13963), 'numpy.logical_and', 'np.logical_and', (['(kernel_x >= compartment_min)', '(kernel_x <= compartment_max)'], {}), '(kernel_x >= compartment_min, kernel_x <= compartment_max)\n', (13905, 13963), True, 'import numpy as np\n'), ((12473, 12498), 'numpy.max', 'np.max', (['self._total_count'], {}), '(self._total_count)\n', (12479, 12498), True, 'import numpy as np\n')] |
import torch
from torch import nn
import torch.optim as optim
import torch.multiprocessing as mp
import numpy as np
import time
class MPManager(object):
def __init__(self, num_workers):
"""
manage a single-instruction-multiple-data (SIMD) scheme
:param int num_workers: The number of processors to run.
"""
mp.set_start_method('spawn')
# Counting the current batch size
self.num_workers = num_workers
# A pool of processes
self.pool = mp.Pool(processes=num_workers)
def run(self, function, arguments):
"""
:param function : the instruction
:param arguments : list of things processors loop over
can be anything the function works on, e.g. model + data
"""
output_and_grads = self.pool.map(function, arguments)
return output_and_grads
| [
"torch.multiprocessing.Pool",
"torch.multiprocessing.set_start_method"
] | [((353, 381), 'torch.multiprocessing.set_start_method', 'mp.set_start_method', (['"""spawn"""'], {}), "('spawn')\n", (372, 381), True, 'import torch.multiprocessing as mp\n'), ((513, 543), 'torch.multiprocessing.Pool', 'mp.Pool', ([], {'processes': 'num_workers'}), '(processes=num_workers)\n', (520, 543), True, 'import torch.multiprocessing as mp\n')] |
import tensorflow as tf
from tensorflow.keras.layers import Layer, Dense, Reshape, Embedding, Concatenate, Conv2D
from tensorflow.keras.models import Model
import numpy as np
class SelfAttention(Model):
def __init__(self, d_model, spatial_dims, positional_encoding=True, name="self_attention"):
'''
d_model : number of output channels
spatial_dim : spatial dimensions of input tensor (x , y)
if positional_encoding: depth must correspond to input channel number
adapted from: https://www.tensorflow.org/tutorials/text/transformer
'''
super().__init__(name=name)
self.d_model = d_model
self.spatial_dims=spatial_dims
self.spatial_dim = np.prod(spatial_dims)
self.wq = Dense(self.d_model, name=name+"_q")
self.wk = Dense(self.d_model, name=name+"_k")
self.wv = Dense(self.d_model, name=name+"_w")
self.positional_encoding=positional_encoding
if positional_encoding:
self.pos_embedding = Embedding(self.spatial_dim, d_model, name=name+"pos_enc") # TODO test other positional encoding. in particular that encodes X and Y
def call(self, x):
'''
x : tensor with shape (batch_size, y, x, channels)
'''
shape = tf.shape(x)
batch_size = shape[0]
#spatial_dims = shape[1:-1]
#spatial_dim = tf.reduce_prod(spatial_dims)
depth_dim = shape[3]
if self.positional_encoding:
x_index = tf.range(self.spatial_dim, dtype=tf.int32)
pos_emb = self.pos_embedding(x_index) # (spa_dim, d_model)
pos_emb = tf.reshape(pos_emb, (self.spatial_dims[0], self.spatial_dims[1], self.d_model)) #for broadcasting purpose
x = x + pos_emb # broadcast
q = self.wq(x) # (batch_size, *spa_dims, d_model)
k = self.wk(x) # (batch_size, *spa_dims, d_model)
v = self.wv(x) # (batch_size, *spa_dims, d_model)
q = tf.reshape(q, (batch_size, -1, depth_dim)) # (batch_size, spa_dim, d_model)
k = tf.reshape(k, (batch_size, -1, depth_dim))
v = tf.reshape(v, (batch_size, -1, depth_dim))
# scaled_attention.shape == (batch_size, spa_dims, depth)
# attention_weights.shape == (batch_size, spa_dims, spa_dims)
scaled_attention, attention_weights = scaled_dot_product_attention(q, k, v)
output = tf.reshape(scaled_attention, (batch_size, self.spatial_dims[0], self.spatial_dims[1], self.d_model))
tf.identity(attention_weights, name=self.name+"_attention_weights")
return output, attention_weights
def compute_output_shape(self, input_shape):
return input_shape[:-1]+(self.d_model,), (input_shape[0],self.spatial_dim,self.spatial_dim)
def scaled_dot_product_attention(q, k, v):
"""Calculate the attention weights.
q, k, v must have matching leading dimensions.
k, v must have matching penultimate dimension, i.e.: seq_len_k = seq_len_v.
The mask has different shapes depending on its type(padding or look ahead)
but it must be broadcastable for addition.
Args:
q: query shape == (..., seq_len_q, depth)
k: key shape == (..., seq_len_k, depth)
v: value shape == (..., seq_len_v, depth_v)
Returns:
output, attention_weights
from : https://www.tensorflow.org/tutorials/text/transformer
"""
matmul_qk = tf.matmul(q, k, transpose_b=True) # (..., seq_len_q, seq_len_k)
# scale matmul_qk
dk = tf.cast(tf.shape(k)[-1], tf.float32)
scaled_attention_logits = matmul_qk / tf.math.sqrt(dk)
# softmax is normalized on the last axis (seq_len_k) so that the scores
# add up to 1.
attention_weights = tf.nn.softmax(scaled_attention_logits, axis=-1) # (..., seq_len_q, seq_len_k)
output = tf.matmul(attention_weights, v) # (..., seq_len_q, depth_v)
return output, attention_weights
| [
"numpy.prod",
"tensorflow.shape",
"tensorflow.math.sqrt",
"tensorflow.keras.layers.Embedding",
"tensorflow.range",
"tensorflow.keras.layers.Dense",
"tensorflow.matmul",
"tensorflow.nn.softmax",
"tensorflow.reshape",
"tensorflow.identity"
] | [((3406, 3439), 'tensorflow.matmul', 'tf.matmul', (['q', 'k'], {'transpose_b': '(True)'}), '(q, k, transpose_b=True)\n', (3415, 3439), True, 'import tensorflow as tf\n'), ((3719, 3766), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['scaled_attention_logits'], {'axis': '(-1)'}), '(scaled_attention_logits, axis=-1)\n', (3732, 3766), True, 'import tensorflow as tf\n'), ((3812, 3843), 'tensorflow.matmul', 'tf.matmul', (['attention_weights', 'v'], {}), '(attention_weights, v)\n', (3821, 3843), True, 'import tensorflow as tf\n'), ((736, 757), 'numpy.prod', 'np.prod', (['spatial_dims'], {}), '(spatial_dims)\n', (743, 757), True, 'import numpy as np\n'), ((776, 813), 'tensorflow.keras.layers.Dense', 'Dense', (['self.d_model'], {'name': "(name + '_q')"}), "(self.d_model, name=name + '_q')\n", (781, 813), False, 'from tensorflow.keras.layers import Layer, Dense, Reshape, Embedding, Concatenate, Conv2D\n'), ((830, 867), 'tensorflow.keras.layers.Dense', 'Dense', (['self.d_model'], {'name': "(name + '_k')"}), "(self.d_model, name=name + '_k')\n", (835, 867), False, 'from tensorflow.keras.layers import Layer, Dense, Reshape, Embedding, Concatenate, Conv2D\n'), ((884, 921), 'tensorflow.keras.layers.Dense', 'Dense', (['self.d_model'], {'name': "(name + '_w')"}), "(self.d_model, name=name + '_w')\n", (889, 921), False, 'from tensorflow.keras.layers import Layer, Dense, Reshape, Embedding, Concatenate, Conv2D\n'), ((1297, 1308), 'tensorflow.shape', 'tf.shape', (['x'], {}), '(x)\n', (1305, 1308), True, 'import tensorflow as tf\n'), ((1989, 2031), 'tensorflow.reshape', 'tf.reshape', (['q', '(batch_size, -1, depth_dim)'], {}), '(q, (batch_size, -1, depth_dim))\n', (1999, 2031), True, 'import tensorflow as tf\n'), ((2077, 2119), 'tensorflow.reshape', 'tf.reshape', (['k', '(batch_size, -1, depth_dim)'], {}), '(k, (batch_size, -1, depth_dim))\n', (2087, 2119), True, 'import tensorflow as tf\n'), ((2132, 2174), 'tensorflow.reshape', 'tf.reshape', (['v', '(batch_size, -1, depth_dim)'], 
{}), '(v, (batch_size, -1, depth_dim))\n', (2142, 2174), True, 'import tensorflow as tf\n'), ((2413, 2518), 'tensorflow.reshape', 'tf.reshape', (['scaled_attention', '(batch_size, self.spatial_dims[0], self.spatial_dims[1], self.d_model)'], {}), '(scaled_attention, (batch_size, self.spatial_dims[0], self.\n spatial_dims[1], self.d_model))\n', (2423, 2518), True, 'import tensorflow as tf\n'), ((2522, 2591), 'tensorflow.identity', 'tf.identity', (['attention_weights'], {'name': "(self.name + '_attention_weights')"}), "(attention_weights, name=self.name + '_attention_weights')\n", (2533, 2591), True, 'import tensorflow as tf\n'), ((3582, 3598), 'tensorflow.math.sqrt', 'tf.math.sqrt', (['dk'], {}), '(dk)\n', (3594, 3598), True, 'import tensorflow as tf\n'), ((1038, 1097), 'tensorflow.keras.layers.Embedding', 'Embedding', (['self.spatial_dim', 'd_model'], {'name': "(name + 'pos_enc')"}), "(self.spatial_dim, d_model, name=name + 'pos_enc')\n", (1047, 1097), False, 'from tensorflow.keras.layers import Layer, Dense, Reshape, Embedding, Concatenate, Conv2D\n'), ((1516, 1558), 'tensorflow.range', 'tf.range', (['self.spatial_dim'], {'dtype': 'tf.int32'}), '(self.spatial_dim, dtype=tf.int32)\n', (1524, 1558), True, 'import tensorflow as tf\n'), ((1652, 1731), 'tensorflow.reshape', 'tf.reshape', (['pos_emb', '(self.spatial_dims[0], self.spatial_dims[1], self.d_model)'], {}), '(pos_emb, (self.spatial_dims[0], self.spatial_dims[1], self.d_model))\n', (1662, 1731), True, 'import tensorflow as tf\n'), ((3511, 3522), 'tensorflow.shape', 'tf.shape', (['k'], {}), '(k)\n', (3519, 3522), True, 'import tensorflow as tf\n')] |
import csv
def save_minimal_pairs(output_filename, to_output, write_header=True):
if isinstance(output_filename, str):
outf = open(output_filename, mode='w', encoding='utf-8-sig', newline='')
needs_closed = True
else:
outf = output_filename
needs_closed = False
writer = csv.writer(outf, delimiter='\t')
if write_header:
writer.writerow(['FIRST_SEGMENT', 'SECOND_SEGMENT',
'FIRST_WORD', 'FIRST_WORD_TRANSCRIPTION',
'SECOND_WORD', 'SECOND_WORD_TRANSCRIPTION'])
for _, _, ret_dict in to_output:
for seg_pair, word_pair_set in ret_dict.items():
for word_pair in word_pair_set:
writer.writerow([seg_pair[0], seg_pair[1],
word_pair[0][0], word_pair[0][1],
word_pair[1][0], word_pair[1][1]])
if needs_closed:
outf.close()
| [
"csv.writer"
] | [((318, 350), 'csv.writer', 'csv.writer', (['outf'], {'delimiter': '"""\t"""'}), "(outf, delimiter='\\t')\n", (328, 350), False, 'import csv\n')] |
from typing import Callable
from jax import lax
from flax import linen as nn
class MultiTaskDense(nn.Module):
features: int
n_tasks: int
kernel_init: Callable = nn.initializers.lecun_normal()
bias_init: Callable = nn.initializers.zeros
@nn.compact
def __call__(self, inputs):
kernel = self.param(
"kernel", self.kernel_init, (self.n_tasks, inputs.shape[-1], self.features)
)
y = lax.dot_general(
inputs, kernel, dimension_numbers=(((2,), (1,)), ((0,), (0,)))
)
bias = self.param("bias", self.bias_init, (self.n_tasks, 1, self.features))
y = y + bias
return y
| [
"flax.linen.initializers.lecun_normal",
"jax.lax.dot_general"
] | [((175, 205), 'flax.linen.initializers.lecun_normal', 'nn.initializers.lecun_normal', ([], {}), '()\n', (203, 205), True, 'from flax import linen as nn\n'), ((442, 521), 'jax.lax.dot_general', 'lax.dot_general', (['inputs', 'kernel'], {'dimension_numbers': '(((2,), (1,)), ((0,), (0,)))'}), '(inputs, kernel, dimension_numbers=(((2,), (1,)), ((0,), (0,))))\n', (457, 521), False, 'from jax import lax\n')] |
'''
Collect results in Quantum ESPRESSO
'''
import sys
import numpy as np
from pymatgen.core import Structure
from . import structure as qe_structure
from ... import utility
from ...IO import pkl_data
from ...IO import read_input as rin
def collect_qe(current_id, work_path):
# ---------- check optimization in previous stage
try:
with open(work_path+rin.qe_outfile, 'r') as fpout:
lines = fpout.readlines()
check_opt = 'not_yet'
for line in lines:
if 'End final coordinates' in line:
check_opt = 'done'
except Exception as e:
print(e)
check_opt = 'no_file'
# ---------- obtain energy and magmom
try:
with open(work_path+rin.qe_outfile, 'r') as fpout:
lines = fpout.readlines()
energy = np.nan
for line in reversed(lines):
if line.startswith('!'):
energy = float(line.split()[-2]) # in Ry
energy = energy * utility.ry2ev / float(rin.natot) # Ry/cell --> eV/atom
break
magmom = np.nan # implemented by <NAME> 2020/10/04
for line in reversed(lines):
if line.find("total magnetization") >= 0:
muB = line.split()
magmom = float(muB[3])
break
except Exception as e:
energy = np.nan # error
magmom = np.nan # error
print(e)
print(' Structure ID {0}, could not obtain energy from {1}'.format(
current_id, rin.qe_outfile))
# ---------- collect the last structure
try:
lines_cell = qe_structure.extract_cell_parameters(
work_path+rin.qe_outfile)
if lines_cell is None:
lines_cell = qe_structure.extract_cell_parameters(
work_path+rin.qe_infile)
lines_atom = qe_structure.extract_atomic_positions(
work_path+rin.qe_outfile)
if lines_atom is None:
lines_atom = qe_structure.extract_atomic_positions(
work_path+rin.qe_infile)
opt_struc = qe_structure.from_lines(lines_cell, lines_atom)
# ------ opt_qe-structure
with open('./data/opt_qe-structure', 'a') as fstruc:
fstruc.write('# ID {0:d}\n'.format(current_id))
qe_structure.write(opt_struc, './data/opt_qe-structure', mode='a')
except Exception as e:
print(e)
opt_struc = None
# ---------- check
if np.isnan(energy):
opt_struc = None
if opt_struc is None:
energy = np.nan
magmom = np.nan
# ---------- return
return opt_struc, energy, magmom, check_opt
def get_energy_step_qe(energy_step_data, current_id, work_path):
'''
get energy step data in eV/atom
energy_step_data[ID][stage][step]
energy_step_data[ID][0] <-- stage 1
energy_step_data[ID][1] <-- stage 2
'''
try:
# ---------- read output file
with open(work_path+rin.qe_outfile, 'r') as f:
lines = f.readlines()
# ---------- get energy step
energy_step = []
final_flag = False # End final coordinates
vc_flag = False # vc-relax
for line in lines:
if line.startswith('!'):
energy_step.append(line.split()[4])
# ------ check opt and vc-relax
if 'End final coordinates' in line:
final_flag = True
if 'CELL_PARAMETERS' in line:
vc_flag = True
# ------ delete last energy (after End final coordinates)
if final_flag and vc_flag:
energy_step.pop(-1)
# ------ list --> array, Ry/cell --> eV/atom
if not energy_step:
energy_step = None # if empty
print('#### ID: {0}: failed to parse energy_step\n'.format(
current_id), file=sys.stderr)
else:
energy_step = utility.ry2ev / rin.natot * np.array(energy_step,
dtype='float')
except Exception as e:
energy_step = None
print(e, '#### ID: {0}: failed to parse energy_step\n'.format(
current_id), file=sys.stderr)
# ---------- append energy_step
if energy_step_data.get(current_id) is None:
energy_step_data[current_id] = [] # initialize
energy_step_data[current_id].append(energy_step)
# ---------- save energy_step_data
pkl_data.save_energy_step(energy_step_data)
# ---------- return
return energy_step_data
def get_struc_step_qe(struc_step_data, current_id, work_path):
'''
get structure step data
# ---------- args
struc_step_data: (dict) the key is structure ID
struc_step_data[ID][stage][step]
struc_step_data[ID][0] <-- stage 1
struc_step_data[ID][1] <-- stage 2
'''
try:
struc_step = []
# ------ init struc from pwscf.in
_extract_struc_qe(work_path+rin.qe_infile, struc_step)
# ------ struc step from pwscf.out
_extract_struc_qe(work_path+rin.qe_outfile, struc_step)
# ------ delete last structure due to duplication
struc_step.pop(-1)
except Exception as e:
struc_step = None
print(e ,'#### ID: {0}: failed to parse in struc_step\n'.format(
current_id), file=sys.stderr)
# ---------- append struc_step_data
if struc_step_data.get(current_id) is None:
struc_step_data[current_id] = [] # initialize
struc_step_data[current_id].append(struc_step)
# ---------- save struc_step_data
pkl_data.save_struc_step(struc_step_data)
# ---------- return
return struc_step_data
def _extract_struc_qe(filename, struc_step):
# ---------- read a file
with open(filename, 'r') as f:
lines = f.readlines()
# ---------- extract struc
read_cell = False
read_coords = False
vc_flag = False # in case of vc-relax
for line in lines:
# ------ cell part
if read_cell:
lattice.append(line.split())
if len(lattice) == 3:
read_cell = False
lattice = np.array(lattice, dtype='float')
if 'CELL_PARAMETERS' in line:
read_cell = True
vc_flag = True
lattice = []
# ------ coords part
if read_coords:
lsplit = line.split()
species.append(lsplit[0])
coords.append(lsplit[1:])
if len(coords) == rin.natot:
read_coords = False
coords = np.array(coords, dtype='float')
# ---- gen struc
if not vc_flag: # empty lattice, use init lattice
lattice = struc_step[0].lattice
struc = Structure(lattice, species, coords)
struc_step.append(struc)
if 'ATOMIC_POSITIONS' in line:
read_coords = True
species = []
coords = []
def get_force_step_qe(force_step_data, current_id, work_path):
'''
get force step data in eV/angstrom
# ---------- args
force_step_data: (dict) the key is structure ID
force_step_data[ID][stage][step]
force_step_data[ID][0] <-- stage 1
force_step_data[ID][1] <-- stage 2
'''
try:
# ---------- read output file
with open(work_path+rin.qe_outfile, 'r') as f:
lines = f.readlines()
# ---------- get force step
force_step = []
read_force = False
final_flag = False # End final coordinates
vc_flag = False # in case of vc-relax
for line in lines:
if 'atom 1 type 1 force' in line:
read_force = True
force = []
if read_force:
force.append(line.split()[6:])
if len(force) == rin.natot:
read_force = False
force_step.append(utility.ry2ev / utility.bohr2ang * np.array(
force, dtype='float'))
# ------ check opt and vc-relax
if 'End final coordinates' in line:
final_flag = True
if 'CELL_PARAMETERS' in line:
vc_flag = True
# ------ delete last energy (after End final coordinates)
if final_flag and vc_flag:
force_step.pop(-1)
# ------ if empty
if len(force_step) == 0:
force_step = None
print('#### ID: {0}: failed to parse force_step\n'.format(
current_id), file=sys.stderr)
except Exception as e:
force_step = None
print(e, '#### ID: {0}: failed to parse in force_step\n'.format(
current_id), file=sys.stderr)
# ---------- append force_step
if force_step_data.get(current_id) is None:
force_step_data[current_id] = [] # initialize
force_step_data[current_id].append(force_step)
# ---------- save force_step_data
pkl_data.save_force_step(force_step_data)
# ---------- return
return force_step_data
def get_stress_step_qe(stress_step_data, current_id, work_path):
'''
get stress step data in eV/ang**3
# ---------- args
stress_step_data: (dict) the key is structure ID
stress_step_data[ID][stage][step]
stress_step_data[ID][0] <-- stage 1
stress_step_data[ID][1] <-- stage 2
'''
try:
# ---------- read output file
with open(work_path+rin.qe_outfile, 'r') as f:
lines = f.readlines()
# ---------- get stress step
stress_step = []
read_stress = False
final_flag = False # End final coordinates
vc_flag = False # in case of vc-relax
for line in lines:
if read_stress:
stress.append(line.split()[3:])
if len(stress) == 3:
read_stress = False
stress_step.append(utility.kbar2ev_ang3 * np.array(
stress, dtype='float'))
if 'total stress (Ry/bohr**3)' in line:
read_stress = True
stress = []
# ------ check opt and vc-relax
if 'End final coordinates' in line:
final_flag = True
if 'CELL_PARAMETERS' in line:
vc_flag = True
# ------ delete last energy (after End final coordinates)
if final_flag and vc_flag:
stress_step.pop(-1)
# ------ if empty
if len(stress_step) == 0:
stress_step = None
print('#### ID: {0}: failed to parse stress_step\n'.format(
current_id), file=sys.stderr)
except Exception as e:
stress_step = None
print(e, '#### ID: {0}: failed to parse in stress_step\n'.format(
current_id), file=sys.stderr)
# ---------- append stress_step
if stress_step_data.get(current_id) is None:
stress_step_data[current_id] = [] # initialize
stress_step_data[current_id].append(stress_step)
# ---------- save stress_step_data
pkl_data.save_stress_step(stress_step_data)
# ---------- return
return stress_step_data
| [
"pymatgen.core.Structure",
"numpy.array",
"numpy.isnan"
] | [((2478, 2494), 'numpy.isnan', 'np.isnan', (['energy'], {}), '(energy)\n', (2486, 2494), True, 'import numpy as np\n'), ((3957, 3993), 'numpy.array', 'np.array', (['energy_step'], {'dtype': '"""float"""'}), "(energy_step, dtype='float')\n", (3965, 3993), True, 'import numpy as np\n'), ((6166, 6198), 'numpy.array', 'np.array', (['lattice'], {'dtype': '"""float"""'}), "(lattice, dtype='float')\n", (6174, 6198), True, 'import numpy as np\n'), ((6583, 6614), 'numpy.array', 'np.array', (['coords'], {'dtype': '"""float"""'}), "(coords, dtype='float')\n", (6591, 6614), True, 'import numpy as np\n'), ((6793, 6828), 'pymatgen.core.Structure', 'Structure', (['lattice', 'species', 'coords'], {}), '(lattice, species, coords)\n', (6802, 6828), False, 'from pymatgen.core import Structure\n'), ((7997, 8027), 'numpy.array', 'np.array', (['force'], {'dtype': '"""float"""'}), "(force, dtype='float')\n", (8005, 8027), True, 'import numpy as np\n'), ((9976, 10007), 'numpy.array', 'np.array', (['stress'], {'dtype': '"""float"""'}), "(stress, dtype='float')\n", (9984, 10007), True, 'import numpy as np\n')] |
import src.sudoku_solver as sudoku_solver
from src.sudoku import Sudoku
correct_sudoku = Sudoku([[9, 5, 7, 6, 1, 3, 2, 8, 4], [4, 8, 3, 2, 5, 7, 1, 9, 6], [6, 1, 2, 8, 4, 9, 5, 3, 7],
[1, 7, 8, 3, 6, 4, 9, 5, 2], [5, 2, 4, 9, 7, 1, 3, 6, 8], [3, 6, 9, 5, 2, 8, 7, 4, 1],
[8, 4, 5, 7, 9, 2, 6, 1, 3], [2, 9, 1, 4, 3, 6, 8, 7, 5], [7, 3, 6, 1, 8, 5, 4, 2, 9]])
starting_sudoku = Sudoku([[0, 0, 0, 0, 0, 0, 2, 0, 0], [0, 8, 0, 0, 0, 7, 0, 9, 0], [6, 0, 2, 0, 0, 0, 5, 0, 0],
[0, 7, 0, 0, 6, 0, 0, 0, 0], [0, 0, 0, 9, 0, 1, 0, 0, 0], [0, 0, 0, 0, 2, 0, 0, 4, 0],
[0, 0, 5, 0, 0, 0, 6, 0, 3], [0, 9, 0, 4, 0, 0, 0, 7, 0], [0, 0, 6, 0, 0, 0, 0, 0, 0]])
starting_sudoku_current_cell_test = Sudoku([[1, 3, 4, 5, 6, 7, 2, 0, 0], [0, 8, 0, 0, 0, 7, 0, 9, 0],
[6, 0, 2, 0, 0, 0, 5, 0, 0],
[0, 7, 0, 0, 6, 0, 0, 0, 0], [0, 0, 0, 9, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 2, 0, 0, 4, 0],
[0, 0, 5, 0, 0, 0, 6, 0, 3], [0, 9, 0, 4, 0, 0, 0, 7, 0],
[0, 0, 6, 0, 0, 0, 0, 0, 0]])
starting_sudoku_current_cell_test2 = Sudoku([[1, 1, 1, 1, 1, 1, 2, 1, 1], [1, 8, 1, 1, 1, 7, 1, 9, 1],
[6, 0, 2, 0, 0, 0, 5, 0, 0], [0, 7, 0, 0, 6, 0, 0, 0, 0],
[0, 0, 0, 9, 0, 1, 0, 0, 0], [0, 0, 0, 0, 2, 0, 0, 4, 0],
[0, 0, 5, 0, 0, 0, 6, 0, 3], [0, 9, 0, 4, 0, 0, 0, 7, 0],
[0, 0, 6, 0, 0, 0, 0, 0, 0]])
| [
"src.sudoku.Sudoku"
] | [((90, 371), 'src.sudoku.Sudoku', 'Sudoku', (['[[9, 5, 7, 6, 1, 3, 2, 8, 4], [4, 8, 3, 2, 5, 7, 1, 9, 6], [6, 1, 2, 8, 4, \n 9, 5, 3, 7], [1, 7, 8, 3, 6, 4, 9, 5, 2], [5, 2, 4, 9, 7, 1, 3, 6, 8],\n [3, 6, 9, 5, 2, 8, 7, 4, 1], [8, 4, 5, 7, 9, 2, 6, 1, 3], [2, 9, 1, 4, \n 3, 6, 8, 7, 5], [7, 3, 6, 1, 8, 5, 4, 2, 9]]'], {}), '([[9, 5, 7, 6, 1, 3, 2, 8, 4], [4, 8, 3, 2, 5, 7, 1, 9, 6], [6, 1, 2,\n 8, 4, 9, 5, 3, 7], [1, 7, 8, 3, 6, 4, 9, 5, 2], [5, 2, 4, 9, 7, 1, 3, 6,\n 8], [3, 6, 9, 5, 2, 8, 7, 4, 1], [8, 4, 5, 7, 9, 2, 6, 1, 3], [2, 9, 1,\n 4, 3, 6, 8, 7, 5], [7, 3, 6, 1, 8, 5, 4, 2, 9]])\n', (96, 371), False, 'from src.sudoku import Sudoku\n'), ((429, 710), 'src.sudoku.Sudoku', 'Sudoku', (['[[0, 0, 0, 0, 0, 0, 2, 0, 0], [0, 8, 0, 0, 0, 7, 0, 9, 0], [6, 0, 2, 0, 0, \n 0, 5, 0, 0], [0, 7, 0, 0, 6, 0, 0, 0, 0], [0, 0, 0, 9, 0, 1, 0, 0, 0],\n [0, 0, 0, 0, 2, 0, 0, 4, 0], [0, 0, 5, 0, 0, 0, 6, 0, 3], [0, 9, 0, 4, \n 0, 0, 0, 7, 0], [0, 0, 6, 0, 0, 0, 0, 0, 0]]'], {}), '([[0, 0, 0, 0, 0, 0, 2, 0, 0], [0, 8, 0, 0, 0, 7, 0, 9, 0], [6, 0, 2,\n 0, 0, 0, 5, 0, 0], [0, 7, 0, 0, 6, 0, 0, 0, 0], [0, 0, 0, 9, 0, 1, 0, 0,\n 0], [0, 0, 0, 0, 2, 0, 0, 4, 0], [0, 0, 5, 0, 0, 0, 6, 0, 3], [0, 9, 0,\n 4, 0, 0, 0, 7, 0], [0, 0, 6, 0, 0, 0, 0, 0, 0]])\n', (435, 710), False, 'from src.sudoku import Sudoku\n'), ((788, 1069), 'src.sudoku.Sudoku', 'Sudoku', (['[[1, 3, 4, 5, 6, 7, 2, 0, 0], [0, 8, 0, 0, 0, 7, 0, 9, 0], [6, 0, 2, 0, 0, \n 0, 5, 0, 0], [0, 7, 0, 0, 6, 0, 0, 0, 0], [0, 0, 0, 9, 0, 1, 0, 0, 0],\n [0, 0, 0, 0, 2, 0, 0, 4, 0], [0, 0, 5, 0, 0, 0, 6, 0, 3], [0, 9, 0, 4, \n 0, 0, 0, 7, 0], [0, 0, 6, 0, 0, 0, 0, 0, 0]]'], {}), '([[1, 3, 4, 5, 6, 7, 2, 0, 0], [0, 8, 0, 0, 0, 7, 0, 9, 0], [6, 0, 2,\n 0, 0, 0, 5, 0, 0], [0, 7, 0, 0, 6, 0, 0, 0, 0], [0, 0, 0, 9, 0, 1, 0, 0,\n 0], [0, 0, 0, 0, 2, 0, 0, 4, 0], [0, 0, 5, 0, 0, 0, 6, 0, 3], [0, 9, 0,\n 4, 0, 0, 0, 7, 0], [0, 0, 6, 0, 0, 0, 0, 0, 0]])\n', (794, 1069), False, 'from src.sudoku import Sudoku\n'), ((1316, 1597), 
'src.sudoku.Sudoku', 'Sudoku', (['[[1, 1, 1, 1, 1, 1, 2, 1, 1], [1, 8, 1, 1, 1, 7, 1, 9, 1], [6, 0, 2, 0, 0, \n 0, 5, 0, 0], [0, 7, 0, 0, 6, 0, 0, 0, 0], [0, 0, 0, 9, 0, 1, 0, 0, 0],\n [0, 0, 0, 0, 2, 0, 0, 4, 0], [0, 0, 5, 0, 0, 0, 6, 0, 3], [0, 9, 0, 4, \n 0, 0, 0, 7, 0], [0, 0, 6, 0, 0, 0, 0, 0, 0]]'], {}), '([[1, 1, 1, 1, 1, 1, 2, 1, 1], [1, 8, 1, 1, 1, 7, 1, 9, 1], [6, 0, 2,\n 0, 0, 0, 5, 0, 0], [0, 7, 0, 0, 6, 0, 0, 0, 0], [0, 0, 0, 9, 0, 1, 0, 0,\n 0], [0, 0, 0, 0, 2, 0, 0, 4, 0], [0, 0, 5, 0, 0, 0, 6, 0, 3], [0, 9, 0,\n 4, 0, 0, 0, 7, 0], [0, 0, 6, 0, 0, 0, 0, 0, 0]])\n', (1322, 1597), False, 'from src.sudoku import Sudoku\n')] |
"""Serializers Alquileres"""
#Django REST Framework
from rest_framework import serializers
#Model
from maquinaria.alquileres.models import Alquiler
from maquinaria.maquinas.models import Maquina
class AlquilerModelSerializer(serializers.ModelSerializer):
"""Modelo Serializer de Cliente"""
class Meta:
"""Clase Meta"""
model = Alquiler
fields = (
'id', 'cliente',
'maquina', 'fecha_inicio',
'fecha_final', 'precio_alquiler'
)
class Update(serializers.Serializer):
def save(self):
maquina=Maquina.objects.get(id=1)
maquina.estado=False
maquina.save()
| [
"maquinaria.maquinas.models.Maquina.objects.get"
] | [((520, 545), 'maquinaria.maquinas.models.Maquina.objects.get', 'Maquina.objects.get', ([], {'id': '(1)'}), '(id=1)\n', (539, 545), False, 'from maquinaria.maquinas.models import Maquina\n')] |
import numpy as np
def projective(coords):
""" Convert 2D cartesian coordinates to homogeneus/projective. """
num = np.shape(coords)[0]
w = np.array([[1], ]*num)
return np.append(coords, w, axis=1)
def cartesian(coords):
""" Convert 2D homogeneus/projective coordinates to cartesian. """
return coords[:, :2]
def translate(x, y):
""" Return translation matrix. """
return np.array([
[1, 0, x],
[0, 1, y],
[0, 0, 1],
])
def rotate(a):
""" Return rotation matrix. """
return np.array([
[np.cos(a), -np.sin(a), 0],
[np.sin(a), np.cos(a), 0],
[0, 0, 1]
])
def transform_list(coords, matrix):
    """Apply a single 3x3 transform to a batch of row-vector coordinates."""
    return (matrix @ coords.T).T
def transform_apply(coords, transforms):
    """Run cartesian coords through each transform in order.

    Lifts to homogeneous coordinates, applies every matrix in sequence,
    then drops the homogeneous component again.
    """
    current = projective(coords)
    for matrix in transforms:
        current = transform_list(current, matrix)
    return cartesian(current)
| [
"numpy.append",
"numpy.array",
"numpy.cos",
"numpy.sin",
"numpy.shape"
] | [((154, 175), 'numpy.array', 'np.array', (['([[1]] * num)'], {}), '([[1]] * num)\n', (162, 175), True, 'import numpy as np\n'), ((188, 216), 'numpy.append', 'np.append', (['coords', 'w'], {'axis': '(1)'}), '(coords, w, axis=1)\n', (197, 216), True, 'import numpy as np\n'), ((411, 454), 'numpy.array', 'np.array', (['[[1, 0, x], [0, 1, y], [0, 0, 1]]'], {}), '([[1, 0, x], [0, 1, y], [0, 0, 1]])\n', (419, 454), True, 'import numpy as np\n'), ((126, 142), 'numpy.shape', 'np.shape', (['coords'], {}), '(coords)\n', (134, 142), True, 'import numpy as np\n'), ((571, 580), 'numpy.cos', 'np.cos', (['a'], {}), '(a)\n', (577, 580), True, 'import numpy as np\n'), ((607, 616), 'numpy.sin', 'np.sin', (['a'], {}), '(a)\n', (613, 616), True, 'import numpy as np\n'), ((618, 627), 'numpy.cos', 'np.cos', (['a'], {}), '(a)\n', (624, 627), True, 'import numpy as np\n'), ((583, 592), 'numpy.sin', 'np.sin', (['a'], {}), '(a)\n', (589, 592), True, 'import numpy as np\n')] |
from django.db import models
from django.utils import timezone
import os
#BLOGS
class BlogPost(models.Model):
    """A blog entry: author info, cover image, teaser text and full body."""
    author=models.CharField(max_length=200)
    role=models.CharField(max_length=200)
    # Cover image, stored under MEDIA_ROOT/blogMedia/.
    image=models.ImageField(upload_to='blogMedia/')
    title=models.CharField(max_length=200)
    # displayText: presumably the short teaser shown in listings, with the
    # full content in body -- confirm against the templates.
    displayText=models.TextField()
    body=models.TextField()
    # Defaults to creation time but stays editable (unlike auto_now_add).
    date=models.DateTimeField(default=timezone.now)
    def delete(self):
        """Delete the cover image file from storage, then the row itself."""
        # NOTE(review): queryset bulk deletes bypass this method.
        self.image.storage.delete(str(self.image))
        super(BlogPost, self).delete()
    def __str__(self):
        return self.title+' | '+self.author
class BlogPostComment(models.Model):
    """A reader comment attached to a BlogPost, with a moderation flag."""
    post = models.ForeignKey(BlogPost, related_name='comments',on_delete=models.CASCADE)
    author = models.CharField(max_length=200)
    comment = models.TextField()
    date = models.DateTimeField(auto_now_add=True)
    # Starts False; presumably gates public display -- confirm in the views.
    approved =models.BooleanField(default=False)
    def __str__(self):
        return str(self.post.title)+' | '+str(self.author)+': '+str(self.comment)
#News
class New(models.Model):
    """A news item with image, headline, source attribution and body."""
    image=models.ImageField(upload_to='newsMedia')
    title = models.CharField(max_length=200)
    subtitle = models.CharField(max_length=200)
    # Where the news item came from (free text, up to 500 chars).
    source = models.CharField(max_length=500)
    date=models.DateTimeField(auto_now_add=True)
    body=models.TextField()
    def delete(self):
        """Delete the image file from storage, then the database row."""
        # NOTE(review): queryset bulk deletes bypass this method.
        self.image.storage.delete(str(self.image))
        super(New, self).delete()
    def __str__(self):
        return str(self.title)+' | '+str(self.date)
#EVENT
class Event(models.Model):
    """An event page: banner image plus up to four optional gallery images."""
    name = models.CharField(max_length=200)
    date = models.DateTimeField(auto_now_add=True)
    title = models.CharField(max_length=200)
    bannerimage=models.ImageField(upload_to='eventMedia')
    # image1..image4 are optional gallery slots (blank allowed).
    image1=models.ImageField(upload_to='eventMedia', blank=True)
    image2=models.ImageField(upload_to='eventMedia', blank=True)
    image3 = models.ImageField(upload_to='eventMedia', blank=True)
    image4 = models.ImageField(upload_to='eventMedia', blank=True)
    body = models.TextField()
    def delete(self):
        """Delete every stored image file, then the database row."""
        # NOTE(review): blank image fields stringify to '' -- calling
        # storage.delete('') on them may misbehave depending on the storage
        # backend; confirm.
        self.bannerimage.storage.delete(str(self.bannerimage))
        self.image1.storage.delete(str(self.image1))
        self.image2.storage.delete(str(self.image2))
        self.image3.storage.delete(str(self.image3))
        self.image4.storage.delete(str(self.image4))
        super(Event, self).delete()
    def __str__(self):
        return str(self.name)
"django.db.models.TextField",
"django.db.models.ForeignKey",
"django.db.models.DateTimeField",
"django.db.models.BooleanField",
"django.db.models.ImageField",
"django.db.models.CharField"
] | [((125, 157), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (141, 157), False, 'from django.db import models\n'), ((167, 199), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (183, 199), False, 'from django.db import models\n'), ((210, 251), 'django.db.models.ImageField', 'models.ImageField', ([], {'upload_to': '"""blogMedia/"""'}), "(upload_to='blogMedia/')\n", (227, 251), False, 'from django.db import models\n'), ((262, 294), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (278, 294), False, 'from django.db import models\n'), ((311, 329), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (327, 329), False, 'from django.db import models\n'), ((339, 357), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (355, 357), False, 'from django.db import models\n'), ((367, 409), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'default': 'timezone.now'}), '(default=timezone.now)\n', (387, 409), False, 'from django.db import models\n'), ((640, 718), 'django.db.models.ForeignKey', 'models.ForeignKey', (['BlogPost'], {'related_name': '"""comments"""', 'on_delete': 'models.CASCADE'}), "(BlogPost, related_name='comments', on_delete=models.CASCADE)\n", (657, 718), False, 'from django.db import models\n'), ((731, 763), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (747, 763), False, 'from django.db import models\n'), ((778, 796), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (794, 796), False, 'from django.db import models\n'), ((808, 847), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (828, 847), False, 'from django.db import models\n'), ((862, 896), 'django.db.models.BooleanField', 'models.BooleanField', ([], 
{'default': '(False)'}), '(default=False)\n', (881, 896), False, 'from django.db import models\n'), ((1052, 1092), 'django.db.models.ImageField', 'models.ImageField', ([], {'upload_to': '"""newsMedia"""'}), "(upload_to='newsMedia')\n", (1069, 1092), False, 'from django.db import models\n'), ((1105, 1137), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (1121, 1137), False, 'from django.db import models\n'), ((1153, 1185), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (1169, 1185), False, 'from django.db import models\n'), ((1199, 1231), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(500)'}), '(max_length=500)\n', (1215, 1231), False, 'from django.db import models\n'), ((1241, 1280), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (1261, 1280), False, 'from django.db import models\n'), ((1290, 1308), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (1306, 1308), False, 'from django.db import models\n'), ((1542, 1574), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (1558, 1574), False, 'from django.db import models\n'), ((1586, 1625), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (1606, 1625), False, 'from django.db import models\n'), ((1638, 1670), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (1654, 1670), False, 'from django.db import models\n'), ((1687, 1728), 'django.db.models.ImageField', 'models.ImageField', ([], {'upload_to': '"""eventMedia"""'}), "(upload_to='eventMedia')\n", (1704, 1728), False, 'from django.db import models\n'), ((1740, 1793), 'django.db.models.ImageField', 'models.ImageField', ([], {'upload_to': '"""eventMedia"""', 'blank': '(True)'}), 
"(upload_to='eventMedia', blank=True)\n", (1757, 1793), False, 'from django.db import models\n'), ((1805, 1858), 'django.db.models.ImageField', 'models.ImageField', ([], {'upload_to': '"""eventMedia"""', 'blank': '(True)'}), "(upload_to='eventMedia', blank=True)\n", (1822, 1858), False, 'from django.db import models\n'), ((1872, 1925), 'django.db.models.ImageField', 'models.ImageField', ([], {'upload_to': '"""eventMedia"""', 'blank': '(True)'}), "(upload_to='eventMedia', blank=True)\n", (1889, 1925), False, 'from django.db import models\n'), ((1939, 1992), 'django.db.models.ImageField', 'models.ImageField', ([], {'upload_to': '"""eventMedia"""', 'blank': '(True)'}), "(upload_to='eventMedia', blank=True)\n", (1956, 1992), False, 'from django.db import models\n'), ((2004, 2022), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (2020, 2022), False, 'from django.db import models\n')] |
import sys
import typing
def main() -> typing.NoReturn:
    """Read a count line and then integers from stdin; print their minimum."""
    n = int(input())  # declared element count; only consumed, never used
    values = [int(token) for token in sys.stdin.read().split()]
    print(min(values))
main()  # executed unconditionally on import; no __main__ guard
| [
"sys.stdin.read"
] | [((106, 122), 'sys.stdin.read', 'sys.stdin.read', ([], {}), '()\n', (120, 122), False, 'import sys\n')] |
#!/usr/bin/env python
from setuptools import setup
# Version is derived from SCM metadata (git tags) via setuptools_scm.
setup(
    name='django-s3file',
    use_scm_version=True,
)
| [
"setuptools.setup"
] | [((52, 101), 'setuptools.setup', 'setup', ([], {'name': '"""django-s3file"""', 'use_scm_version': '(True)'}), "(name='django-s3file', use_scm_version=True)\n", (57, 101), False, 'from setuptools import setup\n')] |
import argparse
import os
import math
# import time
import numpy as np
import cv2
import matplotlib.pyplot as plt
import tensorflow as tf
import progressbar
from waymo_open_dataset.utils import range_image_utils
from waymo_open_dataset.utils import transform_utils
from waymo_open_dataset.utils import test_utils
from waymo_open_dataset.utils import box_utils
from waymo_open_dataset import dataset_pb2 as open_dataset
from adapter_lib import *
import pdb
############################Config###########################################
# Path to the Waymo dataset folder; every .tfrecord file found in the chosen
# sub-folders of this directory will be converted.
DATA_PATH = '/media/trail/harddrive/datasets/Waymo/original/validation'
# Root folder where the generated KITTI-style dataset is written.
KITTI_PATH = '/media/trail/harddrive/datasets/Waymo/waymo/validation'
# Location filter: when True, only frames recorded at one of the
# LOCATION_NAME locations are converted.
LOCATION_FILTER = False
LOCATION_NAME = ['location_sf']
# Number of digits used for zero-padded output file names.
INDEX_LENGTH = 15
# Output image file format / extension.
IMAGE_FORMAT = 'png'
# Derived output sub-folders -- do not change.
LABEL_PATH = KITTI_PATH + '/label_0'
LABEL_ALL_PATH = KITTI_PATH + '/label_all'
IMAGE_PATH = KITTI_PATH + '/image_0'
CALIB_PATH = KITTI_PATH + '/calib'
LIDAR_PATH = KITTI_PATH + '/velodyne'
IMG_CALIB_PATH = KITTI_PATH + '/img_calib'
###############################################################################
class Adapter:
def __init__(self):
self.__lidar_list = ['_FRONT', '_FRONT_RIGHT',
'_FRONT_LEFT', '_SIDE_RIGHT', '_SIDE_LEFT']
self.__type_list = ['UNKNOWN', 'VEHICLE',
'PEDESTRIAN', 'SIGN', 'CYCLIST']
self.__file_names = []
self.T_front_cam_to_ref = []
self.T_vehicle_to_front_cam = []
    def cvt(self, args, folder, start_ind):
        """Convert one Waymo segment folder to KITTI format.

        Args:
            args: parsed CLI namespace (uses camera_type, keyframe, test).
            folder: sub-folder of DATA_PATH holding .tfrecord files.
            start_ind: first output frame index (numbering chains across
                folders).
        Returns:
            The next unused frame index, so the caller can continue numbering.
        """
        self.start_ind = start_ind
        self.get_file_names(DATA_PATH + '/' + folder)
        print("Converting ..." + folder)
        self.create_folder(args.camera_type)
        # Progress bar sized by the number of .tfrecord files in this folder.
        bar = progressbar.ProgressBar(maxval=len(self.__file_names) + 1,
                                      widgets=[progressbar.Percentage(), ' ',
                                               progressbar.Bar(
                                                   marker='>', left='[', right=']'), ' ',
                                               progressbar.ETA()])
        tf.enable_eager_execution()
        file_num = 1
        frame_num = 0
        frame_name = self.start_ind
        label_exists = False
        print("start converting ...")
        bar.start()
        for file_idx, file_name in enumerate(self.__file_names):
            print('File {}/{}'.format(file_idx, len(self.__file_names)))
            dataset = tf.data.TFRecordDataset(file_name, compression_type='')
            for data in dataset:
                frame = open_dataset.Frame()
                frame.ParseFromString(bytearray(data.numpy()))
                # Keep only every args.keyframe-th frame.
                if (frame_num % args.keyframe) == 0:
                    if LOCATION_FILTER == True and frame.context.stats.location not in LOCATION_NAME:
                        continue
                    if args.test == False:
                        # Probe whether this frame has labels for the camera.
                        label_exists = self.save_label(frame, frame_name, args.camera_type, False, True)
                    # Normal mode skips unlabeled frames; test mode skips
                    # labeled ones (label_exists stays False in test mode).
                    if args.test == label_exists:
                        frame_num += 1
                        continue
                    self.save_calib(frame, frame_name)
                    self.save_label(
                        frame, frame_name, args.camera_type)
                    self.save_image(frame, frame_name, args.camera_type)
                    self.save_lidar(frame, frame_name)
                    self.save_image_calib(frame, frame_name)
                    # print("image:{}\ncalib:{}\nlidar:{}\nlabel:{}\n".format(str(s1-e1),str(s2-e2),str(s3-e3),str(s4-e4)))
                    frame_name += 1
                frame_num += 1
            bar.update(file_num)
            file_num += 1
        bar.finish()
        print("\nfinished ...")
        return frame_name
def save_image(self, frame, frame_num, cam_type):
""" parse and save the images in png format
:param frame: open dataset frame proto
:param frame_num: the current frame number
:return:
"""
for img in frame.images:
if cam_type == 'all' or cam_type == str(img.name - 1):
img_path = IMAGE_PATH + '/' + \
str(frame_num).zfill(INDEX_LENGTH) + '.' + IMAGE_FORMAT
img = cv2.imdecode(np.frombuffer(
img.image, np.uint8), cv2.IMREAD_COLOR)
rgb_img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
plt.imsave(img_path, rgb_img, format=IMAGE_FORMAT)
    def save_calib(self, frame, frame_num, kitti_format=True):
        """Parse per-camera calibration and write a KITTI-style calib file.

        Also caches T_front_cam_to_ref and T_vehicle_to_front_cam on the
        instance for later use by save_label().

        :param frame: open dataset frame proto
        :param frame_num: index used to build the zero-padded file name
        :param kitti_format: NOTE(review) -- currently unused.
        """
        fp_calib = open(CALIB_PATH + '/' +
                        str(frame_num).zfill(INDEX_LENGTH) + '.txt', 'w+')
        # Axis permutation from the Waymo front-camera frame to the KITTI
        # reference-camera convention.
        self.T_front_cam_to_ref = np.array([
            [0.0, -1.0, 0.0],
            [0.0, 0.0, -1.0],
            [1.0, 0.0, 0.0]
        ])
        camera_calib = []
        R0_rect = ["%e" % i for i in np.eye(3).flatten()]
        Tr_velo_to_cam = []
        calib_context = ''
        for camera in frame.context.camera_calibrations:
            # inv(extrinsic) maps vehicle -> camera; pre-multiplying by the
            # axis permutation yields vehicle -> KITTI reference axes.
            tmp = np.array(camera.extrinsic.transform).reshape(4, 4)
            tmp = self.cart_to_homo(self.T_front_cam_to_ref) @ np.linalg.inv(tmp)
            Tr_velo_to_cam.append(["%e" % i for i in tmp[:3,:].reshape(12)])
        for cam in frame.context.camera_calibrations:
            # Build the 3x4 projection matrix P from the pinhole intrinsics;
            # the first four intrinsic entries look like [fx, fy, cx, cy] --
            # confirm against the Waymo calibration docs.
            tmp = np.zeros((3, 4))
            tmp[0, 0] = cam.intrinsic[0]
            tmp[1, 1] = cam.intrinsic[1]
            tmp[0, 2] = cam.intrinsic[2]
            tmp[1, 2] = cam.intrinsic[3]
            tmp[2, 2] = 1
            tmp = list(tmp.reshape(12))
            tmp = ["%e" % i for i in tmp]
            camera_calib.append(tmp)
        T_front_cam_to_vehicle = np.array(frame.context.camera_calibrations[0].extrinsic.transform).reshape(4, 4)
        self.T_vehicle_to_front_cam = np.linalg.inv(T_front_cam_to_vehicle)
        for i in range(5):
            calib_context += "P" + str(i) + ": " + \
                " ".join(camera_calib[i]) + '\n'
        calib_context += "R0_rect" + ": " + " ".join(R0_rect) + '\n'
        for i in range(5):
            calib_context += "Tr_velo_to_cam_" + \
                str(i) + ": " + " ".join(Tr_velo_to_cam[i]) + '\n'
        calib_context += "timestamp_micros: " + \
            str(frame.timestamp_micros) + '\n'
        calib_context += "context_name: " + str(frame.context.name) + '\n'
        fp_calib.write(calib_context)
        fp_calib.close()
def save_lidar(self, frame, frame_num):
""" parse and save the lidar data in psd format
:param frame: open dataset frame proto
:param frame_num: the current frame number
:return:
"""
range_images, range_image_top_pose = self.parse_range_image_and_camera_projection(
frame)
points, intensity = self.convert_range_image_to_point_cloud(
frame,
range_images,
range_image_top_pose)
points_all = np.concatenate(points, axis=0)
intensity_all = np.concatenate(intensity, axis=0)
point_cloud = np.column_stack((points_all, intensity_all))
pc_path = LIDAR_PATH + '/' + \
str(frame_num).zfill(INDEX_LENGTH) + '.bin'
point_cloud.tofile(pc_path)
def save_label(self, frame, frame_num, cam_type, kitti_format=False, check_label_exists = False):
""" parse and save the label data in .txt format
:param frame: open dataset frame proto
:param frame_num: the current frame number
:return:
"""
# get point cloud in the frame
range_images, range_image_top_pose = self.parse_range_image_and_camera_projection(
frame)
points, intensity = self.convert_range_image_to_point_cloud(
frame,
range_images,
range_image_top_pose)
points_all = tf.convert_to_tensor(
np.concatenate(points, axis=0), dtype=np.float32)
# preprocess bounding box data
id_to_bbox = dict()
id_to_name = dict()
for labels in frame.projected_lidar_labels:
name = labels.name
for label in labels.labels:
bbox = [label.box.center_x - label.box.length / 2, label.box.center_y - label.box.width / 2,
label.box.center_x + label.box.length / 2, label.box.center_y + label.box.width / 2]
id_to_bbox[label.id] = bbox
id_to_name[label.id] = name - 1
Tr_velo_to_cam = []
recorded_label = []
label_lines = ''
label_all_lines = ''
"""
if kitti_format:
for camera in frame.context.camera_calibrations:
tmp = np.array(camera.extrinsic.transform).reshape(4, 4)
tmp = np.linalg.inv(tmp)
axes_transformation = np.array([[0, -1, 0, 0],
[0, 0, -1, 0],
[1, 0, 0, 0],
[0, 0, 0, 1]])
tmp = np.matmul(axes_transformation, tmp)
Tr_velo_to_cam.append(tmp)
"""
for obj in frame.laser_labels:
# caculate bounding box
bounding_box = None
name = None
id = obj.id
for lidar in self.__lidar_list:
if id + lidar in id_to_bbox:
bounding_box = id_to_bbox.get(id + lidar)
name = str(id_to_name.get(id + lidar))
break
if bounding_box == None or name == None:
continue
box = tf.convert_to_tensor(
[obj.box.center_x, obj.box.center_y, obj.box.center_z, obj.box.length, obj.box.width, obj.box.height, obj.box.heading], dtype=np.float32)
box = tf.reshape(box, (1, 7))
num_points = box_utils.compute_num_points_in_box_3d(
points_all, box)
num_points = num_points.numpy()[0]
detection_difficulty = obj.detection_difficulty_level
my_type = self.__type_list[obj.type]
truncated = 0
occluded = 0
height = obj.box.height
width = obj.box.width
length = obj.box.length
x = obj.box.center_x
y = obj.box.center_y
z = obj.box.center_z - height/2
if check_label_exists == False:
pt_ref = self.cart_to_homo(self.T_front_cam_to_ref) @ self.T_vehicle_to_front_cam @ np.array([x,y,z,1]).reshape((4,1))
x, y, z, _ = pt_ref.flatten().tolist()
rotation_y = -obj.box.heading - np.pi/2
beta = math.atan2(x, z)
alpha = (rotation_y + beta - math.pi / 2) % (2 * math.pi)
# save the labels
line = my_type + ' {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {}\n'.format(round(truncated, 2),
occluded,
round(
alpha, 2),
round(
bounding_box[0], 2),
round(
bounding_box[1], 2),
round(
bounding_box[2], 2),
round(
bounding_box[3], 2),
round(
height, 2),
round(
width, 2),
round(
length, 2),
round(
x, 2),
round(
y, 2),
round(
z, 2),
round(
rotation_y, 2),
num_points,
detection_difficulty)
line_all = line[:-1] + ' ' + name + '\n'
# store the label
label_all_lines += line_all
if (name == cam_type):
label_lines += line
recorded_label.append(line)
if len(recorded_label) == 0:
return False
else:
fp_label_all = open(LABEL_ALL_PATH + '/' +
str(frame_num).zfill(INDEX_LENGTH) + '.txt', 'w+')
fp_label = open(LABEL_PATH + '/' +
str(frame_num).zfill(INDEX_LENGTH) + '.txt', 'w+')
fp_label.write(label_lines)
fp_label.close()
fp_label_all.write(label_all_lines)
fp_label_all.close()
return True
def save_image_calib(self, frame, frame_num):
fp_image_calib = open(IMG_CALIB_PATH + '/' +
str(frame_num).zfill(INDEX_LENGTH) + '.txt', 'w+')
camera_calib = []
pose = []
velocity = []
timestamp = []
shutter = []
trigger_time = []
readout_done_time = []
calib_context = ''
for camera in frame.images:
tmp = np.array(camera.pose.transform).reshape((16,))
pose.append(["%e" % i for i in tmp])
tmp = np.zeros(6)
tmp[0] = camera.velocity.v_x
tmp[1] = camera.velocity.v_y
tmp[2] = camera.velocity.v_z
tmp[3] = camera.velocity.w_x
tmp[4] = camera.velocity.w_y
tmp[5] = camera.velocity.w_z
velocity.append(["%e" % i for i in tmp])
timestamp.append(camera.pose_timestamp)
shutter.append(camera.shutter)
trigger_time.append(camera.camera_trigger_time)
readout_done_time.append(camera.camera_readout_done_time)
for i in range(5):
calib_context += "Pose_" + str(i) + ": " + \
" ".join(pose[i]) + '\n'
for i in range(5):
calib_context += "Velocity_" + str(i) + ": " + \
" ".join(velocity[i]) + '\n'
for i in range(5):
calib_context += "Timestamp_" + str(i) + ": " + \
" ".join(velocity[i]) + '\n'
for i in range(5):
calib_context += "Shutter_" + str(i) + ": " + \
" ".join(velocity[i]) + '\n'
for i in range(5):
calib_context += "Trigger_" + str(i) + ": " + \
" ".join(velocity[i]) + '\n'
for i in range(5):
calib_context += "Readout_" + str(i) + ": " + \
" ".join(velocity[i]) + '\n'
fp_image_calib.write(calib_context)
fp_image_calib.close()
def get_file_names(self, folder):
for i in os.listdir(folder):
if i.split('.')[-1] == 'tfrecord':
self.__file_names.append(folder + '/' + i)
def cart_to_homo(self, mat):
ret = np.eye(4)
if mat.shape == (3, 3):
ret[:3, :3] = mat
elif mat.shape == (3, 4):
ret[:3, :] = mat
else:
raise ValueError(mat.shape)
return ret
def create_folder(self, cam_type):
if not os.path.exists(KITTI_PATH):
os.mkdir(KITTI_PATH)
if not os.path.exists(CALIB_PATH):
os.mkdir(CALIB_PATH)
if not os.path.exists(LIDAR_PATH):
os.mkdir(LIDAR_PATH)
if not os.path.exists(LABEL_ALL_PATH):
os.mkdir(LABEL_ALL_PATH)
if not os.path.exists(IMG_CALIB_PATH):
os.mkdir(IMG_CALIB_PATH)
if not os.path.exists(IMAGE_PATH):
os.mkdir(IMAGE_PATH)
if not os.path.exists(LABEL_PATH):
os.mkdir(LABEL_PATH)
def extract_intensity(self, frame, range_images, lidar_num):
""" extract the intensity from the original range image
:param frame: open dataset frame proto
:param frame_num: the current frame number
:param lidar_num: the number of current lidar
:return:
"""
intensity_0 = np.array(range_images[lidar_num][0].data).reshape(-1, 4)
intensity_0 = intensity_0[:, 1]
intensity_1 = np.array(range_images[lidar_num][
1].data).reshape(-1, 4)[:, 1]
return intensity_0, intensity_1
def image_show(self, data, name, layout, cmap=None):
"""Show an image."""
plt.subplot(*layout)
plt.imshow(tf.image.decode_jpeg(data), cmap=cmap)
plt.title(name)
plt.grid(False)
plt.axis('off')
    def parse_range_image_and_camera_projection(self, frame):
        """Parse range images and camera projections given a frame.

        Camera projections are currently disabled (commented out); only the
        range images and the top-lidar pose are decoded.

        Args:
            frame: open dataset frame proto
        Returns:
            range_images: A dict of {laser_name,
            [range_image_first_return, range_image_second_return]}.
            range_image_top_pose: range image pixel pose for top lidar.
            NOTE(review): range_image_top_pose is only assigned when the TOP
            laser carries a first-return image; otherwise the final return
            raises NameError -- confirm every frame has TOP data.
        """
        self.__range_images = {}
        # camera_projections = {}
        # range_image_top_pose = None
        for laser in frame.lasers:
            if len(laser.ri_return1.range_image_compressed) > 0:
                # First return: ZLIB-decompress, then parse the MatrixFloat.
                range_image_str_tensor = tf.decode_compressed(
                    laser.ri_return1.range_image_compressed, 'ZLIB')
                ri = open_dataset.MatrixFloat()
                ri.ParseFromString(bytearray(range_image_str_tensor.numpy()))
                self.__range_images[laser.name] = [ri]
                if laser.name == open_dataset.LaserName.TOP:
                    # Per-pixel pose is only available for the top lidar.
                    range_image_top_pose_str_tensor = tf.decode_compressed(
                        laser.ri_return1.range_image_pose_compressed, 'ZLIB')
                    range_image_top_pose = open_dataset.MatrixFloat()
                    range_image_top_pose.ParseFromString(
                        bytearray(range_image_top_pose_str_tensor.numpy()))
                # camera_projection_str_tensor = tf.decode_compressed(
                #     laser.ri_return1.camera_projection_compressed, 'ZLIB')
                # cp = open_dataset.MatrixInt32()
                # cp.ParseFromString(bytearray(camera_projection_str_tensor.numpy()))
                # camera_projections[laser.name] = [cp]
            if len(laser.ri_return2.range_image_compressed) > 0:
                # Second return, appended after the first.
                range_image_str_tensor = tf.decode_compressed(
                    laser.ri_return2.range_image_compressed, 'ZLIB')
                ri = open_dataset.MatrixFloat()
                ri.ParseFromString(bytearray(range_image_str_tensor.numpy()))
                self.__range_images[laser.name].append(ri)
                #
                # camera_projection_str_tensor = tf.decode_compressed(
                #     laser.ri_return2.camera_projection_compressed, 'ZLIB')
                # cp = open_dataset.MatrixInt32()
                # cp.ParseFromString(bytearray(camera_projection_str_tensor.numpy()))
                # camera_projections[laser.name].append(cp)
        return self.__range_images, range_image_top_pose
def plot_range_image_helper(self, data, name, layout, vmin=0, vmax=1, cmap='gray'):
"""Plots range image.
Args:
data: range image data
name: the image title
layout: plt layout
vmin: minimum value of the passed data
vmax: maximum value of the passed data
cmap: color map
"""
plt.subplot(*layout)
plt.imshow(data, cmap=cmap, vmin=vmin, vmax=vmax)
plt.title(name)
plt.grid(False)
plt.axis('off')
def get_range_image(self, laser_name, return_index):
"""Returns range image given a laser name and its return index."""
return self.__range_images[laser_name][return_index]
    def show_range_image(self, range_image, layout_index_start=1):
        """Shows range image.
        Args:
            range_image: the range image data from a given lidar of type MatrixFloat.
            layout_index_start: layout offset
        """
        range_image_tensor = tf.convert_to_tensor(range_image.data)
        range_image_tensor = tf.reshape(
            range_image_tensor, range_image.shape.dims)
        # Entries below 0 are invalid; replace them with a huge sentinel so
        # they render as background.
        lidar_image_mask = tf.greater_equal(range_image_tensor, 0)
        range_image_tensor = tf.where(lidar_image_mask, range_image_tensor,
                                      tf.ones_like(range_image_tensor) * 1e10)
        # Channels 0..2 are range, intensity and elongation respectively.
        range_image_range = range_image_tensor[..., 0]
        range_image_intensity = range_image_tensor[..., 1]
        range_image_elongation = range_image_tensor[..., 2]
        self.plot_range_image_helper(range_image_range.numpy(), 'range',
                                     [8, 1, layout_index_start], vmax=75, cmap='gray')
        self.plot_range_image_helper(range_image_intensity.numpy(), 'intensity',
                                     [8, 1, layout_index_start + 1], vmax=1.5, cmap='gray')
        self.plot_range_image_helper(range_image_elongation.numpy(), 'elongation',
                                     [8, 1, layout_index_start + 2], vmax=1.5, cmap='gray')
    def convert_range_image_to_point_cloud(self, frame, range_images, range_image_top_pose, ri_index=0):
        """Convert range images to point cloud.
        Args:
            frame: open dataset frame
            range_images: A dict of {laser_name,
            [range_image_first_return, range_image_second_return]}.
            range_image_top_pose: range image pixel pose for top lidar.
            ri_index: 0 for the first return, 1 for the second return.
        Returns:
            points: {[N, 3]} list of 3d lidar points of length 5 (number of lidars).
            intensity: {[N, 1]} list of intensity of length 5 (number of lidars).
        """
        calibrations = sorted(
            frame.context.laser_calibrations, key=lambda c: c.name)
        # lasers = sorted(frame.lasers, key=lambda laser: laser.name)
        points = []
        # cp_points = []
        intensity = []
        frame_pose = tf.convert_to_tensor(
            np.reshape(np.array(frame.pose.transform), [4, 4]))
        # [H, W, 6]
        range_image_top_pose_tensor = tf.reshape(
            tf.convert_to_tensor(range_image_top_pose.data),
            range_image_top_pose.shape.dims)
        # [H, W, 3, 3]
        range_image_top_pose_tensor_rotation = transform_utils.get_rotation_matrix(
            range_image_top_pose_tensor[...,
                                        0], range_image_top_pose_tensor[..., 1],
            range_image_top_pose_tensor[..., 2])
        range_image_top_pose_tensor_translation = range_image_top_pose_tensor[
            ..., 3:]
        range_image_top_pose_tensor = transform_utils.get_transform(
            range_image_top_pose_tensor_rotation,
            range_image_top_pose_tensor_translation)
        for c in calibrations:
            range_image = range_images[c.name][ri_index]
            # Beam inclinations: compute uniformly from min/max when the
            # calibration does not provide them explicitly.
            if len(c.beam_inclinations) == 0:
                beam_inclinations = range_image_utils.compute_inclination(
                    tf.constant([c.beam_inclination_min,
                                 c.beam_inclination_max]),
                    height=range_image.shape.dims[0])
            else:
                beam_inclinations = tf.constant(c.beam_inclinations)
            beam_inclinations = tf.reverse(beam_inclinations, axis=[-1])
            extrinsic = np.reshape(np.array(c.extrinsic.transform), [4, 4])
            range_image_tensor = tf.reshape(
                tf.convert_to_tensor(range_image.data), range_image.shape.dims)
            pixel_pose_local = None
            frame_pose_local = None
            # Only the top lidar carries a per-pixel pose.
            if c.name == open_dataset.LaserName.TOP:
                pixel_pose_local = range_image_top_pose_tensor
                pixel_pose_local = tf.expand_dims(pixel_pose_local, axis=0)
                frame_pose_local = tf.expand_dims(frame_pose, axis=0)
            # Channel 0 is range; non-positive range marks empty pixels.
            range_image_mask = range_image_tensor[..., 0] > 0
            range_image_cartesian = range_image_utils.extract_point_cloud_from_range_image(
                tf.expand_dims(range_image_tensor[..., 0], axis=0),
                tf.expand_dims(extrinsic, axis=0),
                tf.expand_dims(tf.convert_to_tensor(
                    beam_inclinations), axis=0),
                pixel_pose=pixel_pose_local,
                frame_pose=frame_pose_local)
            range_image_cartesian = tf.squeeze(range_image_cartesian, axis=0)
            points_tensor = tf.gather_nd(range_image_cartesian,
                                         tf.where(range_image_mask))
            intensity_tensor = tf.gather_nd(range_image_tensor,
                                            tf.where(range_image_mask))
            # cp = camera_projections[c.name][0]
            # cp_tensor = tf.reshape(tf.convert_to_tensor(cp.data), cp.shape.dims)
            # cp_points_tensor = tf.gather_nd(cp_tensor, tf.where(range_image_mask))
            points.append(points_tensor.numpy())
            # cp_points.append(cp_points_tensor.numpy())
            intensity.append(intensity_tensor.numpy()[:, 1])
        return points, intensity
def rgba(self, r):
"""Generates a color based on range.
Args:
r: the range value of a given point.
Returns:
The color for a given range
"""
c = plt.get_cmap('jet')((r % 20.0) / 20.0)
c = list(c)
c[-1] = 0.5 # alpha
return c
def plot_image(self, camera_image):
"""Plot a cmaera image."""
plt.figure(figsize=(20, 12))
plt.imshow(tf.image.decode_jpeg(camera_image.image))
plt.grid("off")
def plot_points_on_image(self, projected_points, camera_image, rgba_func, point_size=5.0):
"""Plots points on a camera image.
Args:
projected_points: [N, 3] numpy array. The inner dims are
[camera_x, camera_y, range].
camera_image: jpeg encoded camera image.
rgba_func: a function that generates a color from a range value.
point_size: the point size.
"""
self.plot_image(camera_image)
xs = []
ys = []
colors = []
for point in projected_points:
xs.append(point[0]) # width, col
ys.append(point[1]) # height, row
colors.append(rgba_func(point[2]))
plt.scatter(xs, ys, c=colors, s=point_size, edgecolors="none")
if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description='Save Waymo dataset into Kitti format')
    # NOTE(review): the help text claims the default is 1, but the actual
    # default below is 10 -- confirm which is intended.
    parser.add_argument('--keyframe',
                        type=int,
                        default=10,
                        help='Saves every specified # of scenes. Default is 1 and the program saves every scene')
    parser.add_argument('--camera_type',
                        type=str,
                        default="0",
                        help='Select camera views to save. Input argument from 0 to 4 or all')
    parser.add_argument('--start_ind',
                        type=int,
                        default=0,
                        help='File number starts counting from this index')
    # NOTE(review): argparse type=bool treats any non-empty string as True
    # (e.g. "--test False" is truthy); action='store_true' would be safer.
    parser.add_argument('--test',
                        type=bool,
                        default=False,
                        help='if true, does not save any ground truth data')
    args = parser.parse_args()
    start_ind = args.start_ind
    # Walk the immediate sub-folders of DATA_PATH in sorted order, converting
    # each one and chaining the output frame numbering across folders.
    path, dirs, files = next(os.walk(DATA_PATH))
    dirs.sort()
    for directory in dirs:
        adapter = Adapter()
        last_ind = adapter.cvt(args, directory, start_ind)
        start_ind = last_ind
| [
"matplotlib.pyplot.grid",
"tensorflow.enable_eager_execution",
"numpy.column_stack",
"numpy.array",
"progressbar.Percentage",
"tensorflow.ones_like",
"os.walk",
"matplotlib.pyplot.imshow",
"os.path.exists",
"os.listdir",
"argparse.ArgumentParser",
"os.mkdir",
"numpy.concatenate",
"matplotl... | [((27285, 27360), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Save Waymo dataset into Kitti format"""'}), "(description='Save Waymo dataset into Kitti format')\n", (27308, 27360), False, 'import argparse\n'), ((2400, 2427), 'tensorflow.enable_eager_execution', 'tf.enable_eager_execution', ([], {}), '()\n', (2425, 2427), True, 'import tensorflow as tf\n'), ((5226, 5289), 'numpy.array', 'np.array', (['[[0.0, -1.0, 0.0], [0.0, 0.0, -1.0], [1.0, 0.0, 0.0]]'], {}), '([[0.0, -1.0, 0.0], [0.0, 0.0, -1.0], [1.0, 0.0, 0.0]])\n', (5234, 5289), True, 'import numpy as np\n'), ((6314, 6351), 'numpy.linalg.inv', 'np.linalg.inv', (['T_front_cam_to_vehicle'], {}), '(T_front_cam_to_vehicle)\n', (6327, 6351), True, 'import numpy as np\n'), ((7471, 7501), 'numpy.concatenate', 'np.concatenate', (['points'], {'axis': '(0)'}), '(points, axis=0)\n', (7485, 7501), True, 'import numpy as np\n'), ((7526, 7559), 'numpy.concatenate', 'np.concatenate', (['intensity'], {'axis': '(0)'}), '(intensity, axis=0)\n', (7540, 7559), True, 'import numpy as np\n'), ((7582, 7626), 'numpy.column_stack', 'np.column_stack', (['(points_all, intensity_all)'], {}), '((points_all, intensity_all))\n', (7597, 7626), True, 'import numpy as np\n'), ((15130, 15148), 'os.listdir', 'os.listdir', (['folder'], {}), '(folder)\n', (15140, 15148), False, 'import os\n'), ((15304, 15313), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (15310, 15313), True, 'import numpy as np\n'), ((16826, 16846), 'matplotlib.pyplot.subplot', 'plt.subplot', (['*layout'], {}), '(*layout)\n', (16837, 16846), True, 'import matplotlib.pyplot as plt\n'), ((16913, 16928), 'matplotlib.pyplot.title', 'plt.title', (['name'], {}), '(name)\n', (16922, 16928), True, 'import matplotlib.pyplot as plt\n'), ((16937, 16952), 'matplotlib.pyplot.grid', 'plt.grid', (['(False)'], {}), '(False)\n', (16945, 16952), True, 'import matplotlib.pyplot as plt\n'), ((16961, 16976), 'matplotlib.pyplot.axis', 'plt.axis', 
(['"""off"""'], {}), "('off')\n", (16969, 16976), True, 'import matplotlib.pyplot as plt\n'), ((19991, 20011), 'matplotlib.pyplot.subplot', 'plt.subplot', (['*layout'], {}), '(*layout)\n', (20002, 20011), True, 'import matplotlib.pyplot as plt\n'), ((20020, 20069), 'matplotlib.pyplot.imshow', 'plt.imshow', (['data'], {'cmap': 'cmap', 'vmin': 'vmin', 'vmax': 'vmax'}), '(data, cmap=cmap, vmin=vmin, vmax=vmax)\n', (20030, 20069), True, 'import matplotlib.pyplot as plt\n'), ((20078, 20093), 'matplotlib.pyplot.title', 'plt.title', (['name'], {}), '(name)\n', (20087, 20093), True, 'import matplotlib.pyplot as plt\n'), ((20102, 20117), 'matplotlib.pyplot.grid', 'plt.grid', (['(False)'], {}), '(False)\n', (20110, 20117), True, 'import matplotlib.pyplot as plt\n'), ((20126, 20141), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (20134, 20141), True, 'import matplotlib.pyplot as plt\n'), ((20617, 20655), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['range_image.data'], {}), '(range_image.data)\n', (20637, 20655), True, 'import tensorflow as tf\n'), ((20685, 20739), 'tensorflow.reshape', 'tf.reshape', (['range_image_tensor', 'range_image.shape.dims'], {}), '(range_image_tensor, range_image.shape.dims)\n', (20695, 20739), True, 'import tensorflow as tf\n'), ((20780, 20819), 'tensorflow.greater_equal', 'tf.greater_equal', (['range_image_tensor', '(0)'], {}), '(range_image_tensor, 0)\n', (20796, 20819), True, 'import tensorflow as tf\n'), ((23160, 23310), 'waymo_open_dataset.utils.transform_utils.get_rotation_matrix', 'transform_utils.get_rotation_matrix', (['range_image_top_pose_tensor[..., 0]', 'range_image_top_pose_tensor[..., 1]', 'range_image_top_pose_tensor[..., 2]'], {}), '(range_image_top_pose_tensor[..., 0],\n range_image_top_pose_tensor[..., 1], range_image_top_pose_tensor[..., 2])\n', (23195, 23310), False, 'from waymo_open_dataset.utils import transform_utils\n'), ((23510, 23622), 
'waymo_open_dataset.utils.transform_utils.get_transform', 'transform_utils.get_transform', (['range_image_top_pose_tensor_rotation', 'range_image_top_pose_tensor_translation'], {}), '(range_image_top_pose_tensor_rotation,\n range_image_top_pose_tensor_translation)\n', (23539, 23622), False, 'from waymo_open_dataset.utils import transform_utils\n'), ((26349, 26377), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(20, 12)'}), '(figsize=(20, 12))\n', (26359, 26377), True, 'import matplotlib.pyplot as plt\n'), ((26447, 26462), 'matplotlib.pyplot.grid', 'plt.grid', (['"""off"""'], {}), "('off')\n", (26455, 26462), True, 'import matplotlib.pyplot as plt\n'), ((27180, 27242), 'matplotlib.pyplot.scatter', 'plt.scatter', (['xs', 'ys'], {'c': 'colors', 's': 'point_size', 'edgecolors': '"""none"""'}), "(xs, ys, c=colors, s=point_size, edgecolors='none')\n", (27191, 27242), True, 'import matplotlib.pyplot as plt\n'), ((28259, 28277), 'os.walk', 'os.walk', (['DATA_PATH'], {}), '(DATA_PATH)\n', (28266, 28277), False, 'import os\n'), ((2754, 2809), 'tensorflow.data.TFRecordDataset', 'tf.data.TFRecordDataset', (['file_name'], {'compression_type': '""""""'}), "(file_name, compression_type='')\n", (2777, 2809), True, 'import tensorflow as tf\n'), ((5835, 5851), 'numpy.zeros', 'np.zeros', (['(3, 4)'], {}), '((3, 4))\n', (5843, 5851), True, 'import numpy as np\n'), ((8436, 8466), 'numpy.concatenate', 'np.concatenate', (['points'], {'axis': '(0)'}), '(points, axis=0)\n', (8450, 8466), True, 'import numpy as np\n'), ((10205, 10372), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['[obj.box.center_x, obj.box.center_y, obj.box.center_z, obj.box.length, obj.\n box.width, obj.box.height, obj.box.heading]'], {'dtype': 'np.float32'}), '([obj.box.center_x, obj.box.center_y, obj.box.center_z,\n obj.box.length, obj.box.width, obj.box.height, obj.box.heading], dtype=\n np.float32)\n', (10225, 10372), True, 'import tensorflow as tf\n'), ((10399, 10422), 'tensorflow.reshape', 
'tf.reshape', (['box', '(1, 7)'], {}), '(box, (1, 7))\n', (10409, 10422), True, 'import tensorflow as tf\n'), ((10448, 10503), 'waymo_open_dataset.utils.box_utils.compute_num_points_in_box_3d', 'box_utils.compute_num_points_in_box_3d', (['points_all', 'box'], {}), '(points_all, box)\n', (10486, 10503), False, 'from waymo_open_dataset.utils import box_utils\n'), ((11271, 11287), 'math.atan2', 'math.atan2', (['x', 'z'], {}), '(x, z)\n', (11281, 11287), False, 'import math\n'), ((13674, 13685), 'numpy.zeros', 'np.zeros', (['(6)'], {}), '(6)\n', (13682, 13685), True, 'import numpy as np\n'), ((15571, 15597), 'os.path.exists', 'os.path.exists', (['KITTI_PATH'], {}), '(KITTI_PATH)\n', (15585, 15597), False, 'import os\n'), ((15611, 15631), 'os.mkdir', 'os.mkdir', (['KITTI_PATH'], {}), '(KITTI_PATH)\n', (15619, 15631), False, 'import os\n'), ((15647, 15673), 'os.path.exists', 'os.path.exists', (['CALIB_PATH'], {}), '(CALIB_PATH)\n', (15661, 15673), False, 'import os\n'), ((15687, 15707), 'os.mkdir', 'os.mkdir', (['CALIB_PATH'], {}), '(CALIB_PATH)\n', (15695, 15707), False, 'import os\n'), ((15723, 15749), 'os.path.exists', 'os.path.exists', (['LIDAR_PATH'], {}), '(LIDAR_PATH)\n', (15737, 15749), False, 'import os\n'), ((15763, 15783), 'os.mkdir', 'os.mkdir', (['LIDAR_PATH'], {}), '(LIDAR_PATH)\n', (15771, 15783), False, 'import os\n'), ((15799, 15829), 'os.path.exists', 'os.path.exists', (['LABEL_ALL_PATH'], {}), '(LABEL_ALL_PATH)\n', (15813, 15829), False, 'import os\n'), ((15843, 15867), 'os.mkdir', 'os.mkdir', (['LABEL_ALL_PATH'], {}), '(LABEL_ALL_PATH)\n', (15851, 15867), False, 'import os\n'), ((15883, 15913), 'os.path.exists', 'os.path.exists', (['IMG_CALIB_PATH'], {}), '(IMG_CALIB_PATH)\n', (15897, 15913), False, 'import os\n'), ((15927, 15951), 'os.mkdir', 'os.mkdir', (['IMG_CALIB_PATH'], {}), '(IMG_CALIB_PATH)\n', (15935, 15951), False, 'import os\n'), ((15967, 15993), 'os.path.exists', 'os.path.exists', (['IMAGE_PATH'], {}), '(IMAGE_PATH)\n', (15981, 15993), 
False, 'import os\n'), ((16007, 16027), 'os.mkdir', 'os.mkdir', (['IMAGE_PATH'], {}), '(IMAGE_PATH)\n', (16015, 16027), False, 'import os\n'), ((16043, 16069), 'os.path.exists', 'os.path.exists', (['LABEL_PATH'], {}), '(LABEL_PATH)\n', (16057, 16069), False, 'import os\n'), ((16083, 16103), 'os.mkdir', 'os.mkdir', (['LABEL_PATH'], {}), '(LABEL_PATH)\n', (16091, 16103), False, 'import os\n'), ((16866, 16892), 'tensorflow.image.decode_jpeg', 'tf.image.decode_jpeg', (['data'], {}), '(data)\n', (16886, 16892), True, 'import tensorflow as tf\n'), ((22996, 23043), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['range_image_top_pose.data'], {}), '(range_image_top_pose.data)\n', (23016, 23043), True, 'import tensorflow as tf\n'), ((24143, 24183), 'tensorflow.reverse', 'tf.reverse', (['beam_inclinations'], {'axis': '[-1]'}), '(beam_inclinations, axis=[-1])\n', (24153, 24183), True, 'import tensorflow as tf\n'), ((25222, 25263), 'tensorflow.squeeze', 'tf.squeeze', (['range_image_cartesian'], {'axis': '(0)'}), '(range_image_cartesian, axis=0)\n', (25232, 25263), True, 'import tensorflow as tf\n'), ((26160, 26179), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""jet"""'], {}), "('jet')\n", (26172, 26179), True, 'import matplotlib.pyplot as plt\n'), ((26397, 26437), 'tensorflow.image.decode_jpeg', 'tf.image.decode_jpeg', (['camera_image.image'], {}), '(camera_image.image)\n', (26417, 26437), True, 'import tensorflow as tf\n'), ((2867, 2887), 'waymo_open_dataset.dataset_pb2.Frame', 'open_dataset.Frame', ([], {}), '()\n', (2885, 2887), True, 'from waymo_open_dataset import dataset_pb2 as open_dataset\n'), ((4706, 4742), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_RGB2BGR'], {}), '(img, cv2.COLOR_RGB2BGR)\n', (4718, 4742), False, 'import cv2\n'), ((4759, 4809), 'matplotlib.pyplot.imsave', 'plt.imsave', (['img_path', 'rgb_img'], {'format': 'IMAGE_FORMAT'}), '(img_path, rgb_img, format=IMAGE_FORMAT)\n', (4769, 4809), True, 'import matplotlib.pyplot as plt\n'), 
((5666, 5684), 'numpy.linalg.inv', 'np.linalg.inv', (['tmp'], {}), '(tmp)\n', (5679, 5684), True, 'import numpy as np\n'), ((6195, 6261), 'numpy.array', 'np.array', (['frame.context.camera_calibrations[0].extrinsic.transform'], {}), '(frame.context.camera_calibrations[0].extrinsic.transform)\n', (6203, 6261), True, 'import numpy as np\n'), ((16477, 16518), 'numpy.array', 'np.array', (['range_images[lidar_num][0].data'], {}), '(range_images[lidar_num][0].data)\n', (16485, 16518), True, 'import numpy as np\n'), ((17786, 17855), 'tensorflow.decode_compressed', 'tf.decode_compressed', (['laser.ri_return1.range_image_compressed', '"""ZLIB"""'], {}), "(laser.ri_return1.range_image_compressed, 'ZLIB')\n", (17806, 17855), True, 'import tensorflow as tf\n'), ((17898, 17924), 'waymo_open_dataset.dataset_pb2.MatrixFloat', 'open_dataset.MatrixFloat', ([], {}), '()\n', (17922, 17924), True, 'from waymo_open_dataset import dataset_pb2 as open_dataset\n'), ((18925, 18994), 'tensorflow.decode_compressed', 'tf.decode_compressed', (['laser.ri_return2.range_image_compressed', '"""ZLIB"""'], {}), "(laser.ri_return2.range_image_compressed, 'ZLIB')\n", (18945, 18994), True, 'import tensorflow as tf\n'), ((19037, 19063), 'waymo_open_dataset.dataset_pb2.MatrixFloat', 'open_dataset.MatrixFloat', ([], {}), '()\n', (19061, 19063), True, 'from waymo_open_dataset import dataset_pb2 as open_dataset\n'), ((20934, 20966), 'tensorflow.ones_like', 'tf.ones_like', (['range_image_tensor'], {}), '(range_image_tensor)\n', (20946, 20966), True, 'import tensorflow as tf\n'), ((22873, 22903), 'numpy.array', 'np.array', (['frame.pose.transform'], {}), '(frame.pose.transform)\n', (22881, 22903), True, 'import numpy as np\n'), ((24077, 24109), 'tensorflow.constant', 'tf.constant', (['c.beam_inclinations'], {}), '(c.beam_inclinations)\n', (24088, 24109), True, 'import tensorflow as tf\n'), ((24219, 24250), 'numpy.array', 'np.array', (['c.extrinsic.transform'], {}), '(c.extrinsic.transform)\n', (24227, 24250), 
True, 'import numpy as np\n'), ((24322, 24360), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['range_image.data'], {}), '(range_image.data)\n', (24342, 24360), True, 'import tensorflow as tf\n'), ((24609, 24649), 'tensorflow.expand_dims', 'tf.expand_dims', (['pixel_pose_local'], {'axis': '(0)'}), '(pixel_pose_local, axis=0)\n', (24623, 24649), True, 'import tensorflow as tf\n'), ((24685, 24719), 'tensorflow.expand_dims', 'tf.expand_dims', (['frame_pose'], {'axis': '(0)'}), '(frame_pose, axis=0)\n', (24699, 24719), True, 'import tensorflow as tf\n'), ((24890, 24940), 'tensorflow.expand_dims', 'tf.expand_dims', (['range_image_tensor[..., 0]'], {'axis': '(0)'}), '(range_image_tensor[..., 0], axis=0)\n', (24904, 24940), True, 'import tensorflow as tf\n'), ((24958, 24991), 'tensorflow.expand_dims', 'tf.expand_dims', (['extrinsic'], {'axis': '(0)'}), '(extrinsic, axis=0)\n', (24972, 24991), True, 'import tensorflow as tf\n'), ((25369, 25395), 'tensorflow.where', 'tf.where', (['range_image_mask'], {}), '(range_image_mask)\n', (25377, 25395), True, 'import tensorflow as tf\n'), ((25505, 25531), 'tensorflow.where', 'tf.where', (['range_image_mask'], {}), '(range_image_mask)\n', (25513, 25531), True, 'import tensorflow as tf\n'), ((2139, 2163), 'progressbar.Percentage', 'progressbar.Percentage', ([], {}), '()\n', (2161, 2163), False, 'import progressbar\n'), ((2217, 2265), 'progressbar.Bar', 'progressbar.Bar', ([], {'marker': '""">"""', 'left': '"""["""', 'right': '"""]"""'}), "(marker='>', left='[', right=']')\n", (2232, 2265), False, 'import progressbar\n'), ((2371, 2388), 'progressbar.ETA', 'progressbar.ETA', ([], {}), '()\n', (2386, 2388), False, 'import progressbar\n'), ((4605, 4639), 'numpy.frombuffer', 'np.frombuffer', (['img.image', 'np.uint8'], {}), '(img.image, np.uint8)\n', (4618, 4639), True, 'import numpy as np\n'), ((5552, 5588), 'numpy.array', 'np.array', (['camera.extrinsic.transform'], {}), '(camera.extrinsic.transform)\n', (5560, 5588), True, 
'import numpy as np\n'), ((13560, 13591), 'numpy.array', 'np.array', (['camera.pose.transform'], {}), '(camera.pose.transform)\n', (13568, 13591), True, 'import numpy as np\n'), ((16596, 16637), 'numpy.array', 'np.array', (['range_images[lidar_num][1].data'], {}), '(range_images[lidar_num][1].data)\n', (16604, 16637), True, 'import numpy as np\n'), ((18174, 18248), 'tensorflow.decode_compressed', 'tf.decode_compressed', (['laser.ri_return1.range_image_pose_compressed', '"""ZLIB"""'], {}), "(laser.ri_return1.range_image_pose_compressed, 'ZLIB')\n", (18194, 18248), True, 'import tensorflow as tf\n'), ((18317, 18343), 'waymo_open_dataset.dataset_pb2.MatrixFloat', 'open_dataset.MatrixFloat', ([], {}), '()\n', (18341, 18343), True, 'from waymo_open_dataset import dataset_pb2 as open_dataset\n'), ((23873, 23934), 'tensorflow.constant', 'tf.constant', (['[c.beam_inclination_min, c.beam_inclination_max]'], {}), '([c.beam_inclination_min, c.beam_inclination_max])\n', (23884, 23934), True, 'import tensorflow as tf\n'), ((25024, 25063), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['beam_inclinations'], {}), '(beam_inclinations)\n', (25044, 25063), True, 'import tensorflow as tf\n'), ((5400, 5409), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (5406, 5409), True, 'import numpy as np\n'), ((11107, 11129), 'numpy.array', 'np.array', (['[x, y, z, 1]'], {}), '([x, y, z, 1])\n', (11115, 11129), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
#
# Copyright 2019 - Swiss Data Science Center (SDSC)
# A partnership between École Polytechnique Fédérale de Lausanne (EPFL) and
# Eidgenössische Technische Hochschule Zürich (ETHZ).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Third party data registry integration."""
from urllib.parse import urlparse
from renku.cli._providers.zenodo import ZenodoProvider
from renku.utils.doi import is_doi
class ProviderFactory:
"""Create a provider type from URI."""
PROVIDERS = {'zenodo': ZenodoProvider}
@staticmethod
def from_uri(uri):
"""Get provider type based on uri."""
is_doi_ = is_doi(uri)
if is_doi_ is False:
url = urlparse(uri)
if bool(url.scheme and url.netloc and url.params == '') is False:
return None, 'Cannot parse URL.'
provider = None
if 'zenodo' in uri:
provider = ZenodoProvider(is_doi=is_doi_)
if is_doi_ and provider is None:
return None, (
'Provider {} not found. '.format(
uri.split('/')[1].split('.')[0] # Get DOI provider name.
) + 'Currently supporting following providers: (Zenodo, )'
)
return provider, None
@staticmethod
def from_id(provider_id):
"""Get provider type based on identifier."""
return ProviderFactory.PROVIDERS[provider_id]()
| [
"renku.utils.doi.is_doi",
"urllib.parse.urlparse",
"renku.cli._providers.zenodo.ZenodoProvider"
] | [((1142, 1153), 'renku.utils.doi.is_doi', 'is_doi', (['uri'], {}), '(uri)\n', (1148, 1153), False, 'from renku.utils.doi import is_doi\n'), ((1201, 1214), 'urllib.parse.urlparse', 'urlparse', (['uri'], {}), '(uri)\n', (1209, 1214), False, 'from urllib.parse import urlparse\n'), ((1418, 1448), 'renku.cli._providers.zenodo.ZenodoProvider', 'ZenodoProvider', ([], {'is_doi': 'is_doi_'}), '(is_doi=is_doi_)\n', (1432, 1448), False, 'from renku.cli._providers.zenodo import ZenodoProvider\n')] |
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 10 13:52:52 2022
@author: sarangbhagwat
"""
from biorefineries.TAL.system_TAL_adsorption_glucose import *
from matplotlib import pyplot as plt
import numpy as np
column = AC401
#%% Across regeneration fluid velocity and cycle time
def MPSP_at_adsorption_design(v, t):
column.regeneration_velocity = v
column.cycle_time = t
return get_SA_MPSP(), AC401.installed_cost/1e6
regen_vels = np.linspace(3., 20., 40)
cycle_times = np.linspace(1., 4., 40)
MPSPs_ads_ds = []
column_costs_ads_r_t = []
#%%
for i in regen_vels:
MPSPs_ads_ds.append([])
column_costs_ads_r_t.append([])
for j in cycle_times:
MPSP, cost = None, None
try:
MPSP, cost = MPSP_at_adsorption_design(i, j)
except:
print(i, j)
MPSP, cost = np.nan, np.nan
MPSPs_ads_ds[-1].append(MPSP)
column_costs_ads_r_t[-1].append(cost)
#%% Set parameters to optimal
min_MPSP = np.min(MPSPs_ads_ds)
opt_indices = np.where(MPSPs_ads_ds==min_MPSP)
column.regeneration_velocity = regen_vels[opt_indices[0][0]]
column.cycle_time = cycle_times[opt_indices[1][0]]
print(min_MPSP, get_SA_MPSP())
#%% Plot MPSP
fig1, ax2 = plt.subplots(constrained_layout=True)
CS = ax2.contourf(cycle_times, regen_vels, MPSPs_ads_ds, levels=[4., 4.5, 5, 5.5, 6., 6.5, 7.])
CS2 = ax2.contour(CS, levels=CS.levels[::2], colors='black', origin='lower')
# ax2.set_title('Nonsense (3 masked regions)')
ax2.set_ylabel('Regeneration solvent velocity [m/s]')
ax2.set_xlabel('Cycle time [h]')
# Make a colorbar for the ContourSet returned by the contourf call.
cbar = fig1.colorbar(CS)
cbar.ax.set_ylabel('MPSP [$/kg]')
# Add the contour line levels to the colorbar
cbar.add_lines(CS2)
#%% Plot column cost
fig1, ax2 = plt.subplots(constrained_layout=True)
CS = ax2.contourf(cycle_times, regen_vels, column_costs_ads_r_t,
levels=[0, 0.25, 0.5, 0.75, 1., 1.25, 1.5, 1.75, 2., 2.25, 2.5],
)
CS2 = ax2.contour(CS, levels=CS.levels[::2], colors='black', origin='lower')
# ax2.set_title('Nonsense (3 masked regions)')
ax2.set_ylabel('Regeneration solvent velocity [m/s]')
ax2.set_xlabel('Cycle time [h]')
# Make a colorbar for the ContourSet returned by the contourf call.
cbar = fig1.colorbar(CS)
cbar.ax.set_ylabel('Column installed cost [10^6 USD]')
# Add the contour line levels to the colorbar
cbar.add_lines(CS2)
#%%
AC401.regeneration_velocity = 14.4
AC401.target_recovery=None
superficial_velocities = np.linspace(4., 15., 9)
cycle_times = np.linspace(1., 4., 10)
MPSPs = []
column_costs = []
for m in superficial_velocities:
AC401.superficial_velocity = m
MPSPs.append([])
column_costs.append([])
for t in cycle_times:
AC401.cycle_time = t
MPSPs[-1].append(get_SA_MPSP())
column_costs[-1].append(AC401.installed_cost/1e6)
#%% Plot column cost
# plt.contourf(superficial_velocities, cycle_times, MPSPs)
fig1, ax2 = plt.subplots(constrained_layout=True)
CS = ax2.contourf(cycle_times, superficial_velocities, column_costs)
CS2 = ax2.contour(CS, levels=CS.levels[::2], colors='black', origin='lower')
# ax2.set_title('Nonsense (3 masked regions)')
ax2.set_ylabel('Superficial feed velocity [m/s]')
ax2.set_xlabel('Cycle time [h]')
# Make a colorbar for the ContourSet returned by the contourf call.
cbar = fig1.colorbar(CS)
cbar.ax.set_ylabel('Column installed cost [10^6 USD]')
# Add the contour line levels to the colorbar
cbar.add_lines(CS2)
#%% Plot MPSP
# plt.contourf(superficial_velocities, cycle_times, MPSPs)
fig1, ax2 = plt.subplots(constrained_layout=True)
CS = ax2.contourf(cycle_times, superficial_velocities, MPSPs)
CS2 = ax2.contour(CS, levels=CS.levels[::2], colors='black', origin='lower')
# ax2.set_title('Nonsense (3 masked regions)')
ax2.set_ylabel('Superficial feed velocity [m/s]')
ax2.set_xlabel('Cycle time [h]')
# Make a colorbar for the ContourSet returned by the contourf call.
cbar = fig1.colorbar(CS)
cbar.ax.set_ylabel('MPSP [$/kg]')
# Add the contour line levels to the colorbar
cbar.add_lines(CS2)
#%% Across titer
AC401.regeneration_velocity = 14.4
AC401.target_recovery = 0.99
AC401.cycle_time = 2.
titers = np.linspace(2., 25., 10)
MPSPs_titer_only = []
costs_titer_only = []
for t in titers:
spec.load_specifications(spec_1=spec.baseline_yield, spec_2=t, spec_3=spec.baseline_productivity)
MPSPs.append(get_SA_MPSP())
costs_titer_only.append(AC401.installed_cost)
spec.load_specifications(spec_1=spec.baseline_yield, spec_2=spec.baseline_titer, spec_3=spec.baseline_productivity)
#%% Plot MPSP
plt.plot(titers, MPSPs_titer_only)
#%% Plot column cost
plt.plot(titers, costs_titer_only)
#%% Across titer and target recovery
# AC401.regeneration_velocity = 14.4
# AC401.target_recovery = 0.99
# # def MPSP_at_titer(t):
# # spec.load_specifications(spec_1=spec.spec_1, spec_2=t, spec_3=spec.spec_3)
# # column.regeneration_velocity = 3. + (17./25.)*t
# # return get_SA_MPSP()
# titers = np.linspace(2., 25., 10)
# recoveries = np.linspace(0.5, 0.99, 10)
# # MPSPs_titer = []
# #%%
# MPSPs_titer = []
# costs_titer = []
# for t in titers:
# MPSPs_titer.append([])
# costs_titer.append([])
# for r in recoveries:
# spec.load_specifications(spec_1=spec.spec_1, spec_2=t, spec_3=spec.spec_3)
# AC401.target_recovery = r
# MPSPs_titer[-1].append(get_SA_MPSP())
# costs_titer[-1].append(AC401.installed_cost)
# spec.load_specifications(spec.baseline_yield, spec.baseline_titer, spec.baseline_productivity)
# #%% Plot MPSP
# fig1, ax2 = plt.subplots(constrained_layout=True)
# CS = ax2.contourf(recoveries, titers, MPSPs_titer,
# # levels=[0., 2.5, 5., 7.5, 10, 12.5, 15, 17.5, 20],
# )
# CS2 = ax2.contour(CS, levels=CS.levels[::2], colors='black', origin='lower')
# # ax2.set_title('Nonsense (3 masked regions)')
# ax2.set_ylabel('Fermentation titer [g/L]')
# ax2.set_xlabel('Target adsorbate recovery [% of influent]')
# # Make a colorbar for the ContourSet returned by the contourf call.
# cbar = fig1.colorbar(CS)
# cbar.ax.set_ylabel('MPSP [$/kg]')
# # Add the contour line levels to the colorbar
# cbar.add_lines(CS2)
# #%% Plot column cost
# fig1, ax2 = plt.subplots(constrained_layout=True)
# CS = ax2.contourf(recoveries, titers, costs_titer,
# # levels=[0, 2, 4, 6, 8, 10, 12, 14, 16, 18 ,20],
# )
# CS2 = ax2.contour(CS, levels=CS.levels[::2], colors='black', origin='lower')
# # ax2.set_title('Nonsense (3 masked regions)')
# ax2.set_ylabel('Regeneration solvent velocity [m/s]')
# ax2.set_xlabel('Cycle time [h]')
# # Make a colorbar for the ContourSet returned by the contourf call.
# cbar = fig1.colorbar(CS)
# cbar.ax.set_ylabel('Column installed cost [10^6 USD]')
# # Add the contour line levels to the colorbar
# cbar.add_lines(CS2)
#%% Across titer with rigorous adsorption design optimization
AC401.regeneration_velocity = 14.4
AC401.target_recovery = 0.99
AC401.cycle_time = 2.
regen_vels = np.linspace(1., 14.4, 20)
# cycle_times = np.linspace(0.5, 4., 20)
opt_regen_vels = []
opt_cycle_times = []
def MPSP_and_cost_at_regen_vel(v):
column.regeneration_velocity = v
return get_SA_MPSP(), AC401.installed_cost/1e6
def MPSP_at_titer(t):
spec.load_specifications(spec_1=spec.spec_1, spec_2=t, spec_3=spec.spec_3)
MPSPs_ads_ds = []
costs_ads_ds = []
for i in regen_vels:
m, c = MPSP_and_cost_at_regen_vel(i)
MPSPs_ads_ds.append(m)
costs_ads_ds.append(c)
min_MPSP = np.min(MPSPs_ads_ds)
opt_indices = np.where(MPSPs_ads_ds==min_MPSP)
opt_regen_vels.append(regen_vels[opt_indices[0][0]])
# opt_cycle_times.append(cycle_times[opt_indices[1][0]])
column.regeneration_velocity = opt_regen_vels[-1]
# column.cycle_time = opt_cycle_times[-1]
print('titer =', t)
print(min_MPSP, column.ins[1].F_mass, column.regeneration_velocity, column.cycle_time)
print('\n')
return min_MPSP
titers = np.linspace(3., 30, 20)
#%%
MPSPs_titer = []
for i in titers:
MPSPs_titer.append(MPSP_at_titer(i))
spec.load_specifications(spec.baseline_yield, spec.baseline_titer, spec.baseline_productivity)
#%% Plot MPSP
plt.plot(titers, MPSPs_titer)
#%% Plot optimum regeneration velocity
plt.plot(titers, opt_regen_vels)
#%% Plot | [
"numpy.where",
"matplotlib.pyplot.plot",
"numpy.linspace",
"numpy.min",
"matplotlib.pyplot.subplots"
] | [((448, 474), 'numpy.linspace', 'np.linspace', (['(3.0)', '(20.0)', '(40)'], {}), '(3.0, 20.0, 40)\n', (459, 474), True, 'import numpy as np\n'), ((487, 512), 'numpy.linspace', 'np.linspace', (['(1.0)', '(4.0)', '(40)'], {}), '(1.0, 4.0, 40)\n', (498, 512), True, 'import numpy as np\n'), ((977, 997), 'numpy.min', 'np.min', (['MPSPs_ads_ds'], {}), '(MPSPs_ads_ds)\n', (983, 997), True, 'import numpy as np\n'), ((1012, 1046), 'numpy.where', 'np.where', (['(MPSPs_ads_ds == min_MPSP)'], {}), '(MPSPs_ads_ds == min_MPSP)\n', (1020, 1046), True, 'import numpy as np\n'), ((1214, 1251), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'constrained_layout': '(True)'}), '(constrained_layout=True)\n', (1226, 1251), True, 'from matplotlib import pyplot as plt\n'), ((1789, 1826), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'constrained_layout': '(True)'}), '(constrained_layout=True)\n', (1801, 1826), True, 'from matplotlib import pyplot as plt\n'), ((2517, 2542), 'numpy.linspace', 'np.linspace', (['(4.0)', '(15.0)', '(9)'], {}), '(4.0, 15.0, 9)\n', (2528, 2542), True, 'import numpy as np\n'), ((2555, 2580), 'numpy.linspace', 'np.linspace', (['(1.0)', '(4.0)', '(10)'], {}), '(1.0, 4.0, 10)\n', (2566, 2580), True, 'import numpy as np\n'), ((2972, 3009), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'constrained_layout': '(True)'}), '(constrained_layout=True)\n', (2984, 3009), True, 'from matplotlib import pyplot as plt\n'), ((3590, 3627), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'constrained_layout': '(True)'}), '(constrained_layout=True)\n', (3602, 3627), True, 'from matplotlib import pyplot as plt\n'), ((4208, 4234), 'numpy.linspace', 'np.linspace', (['(2.0)', '(25.0)', '(10)'], {}), '(2.0, 25.0, 10)\n', (4219, 4234), True, 'import numpy as np\n'), ((4611, 4645), 'matplotlib.pyplot.plot', 'plt.plot', (['titers', 'MPSPs_titer_only'], {}), '(titers, MPSPs_titer_only)\n', (4619, 4645), True, 'from matplotlib import pyplot as plt\n'), ((4668, 4702), 
'matplotlib.pyplot.plot', 'plt.plot', (['titers', 'costs_titer_only'], {}), '(titers, costs_titer_only)\n', (4676, 4702), True, 'from matplotlib import pyplot as plt\n'), ((7068, 7094), 'numpy.linspace', 'np.linspace', (['(1.0)', '(14.4)', '(20)'], {}), '(1.0, 14.4, 20)\n', (7079, 7094), True, 'import numpy as np\n'), ((8065, 8089), 'numpy.linspace', 'np.linspace', (['(3.0)', '(30)', '(20)'], {}), '(3.0, 30, 20)\n', (8076, 8089), True, 'import numpy as np\n'), ((8285, 8314), 'matplotlib.pyplot.plot', 'plt.plot', (['titers', 'MPSPs_titer'], {}), '(titers, MPSPs_titer)\n', (8293, 8314), True, 'from matplotlib import pyplot as plt\n'), ((8355, 8387), 'matplotlib.pyplot.plot', 'plt.plot', (['titers', 'opt_regen_vels'], {}), '(titers, opt_regen_vels)\n', (8363, 8387), True, 'from matplotlib import pyplot as plt\n'), ((7604, 7624), 'numpy.min', 'np.min', (['MPSPs_ads_ds'], {}), '(MPSPs_ads_ds)\n', (7610, 7624), True, 'import numpy as np\n'), ((7643, 7677), 'numpy.where', 'np.where', (['(MPSPs_ads_ds == min_MPSP)'], {}), '(MPSPs_ads_ds == min_MPSP)\n', (7651, 7677), True, 'import numpy as np\n')] |
from m5stack import *
from m5stack_ui import *
from uiflow import *
from ble import ble_uart
import face
screen = M5Screen()
screen.clean_screen()
screen.set_screen_bg_color(0x000000)
mb_click = None
rb_click = None
lb_click = None
snd_val = None
st_mode = None
stval = None
prval = None
faces_encode = face.get(face.ENCODE)
direction = M5Label('M5MouseWheel - Please dont touch for processing...', x=0, y=228, color=0xc7c7c7, font=FONT_MONT_12, parent=None)
LBtn = M5Btn(text='L', x=170, y=6, w=65, h=100, bg_c=0x000000, text_c=0xbcbcbc, font=FONT_UNICODE_24, parent=None)
RBtn = M5Btn(text='R', x=240, y=6, w=70, h=48, bg_c=0x000000, text_c=0xbebebe, font=FONT_UNICODE_24, parent=None)
d_w_x = M5Btn(text='WX', x=0, y=162, w=48, h=48, bg_c=0x000000, text_c=0xd4d4d4, font=FONT_UNICODE_24, parent=None)
MBtn = M5Btn(text='M', x=240, y=58, w=70, h=48, bg_c=0x000000, text_c=0xbebebe, font=FONT_UNICODE_24, parent=None)
d_w_y = M5Btn(text='WY', x=52, y=162, w=48, h=48, bg_c=0x000000, text_c=0xd4d4d4, font=FONT_UNICODE_24, parent=None)
b_step = M5Btn(text='STEP', x=0, y=6, w=100, h=100, bg_c=0x000000, text_c=0xd4d4d4, font=FONT_UNICODE_24, parent=None)
d_y = M5Btn(text='Y', x=220, y=110, w=100, h=100, bg_c=0x000000, text_c=0xd4d4d4, font=FONT_MONT_48, parent=None)
d_scr = M5Btn(text='SCR', x=0, y=110, w=100, h=48, bg_c=0x000000, text_c=0xd4d4d4, font=FONT_UNICODE_24, parent=None)
d_x = M5Btn(text='X', x=110, y=110, w=100, h=100, bg_c=0x000000, text_c=0xd4d4d4, font=FONT_MONT_48, parent=None)
v_step = M5Label('1', x=121, y=38, color=0xc7c7c7, font=FONT_MONT_24, parent=None)
# Change Mode
def changeMode():
global mb_click, lb_click, rb_click, snd_val, st_mode, stval, prval
snd_val = 0
uart_ble.write((str(st_mode) + str(str(snd_val))))
direction.set_text(str((str(st_mode) + str(str(snd_val)))))
# Reset Mode
def resetMode():
global mb_click, lb_click, rb_click, snd_val, st_mode, stval, prval
st_mode = ''
b_step.set_bg_color(0x000000)
d_y.set_bg_color(0x000000)
d_scr.set_bg_color(0x000000)
d_w_x.set_bg_color(0x000000)
d_w_y.set_bg_color(0x000000)
d_x.set_bg_color(0x000000)
def MBtn_pressed():
global mb_click, lb_click, rb_click, snd_val, st_mode, stval, prval
mb_click = 0 if mb_click == 1 else 1
uart_ble.write((str('M') + str(str(mb_click))))
if mb_click == 1:
MBtn.set_bg_color(0x666666)
else:
MBtn.set_bg_color(0x000000)
direction.set_text(str((str('M') + str(str(mb_click)))))
pass
MBtn.pressed(MBtn_pressed)
def LBtn_pressed():
global mb_click, lb_click, rb_click, snd_val, st_mode, stval, prval
lb_click = 0 if lb_click == 1 else 1
uart_ble.write((str('L') + str(str(lb_click))))
if lb_click == 1:
LBtn.set_bg_color(0x666666)
else:
LBtn.set_bg_color(0x000000)
direction.set_text(str((str('L') + str(str(lb_click)))))
pass
LBtn.pressed(LBtn_pressed)
def RBtn_pressed():
global mb_click, lb_click, rb_click, snd_val, st_mode, stval, prval
rb_click = 0 if rb_click == 1 else 1
uart_ble.write((str('R') + str(str(rb_click))))
if rb_click == 1:
RBtn.set_bg_color(0x666666)
else:
RBtn.set_bg_color(0x000000)
direction.set_text(str((str('R') + str(str(rb_click)))))
pass
RBtn.pressed(RBtn_pressed)
def b_step_pressed():
global mb_click, lb_click, rb_click, snd_val, st_mode, stval, prval
if st_mode != 'T':
resetMode()
st_mode = 'T'
b_step.set_bg_color(0x666666)
faces_encode.setLed(0, 0xffffff)
changeMode()
pass
b_step.pressed(b_step_pressed)
def d_scr_pressed():
global mb_click, lb_click, rb_click, snd_val, st_mode, stval, prval
if st_mode != 'S':
resetMode()
st_mode = 'S'
d_scr.set_bg_color(0x666666)
faces_encode.setLed(0, 0xff9900)
changeMode()
pass
d_scr.pressed(d_scr_pressed)
def d_x_pressed():
global mb_click, lb_click, rb_click, snd_val, st_mode, stval, prval
if st_mode != 'X':
resetMode()
st_mode = 'X'
d_x.set_bg_color(0x666666)
faces_encode.setLed(0, 0xff0000)
changeMode()
pass
d_x.pressed(d_x_pressed)
def d_y_pressed():
global mb_click, lb_click, rb_click, snd_val, st_mode, stval, prval
if st_mode != 'Y':
resetMode()
st_mode = 'Y'
d_y.set_bg_color(0x666666)
faces_encode.setLed(0, 0x3333ff)
changeMode()
pass
d_y.pressed(d_y_pressed)
def d_w_x_pressed():
global mb_click, lb_click, rb_click, snd_val, st_mode, stval, prval
if st_mode != 'U':
resetMode()
st_mode = 'U'
d_w_x.set_bg_color(0x666666)
faces_encode.setLed(0, 0x33ff33)
changeMode()
pass
d_w_x.pressed(d_w_x_pressed)
def d_w_y_pressed():
global mb_click, lb_click, rb_click, snd_val, st_mode, stval, prval
if st_mode != 'V':
resetMode()
st_mode = 'V'
d_w_y.set_bg_color(0x666666)
faces_encode.setLed(0, 0x00cccc)
changeMode()
pass
d_w_y.pressed(d_w_y_pressed)
resetMode()
uart_ble = ble_uart.init('m5mw_01')
stval = 1
st_mode = 'S'
prval = faces_encode.getValue()
snd_val = 0
d_scr.set_bg_color(0x666666)
faces_encode.setLed(0, 0xff9900)
uart_ble.write((str(st_mode) + str(str(snd_val))))
direction.set_text(str((str(st_mode) + str(str(snd_val)))))
while True:
if (faces_encode.getValue()) != prval:
if st_mode == 'T':
stval = stval + ((faces_encode.getValue()) - prval)
v_step.set_text(str(stval))
else:
snd_val = snd_val + ((faces_encode.getValue()) - prval) * stval
uart_ble.write((str(st_mode) + str(str(snd_val))))
direction.set_text(str((str(st_mode) + str(str(snd_val)))))
prval = faces_encode.getValue()
wait_ms(2)
| [
"face.get",
"ble.ble_uart.init"
] | [((307, 328), 'face.get', 'face.get', (['face.ENCODE'], {}), '(face.ENCODE)\n', (315, 328), False, 'import face\n'), ((4849, 4873), 'ble.ble_uart.init', 'ble_uart.init', (['"""m5mw_01"""'], {}), "('m5mw_01')\n", (4862, 4873), False, 'from ble import ble_uart\n')] |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import os
import shutil
import argparse
import subprocess
import numpy as np
import contextlib
import onnx
from cvi_toolkit.utils.mlir_shell import *
from cvi_toolkit.utils.intermediate_file import IntermediateFile
@contextlib.contextmanager
def pushd(new_dir):
    """Temporarily make *new_dir* the current working directory.

    On exit — normal or via an exception raised inside the ``with`` body —
    the previous working directory is restored.
    """
    saved_dir = os.getcwd()
    os.chdir(new_dir)
    try:
        yield
    finally:
        # Always return to where we started, even on error.
        os.chdir(saved_dir)
class ModelTest(object):
    """Regression harness: transform an ONNX/MLIR model, quantize it and
    deploy it to a cvimodel via the model_transform/model_deploy tools."""
    def __init__(self, chip, model_path, batch_size):
        # chip: target chip name (e.g. "cv182x"); model_path: .onnx or .mlir
        # file; batch_size: substituted for a non-positive batch dimension.
        self.chip = chip
        self.model_path = model_path
        self.batch_size = batch_size
        # Model name = file name without directory or extension.
        self.model_name = os.path.split(model_path)[-1].split(".")[0]
        self.fp32_mlir = self.model_name + ".mlir"
        self.cvimodel = self.model_name + ".cvimodel"
        self.input_path = "./input.npz"
    def __make_test_calibration_table__(self, table_name):
        # Quick calibration: run fp32 inference once and use max(abs) of each
        # tensor as its threshold, clamped to a sane range.
        blobs_interp_npz = IntermediateFile(self.model_name, 'full_precision_interp.npz', False)
        ret = mlir_inference(self.fp32_mlir, self.input_path, None, str(blobs_interp_npz))
        if ret != 0:
            raise RuntimeError("{} mlir inference failed".format(self.model_path))
        tensors = np.load(str(blobs_interp_npz))
        with open(table_name, "w") as f:
            for name in tensors:
                threshold = np.abs(np.max(tensors[name]))
                # Clamp: NaN -> 10.0, cap at 127.0, floor tiny values at 1.0.
                if np.isnan(threshold):
                    threshold = 10.0
                elif threshold >= 127.0:
                    threshold = 127.0
                elif threshold <= 0.001:
                    threshold = 1.0
                else:
                    pass
                f.write("{} {}\n".format(name, threshold))
    def run(self, quant_mode, input=None):
        # Transform the model into the canonical fp32 MLIR, then deploy it as
        # a cvimodel in the requested quantization mode.
        # quant_mode: one of 'bf16', 'mix_bf16', 'int8'.
        if self.model_path.endswith(".onnx"):
            onnx_model = onnx.load(self.model_path)
            input_nodes = onnx_model.graph.input
            self.__gen_onnx_input__(input_nodes)
            transform_cmd = [
                'model_transform.py', '--model_type', 'onnx', '--model_name', self.model_name, '--model_def', self.model_path,
                '--image', self.input_path, '--net_input_dims', '1,100', '--tolerance', '0.99,0.99,0.99', '--mlir',
                self.fp32_mlir
            ]
            subprocess.run(transform_cmd)
        elif self.model_path.endswith(".mlir"):
            # MLIR input: attach pseudo weights, then opt into the fp32 mlir.
            tmp_mlir_file = IntermediateFile(self.model_name, 'fp32.mlir.tmp', False)
            op_info_csv = IntermediateFile(self.model_name, 'op_info.csv', True)
            ret = mlir_pseudo_weight(self.model_path, str(tmp_mlir_file))
            ret = mlir_opt(str(tmp_mlir_file), self.fp32_mlir, str(op_info_csv))
            if ret != 0:
                raise RuntimeError("{} opt failed".format(self.model_path))
        if quant_mode in ['bf16', 'mix_bf16']:
            deploy_cmd = [
                'model_deploy.py', '--model_name', self.model_name, '--mlir', self.fp32_mlir, '--quantize',
                quant_mode.upper(), '--chip', self.chip, '--image', self.input_path, '--inputs_type', 'SAME',
                '--outputs_type', 'SAME', '--tolerance', '0.99,0.99,0.87', '--correctness', '0.99,0.99,0.95', '--debug',
                '--cvimodel', self.cvimodel
            ]
        elif "int8" == quant_mode:
            # simple cali and convert to cvimodel
            table_file = IntermediateFile(self.model_name, 'calibration_table', True)
            self.__make_test_calibration_table__(str(table_file))
            deploy_cmd = [
                'model_deploy.py', '--model_name', self.model_name, '--mlir', self.fp32_mlir, '--calibration_table',
                str(table_file), '--chip', self.chip, '--image', self.input_path, '--inputs_type', 'SAME',
                '--outputs_type', 'SAME', '--tolerance', '0.10,0.10,0.1', '--correctness', '0.99,0.99,0.93', '--debug',
                '--cvimodel', self.cvimodel
            ]
        else:
            raise ValueError("Now just support bf16/int8")
        subprocess.run(deploy_cmd)
    def __gen_onnx_input__(self, input_nodes):
        # Generate random inputs matching the ONNX graph's input tensors and
        # save them to input.npz.  NOTE(review): `input` shadows the builtin.
        self.input_data = {}
        for input in input_nodes:
            input_shape = []
            for i, dim in enumerate(input.type.tensor_type.shape.dim):
                # A non-positive leading dim is a dynamic batch: use batch_size.
                if i == 0 and dim.dim_value <= 0 and self.batch_size != 0:
                    input_shape.append(self.batch_size)
                else:
                    input_shape.append(dim.dim_value)
            if 1 == input.type.tensor_type.elem_type: # 1 for np.float32
                self.input_data[input.name] = np.random.randn(*input_shape).astype(np.float32)
                # self.input_data[input.name] = np.random.uniform(1, 6, input_shape).astype(np.float32)
            elif 7 == input.type.tensor_type.elem_type: # 7 for np.int64 / torch.long
                self.input_data[input.name] = np.random.randint(0, 3, input_shape).astype(np.int64)
            elif 9 == input.type.tensor_type.elem_type: # 9 for boolean
                self.input_data[input.name] = np.random.randint(0, 2, input_shape).astype(np.float32)
            else:
                raise ValueError("Not support now, add here")
            np.savez("input.npz", **self.input_data)
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument("--model", help="model definition file, mlir or onnx")
    parser.add_argument("--quantize", choices=['bf16', 'int8', 'mix_bf16'], default="bf16", help="quant mode")
    parser.add_argument("--batch_size", type=int, default=1, help="batch size")
    parser.add_argument("--chip", type=str, default="cv182x", help="chip type")
    parser.add_argument("--out_dir", type=str, default="tmp", help="out folder")
    # parser.add_argument("--excepts", default='-', help="excepts")
    # parser.add_argument("--graph", action='store_true', help="generate graph to pb file")
    args = parser.parse_args()
    # Start from a clean scratch folder and copy the model into it.
    if os.path.exists(args.out_dir):
        shutil.rmtree(args.out_dir)
    os.makedirs(args.out_dir)
    tmp_model_file = os.path.split(args.model)[-1]
    shutil.copy(args.model, os.path.join(args.out_dir, tmp_model_file))
    # Run the whole pipeline from inside the scratch folder so all
    # intermediate files land there.
    with pushd(args.out_dir):
        tool = ModelTest(args.chip, tmp_model_file, args.batch_size)
        tool.run(args.quantize)
| [
"os.path.exists",
"numpy.savez",
"os.makedirs",
"argparse.ArgumentParser",
"subprocess.run",
"os.path.join",
"os.getcwd",
"os.chdir",
"os.path.split",
"numpy.max",
"numpy.random.randint",
"numpy.isnan",
"onnx.load",
"shutil.rmtree",
"cvi_toolkit.utils.intermediate_file.IntermediateFile",... | [((327, 338), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (336, 338), False, 'import os\n'), ((343, 360), 'os.chdir', 'os.chdir', (['new_dir'], {}), '(new_dir)\n', (351, 360), False, 'import os\n'), ((5242, 5267), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (5265, 5267), False, 'import argparse\n'), ((5898, 5926), 'os.path.exists', 'os.path.exists', (['args.out_dir'], {}), '(args.out_dir)\n', (5912, 5926), False, 'import os\n'), ((5968, 5993), 'os.makedirs', 'os.makedirs', (['args.out_dir'], {}), '(args.out_dir)\n', (5979, 5993), False, 'import os\n'), ((405, 427), 'os.chdir', 'os.chdir', (['previous_dir'], {}), '(previous_dir)\n', (413, 427), False, 'import os\n'), ((909, 978), 'cvi_toolkit.utils.intermediate_file.IntermediateFile', 'IntermediateFile', (['self.model_name', '"""full_precision_interp.npz"""', '(False)'], {}), "(self.model_name, 'full_precision_interp.npz', False)\n", (925, 978), False, 'from cvi_toolkit.utils.intermediate_file import IntermediateFile\n'), ((3985, 4011), 'subprocess.run', 'subprocess.run', (['deploy_cmd'], {}), '(deploy_cmd)\n', (3999, 4011), False, 'import subprocess\n'), ((5936, 5963), 'shutil.rmtree', 'shutil.rmtree', (['args.out_dir'], {}), '(args.out_dir)\n', (5949, 5963), False, 'import shutil\n'), ((6016, 6041), 'os.path.split', 'os.path.split', (['args.model'], {}), '(args.model)\n', (6029, 6041), False, 'import os\n'), ((6074, 6116), 'os.path.join', 'os.path.join', (['args.out_dir', 'tmp_model_file'], {}), '(args.out_dir, tmp_model_file)\n', (6086, 6116), False, 'import os\n'), ((1810, 1836), 'onnx.load', 'onnx.load', (['self.model_path'], {}), '(self.model_path)\n', (1819, 1836), False, 'import onnx\n'), ((2265, 2294), 'subprocess.run', 'subprocess.run', (['transform_cmd'], {}), '(transform_cmd)\n', (2279, 2294), False, 'import subprocess\n'), ((5159, 5199), 'numpy.savez', 'np.savez', (['"""input.npz"""'], {}), "('input.npz', 
**self.input_data)\n", (5167, 5199), True, 'import numpy as np\n'), ((1375, 1394), 'numpy.isnan', 'np.isnan', (['threshold'], {}), '(threshold)\n', (1383, 1394), True, 'import numpy as np\n'), ((2371, 2428), 'cvi_toolkit.utils.intermediate_file.IntermediateFile', 'IntermediateFile', (['self.model_name', '"""fp32.mlir.tmp"""', '(False)'], {}), "(self.model_name, 'fp32.mlir.tmp', False)\n", (2387, 2428), False, 'from cvi_toolkit.utils.intermediate_file import IntermediateFile\n'), ((2455, 2509), 'cvi_toolkit.utils.intermediate_file.IntermediateFile', 'IntermediateFile', (['self.model_name', '"""op_info.csv"""', '(True)'], {}), "(self.model_name, 'op_info.csv', True)\n", (2471, 2509), False, 'from cvi_toolkit.utils.intermediate_file import IntermediateFile\n'), ((3348, 3408), 'cvi_toolkit.utils.intermediate_file.IntermediateFile', 'IntermediateFile', (['self.model_name', '"""calibration_table"""', '(True)'], {}), "(self.model_name, 'calibration_table', True)\n", (3364, 3408), False, 'from cvi_toolkit.utils.intermediate_file import IntermediateFile\n'), ((1333, 1354), 'numpy.max', 'np.max', (['tensors[name]'], {}), '(tensors[name])\n', (1339, 1354), True, 'import numpy as np\n'), ((633, 658), 'os.path.split', 'os.path.split', (['model_path'], {}), '(model_path)\n', (646, 658), False, 'import os\n'), ((4552, 4581), 'numpy.random.randn', 'np.random.randn', (['*input_shape'], {}), '(*input_shape)\n', (4567, 4581), True, 'import numpy as np\n'), ((4838, 4874), 'numpy.random.randint', 'np.random.randint', (['(0)', '(3)', 'input_shape'], {}), '(0, 3, input_shape)\n', (4855, 4874), True, 'import numpy as np\n'), ((5011, 5047), 'numpy.random.randint', 'np.random.randint', (['(0)', '(2)', 'input_shape'], {}), '(0, 2, input_shape)\n', (5028, 5047), True, 'import numpy as np\n')] |
import inspect
import os
import sys
from random import choice
from typing import List
__author__ = "GLNB"
__copyright__ = "GLNB"
__license__ = "mit"
try:
from .dictionaries import invisible_chars, dict_latin
except ImportError:
from dictionaries import invisible_chars, dict_latin
# Directory containing this module, resolved at import time; used to locate
# the bundled unicode data files.
__location__ = os.path.join(
    os.getcwd(), os.path.dirname(inspect.getfile(inspect.currentframe()))
)
class Scrambler:
    # This is done by parsing the Unicode list of confusable characters.
    """
    .. code:: python
        >>> from text_scrambler import Scrambler
        >>> scr = Scrambler()
        >>> text = "This is an example"
        >>> text_1 = scr.scramble(text, level=1)
        >>> #############
        >>> # adding only zwj/zwnj characters
        >>> print(text, text_1, sep="\\n")
        This is an example
        This is an example
        >>> assert text != text_1
        >>> print(len(text), len(text_1))
        18 35
        >>> # though the texts look similar, the second one has more characters
        >>> #############
        >>> text_2 = scr.scramble(text, level=2)
        >>> # replacing some latin letters by their cyrillic/greek equivalent
        >>> print(text_2)
        Тhiѕ iѕ an ехаmple
        >>> for char, char_2 in zip(text, text_2):
        ...     if char != char_2:
        ...         print(char, char_2)
        ...
        T Т
        s ѕ
        s ѕ
        e е
        x х
        a а
        >>> #############
        >>> text_3 = scr.scramble(text, level=3)
        >>> # adding zwj/zwnj characters and replacing latin letters
        >>> print(text_3)
        Thіs iѕ аn eхаmple
        >>> print(text, text_3, sep="\\n")
        This is an example
        Thіs iѕ аn eхаmple
        >>> assert text_3 != text
        >>> #############
        >>> text_4 = scr.scramble(text, level=4)
        >>> # replacing all characters by any unicode looking like character
        >>> print(text_4)
        ⊤𝒽𝐢𝘴 𝘪𝙨 𝞪ռ 𝙚⨯𝚊mρ𝟙ҽ
        >>> #
        >>> # generating several versions
        >>> versions = scr.generate(text, 10, level=4)
        >>> for txt in versions:
        ...     print(txt)
        ...
        𝕋𝗵𝕚𝔰 𝙞ѕ ɑ𝗇 ꬲ𝗑𝒂m𝛠Ⲓ𝚎
        𝔗һ𑣃ƽ ˛ꜱ 𝛼𝐧 𝐞𝖝𝛼m𝜌𝟏ℯ
        Th𝓲𝔰 ⅈ𝔰 αn ꬲ⤬αm⍴𞸀e
        𝗧𝗵i𝑠 i𝖘 ⍺𝘯 𝗲𝔁аm𝘱𝙸𝔢
        ⊤𝚑𝑖s ɪ𝚜 𝜶𝑛 𝖾𝘅𝒶m𝛒𝑙𝓮
        𝘛h𝙞ꮪ ⅈ𝗌 𝗮𝐧 ꬲᕽ𝓪m𝜌⏽𝓮
        𝙏𝕙і𝓈 ıꜱ 𝔞𝕟 𝗲𝕩𝛂mр𐌉𝚎
        𝕿Ꮒℹ𝐬 𝗶𝗌 𝛼𝔫 𝗲𝐱𝓪m𝞎𝙡𝖊
        ⟙h𝜾ꮪ i𝘴 𝝰𝒏 𝙚ᕽ𝗮m𝗽𝗜𝗲
        𝖳հ𝒊s 𝕚𝙨 𝖆𝑛 𝘦𝔁аm𝜌𝐈𝗲
        >>> versions = scr.generate(text, 1000, level=1)
        >>> assert len(versions) == len(set(versions))
        >>> # all unique
    """
    def __init__(
        self,
        confusables_file=os.path.join(
            __location__, "txt_files", "confusablesSummary.txt"
        ),
    ):
        # The confusables can be found at:
        # https://www.unicode.org/Public/security/13.0.0/confusables.txt
        self.confusables_file = confusables_file
        self.invisible_chars = invisible_chars
        self.dict_latin = dict_latin
        self._parse_unicode_file()
    def __str__(self):
        # Demonstrates level-4 scrambling on the object's own repr text.
        return self.scramble("<__main__.Scrambler object>", level=4)
    __repr__ = __str__
    def __enter__(self):
        return self
    def __exit__(self, exc_type, exc_value, exc_traceback):
        # Exceptions are reported to stderr but NOT suppressed (returns None).
        if exc_type:
            print(f"exc_type: {exc_type}", file=sys.stderr)
            print(f"exc_value: {exc_value}", file=sys.stderr)
            print(f"exc_traceback: {exc_traceback}", file=sys.stderr)
    def _parse_unicode_file(self) -> dict:
        """Populate self.unicode_dict (char -> list of confusable chars)
        by parsing self.confusables_file."""
        self.unicode_dict = {}
        file = open(self.confusables_file, encoding="utf-8")
        ls_lines_confusable = []
        # Skip the fixed-size header of the summary file.
        for _ in range(32):
            file.readline()
        for line in file:
            if line.startswith("#"):
                ls_lines_confusable.append(line[:-1])  # not taking the \n
        file.close()
        ls_lines_confusable = ls_lines_confusable[
            :-1
        ]  # not taking the last line (total)
        for line in ls_lines_confusable:
            _, char, *ls_chars = line.split("\t")
            # Only keep single-character source glyphs.
            if len(char) > 1:
                continue
            self.unicode_dict[char] = ls_chars
    def scramble(self, text: str, level: int = 1) -> str:
        """return the text scrambled
        :param text: the text to scramble
        :type text: str
        :param level: default to 1
        :type level: int, optional
        **level**:
            1: insert non printable characters within the text
            2: replace some latin letters with their Greek or Cyrillic equivalent
            3: insert non printable characters and replace some latin letters with their Greek or Cyrillic equivalent
            4: insert non printable characters and change every possible letter to a randomly picked unicode look-alike
        :return: the scrambled string
        :rtype: str
        """
        if level not in range(1, 5):
            raise ValueError(f"level {level} not implemented")
        new_text = ""
        if level == 1:
            # Interleave an invisible char after every original char.
            for char in text:
                new_text += char + choice(self.invisible_chars)
        elif level == 2:
            for char in text:
                new_text += choice(self.dict_latin.get(char, []) + [char])
            # Trailing space is stripped below by new_text[:-1].
            new_text += " "
        elif level == 3:
            for char in text:
                new_text += choice(self.dict_latin.get(char, []) + [char]) + choice(
                    self.invisible_chars
                )
        elif level == 4:
            for char in text:
                new_text += choice(self.unicode_dict.get(char, []) + [char]) + choice(
                    self.invisible_chars
                )
        else:
            # Unreachable: the range check above already rejects other levels.
            raise ValueError(f"level '{level}' not implemented")
        return new_text[:-1]
    def generate(self, text: str, n: int = 1000, level: int = 3) -> List[str]:
        """return a list containing n versions of the text jammed
        :param text: the text to be scrambled
        :type text: str
        :param n: the number of time the text should be scrambled, defaults to 1000
        :type n: int, optional
        :param level: the level of the scrambling, defaults to 3
        :type level: int, optional
        :return: a list of scrambled texts, all differents
        :rtype: List[str]
        .. code:: python
            >>> from text_scrambler import Scrambler
            >>> scr = Scrambler()
            >>> text = "A cranial nerve nucleus is a collection of neurons in the brain stem that is associated with one or more of the cranial nerves."
            >>> texts = scr.generate(text, 1000, level=1)
            >>> assert texts[0] != text
            >>> for scrambled_text in texts:
            ...     assert text != scrambled_text
            ...
            >>> print(texts[0])
            A cranial nerve nucleus is a collection of neurons in the brain stem that is associated with one or more of the cranial nerves.
            >>> # different from the original text
        """
        # NOTE(review): if fewer than n distinct scramblings exist (e.g. level 2
        # on text with no confusable letters), this loop never terminates —
        # confirm expected inputs before relying on large n.
        ls_new_text = []
        num_generated = 0
        while True:
            new_text = self.scramble(text, level=level)
            # Only count versions not seen before, so the result has no duplicates.
            if new_text not in ls_new_text:
                ls_new_text.append(new_text)
                num_generated += 1
            if num_generated == n:
                break
        return ls_new_text
| [
"inspect.currentframe",
"random.choice",
"os.path.join",
"os.getcwd"
] | [((326, 337), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (335, 337), False, 'import os\n'), ((2878, 2943), 'os.path.join', 'os.path.join', (['__location__', '"""txt_files"""', '"""confusablesSummary.txt"""'], {}), "(__location__, 'txt_files', 'confusablesSummary.txt')\n", (2890, 2943), False, 'import os\n'), ((371, 393), 'inspect.currentframe', 'inspect.currentframe', ([], {}), '()\n', (391, 393), False, 'import inspect\n'), ((5387, 5415), 'random.choice', 'choice', (['self.invisible_chars'], {}), '(self.invisible_chars)\n', (5393, 5415), False, 'from random import choice\n'), ((5706, 5734), 'random.choice', 'choice', (['self.invisible_chars'], {}), '(self.invisible_chars)\n', (5712, 5734), False, 'from random import choice\n'), ((5907, 5935), 'random.choice', 'choice', (['self.invisible_chars'], {}), '(self.invisible_chars)\n', (5913, 5935), False, 'from random import choice\n')] |
#!/usr/bin/env python3
"""Count the frequency of various phrases, given the path to the Python PEPs.
In Python PEPs, the opposite of “subclass” is almost always “base class” — just remember that the builtin is named super(), not base()! Stats:
216 base class
0 child class
10 derived class
12 parent class
372 subclass
10 super class
44 superclass
"""
import argparse
import os
import re
import sys
# Phrases tallied across the PEPs; "…es" plurals (e.g. "subclasses") are also
# counted by main().
TERMS = (
    'superclass',
    'super class',
    'subclass',
    'base class',
    'derived class',
    'parent class',
    'child class',
)

def main(argv):
    """Count occurrences of each TERMS phrase in a PEPs checkout and print them.

    argv: command-line arguments excluding the program name; must contain the
    path to a clone of the python/peps repository.
    """
    parser = argparse.ArgumentParser(description='PEP terminology counts')
    parser.add_argument('pepsdir', help='path to PEPs repo')
    try:
        args = parser.parse_args(argv)
    except SystemExit:
        print('\nTo checkout the PEPs from version control, git clone:'
              '\nhttps://github.com/python/peps.git', file=sys.stderr)
        raise

    peps = []
    for dirpath, dirnames, filenames in os.walk(args.pepsdir):
        for filename in filenames:
            if filename.endswith(('.rst', '.txt')):
                peps.append(os.path.join(dirpath, filename))

    counts = {term: 0 for term in TERMS}
    for pep in peps:
        with open(pep) as f:
            content = f.read()
        # Normalize to lowercase words separated by single spaces.  The extra
        # leading/trailing pad spaces let the ' term ' trick also match a term
        # at the very start or end of the document (previously missed).
        text = ' ' + ' '.join(re.findall(r'\w+', content.lower())) + ' '
        for term in TERMS:
            n = text.count(' ' + term + ' ')
            m = text.count(' ' + term + 'es ')  # simple plural, e.g. "subclasses"
            counts[term] += n + m

    for term in sorted(TERMS):
        print('{:5} {}'.format(counts[term], term))
if __name__ == '__main__':
    # Pass through the CLI args, minus the program name.
    main(sys.argv[1:])
| [
"os.walk",
"os.path.join",
"argparse.ArgumentParser"
] | [((589, 650), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""PEP terminology counts"""'}), "(description='PEP terminology counts')\n", (612, 650), False, 'import argparse\n'), ((996, 1017), 'os.walk', 'os.walk', (['args.pepsdir'], {}), '(args.pepsdir)\n', (1003, 1017), False, 'import os\n'), ((1134, 1165), 'os.path.join', 'os.path.join', (['dirpath', 'filename'], {}), '(dirpath, filename)\n', (1146, 1165), False, 'import os\n')] |
#!/usr/bin/env python
"""
Unit tests for Generalized One-Pass Sweep-line Algorithm
- test_regionsweep_simple
- test_regionsweep_random
"""
from typing import List
from unittest import TestCase
from sources.algorithms import \
RegionSweep, RegionSweepDebug, RegionSweepOverlaps
from sources.core import \
Region, RegionPair, RegionSet
class TestRegionSweep(TestCase):
  """Checks the one-pass sweep-line overlap algorithm against the
  brute-force pairwise overlap computation provided by RegionSet."""

  def _evaluate_regionsweep(self, regions: RegionSet, i: int) -> List[RegionPair]:
    # Run the sweep along dimension i with no extra event subscribers.
    evaluator = RegionSweepOverlaps.prepare(regions)
    return evaluator(i)

  def test_regionsweep_simple(self):
    regionset = RegionSet(dimension=2)
    for lower, upper in ([0, 0], [3, 5]), ([3, 1], [5, 5]), ([2, 4], [6, 6]):
      regionset.add(Region(lower, upper))
    for dimension in range(regionset.dimension):
      expected = regionset.overlaps(dimension)
      computed = self._evaluate_regionsweep(regionset, dimension)
      # Every brute-force pair must be found by the sweep, and vice versa
      # (equal lengths plus subset implies set equality).
      for pair in expected:
        self.assertTrue(pair in computed)
      self.assertEqual(len(expected), len(computed))

  def test_regionsweep_random(self):
    bounds = Region([0]*3, [100]*3)
    sizes = Region([0]*3, [0.5]*3)
    regionset = RegionSet.from_random(30, bounds, sizepc=sizes, precision=0)
    per_dimension = []
    for dimension in range(regionset.dimension):
      expected = regionset.overlaps(dimension)
      computed = self._evaluate_regionsweep(regionset, dimension)
      for pair in expected:
        self.assertTrue(pair in computed)
      self.assertEqual(len(expected), len(computed))
      per_dimension.append(computed)
    # Each sweep dimension should have produced at least one overlap pair...
    self.assertTrue(all([len(result) for result in per_dimension]))
    # ...and every pair found along dimension 0 must reappear — possibly
    # with its members swapped — in every other dimension's results.
    for first, second in per_dimension[0]:
      for d in range(1, regionset.dimension):
        self.assertTrue((first, second) in per_dimension[d] or
                        (second, first) in per_dimension[d])
| [
"sources.core.RegionSet",
"sources.algorithms.RegionSweepOverlaps.prepare",
"sources.core.Region"
] | [((629, 651), 'sources.core.RegionSet', 'RegionSet', ([], {'dimension': '(2)'}), '(dimension=2)\n', (638, 651), False, 'from sources.core import Region, RegionPair, RegionSet\n'), ((521, 571), 'sources.algorithms.RegionSweepOverlaps.prepare', 'RegionSweepOverlaps.prepare', (['regions', '*subscribers'], {}), '(regions, *subscribers)\n', (548, 571), False, 'from sources.algorithms import RegionSweep, RegionSweepDebug, RegionSweepOverlaps\n'), ((670, 692), 'sources.core.Region', 'Region', (['[0, 0]', '[3, 5]'], {}), '([0, 0], [3, 5])\n', (676, 692), False, 'from sources.core import Region, RegionPair, RegionSet\n'), ((712, 734), 'sources.core.Region', 'Region', (['[3, 1]', '[5, 5]'], {}), '([3, 1], [5, 5])\n', (718, 734), False, 'from sources.core import Region, RegionPair, RegionSet\n'), ((754, 776), 'sources.core.Region', 'Region', (['[2, 4]', '[6, 6]'], {}), '([2, 4], [6, 6])\n', (760, 776), False, 'from sources.core import Region, RegionPair, RegionSet\n'), ((1366, 1392), 'sources.core.Region', 'Region', (['([0] * 3)', '([100] * 3)'], {}), '([0] * 3, [100] * 3)\n', (1372, 1392), False, 'from sources.core import Region, RegionPair, RegionSet\n'), ((1397, 1423), 'sources.core.Region', 'Region', (['([0] * 3)', '([0.5] * 3)'], {}), '([0] * 3, [0.5] * 3)\n', (1403, 1423), False, 'from sources.core import Region, RegionPair, RegionSet\n')] |
import json
import requests
from rotkehlchen.assets.asset import Asset
from rotkehlchen.constants.timing import DEFAULT_TIMEOUT_TUPLE
from rotkehlchen.errors.misc import RemoteError
from rotkehlchen.errors.serialization import DeserializationError
from rotkehlchen.history.deserialization import deserialize_price
from rotkehlchen.types import Price
PRICE_API_URL = 'https://bisq.markets/api/ticker?market={symbol}_BTC'
def get_bisq_market_price(asset: Asset) -> Price:
    """Query the bisq.markets ticker for ``asset`` and return its price in BTC.

    Can raise:
    - RemoteError: if the request fails or the market doesn't exist
    - DeserializationError: if the returned data is not a valid price
    """
    url = PRICE_API_URL.format(symbol=asset.symbol)
    try:
        response = requests.get(url, timeout=DEFAULT_TIMEOUT_TUPLE)
    except requests.exceptions.RequestException as e:
        raise RemoteError(f'bisq.markets request {url} failed due to {str(e)}') from e

    try:
        json_data = response.json()
    except json.decoder.JSONDecodeError as e:
        raise RemoteError(
            f'Failed to read json response from bisq.markets. {response.text}. {str(e)}',
        ) from e

    # The API reports unknown markets via an 'error' key rather than an
    # HTTP error status.
    if 'error' in json_data:
        raise RemoteError(f'Request data from bisq.markets {url} is not valid {json_data["error"]}')

    try:
        last_price = json_data['last']
    except KeyError as e:
        raise DeserializationError(
            f'Response from bisq.markets didnt contain expected key "last". {json_data}',
        ) from e
    return deserialize_price(last_price)
| [
"rotkehlchen.history.deserialization.deserialize_price",
"rotkehlchen.errors.misc.RemoteError",
"requests.get",
"rotkehlchen.errors.serialization.DeserializationError"
] | [((1541, 1565), 'rotkehlchen.history.deserialization.deserialize_price', 'deserialize_price', (['price'], {}), '(price)\n', (1558, 1565), False, 'from rotkehlchen.history.deserialization import deserialize_price\n'), ((796, 844), 'requests.get', 'requests.get', (['url'], {'timeout': 'DEFAULT_TIMEOUT_TUPLE'}), '(url, timeout=DEFAULT_TIMEOUT_TUPLE)\n', (808, 844), False, 'import requests\n'), ((1245, 1331), 'rotkehlchen.errors.misc.RemoteError', 'RemoteError', (['f"""Request data from bisq.markets {url} is not valid {data[\'error\']}"""'], {}), '(\n f"Request data from bisq.markets {url} is not valid {data[\'error\']}")\n', (1256, 1331), False, 'from rotkehlchen.errors.misc import RemoteError\n'), ((1406, 1504), 'rotkehlchen.errors.serialization.DeserializationError', 'DeserializationError', (['f"""Response from bisq.markets didnt contain expected key "last". {data}"""'], {}), '(\n f\'Response from bisq.markets didnt contain expected key "last". {data}\')\n', (1426, 1504), False, 'from rotkehlchen.errors.serialization import DeserializationError\n')] |
def game_main():
    """Entry point: initialise the engine and pygame, then run cleanup."""
    ### IMPORTS ###
    # Imports are function-local so nothing is loaded unless the game runs.
    import colorama
    from colorama import Fore
    from engine import engineScript
    from engine import clearScript
    from os import environ
    # Must be set before importing pygame to silence its banner.
    environ['PYGAME_HIDE_SUPPORT_PROMPT'] = '1'
    import pygame
    ### ENGINE INITIALIZATION ###
    # NOTE(review): looks like the list of required config keys — confirm
    # against engineScript.InitEngine.
    settings = ["width", "height"]
    engineScript.InitEngine(Fore, settings)
    pygame.init()
    ### PROGRAM TERMINATED ###
    clearScript.run()
if __name__ == "__main__":
    game_main()
"engine.clearScript.run",
"engine.engineScript.InitEngine",
"pygame.init"
] | [((339, 378), 'engine.engineScript.InitEngine', 'engineScript.InitEngine', (['Fore', 'settings'], {}), '(Fore, settings)\n', (362, 378), False, 'from engine import engineScript\n'), ((384, 397), 'pygame.init', 'pygame.init', ([], {}), '()\n', (395, 397), False, 'import pygame\n'), ((441, 458), 'engine.clearScript.run', 'clearScript.run', ([], {}), '()\n', (456, 458), False, 'from engine import clearScript\n')] |
__all__ = ['FavIcon']  # explicit public API of this module
from dataclasses import dataclass, field
from html import escape as html_escape
@dataclass
class FavIcon:
    """A favicon ``<link>`` tag.

    Attributes:
        href: URL of the icon resource.
        rel: link relation (default ``"icon"``).
        mimetype: MIME type of the icon (default ``"image/x-icon"``).
        rendered: the HTML ``<link>`` tag, computed after init.
    """
    href: str
    rel: str = "icon"
    mimetype: str = "image/x-icon"
    rendered: str = field(init=False, repr=False)

    def __post_init__(self):
        # Escape ALL interpolated attributes (previously only href was
        # escaped, so a quote in rel/mimetype could break out of the
        # attribute — an HTML-injection gap). html.escape quotes '"' too.
        self.rendered = (
            f'<link rel="{html_escape(self.rel)}" '
            f'type="{html_escape(self.mimetype)}" '
            f'href="{html_escape(self.href)}">'
        )
| [
"html.escape",
"dataclasses.field"
] | [((221, 250), 'dataclasses.field', 'field', ([], {'init': '(False)', 'repr': '(False)'}), '(init=False, repr=False)\n', (226, 250), False, 'from dataclasses import dataclass, field\n'), ((360, 382), 'html.escape', 'html_escape', (['self.href'], {}), '(self.href)\n', (371, 382), True, 'from html import escape as html_escape\n')] |
import tensorflow as tf
from KENN2.layers.residual.KnowledgeEnhancer import KnowledgeEnhancer
class Kenn(tf.keras.layers.Layer):
    """Keras layer that applies a KnowledgeEnhancer residual to predicate
    pre-activations, nudging them toward satisfying logical clauses."""
    def __init__(self, predicates, clauses, activation=lambda x: x, initial_clause_weight=0.5, save_training_data=False, **kwargs):
        """Initialize the knowledge base.
        :param predicates: a list of predicates names
        :param clauses: a list of constraints. Each constraint is a string on the form:
        clause_weight:clause
        The clause_weight should be either a real number (in such a case this value is fixed) or an underscore
        (in this case the weight will be a tensorflow variable and learned during training).
        The clause must be represented as a list of literals separated by commas (that represent disjunctions).
        Negation must specified by adding the letter 'n' before the predicate name.
        An example:
        _:nDog,Animal
        """
        super(Kenn, self).__init__(**kwargs)
        self.predicates = predicates
        self.clauses = clauses
        self.activation = activation
        self.initial_clause_weight = initial_clause_weight
        self.save_training_data = save_training_data
        # Created lazily in build() once the input shape is known.
        self.knowledge_enhancer = None
    def build(self, input_shape):
        """Build the layer
        :param input_shape: the input shape
        """
        self.knowledge_enhancer = KnowledgeEnhancer(
            self.predicates, self.clauses, self.initial_clause_weight, self.save_training_data)
        super(Kenn, self).build(input_shape)
    def call(self, inputs, **kwargs):
        """Improve the satisfaction level of a set of clauses.
        :param inputs: the tensor containing predicates' pre-activation values for many entities
        :return: final preactivations"""
        # When save_training_data is set, the enhancer also returns the list
        # of per-clause deltas, which is passed through to the caller.
        if self.save_training_data:
            deltas, deltas_list = self.knowledge_enhancer(inputs)
            return self.activation(inputs + deltas), deltas_list
        else:
            deltas = self.knowledge_enhancer(inputs)
            return self.activation(inputs + deltas)
    def get_config(self):
        # Serialize constructor args so the layer can be recreated;
        # save_training_data is intentionally not serialized here.
        config = super(Kenn, self).get_config()
        config.update({'predicates': self.predicates})
        config.update({'clauses': self.clauses})
        config.update({'activation': self.activation})
        config.update({'initial_clause_weight': self.initial_clause_weight})
        # config['output_size'] = # say self. _output_size if you store the argument in __init__
        return config
| [
"KENN2.layers.residual.KnowledgeEnhancer.KnowledgeEnhancer"
] | [((1396, 1501), 'KENN2.layers.residual.KnowledgeEnhancer.KnowledgeEnhancer', 'KnowledgeEnhancer', (['self.predicates', 'self.clauses', 'self.initial_clause_weight', 'self.save_training_data'], {}), '(self.predicates, self.clauses, self.initial_clause_weight,\n self.save_training_data)\n', (1413, 1501), False, 'from KENN2.layers.residual.KnowledgeEnhancer import KnowledgeEnhancer\n')] |
from lake.top.memory_interface import MemoryPort, MemoryPortType
from lake.top.memory_controller import MemoryController
from kratos import *
from lake.attributes.config_reg_attr import ConfigRegAttr
from lake.passes.passes import lift_config_reg
from lake.modules.reg_fifo import RegFIFO
import kratos as kts
class StrgRAM(MemoryController):
'''
Storage RAM
Does ROM/RAM from storage
'''
    def __init__(self,
                 data_width=16,
                 banks=1,
                 memory_width=64,
                 memory_depth=512,
                 num_tiles=1,
                 rw_same_cycle=False,  # Same as separate addresses
                 read_delay=1,
                 addr_width=16,
                 prioritize_write=True):
        """Elaborate the storage-RAM wrapper hardware.

        Builds the address decode, bank select and (for fetch width > 1)
        the read-modify-write state machine around the raw storage banks.
        """
        super().__init__("strg_ram", debug=True)
        # Generation parameters
        self.banks = banks
        self.data_width = data_width
        self.memory_width = memory_width
        self.memory_depth = memory_depth
        self.num_tiles = num_tiles
        self.rw_same_cycle = rw_same_cycle
        self.read_delay = read_delay
        self.addr_width = addr_width
        # fw_int = words per memory row (fetch width)
        self.fw_int = int(self.memory_width / self.data_width)
        self.prioritize_write = prioritize_write
        self.bank_width = clog2(self.banks)
        self.word_width = max(1, clog2(self.fw_int))
        self.mem_addr_width = clog2(self.num_tiles * self.memory_depth)
        # Bit offset of the bank-select field within a flat address.
        self.b_a_off = clog2(self.fw_int) + clog2(self.num_tiles * self.memory_depth)
        # assert banks > 1 or rw_same_cycle is True or self.fw_int > 1, \
        #     "Can't sustain throughput with this setup. Need potential bandwidth for " + \
        #     "1 write and 1 read in a cycle - try using more banks or a macro that supports 1R1W"
        # Clock and Reset
        self._clk = self.clock("clk")
        self._rst_n = self.reset("rst_n")
        # Inputs + Outputs
        self._wen = self.input("wen", 1)
        self._ren = self.input("ren", 1)
        self._data_in = self.input("data_in", self.data_width)
        self._wr_addr_in = self.input("wr_addr_in", self.addr_width)
        self._rd_addr_in = self.input("rd_addr_in", self.addr_width)
        self._wr_addr = self.var("wr_addr", self.addr_width)
        self._rd_addr = self.var("rd_addr", self.addr_width)
        # Separate addressing...
        if self.rw_same_cycle:
            self.wire(self._wr_addr, self._wr_addr_in)
            self.wire(self._rd_addr, self._rd_addr_in)
        # Use the wr addr for both in this case...
        else:
            self.wire(self._wr_addr, self._wr_addr_in)
            self.wire(self._rd_addr, self._wr_addr_in)
        self._data_out = self.output("data_out", self.data_width)
        self._valid_out = self.output("valid_out", 1)
        # get relevant signals from the storage banks
        self._data_from_strg = self.input("data_from_strg", self.data_width,
                                          size=(self.banks,
                                                self.fw_int),
                                          explicit_array=True,
                                          packed=True)
        self._wen_addr = self.var("wen_addr", self.addr_width,
                                  size=self.banks,
                                  explicit_array=True,
                                  packed=True)
        self._ren_addr = self.var("ren_addr", self.addr_width,
                                  size=self.banks,
                                  explicit_array=True,
                                  packed=True)
        self._data_to_strg = self.output("data_to_strg", self.data_width,
                                         size=(self.banks,
                                               self.fw_int),
                                         explicit_array=True,
                                         packed=True)
        self._wen_to_strg = self.output("wen_to_strg", self.banks)
        self._ren_to_strg = self.output("ren_to_strg", self.banks)
        self._addr_out = self.output("addr_out", self.mem_addr_width,
                                     size=self.banks,
                                     packed=True,
                                     explicit_array=True)
        self._rd_bank = self.var("rd_bank", max(1, clog2(self.banks)))
        self.set_read_bank()
        self._rd_valid = self.var("rd_valid", 1)
        self.set_read_valid()
        if self.fw_int == 1:
            self.wire(self._valid_out, self._rd_valid)
        # Fetch width of 1 is simpler...
        if self.fw_int == 1:
            # Set data to storage
            if self.banks == 1:
                self.wire(self._wen_to_strg, self._wen)
                self.wire(self._ren_to_strg, self._ren)
                self.wire(self._data_to_strg[0], self._data_in)
                # Write address wins when both strobes could apply.
                self.wire(self._addr_out[0],
                          kts.ternary(self._wen_to_strg[0],
                                      self._wr_addr[self.mem_addr_width - 1, 0],
                                      self._rd_addr[self.mem_addr_width - 1, 0]))
            else:
                for i in range(self.banks):
                    self.wire(self._data_to_strg[i], self._data_in)
                    self.add_code(self.decode_wen, idx=i)
                    self.add_code(self.decode_ren, idx=i)
                    self.wire(self._addr_out[i],
                              kts.ternary(self._wen_to_strg[i],
                                          self._wr_addr[self.mem_addr_width - 1, 0],
                                          self._rd_addr[self.mem_addr_width - 1, 0]))
            self.wire(self._data_out, self._data_from_strg[self._rd_bank])
        elif self.read_delay == 1:
            # Fetch width > 1 with a 1-cycle read: writes become a
            # read-modify-write sequence over a whole memory row.
            self._data_to_write = self.var("data_to_write", self.data_width)
            self._addr_to_write = self.var("addr_to_write", self.addr_width)
            self.add_code(self.set_dat_to_write)
            self.add_code(self.set_addr_to_write)
            self._write_gate = self.var("write_gate", 1)
            self._read_gate = self.var("read_gate", 1)
            self._data_combined = self.var("data_combined", self.data_width,
                                           size=self.fw_int,
                                           explicit_array=True,
                                           packed=True)
            for i in range(self.banks):
                self.wire(self._data_to_strg[i], self._data_combined)
            # read-modify-write implies we need to stall upstream
            self._ready = self.output("ready", 1)
            # Otherwise implement the state machine for read-modify-write
            self.rmw_fsm = self.add_fsm("r_w_seq", reset_high=False)
            IDLE = self.rmw_fsm.add_state("IDLE")
            READ = self.rmw_fsm.add_state("READ")
            MODIFY = self.rmw_fsm.add_state("MODIFY")
            DEFAULT = self.rmw_fsm.add_state("_DEFAULT")
            self.rmw_fsm.output(self._ready)
            self.rmw_fsm.output(self._valid_out)
            self.rmw_fsm.output(self._data_out)
            self.rmw_fsm.output(self._write_gate)
            self.rmw_fsm.output(self._read_gate)
            # In IDLE we go to a read state if reading, and modify state
            # if writing....
            IDLE.next(IDLE, ~(self._wen) & ~(self._ren))
            IDLE.next(READ, self._ren & ~self._wen)
            IDLE.next(MODIFY, self._wen)
            # OUT
            IDLE.output(self._ready, 1)
            IDLE.output(self._valid_out, 0)
            IDLE.output(self._data_out, 0)
            IDLE.output(self._write_gate, 0)
            IDLE.output(self._read_gate, 1)
            # In READ, we effectively use the same transitions as IDLE
            READ.next(IDLE, ~self._wen & ~self._ren)
            READ.next(READ, self._ren & ~self._wen)
            READ.next(MODIFY, self._wen)
            # OUT
            READ.output(self._ready, 1)
            READ.output(self._valid_out, 1)
            # Select the requested word out of the fetched row.
            READ.output(self._data_out,
                        self._data_from_strg[self._rd_bank][self._addr_to_write[self.word_width - 1, 0]])
            READ.output(self._write_gate, 0)
            READ.output(self._read_gate, 1)
            # In MODIFY we always go back to idle
            MODIFY.next(IDLE, const(1, 1))
            MODIFY.output(self._ready, 0)
            MODIFY.output(self._valid_out, 0)
            MODIFY.output(self._data_out, 0)
            MODIFY.output(self._write_gate, 1)
            MODIFY.output(self._read_gate, 0)
            # In DEFAULT we always stick in DEFAULT because it's over...
            DEFAULT.next(DEFAULT, const(1, 1))
            DEFAULT.output(self._ready, 0)
            DEFAULT.output(self._valid_out, 0)
            DEFAULT.output(self._data_out, 0)
            DEFAULT.output(self._write_gate, 0)
            DEFAULT.output(self._read_gate, 0)
            self.rmw_fsm.set_start_state(IDLE)
            if self.banks == 1:
                self.wire(self._ren_to_strg, (self._wen | self._ren) & self._read_gate)
                self.wire(self._wen_to_strg, self._write_gate)
            else:
                for i in range(self.banks):
                    self.add_code(self.set_wen_rmw, idx=i)
                    self.add_code(self.set_ren_rmw, idx=i)
            for i in range(self.banks):
                self.add_code(self.set_addr_rmw, idx=i)
            for i in range(self.fw_int):
                self.add_code(self.set_data_combined, idx=i)
        # If read delay is 0, we can rmw in the same cycle (TIMING?)
        else:
            assert self.read_delay == 0
            raise NotImplementedError
        # Describe the single read-write port exposed to the memory system.
        self.base_ports = [[None]]
        rw_port = MemoryPort(MemoryPortType.READWRITE)
        rw_port_intf = rw_port.get_port_interface()
        rw_port_intf['data_in'] = self._data_to_strg
        rw_port_intf['data_out'] = self._data_from_strg
        rw_port_intf['write_addr'] = self._addr_out
        rw_port_intf['write_enable'] = self._wen_to_strg
        rw_port_intf['read_addr'] = self._addr_out
        rw_port_intf['read_enable'] = self._ren_to_strg
        rw_port.annotate_port_signals()
        self.base_ports[0][0] = rw_port
    def set_read_bank(self):
        """Drive ``self._rd_bank`` - the bank index read data is served from.

        With a single bank it is hardwired to 0. With multiple banks it is
        the ``bank_width``-wide field of the read address at bit offset
        ``b_a_off``, registered by one cycle when reads have a cycle of
        latency (``read_delay == 1``) and combinational otherwise.
        """
        if self.banks == 1:
            # Only one bank exists, so the read bank is the constant 0.
            self.wire(self._rd_bank, kts.const(0, 1))
        else:
            # The read bank is comb if no delay, otherwise delayed
            if self.read_delay == 1:
                @always_ff((posedge, "clk"), (negedge, "rst_n"))
                def read_bank_ff(self):
                    # Sequential: posedge clk, asynchronous active-low reset.
                    if ~self._rst_n:
                        self._rd_bank = 0
                    else:
                        # Slice out the bank field of the read address.
                        self._rd_bank = \
                            self._rd_addr[self.b_a_off + self.bank_width - 1, self.b_a_off]
                self.add_code(read_bank_ff)
            else:
                @always_comb
                def read_bank_comb(self):
                    # Combinational: bank tracks the read address directly.
                    self._rd_bank = \
                        self._rd_addr[self.b_a_off + self.bank_width - 1, self.b_a_off]
                self.add_code(read_bank_comb)
    def set_read_valid(self):
        """Drive ``self._rd_valid`` - whether read data is valid.

        Registered when reads take a cycle (``read_delay == 1``),
        combinational otherwise. Unless reads and writes may share a cycle
        (``rw_same_cycle``), a concurrent write takes priority and the read
        is not valid.
        """
        # The read valid is comb if no delay, otherwise delayed
        if self.read_delay == 1:
            if self.rw_same_cycle:
                @always_ff((posedge, "clk"), (negedge, "rst_n"))
                def read_valid_ff(self):
                    if ~self._rst_n:
                        self._rd_valid = 0
                    else:
                        # Don't need write priority if both go at once
                        self._rd_valid = self._ren
                self.add_code(read_valid_ff)
            else:
                @always_ff((posedge, "clk"), (negedge, "rst_n"))
                def read_valid_ff(self):
                    if ~self._rst_n:
                        self._rd_valid = 0
                    else:
                        # Assumes write priority
                        self._rd_valid = self._ren & ~self._wen
                self.add_code(read_valid_ff)
        else:
            if self.rw_same_cycle:
                self.wire(self._rd_valid, self._ren)
            else:
                self.wire(self._rd_valid, self._ren & ~self._wen)
    @always_ff((posedge, "clk"), (negedge, "rst_n"))
    def set_dat_to_write(self):
        # Capture the incoming write data (posedge clk, async active-low
        # reset) so it is still available on the later read-modify-write
        # write-back cycle.
        if ~self._rst_n:
            self._data_to_write = 0
        else:
            self._data_to_write = self._data_in
    @always_ff((posedge, "clk"), (negedge, "rst_n"))
    def set_addr_to_write(self):
        # Capture the incoming write address (posedge clk, async active-low
        # reset); used during the read-modify-write write-back phase.
        if ~self._rst_n:
            self._addr_to_write = 0
        else:
            self._addr_to_write = self._wr_addr
    @always_comb
    def decode_wen(self, idx):
        # Per-bank write enable: pass _wen through only when the bank field
        # of the write address selects bank `idx`.
        self._wen_to_strg[idx] = \
            self._wen & (self._wr_addr[self.b_a_off + self.bank_width - 1, self.b_a_off] == idx)
    @always_comb
    def decode_ren(self, idx):
        # Per-bank read enable: pass _ren through only when the bank field
        # of the read address selects bank `idx`.
        self._ren_to_strg[idx] = \
            self._ren & (self._rd_addr[self.b_a_off + self.bank_width - 1, self.b_a_off] == idx)
    @always_comb
    def set_ren_rmw(self, idx):
        # Read-modify-write flavor of the read enable: any access (read or
        # write) begins with a read of the addressed bank, gated by the
        # FSM's read_gate.
        self._ren_to_strg[idx] = \
            ((self._wen | self._ren) & self._read_gate &
             (self._rd_addr[self.b_a_off + self.bank_width - 1, self.b_a_off] == idx))
    @always_comb
    def set_wen_rmw(self, idx):
        # Read-modify-write write enable: the FSM's write_gate commits the
        # merged word to the bank selected by the registered write address.
        self._wen_to_strg[idx] = \
            self._write_gate & (self._addr_to_write[self.b_a_off + self.bank_width - 1, self.b_a_off] == idx)
    @always_comb
    def set_addr_rmw(self, idx):
        # Default: present the word-group portion of the read address.
        self._addr_out[idx] = self._rd_addr[self.mem_addr_width + self.word_width - 1, self.word_width]
        # If we are performing the write
        if self._wen & ~self._write_gate:
            self._addr_out[idx] = self._wr_addr[self.mem_addr_width + self.word_width - 1, self.word_width]
        elif self._write_gate:
            # Write-back phase: use the registered address of the pending write.
            self._addr_out[idx] = \
                self._addr_to_write[self.mem_addr_width + self.word_width - 1, self.word_width]
    @always_comb
    def set_data_combined(self, idx):
        # Merge step of the read-modify-write: splice the registered write
        # word into the wide word fetched from storage.
        # If the word matches the index, use the data to write
        # (replace the word)
        if self._addr_to_write[self.word_width - 1, 0] == idx:
            self._data_combined[idx] = self._data_to_write
        # Otherwise keep the data
        else:
            self._data_combined[idx] = self._data_from_strg[self._rd_bank][idx]
def get_memory_ports(self):
return self.base_ports
def get_bitstream(self, config_json):
config = []
return config
# raise NotImplementedError
def get_config_mode_str(self):
return "ROM"
if __name__ == "__main__":
    # Standalone elaboration: generate SystemVerilog for the storage RAM,
    # lifting configuration registers to the top level.
    stg_dut = StrgRAM()
    verilog(stg_dut, filename="strg_ram.sv",
            additional_passes={"lift config regs": lift_config_reg})
| [
"kratos.ternary",
"kratos.const",
"lake.top.memory_interface.MemoryPort"
] | [((9747, 9783), 'lake.top.memory_interface.MemoryPort', 'MemoryPort', (['MemoryPortType.READWRITE'], {}), '(MemoryPortType.READWRITE)\n', (9757, 9783), False, 'from lake.top.memory_interface import MemoryPort, MemoryPortType\n'), ((10336, 10351), 'kratos.const', 'kts.const', (['(0)', '(1)'], {}), '(0, 1)\n', (10345, 10351), True, 'import kratos as kts\n'), ((4921, 5044), 'kratos.ternary', 'kts.ternary', (['self._wen_to_strg[0]', 'self._wr_addr[self.mem_addr_width - 1, 0]', 'self._rd_addr[self.mem_addr_width - 1, 0]'], {}), '(self._wen_to_strg[0], self._wr_addr[self.mem_addr_width - 1, 0],\n self._rd_addr[self.mem_addr_width - 1, 0])\n', (4932, 5044), True, 'import kratos as kts\n'), ((5443, 5566), 'kratos.ternary', 'kts.ternary', (['self._wen_to_strg[i]', 'self._wr_addr[self.mem_addr_width - 1, 0]', 'self._rd_addr[self.mem_addr_width - 1, 0]'], {}), '(self._wen_to_strg[i], self._wr_addr[self.mem_addr_width - 1, 0],\n self._rd_addr[self.mem_addr_width - 1, 0])\n', (5454, 5566), True, 'import kratos as kts\n')] |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Second migration for the mdot_rest app.

    Creates the IntendedAudience and Resource models, strips the resource
    metadata fields that previously lived on ResourceLink, and re-points
    ResourceLink at Resource via new link_type/slug/title/url fields plus
    many-to-many relations from both IntendedAudience and ResourceLink.
    """

    dependencies = [
        ('mdot_rest', '0001_initial'),
    ]

    operations = [
        # --- new models -------------------------------------------------
        migrations.CreateModel(
            name='IntendedAudience',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(max_length=30)),
                ('slug', models.SlugField(max_length=30)),
            ],
        ),
        migrations.CreateModel(
            name='Resource',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(max_length=60)),
                ('slug', models.SlugField(max_length=60)),
                ('feature_desc', models.CharField(max_length=120)),
                ('featured', models.BooleanField(default=False)),
                ('accessible', models.BooleanField(default=False)),
                ('responsive_web', models.BooleanField(default=False)),
                ('created_date', models.DateTimeField(auto_now_add=True)),
                ('last_modified', models.DateTimeField(auto_now=True)),
            ],
        ),
        # --- fields moving off ResourceLink (now owned by Resource) -----
        migrations.RemoveField(
            model_name='resourcelink',
            name='Google_Play_url',
        ),
        migrations.RemoveField(
            model_name='resourcelink',
            name='Windows_Store_url',
        ),
        migrations.RemoveField(
            model_name='resourcelink',
            name='created_date',
        ),
        migrations.RemoveField(
            model_name='resourcelink',
            name='feature_desc',
        ),
        migrations.RemoveField(
            model_name='resourcelink',
            name='iTunes_url',
        ),
        migrations.RemoveField(
            model_name='resourcelink',
            name='last_modified',
        ),
        migrations.RemoveField(
            model_name='resourcelink',
            name='name',
        ),
        migrations.RemoveField(
            model_name='resourcelink',
            name='short_desc',
        ),
        migrations.RemoveField(
            model_name='resourcelink',
            name='support_url',
        ),
        migrations.RemoveField(
            model_name='resourcelink',
            name='web_url',
        ),
        # --- new ResourceLink shape: one typed URL per row ---------------
        migrations.AddField(
            model_name='resourcelink',
            name='link_type',
            field=models.CharField(default='WEB', max_length=3, choices=[(b'AND', b'Android'), (b'IOS', b'iOS'), (b'WEB', b'Web'), (b'WIP', b'Windows Phone')]),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='resourcelink',
            name='slug',
            field=models.SlugField(default='default_slug', max_length=60),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='resourcelink',
            name='title',
            field=models.CharField(default='default_title', max_length=60),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='resourcelink',
            name='url',
            field=models.URLField(default='default_url'),
            preserve_default=False,
        ),
        # --- relations back to the new Resource model --------------------
        migrations.AddField(
            model_name='intendedaudience',
            name='resource',
            field=models.ManyToManyField(to='mdot_rest.Resource'),
        ),
        migrations.AddField(
            model_name='resourcelink',
            name='resource',
            field=models.ManyToManyField(to='mdot_rest.Resource'),
        ),
    ]
| [
"django.db.models.ManyToManyField",
"django.db.models.DateTimeField",
"django.db.models.BooleanField",
"django.db.models.SlugField",
"django.db.models.AutoField",
"django.db.models.URLField",
"django.db.migrations.RemoveField",
"django.db.models.CharField"
] | [((1353, 1426), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""resourcelink"""', 'name': '"""Google_Play_url"""'}), "(model_name='resourcelink', name='Google_Play_url')\n", (1375, 1426), False, 'from django.db import models, migrations\n'), ((1471, 1546), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""resourcelink"""', 'name': '"""Windows_Store_url"""'}), "(model_name='resourcelink', name='Windows_Store_url')\n", (1493, 1546), False, 'from django.db import models, migrations\n'), ((1591, 1661), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""resourcelink"""', 'name': '"""created_date"""'}), "(model_name='resourcelink', name='created_date')\n", (1613, 1661), False, 'from django.db import models, migrations\n'), ((1706, 1776), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""resourcelink"""', 'name': '"""feature_desc"""'}), "(model_name='resourcelink', name='feature_desc')\n", (1728, 1776), False, 'from django.db import models, migrations\n'), ((1821, 1889), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""resourcelink"""', 'name': '"""iTunes_url"""'}), "(model_name='resourcelink', name='iTunes_url')\n", (1843, 1889), False, 'from django.db import models, migrations\n'), ((1934, 2005), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""resourcelink"""', 'name': '"""last_modified"""'}), "(model_name='resourcelink', name='last_modified')\n", (1956, 2005), False, 'from django.db import models, migrations\n'), ((2050, 2112), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""resourcelink"""', 'name': '"""name"""'}), "(model_name='resourcelink', name='name')\n", (2072, 2112), False, 'from django.db import models, migrations\n'), ((2157, 2225), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], 
{'model_name': '"""resourcelink"""', 'name': '"""short_desc"""'}), "(model_name='resourcelink', name='short_desc')\n", (2179, 2225), False, 'from django.db import models, migrations\n'), ((2270, 2339), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""resourcelink"""', 'name': '"""support_url"""'}), "(model_name='resourcelink', name='support_url')\n", (2292, 2339), False, 'from django.db import models, migrations\n'), ((2384, 2449), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""resourcelink"""', 'name': '"""web_url"""'}), "(model_name='resourcelink', name='web_url')\n", (2406, 2449), False, 'from django.db import models, migrations\n'), ((2602, 2747), 'django.db.models.CharField', 'models.CharField', ([], {'default': '"""WEB"""', 'max_length': '(3)', 'choices': "[(b'AND', b'Android'), (b'IOS', b'iOS'), (b'WEB', b'Web'), (b'WIP',\n b'Windows Phone')]"}), "(default='WEB', max_length=3, choices=[(b'AND', b'Android'),\n (b'IOS', b'iOS'), (b'WEB', b'Web'), (b'WIP', b'Windows Phone')])\n", (2618, 2747), False, 'from django.db import models, migrations\n'), ((2903, 2958), 'django.db.models.SlugField', 'models.SlugField', ([], {'default': '"""default_slug"""', 'max_length': '(60)'}), "(default='default_slug', max_length=60)\n", (2919, 2958), False, 'from django.db import models, migrations\n'), ((3119, 3175), 'django.db.models.CharField', 'models.CharField', ([], {'default': '"""default_title"""', 'max_length': '(60)'}), "(default='default_title', max_length=60)\n", (3135, 3175), False, 'from django.db import models, migrations\n'), ((3334, 3372), 'django.db.models.URLField', 'models.URLField', ([], {'default': '"""default_url"""'}), "(default='default_url')\n", (3349, 3372), False, 'from django.db import models, migrations\n'), ((3540, 3587), 'django.db.models.ManyToManyField', 'models.ManyToManyField', ([], {'to': '"""mdot_rest.Resource"""'}), "(to='mdot_rest.Resource')\n", (3562, 3587), False, 
'from django.db import models, migrations\n'), ((3715, 3762), 'django.db.models.ManyToManyField', 'models.ManyToManyField', ([], {'to': '"""mdot_rest.Resource"""'}), "(to='mdot_rest.Resource')\n", (3737, 3762), False, 'from django.db import models, migrations\n'), ((347, 440), 'django.db.models.AutoField', 'models.AutoField', ([], {'verbose_name': '"""ID"""', 'serialize': '(False)', 'auto_created': '(True)', 'primary_key': '(True)'}), "(verbose_name='ID', serialize=False, auto_created=True,\n primary_key=True)\n", (363, 440), False, 'from django.db import models, migrations\n'), ((464, 495), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(30)'}), '(max_length=30)\n', (480, 495), False, 'from django.db import models, migrations\n'), ((523, 554), 'django.db.models.SlugField', 'models.SlugField', ([], {'max_length': '(30)'}), '(max_length=30)\n', (539, 554), False, 'from django.db import models, migrations\n'), ((688, 781), 'django.db.models.AutoField', 'models.AutoField', ([], {'verbose_name': '"""ID"""', 'serialize': '(False)', 'auto_created': '(True)', 'primary_key': '(True)'}), "(verbose_name='ID', serialize=False, auto_created=True,\n primary_key=True)\n", (704, 781), False, 'from django.db import models, migrations\n'), ((805, 836), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(60)'}), '(max_length=60)\n', (821, 836), False, 'from django.db import models, migrations\n'), ((864, 895), 'django.db.models.SlugField', 'models.SlugField', ([], {'max_length': '(60)'}), '(max_length=60)\n', (880, 895), False, 'from django.db import models, migrations\n'), ((931, 963), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(120)'}), '(max_length=120)\n', (947, 963), False, 'from django.db import models, migrations\n'), ((995, 1029), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (1014, 1029), False, 'from django.db import models, migrations\n'), 
((1063, 1097), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (1082, 1097), False, 'from django.db import models, migrations\n'), ((1135, 1169), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (1154, 1169), False, 'from django.db import models, migrations\n'), ((1205, 1244), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (1225, 1244), False, 'from django.db import models, migrations\n'), ((1281, 1316), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)'}), '(auto_now=True)\n', (1301, 1316), False, 'from django.db import models, migrations\n')] |
import numpy as np
def validate_1d_array(x, size=None):
    """Validate that *x* is a one-dimensional numpy array.

    Optionally also checks that it holds exactly *size* elements.
    Raises AssertionError on the first failed check.
    """
    assert isinstance(x, np.ndarray), 'Expecting a numpy array.'
    assert x.ndim == 1, 'Expecting a one-dimensional array.'
    if size is None:
        return
    assert x.size == size, 'Array size is different from expected.'
def validate_2d_array(x, n_cols=None, n_rows=None):
    """Validate that *x* is a two-dimensional numpy array.

    Optionally also checks the number of rows and/or columns.
    Raises AssertionError on the first failed check.
    """
    assert isinstance(x, np.ndarray), 'Expecting a numpy array.'
    assert x.ndim == 2, 'Expecting a two-dimensional array.'
    rows_found, cols_found = x.shape
    if n_rows is not None:
        assert rows_found == n_rows, 'Array size is different from expected.'
    if n_cols is not None:
        assert cols_found == n_cols, 'Number of columns is different from expected.'
def validate_integer_array(x):
    """Validate that every element of *x* is a whole number."""
    rounded = np.round(x)
    assert (rounded == x).all(), 'Expecting an array of integers.'
def validate_positive_array(x):
    """Validate that every element of *x* is strictly positive."""
    all_positive = (x > 0).all()
    assert all_positive, 'Expecting array of positive elements.'
def validate_non_negative_array(x):
    """Validate that no element of *x* is negative."""
    all_non_negative = (x >= 0).all()
    assert all_non_negative, 'Expecting array of non-negative elements.'
| [
"numpy.round"
] | [((885, 896), 'numpy.round', 'np.round', (['x'], {}), '(x)\n', (893, 896), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
import argparse
import sys
import yaml
import logging
import logging.config
from datetime import datetime
from classes import pcdm,ocr,util
from handler import ndnp
import rdflib
from rdflib import RDF
from lxml import etree as ET
from classes.exceptions import RESTAPIException, DataReadException
logger = logging.getLogger(__name__)
def extract(fcrepo, uri):
    """Create text-block annotations for the NDNP page at *uri*.

    Opens a transaction on *fcrepo*, loads the page, creates and updates
    one annotation object per OCR text block, then commits. On REST or
    data-read failures the transaction is rolled back so the overall
    load can continue; a rollback failure propagates to the caller.

    Returns True on success, False if the transaction was rolled back.
    """
    fcrepo.open_transaction()
    try:
        logger.info("Getting {0} from repository".format(uri))
        page = ndnp.Page.from_repository(fcrepo, uri)
        logger.info("Creating annotations for page {0}".format(page.title))
        for annotation in page.textblocks():
            annotation.create_object(fcrepo)
            annotation.update_object(fcrepo)
        fcrepo.commit_transaction()
        return True
    except (RESTAPIException, DataReadException) as e:
        # if anything fails during item creation or commiting the transaction
        # attempt to rollback the current transaction
        # failures here will be caught by the main loop's exception handler
        # and should trigger a system exit
        logger.error("OCR extraction failed: {0}".format(e))
        fcrepo.rollback_transaction()
        # Logger.warn() is a deprecated alias; use warning().
        logger.warning('Transaction rolled back. Continuing load.')
        # Explicit False (previously implicit None) so callers get a
        # consistent boolean; both are falsy, so behavior is compatible.
        return False
def main():
    '''Read page URIs from stdin and create OCR annotations for each.

    Parses command-line options, configures logging, connects to the
    repository described by --repo, and records each URI as completed or
    skipped so the run can be resumed.
    '''
    parser = argparse.ArgumentParser(
        description='Extract OCR text and create annotations.'
    )
    # Path to the repo config (endpoint, relpath, credentials, and WebAC paths)
    parser.add_argument('-r', '--repo',
                        help='Path to repository configuration file.',
                        action='store',
                        required=True
                        )
    parser.add_argument('--ignore', '-i',
                        help='file listing items to ignore',
                        action='store'
                        )
    args = parser.parse_args()
    now = datetime.utcnow().strftime('%Y%m%d%H%M%S')
    # configure logging; the log file name is stamped with the start time
    with open('config/logging.yml', 'r') as configfile:
        logging_config = yaml.safe_load(configfile)
        logfile = 'logs/extractocr.py.{0}.log'.format(now)
        logging_config['handlers']['file']['filename'] = logfile
        logging.config.dictConfig(logging_config)
    # Load required repository config file and create repository object
    with open(args.repo, 'r') as repoconfig:
        fcrepo = pcdm.Repository(yaml.safe_load(repoconfig))
        logger.info('Loaded repo configuration from {0}'.format(args.repo))
    fieldnames = ['uri', 'timestamp']
    # read the log of completed items (also appended to as items finish)
    try:
        completed = util.ItemLog('logs/annotated.csv', fieldnames, 'uri')
    except Exception as e:
        logger.error('Non-standard map file specified: {0}'.format(e))
        sys.exit(1)
    logger.info('Found {0} completed items'.format(len(completed)))
    if args.ignore is not None:
        try:
            ignored = util.ItemLog(args.ignore, fieldnames, 'uri')
        except Exception as e:
            logger.error('Non-standard ignore file specified: {0}'.format(e))
            sys.exit(1)
    else:
        ignored = []
    skipfile = 'logs/skipped.extractocr.{0}.csv'.format(now)
    skipped = util.ItemLog(skipfile, fieldnames, 'uri')
    with fcrepo.at_path('/annotations'):
        # one URI per line on stdin; skip items already done or ignored
        for line in sys.stdin:
            uri = line.rstrip('\n')
            if uri in completed:
                continue
            elif uri in ignored:
                logger.debug('Ignoring {0}'.format(uri))
                continue
            is_extracted = False
            try:
                is_extracted = extract(fcrepo, uri)
            except RESTAPIException as e:
                # rollback itself failed - the repository state is unknown,
                # so abort the whole run
                logger.error(
                    "Unable to commit or rollback transaction, aborting"
                )
                sys.exit(1)
            row = {
                'uri': uri,
                'timestamp': str(datetime.utcnow())
            }
            if is_extracted:
                completed.writerow(row)
            else:
                skipped.writerow(row)
# Script entry point: annotate every page URI supplied on stdin.
if __name__ == "__main__":
    main()
| [
"logging.getLogger",
"classes.util.ItemLog",
"argparse.ArgumentParser",
"datetime.datetime.utcnow",
"logging.config.dictConfig",
"yaml.safe_load",
"handler.ndnp.Page.from_repository",
"sys.exit"
] | [((332, 359), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (349, 359), False, 'import logging\n'), ((1350, 1429), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Extract OCR text and create annotations."""'}), "(description='Extract OCR text and create annotations.')\n", (1373, 1429), False, 'import argparse\n'), ((3262, 3303), 'classes.util.ItemLog', 'util.ItemLog', (['skipfile', 'fieldnames', '"""uri"""'], {}), "(skipfile, fieldnames, 'uri')\n", (3274, 3303), False, 'from classes import pcdm, ocr, util\n'), ((504, 542), 'handler.ndnp.Page.from_repository', 'ndnp.Page.from_repository', (['fcrepo', 'uri'], {}), '(fcrepo, uri)\n', (529, 542), False, 'from handler import ndnp\n'), ((2105, 2131), 'yaml.safe_load', 'yaml.safe_load', (['configfile'], {}), '(configfile)\n', (2119, 2131), False, 'import yaml\n'), ((2264, 2305), 'logging.config.dictConfig', 'logging.config.dictConfig', (['logging_config'], {}), '(logging_config)\n', (2289, 2305), False, 'import logging\n'), ((2668, 2721), 'classes.util.ItemLog', 'util.ItemLog', (['"""logs/annotated.csv"""', 'fieldnames', '"""uri"""'], {}), "('logs/annotated.csv', fieldnames, 'uri')\n", (2680, 2721), False, 'from classes import pcdm, ocr, util\n'), ((1956, 1973), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (1971, 1973), False, 'from datetime import datetime\n'), ((2457, 2483), 'yaml.safe_load', 'yaml.safe_load', (['repoconfig'], {}), '(repoconfig)\n', (2471, 2483), False, 'import yaml\n'), ((2828, 2839), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (2836, 2839), False, 'import sys\n'), ((2977, 3021), 'classes.util.ItemLog', 'util.ItemLog', (['args.ignore', 'fieldnames', '"""uri"""'], {}), "(args.ignore, fieldnames, 'uri')\n", (2989, 3021), False, 'from classes import pcdm, ocr, util\n'), ((3143, 3154), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (3151, 3154), False, 'import sys\n'), ((3872, 3883), 'sys.exit', 'sys.exit', 
(['(1)'], {}), '(1)\n', (3880, 3883), False, 'import sys\n'), ((3966, 3983), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (3981, 3983), False, 'from datetime import datetime\n')] |
import requests
import json
def file_sync(target, file_name):
    """Ask the local schedule API to sync *file_name* to host *target*.

    Issues a GET to /api/v1/schedule/file_sync/ with a JSON request body
    and prints the decoded JSON response.
    """
    # NOTE(review): hard-coded API token; should come from config/env.
    token = 'Schedule0350c8c75ddcd9fafdaA9738df4c9346bec48dc9c4915'
    url = 'http://127.0.0.1:10011/api/v1/schedule/file_sync/'
    data = {"target": target, "file_name": file_name}
    # NOTE(review): a GET carrying a request body is unusual; presumably
    # the server reads it - confirm against the API definition.
    r = requests.get(url, data=json.dumps(data),
                     headers={'Content-Type': 'application/json', 'token': token}).json()
    print(r)
def remote_command():
    """Fire a sample remote-command request against the local schedule API.

    Sends a GET to /api/v1/schedule/command/ with a JSON body describing
    the target host, the command, the script name and positional args,
    then prints the decoded JSON response.
    """
    target = '127.0.0.1'
    command = 'command'
    script_name = 'tests'
    args = ('a', 'b')
    # (removed unused local `kwargs` - it was never sent in the payload)
    # NOTE(review): hard-coded API token; should come from config/env.
    token = 'Schedule0350c8c75ddcd9fafdaA9738df4c9346bec48dc9c4915'
    url = 'http://127.0.0.1:10011/api/v1/schedule/command/'
    data = {"fun_name": 'func', "command": command, "target": target, "script_name": script_name,
            "args": args
            }
    r = requests.get(url, data=json.dumps(data),
                     headers={'Content-Type': 'application/json', 'token': token}).json()
    print(r)
# Manual smoke test: exercise one API call against a locally running server.
if __name__ == '__main__':
    # remote_command()
    file_sync(target="127.0.0.1", file_name="tests.py")
| [
"json.dumps"
] | [((279, 295), 'json.dumps', 'json.dumps', (['data'], {}), '(data)\n', (289, 295), False, 'import json\n'), ((838, 854), 'json.dumps', 'json.dumps', (['data'], {}), '(data)\n', (848, 854), False, 'import json\n')] |
#!/usr/bin/env python
#
# Copyright 2019 DFKI GmbH.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the
# following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
# NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
# USE OR OTHER DEALINGS IN THE SOFTWARE.
import heapq
import numpy as np
from math import sqrt
from .spline_segment import SplineSegment
class SegmentList(object):
    """A piecewise-linear approximation of a spline.

    Holds a list of SplineSegment objects (each with start, center and end
    points) and supports iterative closest-point queries by repeatedly
    subdividing the nearest segment until the search accuracy is reached.
    """

    def __init__(self, closest_point_search_accuracy=0.001, closest_point_search_max_iterations=5000, segments=None):
        # segments: optional pre-built list of SplineSegment objects
        self.segments = segments
        # distance/length threshold at which the subdivision search stops
        self.closest_point_search_accuracy = closest_point_search_accuracy
        # hard cap on subdivision iterations
        self.closest_point_search_max_iterations = closest_point_search_max_iterations

    def construct_from_spline(self, spline, min_arc_length=0, max_arc_length=-1, granularity=1000):
        """ Constructs line segments out of the evaluated points
        with the given granularity and stores them in self.segments.
        Only points whose arc length falls in
        [min_arc_length, max_arc_length] are used; max_arc_length <= 0
        means the spline's full arc length.
        Returns
        -------
        True if at least one segment was built, else False
        """
        points = []
        step_size = 1.0 / granularity
        if max_arc_length <= 0:
            max_arc_length = spline.full_arc_length
        if abs(min_arc_length-max_arc_length) > step_size:
            # sample the spline uniformly in parameter space
            u = 0
            while u <= 1.0:
                arc_length = spline.get_absolute_arc_length(u)
                # TODO make more efficient by looking up min_u
                if arc_length >= min_arc_length and arc_length <= max_arc_length:
                    point = spline.query_point_by_parameter(u)
                    points.append(point)
                u += step_size
            # connect consecutive sample points into segments
            self.segments = []
            index = 0
            while index < len(points) - 1:
                start = np.array(points[index])
                end = np.array(points[index + 1])
                center = 0.5 * (end - start) + start
                segment = SplineSegment(start, center, end)
                self.segments.append(segment)
                index += 1
            return index > 0
        else:
            return False

    def find_closest_point(self, point):
        """Return (closest_point, distance) on the segment list, or
        (None, -1) when no segments are available. Refines the best of
        the two nearest segments by subdivision."""
        if self.segments is None or len(self.segments) == 0:
            return None, -1
        candidates = self.find_two_closest_segments(point)
        if len(candidates) >= 2:
            # refine both candidates and keep whichever ends up closer
            closest_point_1, distance_1 = self._find_closest_point_on_segment(candidates[0][1], point)
            closest_point_2, distance_2 = self._find_closest_point_on_segment(candidates[1][1], point)
            if distance_1 < distance_2:
                return closest_point_1, distance_1
            else:
                return closest_point_2, distance_2
        elif len(candidates) == 1:
            closest_point, distance = self._find_closest_point_on_segment(candidates[0][1], point)
            return closest_point, distance

    def find_closest_segment(self, point):
        """Linear scan over all segments by center distance.
        Returns
        -------
        * closest_segment : SplineSegment
            Segment whose center is nearest to `point`
        * min_distance : float
            distance to this segment's center
        """
        closest_segment = None
        min_distance = np.inf
        for s in self.segments:
            distance = np.linalg.norm(s.center-point)
            if distance < min_distance:
                closest_segment = s
                min_distance = distance
        return closest_segment, min_distance

    def find_two_closest_segments(self, point):
        """ Uses a heap queue to find the two closest segments
        Returns
        -------
        * closest_segments : List of Tuples
            (distance to the segment center, SplineSegment); at most two
            entries, ordered nearest first
        """
        heap = []  # heap queue keyed by center distance
        idx = 0
        while idx < len(self.segments):
            distance = np.linalg.norm(self.segments[idx].center-point)
            # Push the value item onto the heap, maintaining the heap invariant.
            heapq.heappush(heap, (distance, idx))
            idx += 1
        # pop up to two nearest segments (idx == len(self.segments) here)
        closest_segments = []
        count = 0
        while idx-count > 0 and count < 2:
            distance, index = heapq.heappop(heap)
            segment = (distance, self.segments[index])
            closest_segments.append(segment)
            count += 1
        return closest_segments

    def _find_closest_point_on_segment(self, segment, point):
        """ Find closest point by dividing the segment until the
        difference in the distance gets smaller than the accuracy
        Returns
        -------
        * closest_point : np.ndarray
            point on the spline
        * distance : float
            distance to input point
        """
        segment_length = np.inf
        distance = np.inf
        # start from the two halves of the input segment
        segment_list = SegmentList(self.closest_point_search_accuracy, self.closest_point_search_max_iterations, segment.divide())
        iteration = 0
        while segment_length > self.closest_point_search_accuracy and distance > self.closest_point_search_accuracy and iteration < self.closest_point_search_max_iterations:
            # pick the nearer half and subdivide it again
            closest_segment, distance = segment_list.find_closest_segment(point)
            segment_length = np.linalg.norm(closest_segment.end-closest_segment.start)
            segment_list = SegmentList(self.closest_point_search_accuracy, self.closest_point_search_max_iterations, closest_segment.divide())
            iteration += 1
        closest_point = closest_segment.center  # extract center of closest segment
        return closest_point, distance
| [
"numpy.array",
"heapq.heappush",
"heapq.heappop",
"numpy.linalg.norm"
] | [((4192, 4224), 'numpy.linalg.norm', 'np.linalg.norm', (['(s.center - point)'], {}), '(s.center - point)\n', (4206, 4224), True, 'import numpy as np\n'), ((4819, 4868), 'numpy.linalg.norm', 'np.linalg.norm', (['(self.segments[idx].center - point)'], {}), '(self.segments[idx].center - point)\n', (4833, 4868), True, 'import numpy as np\n'), ((5010, 5047), 'heapq.heappush', 'heapq.heappush', (['heap', '(distance, idx)'], {}), '(heap, (distance, idx))\n', (5024, 5047), False, 'import heapq\n'), ((5191, 5210), 'heapq.heappop', 'heapq.heappop', (['heap'], {}), '(heap)\n', (5204, 5210), False, 'import heapq\n'), ((6300, 6359), 'numpy.linalg.norm', 'np.linalg.norm', (['(closest_segment.end - closest_segment.start)'], {}), '(closest_segment.end - closest_segment.start)\n', (6314, 6359), True, 'import numpy as np\n'), ((2711, 2734), 'numpy.array', 'np.array', (['points[index]'], {}), '(points[index])\n', (2719, 2734), True, 'import numpy as np\n'), ((2757, 2784), 'numpy.array', 'np.array', (['points[index + 1]'], {}), '(points[index + 1])\n', (2765, 2784), True, 'import numpy as np\n')] |
from django_summernote.admin import SummernoteModelAdmin
from django.contrib.postgres import fields
from django_json_widget.widgets import JSONEditorWidget
from django.contrib import admin
from attendees.occasions.models import *
from attendees.whereabouts.models import *
from .models import *
# Register your models here.
class AttendeeAddressInline(admin.StackedInline):
    """Stacked inline for AttendeeAddress rows (used by AttendeeAdmin)."""
    model = AttendeeAddress
    extra = 0
class AttendingMeetInline(admin.StackedInline):
    """Stacked inline for AttendingMeet rows (used by AttendingAdmin)."""
    model = AttendingMeet
    extra = 0
class RelationshipInline(admin.TabularInline):
    """Tabular inline for Relationship rows (used by AttendeeAdmin)."""
    model = Relationship
    # Relationship links two attendees; edit from the from_attendee side.
    fk_name = 'from_attendee'
    extra = 0
class FamilyAttendeeInline(admin.TabularInline):
    """Tabular inline for FamilyAttendee rows (used by FamilyAdmin)."""
    model = FamilyAttendee
    extra = 0
class CategoryAdmin(admin.ModelAdmin):
    """Admin for Category; slug is auto-filled from the display name."""
    readonly_fields = ['id', 'created', 'modified']
    prepopulated_fields = {"slug": ("display_name",)}
    list_display = ('id', 'display_name', 'slug', 'display_order', 'description', 'modified')
class FamilyAdmin(admin.ModelAdmin):
    """Admin for Family with its members editable inline."""
    readonly_fields = ['id', 'created', 'modified']
    inlines = (FamilyAttendeeInline,)
    list_display_links = ('display_name',)
    list_display = ('id', 'display_name', 'display_order', 'modified')
    # Two rows on the edit form: editable name/order, then read-only audit fields.
    fieldsets = (
        (None, {"fields": (tuple(['display_name', 'display_order']),
                           tuple(['id', 'created', 'modified']),
                           ), }),
    )
class FamilyAttendeeAdmin(admin.ModelAdmin):
    """Admin for the Family/Attendee membership join rows."""
    readonly_fields = ['id', 'created', 'modified']
    list_display = ('id', 'family', 'attendee', 'role', 'modified')
class RelationAdmin(admin.ModelAdmin):
    """Admin for Relation (the vocabulary of relationship kinds)."""
    readonly_fields = ['id', 'created', 'modified']
    list_display_links = ('title',)
    list_display = ('id', 'title', 'reciprocal_ids', 'emergency_contact', 'scheduler', 'relative', 'display_order')
class AttendeeAdmin(admin.ModelAdmin):
    """Admin for Attendee with address and relationship inlines."""
    # Render postgres JSONField columns with an interactive JSON editor.
    # NOTE(review): fields.JSONField comes from django.contrib.postgres;
    # newer Django versions favor django.db.models.JSONField - confirm.
    formfield_overrides = {
        fields.JSONField: {'widget': JSONEditorWidget},
    }
    search_fields = ('first_name', 'last_name', 'last_name2', 'first_name2')
    readonly_fields = ['id', 'created', 'modified']
    inlines = (AttendeeAddressInline, RelationshipInline)
    list_display_links = ('last_name',)
    list_display = ('id', 'first_name', 'last_name', 'last_name2', 'first_name2', 'progressions', 'infos')
class RegistrationAdmin(admin.ModelAdmin):
    """Admin for Registration."""
    # Render postgres JSONField columns with an interactive JSON editor.
    formfield_overrides = {
        fields.JSONField: {'widget': JSONEditorWidget},
    }
    list_display_links = ('main_attendee',)
    list_display = ('id', 'main_attendee', 'assembly', 'infos', 'modified')
class AttendanceInline(admin.StackedInline):
    """Stacked inline for Attendance rows.

    Currently not attached to any ModelAdmin - see the note on
    AttendingAdmin.inlines in this module.
    """
    model = Attendance
    extra = 0
class AttendingAdmin(admin.ModelAdmin):
    """Admin for Attending, searchable by the attendee's names."""
    # Render postgres JSONField columns with an interactive JSON editor.
    formfield_overrides = {
        fields.JSONField: {'widget': JSONEditorWidget},
    }
    search_fields = ('attendee__first_name', 'attendee__last_name', 'attendee__first_name2', 'attendee__last_name2')
    list_display_links = ('attendee',)
    readonly_fields = ['id', 'created', 'modified']
    inlines = (AttendingMeetInline,) # add AttendanceInline when creating new Attending will fails on meet_names
    list_display = ('id', 'registration', 'attendee', 'meet_names', 'finish', 'infos')
class NoteAdmin(SummernoteModelAdmin):
    """Admin for Note; the body is edited with the Summernote rich editor."""
    summernote_fields = ('body',)
    readonly_fields = ['id', 'created', 'modified']
    list_display = ('body', 'content_type', 'object_id', 'content_object', 'display_order', 'modified')
class RelationshipAdmin(admin.ModelAdmin):
    """Admin for Relationship rows between two attendees."""
    list_display_links = ('relation',)
    readonly_fields = ['id', 'created', 'modified']
    list_display = ('id', 'from_attendee', 'relation', 'to_attendee', 'emergency_contact', 'scheduler', 'in_family', 'finish')
class AttendingMeetAdmin(admin.ModelAdmin):
    """Admin configuration for the AttendingMeet model."""
    list_display = ('id', 'attending', 'meet', 'character', 'category', 'modified')
    list_display_links = ('attending',)
    readonly_fields = ['id', 'created', 'modified']
# Wire every model to its dedicated admin configuration in one place.
_admin_registry = (
    (Category, CategoryAdmin),
    (Note, NoteAdmin),
    (Family, FamilyAdmin),
    (Attendee, AttendeeAdmin),
    (FamilyAttendee, FamilyAttendeeAdmin),
    (Registration, RegistrationAdmin),
    (Attending, AttendingAdmin),
    (Relation, RelationAdmin),
    (Relationship, RelationshipAdmin),
    (AttendingMeet, AttendingMeetAdmin),
)
for _model, _model_admin in _admin_registry:
    admin.site.register(_model, _model_admin)
| [
"django.contrib.admin.site.register"
] | [((3869, 3913), 'django.contrib.admin.site.register', 'admin.site.register', (['Category', 'CategoryAdmin'], {}), '(Category, CategoryAdmin)\n', (3888, 3913), False, 'from django.contrib import admin\n'), ((3914, 3950), 'django.contrib.admin.site.register', 'admin.site.register', (['Note', 'NoteAdmin'], {}), '(Note, NoteAdmin)\n', (3933, 3950), False, 'from django.contrib import admin\n'), ((3951, 3991), 'django.contrib.admin.site.register', 'admin.site.register', (['Family', 'FamilyAdmin'], {}), '(Family, FamilyAdmin)\n', (3970, 3991), False, 'from django.contrib import admin\n'), ((3992, 4036), 'django.contrib.admin.site.register', 'admin.site.register', (['Attendee', 'AttendeeAdmin'], {}), '(Attendee, AttendeeAdmin)\n', (4011, 4036), False, 'from django.contrib import admin\n'), ((4037, 4093), 'django.contrib.admin.site.register', 'admin.site.register', (['FamilyAttendee', 'FamilyAttendeeAdmin'], {}), '(FamilyAttendee, FamilyAttendeeAdmin)\n', (4056, 4093), False, 'from django.contrib import admin\n'), ((4094, 4146), 'django.contrib.admin.site.register', 'admin.site.register', (['Registration', 'RegistrationAdmin'], {}), '(Registration, RegistrationAdmin)\n', (4113, 4146), False, 'from django.contrib import admin\n'), ((4147, 4193), 'django.contrib.admin.site.register', 'admin.site.register', (['Attending', 'AttendingAdmin'], {}), '(Attending, AttendingAdmin)\n', (4166, 4193), False, 'from django.contrib import admin\n'), ((4194, 4238), 'django.contrib.admin.site.register', 'admin.site.register', (['Relation', 'RelationAdmin'], {}), '(Relation, RelationAdmin)\n', (4213, 4238), False, 'from django.contrib import admin\n'), ((4239, 4291), 'django.contrib.admin.site.register', 'admin.site.register', (['Relationship', 'RelationshipAdmin'], {}), '(Relationship, RelationshipAdmin)\n', (4258, 4291), False, 'from django.contrib import admin\n'), ((4292, 4346), 'django.contrib.admin.site.register', 'admin.site.register', (['AttendingMeet', 'AttendingMeetAdmin'], {}), 
'(AttendingMeet, AttendingMeetAdmin)\n', (4311, 4346), False, 'from django.contrib import admin\n')] |
from sklearn.metrics import mean_squared_error, log_loss
from keras.models import Model
from keras.models import load_model
from keras.layers import Input, Dense
from keras.layers.recurrent import SimpleRNN
from keras.layers.merge import multiply, concatenate, add
from keras import backend as K
from keras import initializers
from keras.callbacks import EarlyStopping
from keras.layers.wrappers import TimeDistributed
from keras.callbacks import Callback
from keras import optimizers
import pandas as pd
import numpy as np
from keras.constraints import max_norm, non_neg, unit_norm
# Fix the global NumPy RNG so shuffles/permutations below are reproducible.
np.random.seed(42)
from math import sqrt
import os
import sys
from collections import defaultdict
class DeepAFM:
    """Deep Additive Factors Model (DAFM) variants implemented as a Keras graph.

    build() assembles an AFM-style model from a Q-matrix (step x skill
    mapping): per-skill difficulty (B_k), per-skill learning rate (T_k), a
    cumulative practice counter (P_k, an identity SimpleRNN), optional
    section and student intercepts, and a fixed sigmoid output. Padded
    responses are encoded as -1 and masked out of the loss.
    """
    def __init__(self):
        # All state (activation, dafm_type, ...) is set inside build().
        pass
    def custom_bce(self, y_true, y_pred):
        """Masked binary cross-entropy: entries where y_true == -1 (padding)
        contribute nothing via the 0/1 mask `b`."""
        b = K.not_equal(y_true, -K.ones_like(y_true))
        b = K.cast(b, dtype='float32')
        # Per-sample BCE scaled by the fraction of real (non-padded) responses.
        ans = K.mean(K.binary_crossentropy(y_true, y_pred), axis=-1) * K.mean(b, axis=-1)
        ans = K.cast(ans, dtype='float32')
        return K.sum(ans)
    def custom_activation(self, x):
        """Activation parsed from self.activation, e.g. "custom-2.5" is a
        sigmoid with slope a=2.5."""
        if self.activation.split('-')[0] == "custom":
            a = float(self.activation.split('-')[1])
            return 1.0 / ( 1 + K.exp(-a*x) )
        elif self.activation.split('-')[0] == "rounded":
            # NOTE(review): this branch computes a value but does not return it,
            # so the function falls through to None — looks like a missing
            # `return`; confirm before relying on "rounded-*" activations.
            K.minimum(K.maximum(K.round(K.sigmoid(x)), 0), 1)
    def custom_init(self, shape, dtype=None):
        # NOTE(review): self.Q_jk_initialize is never assigned anywhere in this
        # class (build() uses self.f(Q_jk_initialize) instead), so calling this
        # would raise AttributeError — appears to be dead/legacy code.
        return K.cast_to_floatx(self.Q_jk_initialize)
    def custom_random(self, shape, dtype=None):
        """Seeded random initializer; distribution chosen by self.random_init."""
        if self.random_init == "normal":
            return K.random_normal(shape, 0.5, 0.05, dtype=dtype, seed=22)
        else:
            return K.random_uniform(shape, 0, 1, dtype=dtype, seed=22)
    def f(self, x):
        """Return a Keras initializer that yields the constant array `x`
        (reshaped to whatever shape the layer requests)."""
        def custom_init(shape, dtype=None):
            return K.cast_to_floatx(np.reshape(x, shape))
        return custom_init
    def build(self, dafm_type="dafm-afm", optimizer="rmsprop", learning_rate=0.01, activation="linear", Q_jk_initialize=0, section="", section_count=0, model1="", stateful=False, theta_student="False", student_count=0, binary="False"):
        """Assemble and compile the DAFM Keras model.

        dafm_type selects the Q-matrix treatment (fixed, trainable,
        random-init, extra dense layer, or rounding of a fitted model1);
        Q_jk_initialize (steps x skills) fixes the model dimensions;
        section="onehot" / theta_student!="False" add intercept inputs;
        model1 supplies weights for the fine-tuning variants.
        Returns the compiled model (or the modified model1 for
        "round-fine-tuned").  `stateful` is currently unused.
        """
        # Model dimensions come from the Q-matrix: rows=steps, cols=skills.
        skills = np.shape(Q_jk_initialize)[1]
        steps = np.shape(Q_jk_initialize)[0]
        self.activation = activation
        if '-' in self.activation:
            activation = self.custom_activation
        # "<type>_<f>_different" scales the skill count by factor f.
        if dafm_type.split("_")[-1] == "different":
            skills = int( float(dafm_type.split("_")[-2])*skills )
            dafm_type = dafm_type.split('_')[0]
        # "round-fine-tuned_<t>" carries an optional rounding threshold.
        if dafm_type.split("_")[0] == "round-fine-tuned":
            try:
                self.round_threshold = float(dafm_type.split("_")[-1])
                dafm_type = dafm_type.split("_")[0]
            except:
                pass
        q_jk_size = skills
        # "<type>^<f>" widens the hidden skill dimension by factor f while the
        # Q_jk layer keeps its original width.
        if '^' in dafm_type:
            q_jk_size = skills
            skills = int (float(dafm_type.split('^')[-1]) * skills)
            dafm_type = dafm_type.split('^')[0]
        self.dafm_type = dafm_type
        # Per-variant flags: is Q trainable, do we copy weights from model1,
        # and is Q randomly initialized.
        if dafm_type == "random-uniform" or dafm_type == "random-normal":
            qtrainable, finetuning, randomize = True, False, True
            self.random_init = dafm_type.split('-')[-1]
        elif dafm_type == "dafm-afm":
            qtrainable, finetuning, randomize = False, False, False
        elif dafm_type == "fine-tuned":
            qtrainable, finetuning, randomize = True, True, False
        elif dafm_type == "kcinitialize":
            qtrainable, finetuning, randomize = True, False, False
        elif dafm_type== "round-fine-tuned":
            # Round model1's fitted Q-matrix to {0,1} in place and return it.
            # if not self.round_threshold == -1:
            #     rounded_Qjk = np.abs(Q_jk1 - Q_jk_initialize)
            #     Q_jk1[rounded_Qjk <= self.round_threshold] = Q_jk_initialize[rounded_Qjk <= self.round_threshold]
            #     Q_jk1[rounded_Qjk > self.round_threshold] = np.ones(np.shape(Q_jk_initialize[rounded_Qjk > self.round_threshold])) - Q_jk_initialize[rounded_Qjk > self.round_threshold]
            # else:
            Q_jk1 = model1.get_layer("Q_jk").get_weights()[0]
            Q_jk1 = np.minimum(np.ones(np.shape(Q_jk1)), np.maximum(np.round(Q_jk1), np.zeros(np.shape(Q_jk1))))
            model1.get_layer("Q_jk").set_weights([Q_jk1])
            return model1
        elif dafm_type == "qjk-dense":
            qtrainable, finetuning, randomize = False, False, False
            activation_dense = activation
        elif dafm_type == "random-qjk-dense-normal" or dafm_type == "random-qjk-dense-uniform":
            qtrainable, finetuning, randomize = False, False, True
            self.random_init = dafm_type.split('-')[-1]
            activation_dense = activation
        else:
            print ("No Valid Model Found")
            sys.exit()
        # Optional one-hot inputs for section and student intercepts.
        if section == "onehot":
            section_input = Input(batch_shape=(None, None, section_count), name='section_input')
        if not theta_student=="False":
            student_input = Input(batch_shape=(None, None, student_count), name='student_input')
        # Constant all-ones input: feeding it through bias-free Dense layers
        # turns their kernels into free per-skill parameters (B_k, T_k, bias).
        virtual_input1 = Input(batch_shape=(None, None, 1), name='virtual_input1')
        if finetuning:
            # Warm-start B_k / T_k / bias from the previously fitted model1.
            B_k = TimeDistributed(Dense(skills, activation='linear', kernel_initializer=self.f(model1.get_layer("B_k").get_weights()[0]), use_bias=False), name="B_k")(virtual_input1)
            T_k = TimeDistributed(Dense(skills, activation='linear', kernel_initializer=self.f(model1.get_layer("T_k").get_weights()[0]), use_bias=False), name="T_k")(virtual_input1)
            bias_layer = TimeDistributed(Dense(1, activation='linear', use_bias=False, kernel_initializer=self.f(model1.get_layer("bias").get_weights()[0]), trainable=True), name="bias")(virtual_input1)
        else:
            B_k = TimeDistributed(Dense(skills, activation='linear', use_bias=False, trainable=True), name="B_k")(virtual_input1)
            T_k = TimeDistributed(Dense(skills, activation='linear', use_bias=False, trainable=True), name="T_k")(virtual_input1)
            bias_layer = TimeDistributed(Dense(1, activation='linear', use_bias=False, kernel_initializer=initializers.Zeros(), trainable=True), name="bias")(virtual_input1)
        # One-hot step sequence; Q_jk maps each step to its skill weights.
        step_input = Input(batch_shape=(None, None, steps), name='step_input')
        if randomize:
            if binary=="False":
                Q_jk = TimeDistributed(Dense(q_jk_size, use_bias=False, activation=activation, kernel_initializer=self.custom_random), trainable=qtrainable ,name="Q_jk")(step_input)
            else:
                Q_jk = TimeDistributed(BinaryDense(q_jk_size, use_bias=False, activation=activation, kernel_initializer=self.custom_random),trainable=qtrainable, name="Q_jk")(step_input)
        else:
            if binary=="False":
                Q_jk = TimeDistributed(Dense(skills, activation=activation, kernel_initializer=self.f(Q_jk_initialize), use_bias=False,trainable=qtrainable), trainable=qtrainable, name="Q_jk")(step_input)
            else:
                # NOTE(review): BinaryDense is not imported in this file;
                # presumably provided elsewhere in the project — confirm.
                Q_jk = TimeDistributed(BinaryDense(skills, activation=activation, kernel_initializer=self.f(Q_jk_initialize),trainable=qtrainable,
                                use_bias=False), name="Q_jk", trainable=qtrainable)(step_input)
        # Optional extra dense layer on top of Q_jk for the *-dense variants.
        if dafm_type == "random-qjk-dense-normal" or dafm_type == "random-qjk-dense-uniform":
            if binary =="False":
                Q_jk = TimeDistributed(Dense(skills, activation=activation_dense, use_bias=False, kernel_initializer=self.custom_random, trainable=True), name="Q_jk_dense")(Q_jk)
            else:
                Q_jk = TimeDistributed(BinaryDense(skills, activation=activation_dense, use_bias=False, kernel_initializer=self.custom_random, trainable=True), name="Q_jk_dense")(Q_jk)
        elif dafm_type == "qjk-dense":
            if binary =='False':
                Q_jk = TimeDistributed(Dense(skills, activation=activation_dense, use_bias=False, kernel_initializer=initializers.Identity(), trainable=True), name="Q_jk_dense")(Q_jk)
            else:
                Q_jk = TimeDistributed(BinaryDense(skills, activation=activation_dense, use_bias=False, kernel_initializer=initializers.Identity(), trainable=True), name="Q_jk_dense")(Q_jk)
        else:
            pass
        # AFM terms: sum_k q_jk * beta_k  (difficulty)
        Qjk_mul_Bk = multiply([Q_jk, B_k])
        sum_Qjk_Bk = TimeDistributed(Dense(1, activation='linear', trainable=False, kernel_initializer=initializers.Ones(), use_bias=False), trainable=False,name="sum_Qjk_Bk")(Qjk_mul_Bk)
        # P_k: identity-weight SimpleRNN accumulates prior practice per skill.
        P_k = SimpleRNN(skills, kernel_initializer=initializers.Identity(), recurrent_initializer=initializers.Identity() , use_bias=False, trainable=False, activation='linear', return_sequences=True, name="P_k")(Q_jk)
        # ... and sum_k q_jk * p_k * gamma_k  (learning).
        Qjk_mul_Pk_mul_Tk = multiply([Q_jk, P_k, T_k])
        sum_Qjk_Pk_Tk = TimeDistributed(Dense(1, activation='linear', trainable=False, kernel_initializer=initializers.Ones(), use_bias=False),trainable=False, name="sum_Qjk_Pk_Tk")(Qjk_mul_Pk_mul_Tk)
        Concatenate = concatenate([bias_layer, sum_Qjk_Bk, sum_Qjk_Pk_Tk])
        # Optional per-student ability intercept (theta).
        if not (theta_student=="False"):
            if finetuning:
                theta = TimeDistributed(Dense(1, activation="linear", use_bias=False, kernel_initializer=self.f(model1.get_layer("theta").get_weights()[0])), name='theta')(student_input)
            else:
                theta = TimeDistributed(Dense(1, activation="linear", use_bias=False), name='theta')(student_input)
            Concatenate = concatenate([Concatenate, theta])
        # Optional per-section intercept (S_k).
        if section == "onehot":
            if finetuning:
                S_k = TimeDistributed(Dense(1, activation="linear", use_bias=False, kernel_initializer=self.f(model1.get_layer("S_k").get_weights()[0])), name='S_k')(section_input)
            else:
                S_k = TimeDistributed(Dense(1, activation="linear", use_bias=False), name='S_k')(section_input)
            Concatenate = concatenate([Concatenate, S_k])
        # Fixed ones-kernel Dense sums the terms; sigmoid maps to P(correct).
        output = TimeDistributed(Dense(1, activation="sigmoid", trainable=False, kernel_initializer=initializers.Ones(), use_bias=False), trainable=False, name="output")(Concatenate)
        # Input list must match what fit()/predict() feed in, per variant.
        if section == "onehot" and not (theta_student=="False"):
            model = Model(inputs=[virtual_input1, step_input, section_input, student_input], outputs=output)
        elif section == "onehot" and theta_student=="False":
            model = Model(inputs=[virtual_input1, step_input, section_input], outputs=output)
        elif not (section == "onehot") and not (theta_student=="False"):
            model = Model(inputs=[virtual_input1, step_input, student_input], outputs=output)
        else:
            model = Model(inputs=[virtual_input1, step_input], outputs=output)
        d_optimizer = {"rmsprop":optimizers.RMSprop(lr=learning_rate), "adam":optimizers.Adam(lr=learning_rate), "adagrad":optimizers.Adagrad(lr=learning_rate) }
        model.compile( optimizer = d_optimizer[optimizer],
                        loss = self.custom_bce)
        return model
    def fit(self, x_train, y_train, x_train_section, x_train_student, x_test, y_test, x_test_section, x_test_student, model, epochs=5, batch_size=32, loaded=False, validation=True):
        """Train `model` with patience-based early stopping on validation loss.

        Empty lists for *_section / *_student mean that input is absent.
        With validation=False a single Keras fit() run (EarlyStopping on
        training loss) is used instead of the manual loop.
        Returns (best_model, AIC, BIC, epochs_run, loss_history).
        """
        loss_epoch = {"epoch":[], "loss":[], "val_loss":[], 'patience':[]}
        print ("Max Epochs", epochs)
        # Pre-trained / rounding variants skip training entirely.
        if self.dafm_type == "round-fine-tuned" or loaded:
            best_model = model
        patience, epoch = 0 , 1
        prev_best_val_loss = np.inf
        counter = 0
        # All-ones tensors for the 'virtual_input1' model input.
        virtual_input1 = np.ones([np.shape(x_train)[0], np.shape(x_train)[1], 1])
        virtual_input1_test = np.ones([np.shape(x_test)[0], np.shape(x_test)[1], 1])
        if not validation:
            earlyStopping = EarlyStopping(monitor='loss', patience=2)
            # Input list depends on which optional inputs the model was built with.
            if len(x_train_student) == 0:
                if len(x_train_section) == 0:
                    history_callback = model.fit([virtual_input1, x_train], y_train, batch_size=batch_size, epochs=epochs, callbacks=[earlyStopping], verbose=1, shuffle=True)
                else:
                    history_callback = model.fit([virtual_input1, x_train, x_train_section], y_train, batch_size=batch_size, epochs=epochs, callbacks=[earlyStopping], verbose=1, shuffle=True)
            else:
                if len(x_train_section) == 0:
                    history_callback = model.fit([virtual_input1, x_train, x_train_student], y_train, batch_size=batch_size, epochs=epochs , callbacks=[earlyStopping], verbose=1, shuffle=True)
                else:
                    history_callback = model.fit([virtual_input1, x_train, x_train_section, x_train_student], y_train, batch_size=batch_size, epochs=epochs, callbacks=[earlyStopping], verbose=1, shuffle=True)
            # print ("Epoch Number:", counter, "Patience:", 0, "val loss:", current_val_loss)
            loss_epoch["loss"].extend(history_callback.history["loss"])
            # No validation run here, so training loss is mirrored into val_loss.
            loss_epoch["val_loss"].extend(history_callback.history["loss"])
            loss_epoch["epoch"].extend(list(range(epochs)))
            loss_epoch["patience"].extend(list(range(epochs)))
            best_model = model
            epoch = epochs
        else:
            # Manual loop: one Keras epoch at a time, reshuffling between epochs,
            # stop after 6 epochs without validation improvement.
            while (patience <=5 and epoch <= epochs and (not self.dafm_type == "round-fine-tuned") and (loaded == False)):
                permutation = np.random.permutation(x_train.shape[0])
                x_train = x_train[permutation]
                y_train = y_train[permutation]
                counter += 1
                if len(x_train_student) == 0:
                    if len(x_train_section) == 0:
                        history_callback = model.fit([virtual_input1, x_train], y_train, batch_size=batch_size, epochs=1, validation_data=([virtual_input1_test, x_test], y_test), verbose=0, shuffle=True)
                    else:
                        x_train_section = x_train_section[permutation]
                        history_callback = model.fit([virtual_input1, x_train, x_train_section], y_train, batch_size=batch_size, epochs=1, validation_data=([virtual_input1_test, x_test, x_test_section], y_test), verbose=0, shuffle=True)
                else:
                    x_train_student = x_train_student[permutation]
                    if len(x_train_section) == 0:
                        history_callback = model.fit([virtual_input1, x_train, x_train_student], y_train, batch_size=batch_size, epochs=1, validation_data=([virtual_input1_test, x_test, x_test_student], y_test), verbose=0, shuffle=True)
                    else:
                        x_train_section = x_train_section[permutation]
                        history_callback = model.fit([virtual_input1, x_train, x_train_section, x_train_student], y_train, batch_size=batch_size, epochs=1, validation_data=([virtual_input1_test, x_test, x_test_section, x_test_student], y_test), verbose=0, shuffle=True)
                current_val_loss = history_callback.history["val_loss"][0]
                print ("Epoch Number:", counter, "Patience:", patience, "val loss:", current_val_loss)
                loss_epoch["val_loss"].append(history_callback.history["val_loss"][0])
                loss_epoch["loss"].append(history_callback.history["loss"][0])
                loss_epoch["epoch"].append(counter)
                loss_epoch["patience"].append(patience)
                if (prev_best_val_loss - current_val_loss) > 0:
                    # Improvement: keep this model and reset patience.
                    best_model = model
                    epoch += patience + 1
                    patience = 0
                    prev_best_val_loss = current_val_loss
                else:
                    patience += 1
        # Per-response log-losses on the training set, for AIC/BIC.
        if len(x_train_student)==0:
            if len(x_train_section)==0:
                x = self.bce_loss(y_train, best_model.predict([virtual_input1, x_train]), x_train)
            else:
                x = self.bce_loss(y_train, best_model.predict([virtual_input1, x_train, x_train_section]), x_train)
        else:
            if len(x_train_section)==0:
                x = self.bce_loss(y_train, best_model.predict([virtual_input1, x_train, x_train_student]), x_train)
            else:
                x = self.bce_loss(y_train, best_model.predict([virtual_input1, x_train, x_train_section, x_train_student]), x_train)
        # L = total log-likelihood, N = number of scored responses.
        L, N = -np.sum(x), len(x)
        model_param = best_model.count_params()
        print ("PARAM", model_param)
        AIC = 2 * model_param - 2 * L
        BIC = model_param * np.log(N) - 2 * L
        B_k = best_model.get_layer("B_k").get_weights()[0]
        T_k = best_model.get_layer("T_k").get_weights()[0]
        return best_model, AIC, BIC, epoch, loss_epoch
    def fit_batches(self, dafmdata_obj, model, max_epochs=30, earlyStop="val_loss", loaded=False):
        """Like fit(), but streams batches from dafmdata_obj.data_generator1()
        instead of holding the whole dataset in memory.

        earlyStop selects the monitored metric ("rmse" or mean test BCE).
        Returns (best_model, AIC, BIC, epochs_run, loss_history).
        """
        print ("Max Epochs", max_epochs)
        loss_epoch = {"epoch":[], "loss":[], earlyStop:[], 'patience':[]}
        patience, epoch = 0, 1
        prev_best_val_loss = np.inf
        counter = 0
        if self.dafm_type == "round-fine-tuned" or loaded:
            best_model = model
        # Stop after 3 consecutive epochs without improvement.
        while (patience <= 2 and epoch <= max_epochs and loaded==False and (not self.dafm_type == "round-fine-tuned")):
            counter += 1
            current_val_loss = 0
            total_loss, total_train_samples = 0, 0
            train = dafmdata_obj.data_generator1("train")
            test = dafmdata_obj.data_generator1("test")
            bc = 0
            for x_train, y_train, x_train_section, x_train_student, batch_size in train:
                permutation = np.random.permutation(x_train.shape[0])
                x_train = x_train[permutation]
                y_train = y_train[permutation]
                virtual_input1 = np.ones([np.shape(x_train)[0], np.shape(x_train)[1], 1])
                print ("Batch Number:", bc, np.shape(x_train))
                if len(x_train_student)==0:
                    if len(x_train_section) == 0:
                        history_callback = model.fit([virtual_input1, x_train], y_train, batch_size=batch_size, epochs=1, verbose=0)
                    else:
                        x_train_section = x_train_section[permutation]
                        history_callback = model.fit([virtual_input1, x_train, x_train_section], y_train, batch_size=batch_size, epochs=1, verbose=1)
                else:
                    x_train_student = x_train_student[permutation]
                    if len(x_train_section) == 0:
                        history_callback = model.fit([virtual_input1, x_train, x_train_student], y_train, batch_size=batch_size, epochs=1, verbose=0)
                    else:
                        x_train_section = x_train_section[permutation]
                        history_callback = model.fit([virtual_input1, x_train, x_train_section, x_train_student], y_train, batch_size=batch_size, epochs=1, verbose=1)
                # Weight batch losses by batch size for a proper epoch average.
                total_loss += history_callback.history["loss"][0] * len(x_train)
                total_train_samples += len(x_train)
                bc += 1
            if earlyStop == "rmse":
                current_avg_rmse = self.predict_batches(dafmdata_obj, model)
                loss_epoch["rmse"].append(current_avg_rmse)
            else:
                current_avg_rmse = np.mean(self.bce_loss_batches(dafmdata_obj, model, utype="test"))
                loss_epoch["val_loss"].append(current_avg_rmse)
            loss_epoch["loss"].append(float(total_loss)/float(total_train_samples))
            loss_epoch["epoch"].append(counter)
            loss_epoch["patience"].append(patience)
            print ("Epoch Number:", counter, "Patience:", patience, earlyStop, current_avg_rmse, "Loss:", loss_epoch["loss"][-1])
            if (prev_best_val_loss - current_avg_rmse) > 0:
                best_model = model
                epoch += patience + 1
                patience = 0
                prev_best_val_loss = current_avg_rmse
            else:
                patience += 1
        x = self.bce_loss_batches(dafmdata_obj, best_model, utype="train")
        L, N = -np.sum(x), len(x)
        model_param = best_model.count_params()
        AIC = 2 * model_param - 2 * L
        BIC = model_param * np.log(N) - 2 * L
        return best_model, AIC, BIC, epoch, loss_epoch
    def L(self, y_true, y_pred, x_train):
        """Total log-likelihood L and user count N over the masked responses.

        Iterates flattened (user, response) pairs; a padded step (all-zero
        one-hot row or y_true == -1) ends that user's sequence. Predictions
        outside (0, 1) are clipped to [eps, 1-eps] before taking logs.
        """
        mask_matrix = np.sum(x_train, axis=2).flatten()
        num_users, max_responses = np.shape(x_train)[0], np.shape(x_train)[1]
        y_pred = y_pred.flatten()
        y_true = y_true.flatten()
        rmse = []
        SSR = 0
        response = 0
        L = 0
        N = 0
        c = 0
        for user in range(num_users):
            for i in range(user * max_responses, (user + 1) * max_responses):
                if mask_matrix[i] == 0 or y_true[i] == -1:
                    break
                if y_pred[i] < 1 and y_pred[i] > 0:
                    L += ( y_true[i] * np.log(y_pred[i]) + (1 - y_true[i]) * np.log(1 - y_pred[i]) )
                else:
                    # Degenerate prediction (0 or 1): clip unless it exactly
                    # matches the label (log term would be 0 anyway).
                    c += 1
                    eps = 1e-4
                    if y_pred[i] == y_true[i]:
                        pass
                    else:
                        y_pred[i] = max(eps, min(1 - eps, y_pred[i]))
                        L += ( y_true[i] * np.log(y_pred[i]) + (1 - y_true[i]) * np.log(1 - y_pred[i]) )
                response += 1
            N += 1
        return L, N
    def L_batches(self, dafmdata_obj, model):
        """Accumulate self.L over all training batches; N counts users."""
        L = 0
        N = 0
        train_generator = dafmdata_obj.data_generator1("train")
        for x_train, y_train, x_train_section, x_train_student, batch_size in train_generator:
            virtual_input1 = np.ones([np.shape(x_train)[0], np.shape(x_train)[1], 1])
            if len(x_train_student)==0:
                if len(x_train_section) == 0:
                    l, x= self.L(y_train, model.predict([virtual_input1, x_train]), x_train)
                    L += l
                else:
                    l, x= self.L(y_train, model.predict([virtual_input1, x_train, x_train_section]), x_train)
                    L += l
            else:
                if len(x_train_section) == 0:
                    l, x= self.L(y_train, model.predict([virtual_input1, x_train, x_train_student]), x_train)
                    L += l
                else:
                    l, x= self.L(y_train, model.predict([virtual_input1, x_train, x_train_section, x_train_student]), x_train)
                    L += l
            N += len(x_train)
        return L, N
    def predict(self, x_test, y_test, x_test_section, x_test_student, model, batch_size=32):
        """Predict on x_test and return the per-user masked RMSE."""
        virtual_input_test = np.ones([np.shape(x_test)[0], np.shape(x_test)[1], 1])
        if len(x_test_student)==0:
            if len(x_test_section)==0:
                y_pred = model.predict([virtual_input_test, x_test], batch_size=batch_size)
            else:
                y_pred = model.predict([virtual_input_test, x_test, x_test_section] , batch_size=batch_size)
        else:
            if len(x_test_section)==0:
                y_pred = model.predict([virtual_input_test, x_test, x_test_student], batch_size=batch_size)
            else:
                y_pred = model.predict([virtual_input_test, x_test, x_test_section, x_test_student] , batch_size=batch_size)
        rmse = self.rmse_masking(y_test, y_pred, x_test)
        return rmse
    def prediction(self, x_test, x_test_section, x_test_student, model, batch_size=32):
        """Return raw model predictions for x_test (no metric computed)."""
        virtual_input_test = np.ones([np.shape(x_test)[0], np.shape(x_test)[1], 1])
        if len(x_test_student)==0:
            if len(x_test_section)==0:
                y_pred = model.predict([virtual_input_test, x_test], batch_size=batch_size)
            else:
                y_pred = model.predict([virtual_input_test, x_test, x_test_section] , batch_size=batch_size)
        else:
            if len(x_test_section)==0:
                y_pred = model.predict([virtual_input_test, x_test, x_test_student], batch_size=batch_size)
            else:
                y_pred = model.predict([virtual_input_test, x_test, x_test_section, x_test_student], batch_size=batch_size)
        return y_pred
    def predict_batches(self, dafmdata_obj, model):
        """User-count-weighted average RMSE over all test batches."""
        test_generator = dafmdata_obj.data_generator1("test")
        avg_rmse = 0
        t_users = 0
        for x_test, y_test, x_test_section, x_test_student, batch_size in test_generator:
            avg_rmse = avg_rmse + len(x_test)*self.predict(x_test, y_test, x_test_section, x_test_student, model, batch_size)
            t_users = t_users + len(x_test)
        return avg_rmse/float(t_users)
    def bce_loss_batches(self, dafmdata_obj, model, utype="train"):
        """Concatenate per-response BCE losses over all batches of `utype`."""
        ll = []
        test_generator = dafmdata_obj.data_generator1(utype)
        for x_test, y_test, x_test_section, x_test_student, batch_size in test_generator:
            virtual_input_test = np.ones([np.shape(x_test)[0], np.shape(x_test)[1], 1])
            if len(x_test_student) == 0:
                if len(x_test_section) == 0:
                    ll.extend(self.bce_loss(y_test, model.predict([virtual_input_test, x_test], batch_size=batch_size), x_test))
                else:
                    ll.extend(self.bce_loss(y_test, model.predict([virtual_input_test, x_test, x_test_section], batch_size=batch_size), x_test))
            else:
                if len(x_test_section) == 0:
                    ll.extend(self.bce_loss(y_test, model.predict([virtual_input_test, x_test, x_test_student], batch_size=batch_size), x_test))
                else:
                    ll.extend(self.bce_loss(y_test, model.predict([virtual_input_test, x_test, x_test_section, x_test_student], batch_size=batch_size), x_test))
        return ll
    def bce_loss(self, y_true, y_pred, x_test):
        """Per-response binary cross-entropy losses, skipping padded steps.

        Returns a flat list with one positive loss value per real response.
        """
        mask_matrix = np.sum(x_test, axis=2).flatten()
        num_users, max_responses = np.shape(x_test)[0], np.shape(x_test)[1]
        y_pred = y_pred.flatten()
        y_true = y_true.flatten()
        ll = []
        response = 0
        for user in range(num_users):
            # NOTE(review): local `log_loss` shadows sklearn's imported log_loss.
            log_loss = []
            for i in range(user * max_responses, (user + 1) * max_responses):
                if mask_matrix[i] == 0 or y_true[i] == -1:
                    break
                response += 1
                # Clip predictions away from 0/1 so the logs stay finite.
                eps = 1e-7
                y_pred[i] = max(eps, min(1 - eps, y_pred[i]))
                log_loss.append( -( y_true[i] * np.log(y_pred[i]) + (1 - y_true[i]) * np.log(1 - y_pred[i]) ) )
            ll.extend(log_loss)
        return ll
    def rmse_masking(self, y_true, y_pred, x_test):
        """Mean over users of each user's RMSE, ignoring padded responses."""
        mask_matrix = np.sum(x_test, axis=2).flatten()
        num_users, max_responses = np.shape(x_test)[0], np.shape(x_test)[1]
        y_pred = y_pred.flatten()
        y_true = y_true.flatten()
        rmse = []
        for user in range(num_users):
            diff_sq, response = 0, 0
            for i in range(user * max_responses, (user + 1) * max_responses):
                if mask_matrix[i] == 0 or y_true[i] == -1:
                    continue
                # continue for response level evaluation
                diff_sq += (y_true[i] - y_pred[i]) ** 2
                response += 1
            rmse.append(sqrt(diff_sq/float(response)))
        return np.mean(rmse)
if __name__ == "__main__":
    # Smoke test: 4 students x 4 opportunities, steps one-hot over 3 steps.
    x_train = [ [ [0, 0, 1], [0, 0, 1], [1, 0, 0], [0, 0, 0] ],
                [ [1, 0, 0], [0, 1, 0], [1, 0, 0], [0, 0, 0] ],
                [ [0, 0, 1], [1, 0, 0], [0, 1, 0], [0, 0, 1] ],
                [ [1, 0, 0], [1, 0, 0], [0, 0, 1], [1, 0, 0] ] ]
    x_test = [ [ [ 1, 0, 0], [0, 1, 0], [0, 1, 0], [0, 0, 1] ] ]
    y_test = [ [ [-1], [-1], [-1], [-1] ] ]
    y_train = [ [ [0], [0], [1], [-1] ],
                [ [1], [0], [1], [-1] ],
                [ [0], [0], [0], [0] ] ,
                [ [0], [1], [0], [0] ] ]
    # Fixed Q-matrix: 3 steps x 2 skills.  (The previous np.random.rand(3, 2)
    # assignment was dead code — it was immediately overwritten.)
    Q_jk_initialize = np.array([[1, 0], [0, 1], [1, 1]])
    # Fixes vs. the original demo, which crashed:
    #  * the class defined above is DeepAFM, not DAFM (NameError), and its
    #    constructor takes no arguments — build() derives skills/steps from
    #    the Q-matrix shape;
    #  * build() has no qtrainable/finetuning/loaded/dftype parameters;
    #  * predict() requires the section/student inputs — pass empty lists
    #    for a model built without them.
    obj = DeepAFM()
    model = obj.build(dafm_type="dafm-afm", Q_jk_initialize=Q_jk_initialize)
    obj.predict(np.array(x_test), np.array(y_test), [], [], model)
| [
"numpy.random.rand",
"keras.backend.sum",
"keras.initializers.Identity",
"keras.backend.cast_to_floatx",
"numpy.log",
"numpy.array",
"sys.exit",
"keras.layers.Dense",
"numpy.mean",
"keras.layers.merge.multiply",
"numpy.reshape",
"keras.layers.merge.concatenate",
"keras.initializers.Ones",
... | [((583, 601), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (597, 601), True, 'import numpy as np\n'), ((27585, 27605), 'numpy.random.rand', 'np.random.rand', (['(3)', '(2)'], {}), '(3, 2)\n', (27599, 27605), True, 'import numpy as np\n'), ((27627, 27661), 'numpy.array', 'np.array', (['[[1, 0], [0, 1], [1, 1]]'], {}), '([[1, 0], [0, 1], [1, 1]])\n', (27635, 27661), True, 'import numpy as np\n'), ((845, 871), 'keras.backend.cast', 'K.cast', (['b'], {'dtype': '"""float32"""'}), "(b, dtype='float32')\n", (851, 871), True, 'from keras import backend as K\n'), ((976, 1004), 'keras.backend.cast', 'K.cast', (['ans'], {'dtype': '"""float32"""'}), "(ans, dtype='float32')\n", (982, 1004), True, 'from keras import backend as K\n'), ((1020, 1030), 'keras.backend.sum', 'K.sum', (['ans'], {}), '(ans)\n', (1025, 1030), True, 'from keras import backend as K\n'), ((1401, 1439), 'keras.backend.cast_to_floatx', 'K.cast_to_floatx', (['self.Q_jk_initialize'], {}), '(self.Q_jk_initialize)\n', (1417, 1439), True, 'from keras import backend as K\n'), ((4977, 5034), 'keras.layers.Input', 'Input', ([], {'batch_shape': '(None, None, 1)', 'name': '"""virtual_input1"""'}), "(batch_shape=(None, None, 1), name='virtual_input1')\n", (4982, 5034), False, 'from keras.layers import Input, Dense\n'), ((6097, 6154), 'keras.layers.Input', 'Input', ([], {'batch_shape': '(None, None, steps)', 'name': '"""step_input"""'}), "(batch_shape=(None, None, steps), name='step_input')\n", (6102, 6154), False, 'from keras.layers import Input, Dense\n'), ((8137, 8158), 'keras.layers.merge.multiply', 'multiply', (['[Q_jk, B_k]'], {}), '([Q_jk, B_k])\n', (8145, 8158), False, 'from keras.layers.merge import multiply, concatenate, add\n'), ((8596, 8622), 'keras.layers.merge.multiply', 'multiply', (['[Q_jk, P_k, T_k]'], {}), '([Q_jk, P_k, T_k])\n', (8604, 8622), False, 'from keras.layers.merge import multiply, concatenate, add\n'), ((8846, 8898), 'keras.layers.merge.concatenate', 'concatenate', 
(['[bias_layer, sum_Qjk_Bk, sum_Qjk_Pk_Tk]'], {}), '([bias_layer, sum_Qjk_Bk, sum_Qjk_Pk_Tk])\n', (8857, 8898), False, 'from keras.layers.merge import multiply, concatenate, add\n'), ((26990, 27003), 'numpy.mean', 'np.mean', (['rmse'], {}), '(rmse)\n', (26997, 27003), True, 'import numpy as np\n'), ((27677, 27694), 'numpy.array', 'np.array', (['x_train'], {}), '(x_train)\n', (27685, 27694), True, 'import numpy as np\n'), ((27696, 27713), 'numpy.array', 'np.array', (['y_train'], {}), '(y_train)\n', (27704, 27713), True, 'import numpy as np\n'), ((27715, 27731), 'numpy.array', 'np.array', (['x_test'], {}), '(x_test)\n', (27723, 27731), True, 'import numpy as np\n'), ((27733, 27749), 'numpy.array', 'np.array', (['y_test'], {}), '(y_test)\n', (27741, 27749), True, 'import numpy as np\n'), ((27886, 27902), 'numpy.array', 'np.array', (['x_test'], {}), '(x_test)\n', (27894, 27902), True, 'import numpy as np\n'), ((27904, 27920), 'numpy.array', 'np.array', (['y_test'], {}), '(y_test)\n', (27912, 27920), True, 'import numpy as np\n'), ((943, 961), 'keras.backend.mean', 'K.mean', (['b'], {'axis': '(-1)'}), '(b, axis=-1)\n', (949, 961), True, 'from keras import backend as K\n'), ((1549, 1604), 'keras.backend.random_normal', 'K.random_normal', (['shape', '(0.5)', '(0.05)'], {'dtype': 'dtype', 'seed': '(22)'}), '(shape, 0.5, 0.05, dtype=dtype, seed=22)\n', (1564, 1604), True, 'from keras import backend as K\n'), ((1638, 1689), 'keras.backend.random_uniform', 'K.random_uniform', (['shape', '(0)', '(1)'], {'dtype': 'dtype', 'seed': '(22)'}), '(shape, 0, 1, dtype=dtype, seed=22)\n', (1654, 1689), True, 'from keras import backend as K\n'), ((2095, 2120), 'numpy.shape', 'np.shape', (['Q_jk_initialize'], {}), '(Q_jk_initialize)\n', (2103, 2120), True, 'import numpy as np\n'), ((2140, 2165), 'numpy.shape', 'np.shape', (['Q_jk_initialize'], {}), '(Q_jk_initialize)\n', (2148, 2165), True, 'import numpy as np\n'), ((4746, 4814), 'keras.layers.Input', 'Input', ([], {'batch_shape': '(None, 
None, section_count)', 'name': '"""section_input"""'}), "(batch_shape=(None, None, section_count), name='section_input')\n", (4751, 4814), False, 'from keras.layers import Input, Dense\n'), ((4882, 4950), 'keras.layers.Input', 'Input', ([], {'batch_shape': '(None, None, student_count)', 'name': '"""student_input"""'}), "(batch_shape=(None, None, student_count), name='student_input')\n", (4887, 4950), False, 'from keras.layers import Input, Dense\n'), ((9315, 9348), 'keras.layers.merge.concatenate', 'concatenate', (['[Concatenate, theta]'], {}), '([Concatenate, theta])\n', (9326, 9348), False, 'from keras.layers.merge import multiply, concatenate, add\n'), ((9746, 9777), 'keras.layers.merge.concatenate', 'concatenate', (['[Concatenate, S_k]'], {}), '([Concatenate, S_k])\n', (9757, 9777), False, 'from keras.layers.merge import multiply, concatenate, add\n'), ((10047, 10139), 'keras.models.Model', 'Model', ([], {'inputs': '[virtual_input1, step_input, section_input, student_input]', 'outputs': 'output'}), '(inputs=[virtual_input1, step_input, section_input, student_input],\n outputs=output)\n', (10052, 10139), False, 'from keras.models import Model\n'), ((10585, 10621), 'keras.optimizers.RMSprop', 'optimizers.RMSprop', ([], {'lr': 'learning_rate'}), '(lr=learning_rate)\n', (10603, 10621), False, 'from keras import optimizers\n'), ((10630, 10663), 'keras.optimizers.Adam', 'optimizers.Adam', ([], {'lr': 'learning_rate'}), '(lr=learning_rate)\n', (10645, 10663), False, 'from keras import optimizers\n'), ((10675, 10711), 'keras.optimizers.Adagrad', 'optimizers.Adagrad', ([], {'lr': 'learning_rate'}), '(lr=learning_rate)\n', (10693, 10711), False, 'from keras import optimizers\n'), ((11539, 11580), 'keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'monitor': '"""loss"""', 'patience': '(2)'}), "(monitor='loss', patience=2)\n", (11552, 11580), False, 'from keras.callbacks import EarlyStopping\n'), ((812, 831), 'keras.backend.ones_like', 'K.ones_like', (['y_true'], {}), 
'(y_true)\n', (823, 831), True, 'from keras import backend as K\n'), ((893, 930), 'keras.backend.binary_crossentropy', 'K.binary_crossentropy', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (914, 930), True, 'from keras import backend as K\n'), ((1791, 1811), 'numpy.reshape', 'np.reshape', (['x', 'shape'], {}), '(x, shape)\n', (1801, 1811), True, 'import numpy as np\n'), ((10217, 10290), 'keras.models.Model', 'Model', ([], {'inputs': '[virtual_input1, step_input, section_input]', 'outputs': 'output'}), '(inputs=[virtual_input1, step_input, section_input], outputs=output)\n', (10222, 10290), False, 'from keras.models import Model\n'), ((13136, 13175), 'numpy.random.permutation', 'np.random.permutation', (['x_train.shape[0]'], {}), '(x_train.shape[0])\n', (13157, 13175), True, 'import numpy as np\n'), ((16067, 16076), 'numpy.sum', 'np.sum', (['x'], {}), '(x)\n', (16073, 16076), True, 'import numpy as np\n'), ((16237, 16246), 'numpy.log', 'np.log', (['N'], {}), '(N)\n', (16243, 16246), True, 'import numpy as np\n'), ((17303, 17342), 'numpy.random.permutation', 'np.random.permutation', (['x_train.shape[0]'], {}), '(x_train.shape[0])\n', (17324, 17342), True, 'import numpy as np\n'), ((19802, 19811), 'numpy.sum', 'np.sum', (['x'], {}), '(x)\n', (19808, 19811), True, 'import numpy as np\n'), ((19935, 19944), 'numpy.log', 'np.log', (['N'], {}), '(N)\n', (19941, 19944), True, 'import numpy as np\n'), ((20074, 20097), 'numpy.sum', 'np.sum', (['x_train'], {'axis': '(2)'}), '(x_train, axis=2)\n', (20080, 20097), True, 'import numpy as np\n'), ((20143, 20160), 'numpy.shape', 'np.shape', (['x_train'], {}), '(x_train)\n', (20151, 20160), True, 'import numpy as np\n'), ((20165, 20182), 'numpy.shape', 'np.shape', (['x_train'], {}), '(x_train)\n', (20173, 20182), True, 'import numpy as np\n'), ((25539, 25561), 'numpy.sum', 'np.sum', (['x_test'], {'axis': '(2)'}), '(x_test, axis=2)\n', (25545, 25561), True, 'import numpy as np\n'), ((25607, 25623), 'numpy.shape', 'np.shape', 
(['x_test'], {}), '(x_test)\n', (25615, 25623), True, 'import numpy as np\n'), ((25628, 25644), 'numpy.shape', 'np.shape', (['x_test'], {}), '(x_test)\n', (25636, 25644), True, 'import numpy as np\n'), ((26337, 26359), 'numpy.sum', 'np.sum', (['x_test'], {'axis': '(2)'}), '(x_test, axis=2)\n', (26343, 26359), True, 'import numpy as np\n'), ((26405, 26421), 'numpy.shape', 'np.shape', (['x_test'], {}), '(x_test)\n', (26413, 26421), True, 'import numpy as np\n'), ((26426, 26442), 'numpy.shape', 'np.shape', (['x_test'], {}), '(x_test)\n', (26434, 26442), True, 'import numpy as np\n'), ((1206, 1219), 'keras.backend.exp', 'K.exp', (['(-a * x)'], {}), '(-a * x)\n', (1211, 1219), True, 'from keras import backend as K\n'), ((5675, 5741), 'keras.layers.Dense', 'Dense', (['skills'], {'activation': '"""linear"""', 'use_bias': '(False)', 'trainable': '(True)'}), "(skills, activation='linear', use_bias=False, trainable=True)\n", (5680, 5741), False, 'from keras.layers import Input, Dense\n'), ((5805, 5871), 'keras.layers.Dense', 'Dense', (['skills'], {'activation': '"""linear"""', 'use_bias': '(False)', 'trainable': '(True)'}), "(skills, activation='linear', use_bias=False, trainable=True)\n", (5810, 5871), False, 'from keras.layers import Input, Dense\n'), ((8399, 8422), 'keras.initializers.Identity', 'initializers.Identity', ([], {}), '()\n', (8420, 8422), False, 'from keras import initializers\n'), ((8446, 8469), 'keras.initializers.Identity', 'initializers.Identity', ([], {}), '()\n', (8467, 8469), False, 'from keras import initializers\n'), ((10384, 10457), 'keras.models.Model', 'Model', ([], {'inputs': '[virtual_input1, step_input, student_input]', 'outputs': 'output'}), '(inputs=[virtual_input1, step_input, student_input], outputs=output)\n', (10389, 10457), False, 'from keras.models import Model\n'), ((10492, 10550), 'keras.models.Model', 'Model', ([], {'inputs': '[virtual_input1, step_input]', 'outputs': 'output'}), '(inputs=[virtual_input1, step_input], 
outputs=output)\n', (10497, 10550), False, 'from keras.models import Model\n'), ((11351, 11368), 'numpy.shape', 'np.shape', (['x_train'], {}), '(x_train)\n', (11359, 11368), True, 'import numpy as np\n'), ((11373, 11390), 'numpy.shape', 'np.shape', (['x_train'], {}), '(x_train)\n', (11381, 11390), True, 'import numpy as np\n'), ((11438, 11454), 'numpy.shape', 'np.shape', (['x_test'], {}), '(x_test)\n', (11446, 11454), True, 'import numpy as np\n'), ((11459, 11475), 'numpy.shape', 'np.shape', (['x_test'], {}), '(x_test)\n', (11467, 11475), True, 'import numpy as np\n'), ((17571, 17588), 'numpy.shape', 'np.shape', (['x_train'], {}), '(x_train)\n', (17579, 17588), True, 'import numpy as np\n'), ((22381, 22397), 'numpy.shape', 'np.shape', (['x_test'], {}), '(x_test)\n', (22389, 22397), True, 'import numpy as np\n'), ((22402, 22418), 'numpy.shape', 'np.shape', (['x_test'], {}), '(x_test)\n', (22410, 22418), True, 'import numpy as np\n'), ((23230, 23246), 'numpy.shape', 'np.shape', (['x_test'], {}), '(x_test)\n', (23238, 23246), True, 'import numpy as np\n'), ((23251, 23267), 'numpy.shape', 'np.shape', (['x_test'], {}), '(x_test)\n', (23259, 23267), True, 'import numpy as np\n'), ((6248, 6347), 'keras.layers.Dense', 'Dense', (['q_jk_size'], {'use_bias': '(False)', 'activation': 'activation', 'kernel_initializer': 'self.custom_random'}), '(q_jk_size, use_bias=False, activation=activation, kernel_initializer=\n self.custom_random)\n', (6253, 6347), False, 'from keras.layers import Input, Dense\n'), ((7276, 7393), 'keras.layers.Dense', 'Dense', (['skills'], {'activation': 'activation_dense', 'use_bias': '(False)', 'kernel_initializer': 'self.custom_random', 'trainable': '(True)'}), '(skills, activation=activation_dense, use_bias=False,\n kernel_initializer=self.custom_random, trainable=True)\n', (7281, 7393), False, 'from keras.layers import Input, Dense\n'), ((8262, 8281), 'keras.initializers.Ones', 'initializers.Ones', ([], {}), '()\n', (8279, 8281), False, 'from keras 
import initializers\n'), ((8729, 8748), 'keras.initializers.Ones', 'initializers.Ones', ([], {}), '()\n', (8746, 8748), False, 'from keras import initializers\n'), ((9213, 9258), 'keras.layers.Dense', 'Dense', (['(1)'], {'activation': '"""linear"""', 'use_bias': '(False)'}), "(1, activation='linear', use_bias=False)\n", (9218, 9258), False, 'from keras.layers import Input, Dense\n'), ((9646, 9691), 'keras.layers.Dense', 'Dense', (['(1)'], {'activation': '"""linear"""', 'use_bias': '(False)'}), "(1, activation='linear', use_bias=False)\n", (9651, 9691), False, 'from keras.layers import Input, Dense\n'), ((9879, 9898), 'keras.initializers.Ones', 'initializers.Ones', ([], {}), '()\n', (9896, 9898), False, 'from keras import initializers\n'), ((21408, 21425), 'numpy.shape', 'np.shape', (['x_train'], {}), '(x_train)\n', (21416, 21425), True, 'import numpy as np\n'), ((21430, 21447), 'numpy.shape', 'np.shape', (['x_train'], {}), '(x_train)\n', (21438, 21447), True, 'import numpy as np\n'), ((24630, 24646), 'numpy.shape', 'np.shape', (['x_test'], {}), '(x_test)\n', (24638, 24646), True, 'import numpy as np\n'), ((24651, 24667), 'numpy.shape', 'np.shape', (['x_test'], {}), '(x_test)\n', (24659, 24667), True, 'import numpy as np\n'), ((1317, 1329), 'keras.backend.sigmoid', 'K.sigmoid', (['x'], {}), '(x)\n', (1326, 1329), True, 'from keras import backend as K\n'), ((6007, 6027), 'keras.initializers.Zeros', 'initializers.Zeros', ([], {}), '()\n', (6025, 6027), False, 'from keras import initializers\n'), ((17479, 17496), 'numpy.shape', 'np.shape', (['x_train'], {}), '(x_train)\n', (17487, 17496), True, 'import numpy as np\n'), ((17501, 17518), 'numpy.shape', 'np.shape', (['x_train'], {}), '(x_train)\n', (17509, 17518), True, 'import numpy as np\n'), ((20643, 20660), 'numpy.log', 'np.log', (['y_pred[i]'], {}), '(y_pred[i])\n', (20649, 20660), True, 'import numpy as np\n'), ((20681, 20702), 'numpy.log', 'np.log', (['(1 - y_pred[i])'], {}), '(1 - y_pred[i])\n', (20687, 20702), 
True, 'import numpy as np\n'), ((21000, 21017), 'numpy.log', 'np.log', (['y_pred[i]'], {}), '(y_pred[i])\n', (21006, 21017), True, 'import numpy as np\n'), ((21038, 21059), 'numpy.log', 'np.log', (['(1 - y_pred[i])'], {}), '(1 - y_pred[i])\n', (21044, 21059), True, 'import numpy as np\n'), ((26147, 26164), 'numpy.log', 'np.log', (['y_pred[i]'], {}), '(y_pred[i])\n', (26153, 26164), True, 'import numpy as np\n'), ((26185, 26206), 'numpy.log', 'np.log', (['(1 - y_pred[i])'], {}), '(1 - y_pred[i])\n', (26191, 26206), True, 'import numpy as np\n'), ((4037, 4052), 'numpy.shape', 'np.shape', (['Q_jk1'], {}), '(Q_jk1)\n', (4045, 4052), True, 'import numpy as np\n'), ((4066, 4081), 'numpy.round', 'np.round', (['Q_jk1'], {}), '(Q_jk1)\n', (4074, 4081), True, 'import numpy as np\n'), ((4674, 4684), 'sys.exit', 'sys.exit', ([], {}), '()\n', (4682, 4684), False, 'import sys\n'), ((7809, 7832), 'keras.initializers.Identity', 'initializers.Identity', ([], {}), '()\n', (7830, 7832), False, 'from keras import initializers\n'), ((8017, 8040), 'keras.initializers.Identity', 'initializers.Identity', ([], {}), '()\n', (8038, 8040), False, 'from keras import initializers\n'), ((4092, 4107), 'numpy.shape', 'np.shape', (['Q_jk1'], {}), '(Q_jk1)\n', (4100, 4107), True, 'import numpy as np\n')] |
import zengl
from defaults import defaults
from grid import grid_pipeline
from window import Window
# Create the OS window and the zengl rendering context.
window = Window(1280, 720)
ctx = zengl.context()
# Multisampled (4x) color and depth render targets matching the window size.
image = ctx.image(window.size, 'rgba8unorm', samples=4)
depth = ctx.image(window.size, 'depth24plus', samples=4)
image.clear_value = (0.2, 0.2, 0.2, 1.0)  # dark gray background
# Expose the shared GLSL snippets to shaders as: #include "defaults"
ctx.includes['defaults'] = defaults
# Reference-grid pipeline drawing into the same color/depth targets.
grid = grid_pipeline(ctx, [image, depth])
pipeline = ctx.pipeline(
vertex_shader='''
#version 330
#include "defaults"
vec3 vertices[24] = vec3[](
vec3(0.000000, 1.000000, -0.500000),
vec3(0.000000, 1.000000, 0.500000),
vec3(0.500000, 0.866025, -0.500000),
vec3(0.500000, 0.866025, 0.500000),
vec3(0.866025, 0.500000, -0.500000),
vec3(0.866025, 0.500000, 0.500000),
vec3(1.000000, -0.000000, -0.500000),
vec3(1.000000, -0.000000, 0.500000),
vec3(0.866025, -0.500000, -0.500000),
vec3(0.866025, -0.500000, 0.500000),
vec3(0.500000, -0.866025, -0.500000),
vec3(0.500000, -0.866025, 0.500000),
vec3(-0.000000, -1.000000, -0.500000),
vec3(-0.000000, -1.000000, 0.500000),
vec3(-0.500000, -0.866025, -0.500000),
vec3(-0.500000, -0.866025, 0.500000),
vec3(-0.866025, -0.500000, -0.500000),
vec3(-0.866025, -0.500000, 0.500000),
vec3(-1.000000, 0.000000, -0.500000),
vec3(-1.000000, 0.000000, 0.500000),
vec3(-0.866025, 0.500000, -0.500000),
vec3(-0.866025, 0.500000, 0.500000),
vec3(-0.500000, 0.866025, -0.500000),
vec3(-0.500000, 0.866025, 0.500000)
);
vec3 normals[14] = vec3[](
vec3(-0.0000, 1.0000, -0.0000),
vec3(0.5000, 0.8660, -0.0000),
vec3(0.8660, 0.5000, -0.0000),
vec3(1.0000, -0.0000, -0.0000),
vec3(0.8660, -0.5000, -0.0000),
vec3(0.5000, -0.8660, -0.0000),
vec3(-0.0000, -1.0000, -0.0000),
vec3(-0.5000, -0.8660, -0.0000),
vec3(-0.8660, -0.5000, -0.0000),
vec3(-1.0000, -0.0000, -0.0000),
vec3(-0.8660, 0.5000, -0.0000),
vec3(-0.0000, -0.0000, 1.0000),
vec3(-0.5000, 0.8660, -0.0000),
vec3(-0.0000, -0.0000, -1.0000)
);
vec2 texcoords[50] = vec2[](
vec2(1.000000, 0.500000),
vec2(0.000000, 0.500000),
vec2(0.750000, 0.490000),
vec2(1.000000, 1.000000),
vec2(0.250000, 0.490000),
vec2(0.000000, 1.000000),
vec2(0.916667, 0.500000),
vec2(0.870000, 0.457846),
vec2(0.916667, 1.000000),
vec2(0.370000, 0.457846),
vec2(0.833333, 0.500000),
vec2(0.957846, 0.370000),
vec2(0.833333, 1.000000),
vec2(0.457846, 0.370000),
vec2(0.750000, 0.500000),
vec2(0.990000, 0.250000),
vec2(0.750000, 1.000000),
vec2(0.490000, 0.250000),
vec2(0.666667, 0.500000),
vec2(0.957846, 0.130000),
vec2(0.666667, 1.000000),
vec2(0.457846, 0.130000),
vec2(0.583333, 0.500000),
vec2(0.870000, 0.042154),
vec2(0.583333, 1.000000),
vec2(0.370000, 0.042154),
vec2(0.500000, 0.500000),
vec2(0.750000, 0.010000),
vec2(0.500000, 1.000000),
vec2(0.250000, 0.010000),
vec2(0.416667, 0.500000),
vec2(0.630000, 0.042154),
vec2(0.416667, 1.000000),
vec2(0.130000, 0.042154),
vec2(0.333333, 0.500000),
vec2(0.542154, 0.130000),
vec2(0.333333, 1.000000),
vec2(0.042154, 0.130000),
vec2(0.250000, 0.500000),
vec2(0.510000, 0.250000),
vec2(0.250000, 1.000000),
vec2(0.010000, 0.250000),
vec2(0.166667, 0.500000),
vec2(0.542154, 0.370000),
vec2(0.042154, 0.370000),
vec2(0.166667, 1.000000),
vec2(0.083333, 0.500000),
vec2(0.630000, 0.457846),
vec2(0.130000, 0.457846),
vec2(0.083333, 1.000000)
);
int vertex_indices[132] = int[](
1, 2, 0, 3, 4, 2, 5, 6, 4, 7, 8, 6, 9, 10, 8, 11, 12, 10, 13, 14, 12, 15, 16, 14, 17, 18, 16, 19, 20, 18,
21, 13, 5, 21, 22, 20, 23, 0, 22, 6, 14, 22, 1, 3, 2, 3, 5, 4, 5, 7, 6, 7, 9, 8, 9, 11, 10, 11, 13, 12, 13,
15, 14, 15, 17, 16, 17, 19, 18, 19, 21, 20, 5, 3, 1, 1, 23, 21, 21, 19, 17, 17, 15, 13, 13, 11, 9, 9, 7, 5,
5, 1, 21, 21, 17, 13, 13, 9, 5, 21, 23, 22, 23, 1, 0, 22, 0, 2, 2, 4, 6, 6, 8, 10, 10, 12, 14, 14, 16, 18,
18, 20, 22, 22, 2, 6, 6, 10, 14, 14, 18, 22
);
int normal_indices[132] = int[](
0, 1, 0, 1, 2, 1, 2, 3, 2, 3, 4, 3, 4, 5, 4, 5, 6, 5, 6, 7, 6, 7, 8, 7, 8, 9, 8, 9, 10, 9, 11, 11, 11, 10,
12, 10, 12, 0, 12, 13, 13, 13, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6, 7, 7, 7, 8, 8, 8,
9, 9, 9, 10, 10, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
11, 11, 11, 11, 11, 10, 12, 12, 12, 0, 0, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13
);
int texcoord_indices[132] = int[](
3, 6, 0, 8, 10, 6, 12, 14, 10, 16, 18, 14, 20, 22, 18, 24, 26, 22, 28, 30, 26, 32, 34, 30, 36, 38, 34, 40,
42, 38, 44, 29, 13, 45, 46, 42, 49, 1, 46, 15, 31, 47, 3, 8, 6, 8, 12, 10, 12, 16, 14, 16, 20, 18, 20, 24,
22, 24, 28, 26, 28, 32, 30, 32, 36, 34, 36, 40, 38, 40, 45, 42, 13, 9, 4, 4, 48, 44, 44, 41, 37, 37, 33,
29, 29, 25, 21, 21, 17, 13, 13, 4, 44, 44, 37, 29, 29, 21, 13, 45, 49, 46, 49, 5, 1, 47, 2, 7, 7, 11, 15,
15, 19, 23, 23, 27, 31, 31, 35, 39, 39, 43, 47, 47, 7, 15, 15, 23, 31, 31, 39, 47
);
out vec3 v_vertex;
out vec3 v_normal;
out vec2 v_texcoord;
void main() {
v_vertex = vertices[vertex_indices[gl_VertexID]];
v_normal = normals[normal_indices[gl_VertexID]];
v_texcoord = texcoords[texcoord_indices[gl_VertexID]];
gl_Position = mvp * vec4(v_vertex, 1.0);
}
''',
fragment_shader='''
#version 330
#include "defaults"
in vec3 v_normal;
layout (location = 0) out vec4 out_color;
void main() {
float lum = dot(normalize(light.xyz), normalize(v_normal)) * 0.7 + 0.3;
out_color = vec4(lum, lum, lum, 1.0);
}
''',
framebuffer=[image, depth],
topology='triangles',
cull_face='back',
vertex_count=132,
)
# Main loop: clear both targets, draw the grid then the cylinder, and
# present. blit() with no arguments presumably resolves the multisampled
# image to the default framebuffer — confirm against the zengl docs.
while window.update():
    image.clear()
    depth.clear()
    grid.render()
    pipeline.render()
    image.blit()
| [
"window.Window",
"grid.grid_pipeline",
"zengl.context"
] | [((111, 128), 'window.Window', 'Window', (['(1280)', '(720)'], {}), '(1280, 720)\n', (117, 128), False, 'from window import Window\n'), ((135, 150), 'zengl.context', 'zengl.context', ([], {}), '()\n', (148, 150), False, 'import zengl\n'), ((351, 385), 'grid.grid_pipeline', 'grid_pipeline', (['ctx', '[image, depth]'], {}), '(ctx, [image, depth])\n', (364, 385), False, 'from grid import grid_pipeline\n')] |