code stringlengths 1 1.72M | language stringclasses 1 value |
|---|---|
from Tkinter import *
from ttk import Combobox, Label
from tkFont import families
from tkSimpleDialog import Dialog
class PreferencesDialog(Dialog):
    """Modal preferences dialog for the annotation window's font settings.

    After the dialog is dismissed, ``result`` is True if the user accepted
    (pressed OK), and ``font`` / ``size`` hold the (possibly updated) choices.
    """
    def __init__(self, parent, title, font, size):
        # Store the current settings first: Dialog.__init__ blocks until the
        # dialog is closed and calls body()/apply()/cancel() along the way.
        self._master = parent
        self.result = False  # set True only when apply() runs (user pressed OK)
        self.font = font
        self.size = size
        Dialog.__init__(self, parent, title)

    def body(self, master):
        """Build the dialog widgets: font-family and size rows."""
        self._npFrame = LabelFrame(master, text='Annotation window text')
        self._npFrame.pack(fill=X)
        self._fontFrame = Frame(self._npFrame, borderwidth=0)
        self._fontLabel = Label(self._fontFrame, text='Font:', width=5)
        self._fontLabel.pack(side=LEFT, padx=3)
        # Read-only comboboxes: only the listed families / sizes can be chosen.
        self._fontCombo = Combobox(self._fontFrame, values=sorted(families()),
                                   state='readonly')
        self._fontCombo.pack(side=RIGHT, fill=X)
        self._sizeFrame = Frame(self._npFrame, borderwidth=0)
        self._sizeLabel = Label(self._sizeFrame, text='Size:', width=5)
        self._sizeLabel.pack(side=LEFT, padx=3)
        self._sizeCombo = Combobox(self._sizeFrame, values=range(8,15),
                                   state='readonly')
        self._sizeCombo.pack(side=RIGHT, fill=X)
        self._fontFrame.pack()
        self._sizeFrame.pack()
        self._npFrame.pack(fill=X)
        # Preselect the caller-supplied current settings.
        self._fontCombo.set(self.font)
        self._sizeCombo.set(self.size)

    def apply(self):
        """Called by Dialog on OK: capture the selections and mark success."""
        self.font = self._fontCombo.get()
        self.size = self._sizeCombo.get()
        self.result = True

    def cancel(self, event=None):
        """Dismiss the dialog, returning focus to the parent window."""
        # NOTE(review): reads self.parent (set by Dialog), not the _master
        # stored in __init__ — _master appears unused; confirm before removing.
        if self.parent is not None:
            self.parent.focus_set()
        self.destroy()
| Python |
import web
class Player:
    """Wrapper around one row of the 'players' table.

    Column values are cached in ``data_*`` attributes at construction time;
    mutations go straight to the database (the cache is not refreshed).
    """
    # game_state column values
    TURN = 0          # this player may guess now
    WAIT_TURN = 1     # waiting for another player's turn
    WIN = 2
    LOSE = 3
    GUESS = 4
    NOT_STARTED = 5

    @classmethod
    def create(cls, game, word):
        """Insert a fresh player row for *game* with secret *word*; return its wrapper."""
        p = web.insert('players',
                       word = word,
                       game = game,
                       comments = "",
                       alphabet_state_1 = 0,
                       alphabet_state_2 = 0,
                       game_state = Player.NOT_STARTED)
        return cls(p)

    @classmethod
    def participants(cls, game):
        """Return all players of *game* ordered by id (i.e. join order)."""
        players = web.select('players', where = 'game = $game', order = 'id', vars = locals())
        result = []
        for p in players:
            # NOTE(review): cls(p.id) re-queries each row; one query would do.
            result.append(cls(p.id))
        return result

    def __init__(self, id):
        """Load the row with primary key *id* and cache its columns."""
        players = web.select('players', where = 'id = $id', vars = locals())
        for p in players:
            self.data_id = int(p.id)
            self.data_state = int(p.game_state)
            self.data_state_1 = int(p.alphabet_state_1)
            self.data_state_2 = int(p.alphabet_state_2)
            self.data_word = p.word
            self.data_game = int(p.game)

    # Simple read accessors over the cached row.
    def game(self):
        return self.data_game
    def id(self):
        return self.data_id
    def state(self):
        return self.data_state
    def state_1(self):
        return self.data_state_1
    def state_2(self):
        return self.data_state_2
    def word(self):
        return self.data_word

    def update_state(self, state):
        """Persist a new game_state for this player."""
        web.update('players',
                   where = 'id = $id',
                   game_state = state,
                   vars = {'id': self.id()})

    def set_turn(self):
        # TODO: not implemented yet.
        pass

    def set_wait_turn(self):
        """End this player's turn and hand TURN to the next player in id order.

        The player after this one becomes TURN, wrapping around to the first
        player when this player is last. No-op if already waiting.
        """
        players = Player.participants(self.game())
        if self.state() == Player.WAIT_TURN:
            return
        found = False
        self.update_state(Player.WAIT_TURN)
        for p in players:
            if found:
                p.update_state(Player.TURN)
                break
            if p.id() == self.id():
                found = True
        else:
            # current player is the last one in the list
            assert found
            players[0].update_state(Player.TURN)

    def set_vote(self):
        # TODO: not implemented yet.
        pass

    def set_wait_vote(self):
        # TODO: not implemented yet.
        pass
| Python |
import web

# web.py database configuration: SQLite database file at db\letters.db
# (Windows-style relative path). Imported for its side effect.
web.config.db_parameters = dict(dbn='sqlite',
                                db='db\\letters.db')
import web
import random
from player import Player
# URL routing table: regex pattern -> handler class name.
urls = ('/', 'index',
        '/game([0-9]*)-([0-9]*)', 'game',  # captures: game id, player id
        '/join', 'join',
        '/create', 'create',
        '/move', 'move',
        '/ajax/game_state/([0-9]*)', 'game_state',
        '/ajax/state/([0-9]*)/td([0-9]*)-([0-9]*)', 'state')

# Template renderer; caching disabled for development.
render = web.template.render('templates/', cache = False)
class index:
    """Front page: render the list of all games."""
    def GET(self):
        games = web.select('games')
        web.output(render.index(games))
class create:
    """Create a new game plus its first player, then redirect to the game page."""
    def POST(self):
        i = web.input()  # expects form field: word (the creator's secret word)
        # NOTE(review): name/alphabet/amount are hard-coded defaults for now.
        n = web.insert('games', name = 'Game', alphabet = 1, amount = 2)
        # (old inline web.insert('players', ...) superseded by Player.create)
        p = Player.create(n, i.word)
        # web.update('games', where = 'id = $n', turn = p, vars = locals())
        web.seeother('/game%d-%d' % (n, p.id()))
def count(guess, word):
    """Score *guess* against the secret *word*.

    Returns the string '!' on an exact match; otherwise the number of
    characters of *guess* that occur anywhere in *word* (each repeated
    letter in the guess is counted separately).
    """
    if guess == word:
        return '!'
    return sum(1 for letter in guess if letter in word)
class game:
    """Render the game page for one (game, player) pair."""
    def GET(self, game, player):
        g = web.select('games', where = 'id = $game', vars = locals())[0]
        alphabet = web.select('alphabet', where = 'id = $a', vars = {'a': g.alphabet})[0]
        # (old inline row handling, superseded by the Player wrapper)
        # player_turn = g.turn
        # player_me = int(player)
        # mine_data_all = web.select('players',
        #                            where = 'game = $game AND id = $player_me',
        #                            vars = locals())
        # mine_data = mine_data_all[0]
        # word = mine_data['word']
        # assert len(word) == 4
        me = Player(player)
        player_me = me.id()
        word = me.word()
        # All guesses this player has made in this game.
        guesses = web.select('guesses',
                             where = 'game = $game AND player = $player_me',
                             vars = locals())
        my_guesses = []
        for g in guesses:  # NOTE(review): reuses 'g', clobbering the games row above (no longer needed)
            my_guesses.append(g.word)
        players = Player.participants(game)
        p_dict = []
        player_turn = -1 # it is possible that no one has a turn
        for p in players:
            if p.state() == Player.TURN:
                player_turn = p.id()
            if p.id() == player_me:
                continue
            # Per-opponent view: my guesses scored against his word ('mine'),
            # and his guesses scored against my word ('his').
            his = web.select('guesses', where = 'game = $game AND player = $p',
                             vars = {'game': game, 'p': p.id()})
            p_dict.append({'id': p.id(),
                           'mine': [count(x, p.word()) for x in my_guesses],
                           'his' : [(h.word, count(h.word, word)) for h in his]})
        # Decode the two per-letter bitmasks into (char, 2-bit state, index)
        # triples: state_1 supplies the high bit, state_2 the low bit.
        letters_1 = me.state_1()
        letters_2 = me.state_2()
        states = []
        alph_utf = alphabet.chars
        for i in range(len(alph_utf)):
            state = (letters_1 & 1) * 2 + (letters_2 & 1)
            letters_1 = letters_1 >> 1
            letters_2 = letters_2 >> 1
            states += [(alph_utf[i],state, i)]
        web.output(render.game(word,
                               game,
                               p_dict,
                               my_guesses,
                               player_me,
                               player_turn,
                               range(len(my_guesses)),
                               states))
class join:
    """Join an existing game; start the game once the last seat is filled."""
    def POST(self):
        i = web.input()  # expects form fields: game (id), word (secret word)
        game_dict = {'g': i.game}
        already = web.select('players', what = 'COUNT(*) as amount', where = 'game = $g', vars = game_dict)[0]
        need = web.select('games', where = 'id = $g', vars = game_dict)[0]
        if (already.amount < need.amount):
            p = Player.create(i.game, i.word)
            # Last seat just got filled: everyone waits, newcomer gets the turn.
            if already.amount == need.amount - 1:
                web.update('players', where = 'game = $g', game_state = Player.WAIT_TURN, vars = game_dict)
                web.update('players', where = 'id = $p', game_state = Player.TURN, vars = {'p': p.id()})
            web.seeother('/game%d-%d' % (int(p.game()), p.id()))
        else:
            # NOTE(review): joining a full game just trips this assert;
            # should render a proper error page instead.
            assert(False)
class game_state:
    """AJAX handler: return the raw game_state value of one player."""
    def GET(self, player):
        player_id = int(player)
        g = web.select('players', where = 'id = $player_id', vars = locals())
        web.output(g[0].game_state)
class state:
    """AJAX handler: persist the 2-bit marking state of one alphabet letter.

    URL parameters: player id, letter index, new 2-bit state value.
    """
    def GET(self, player, number, state):
        p = web.select('players', where = 'id = $player', vars = locals())[0]
        # Each letter's 2-bit state is split across two bitmask columns:
        # bit <number> of alphabet_state_1 holds the high bit,
        # bit <number> of alphabet_state_2 holds the low bit.
        letters_1 = p['alphabet_state_1']
        letters_2 = p['alphabet_state_2']
        if (int(state) & 1) == 0:
            letters_2 &= (~(1 << int(number)))
        else:
            letters_2 |= (1 << int(number))
        if ((int(state)>>1) & 1) == 0:
            letters_1 &= (~(1 << int(number)))
        else:
            letters_1 |= (1 << int(number))
        web.update('players',
                   where = 'id = $player',
                   alphabet_state_1 = letters_1,
                   alphabet_state_2 = letters_2,
                   vars = locals())
class move:
    """Record a guess for a player and pass the turn to the next player."""
    def POST(self):
        i = web.input()  # expects form fields: game, word, player
        game_dict = dict(game_id = i.game)
        # we have to validate, that it is our turn  (TODO: not done yet)
        game = web.select('games',
                          where = 'id = $game_id',
                          vars = game_dict)[0]
        game_dict['alphabet'] = game.alphabet
        game_dict['word'] = i.word
        guess = web.insert('guesses',
                           word = i.word,
                           game = i.game,
                           player = i.player)
        Player(i.player).set_wait_turn()
        web.seeother('/game%d-%d' % (int(i.game), int(i.player)))
# Show detailed tracebacks in the browser on errors (development setting).
web.webapi.internalerror = web.debugerror

if __name__ == '__main__':
    random.seed()
    import config  # applies the DB connection settings as a side effect
    web.run(urls, globals(), web.reloader)
from web import form
# Form used on the game page to submit a guess; the game id travels along
# as a hidden field.
guess_word_form = form.Form(form.Textbox("word"),
                            form.Hidden("game"))
#####################################################################################
## Copyright (c) 2012, pa.eeapai@gmail.com ##
## All rights reserved. ##
## ##
## Redistribution and use in source and binary forms, with or without ##
## modification, are permitted provided that the following conditions are met: ##
## * Redistributions of source code must retain the above copyright ##
## notice, this list of conditions and the following disclaimer. ##
## * Redistributions in binary form must reproduce the above copyright ##
## notice, this list of conditions and the following disclaimer in the ##
## documentation and/or other materials provided with the distribution. ##
## * Neither the name of the pa.eeapai@gmail.com nor the ##
## names of its contributors may be used to endorse or promote products ##
## derived from this software without specific prior written permission. ##
## ##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ##
## ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED ##
## WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE ##
## DISCLAIMED. IN NO EVENT SHALL pa.eeapai@gmail.com BE LIABLE FOR ANY ##
## DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ##
## (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; ##
## LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ##
## ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ##
## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ##
## SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ##
#####################################################################################
class ExernalLib:
    """Descriptor for one downloadable external library.

    Attributes:
        URL:         download source.
        destination: local folder the download/extraction goes to.
        unzip:       True when the download is an archive to extract.
        zipPath:     folder inside the archive to extract from.
    """
    def __init__(self, URL, destiantion, unzip, zipPath):
        # NOTE(review): the parameter is misspelled 'destiantion' (the
        # attribute is spelled correctly); kept for caller compatibility.
        self.URL = URL
        self.destination = destiantion
        self.unzip = unzip
        self.zipPath = zipPath
# The external libraries this project depends on: ST standard-peripheral and
# USB device libraries, both zip archives partially extracted into Libs\STM32\ST.
externalLibs = [
    ExernalLib("http://www.st.com/internet/com/SOFTWARE_RESOURCES/SW_COMPONENT/FIRMWARE/stm32f10x_stdperiph_lib.zip", ".\\..\\..\\Libs\\STM32\\ST\\", True, "STM32F10x_StdPeriph_Lib_V3.5.0"),
    ExernalLib("http://www.st.com/internet/com/SOFTWARE_RESOURCES/SW_COMPONENT/FIRMWARE/stm32_usb-fs-device_lib.zip", ".\\..\\..\\Libs\\STM32\\ST\\", True, "STM32_USB-FS-Device_Lib_V3.4.0"),
    ]
#####################################################################################
## Copyright (c) 2012, pa.eeapai@gmail.com ##
## All rights reserved. ##
## ##
## Redistribution and use in source and binary forms, with or without ##
## modification, are permitted provided that the following conditions are met: ##
## * Redistributions of source code must retain the above copyright ##
## notice, this list of conditions and the following disclaimer. ##
## * Redistributions in binary form must reproduce the above copyright ##
## notice, this list of conditions and the following disclaimer in the ##
## documentation and/or other materials provided with the distribution. ##
## * Neither the name of the pa.eeapai@gmail.com nor the ##
## names of its contributors may be used to endorse or promote products ##
## derived from this software without specific prior written permission. ##
## ##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ##
## ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED ##
## WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE ##
## DISCLAIMED. IN NO EVENT SHALL pa.eeapai@gmail.com BE LIABLE FOR ANY ##
## DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ##
## (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; ##
## LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ##
## ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ##
## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ##
## SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ##
#####################################################################################
import ExternalLibsList as ell
import DownloadFile as dl
import ZIP
import os
import FileUtils as fu
print os.getcwd()

# Fetch every configured external library: archives are downloaded to a
# temp file, the wanted subtree is extracted, then the temp file is removed;
# plain files are downloaded straight into their destination folder.
for externalLib in ell.externalLibs:
    if externalLib.unzip:
        if dl.downloadFile(externalLib.URL, "tempFile"):
            ZIP.extract("tempFile", externalLib.zipPath, externalLib.destination)
            fu.delete("tempFile")
    else:
        fu.createFolders(os.path.dirname(externalLib.destination))
        dl.downloadFile(externalLib.URL, externalLib.destination)
| Python |
#####################################################################################
## Copyright (c) 2012, pa.eeapai@gmail.com ##
## All rights reserved. ##
## ##
## Redistribution and use in source and binary forms, with or without ##
## modification, are permitted provided that the following conditions are met: ##
## * Redistributions of source code must retain the above copyright ##
## notice, this list of conditions and the following disclaimer. ##
## * Redistributions in binary form must reproduce the above copyright ##
## notice, this list of conditions and the following disclaimer in the ##
## documentation and/or other materials provided with the distribution. ##
## * Neither the name of the pa.eeapai@gmail.com nor the ##
## names of its contributors may be used to endorse or promote products ##
## derived from this software without specific prior written permission. ##
## ##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ##
## ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED ##
## WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE ##
## DISCLAIMED. IN NO EVENT SHALL pa.eeapai@gmail.com BE LIABLE FOR ANY ##
## DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ##
## (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; ##
## LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ##
## ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ##
## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ##
## SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ##
#####################################################################################
import ExternalLibsList as ell
import urllib2
import sys
# Probe each configured URL and count the ones that no longer answer with
# HTTP 200; exit with -1 if any are missing, 0 if all are available.
goneFiles = 0
for externalLib in ell.externalLibs:
    print externalLib.URL,  # trailing comma: verdict prints on the same line
    gone = False
    try:
        ret = urllib2.urlopen(externalLib.URL)
        if ret.code == 200:
            print "STILL THERE"
        else:
            gone = True
    except:
        # NOTE(review): bare except also counts transient network/DNS
        # failures as "gone".
        gone = True
    if gone:
        print "GONE"
        goneFiles = goneFiles + 1
if goneFiles == 1:
    print "1 online file missing"
    sys.exit(-1)
elif goneFiles > 0:
    print "{0:d} online files missing".format(goneFiles)
    sys.exit(-1)
else:
    print "All files available"
    sys.exit(0)
| Python |
#####################################################################################
## Copyright (c) 2012, pa.eeapai@gmail.com ##
## All rights reserved. ##
## ##
## Redistribution and use in source and binary forms, with or without ##
## modification, are permitted provided that the following conditions are met: ##
## * Redistributions of source code must retain the above copyright ##
## notice, this list of conditions and the following disclaimer. ##
## * Redistributions in binary form must reproduce the above copyright ##
## notice, this list of conditions and the following disclaimer in the ##
## documentation and/or other materials provided with the distribution. ##
## * Neither the name of the pa.eeapai@gmail.com nor the ##
## names of its contributors may be used to endorse or promote products ##
## derived from this software without specific prior written permission. ##
## ##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ##
## ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED ##
## WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE ##
## DISCLAIMED. IN NO EVENT SHALL pa.eeapai@gmail.com BE LIABLE FOR ANY ##
## DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ##
## (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; ##
## LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ##
## ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ##
## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ##
## SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ##
#####################################################################################
import datetime
import os
import FileUtils as fu
# 3-clause ("new") BSD license text with <placeholders> to be substituted.
newBSDLicTemplate = '''Copyright (c) <year>, <copyright holder>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the <organization> nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.'''

# Fill in the template placeholders for this year and this project.
now = datetime.datetime.now()
year = str(now.year)
copyrightHolder = "pa.eeapai@gmail.com"
organization = "pa.eeapai@gmail.com"
lic = newBSDLicTemplate.replace("<year>", year)
lic = lic.replace("<copyright holder>", copyrightHolder)
lic = lic.replace("<COPYRIGHT HOLDER>", copyrightHolder)
lic = lic.replace("<organization>", organization)

# Longest license line; used to pad every line to a uniform box width.
licLines = lic.splitlines()
maxLicLineLength = 0
for line in licLines:
    if len(line) > maxLicLineLength: maxLicLineLength = len(line)
class Lang:
    """One supported language: its name, file extensions, and the comment
    "frame" whose inner space is replaced by a padded license line."""
    def __init__(self, lang, exts, comment):
        self.lang = lang
        self.exts = exts
        self.comment = comment
# Languages whose files get license headers; the comment string is a frame:
# the space inside it is replaced by each padded license line.
languages = [Lang("Python" , ["py"] , "## ##"),
             Lang("C/C++" , ["c", "cpp", "h"] , "// //"),
             Lang("C#" , ["cs"] , "// //"),
             Lang("GCC Linker", ["ind"] , "/* */"),
             Lang("ASM" , ["s"] , "## ##")
             ]
def hasLicense(data):
    """Heuristically detect the project license header near the top of *data*.

    Looks for three representative license lines within the first
    2 * len(lic) characters; all three must be present.
    """
    window = len(lic) * 2
    for probe in (licLines[1], licLines[5], licLines[7]):
        if data.find(probe, 0, window) == -1:
            return False
    return True
def prependLicense(path, comment, data):
    """Rewrite *path* so it starts with the boxed license followed by *data*.

    *comment* is the language's comment frame (e.g. "## ##" or "/* */");
    str.replace substitutes its inner space(s) with a padded license line.
    """
    with file(path, 'w') as modified:
        # Frames not ending in a space close with a trailing comment marker;
        # pick the fill character and border width accordingly.
        commentAtTheEnd = not comment.endswith(" ")
        commentExtender = comment[0]
        additionalFillerLength = 3
        if commentAtTheEnd:
            commentExtender = comment[1]
            additionalFillerLength = 2
        # Top border: the frame with its inner space widened to a solid bar.
        modified.write(comment.replace(" ", (maxLicLineLength + additionalFillerLength) * commentExtender) + "\n")
        for licLine in licLines:
            line = ""
            if commentAtTheEnd:
                line = comment.replace(" ", " " + licLine + " " * (maxLicLineLength + 1 - len(licLine))) + "\n"
            else:
                line = comment.replace(" ", " " + licLine + " " * (maxLicLineLength + 1 - len(licLine))) + commentExtender + "\n"
            modified.write(line)
        # Bottom border, a blank line, then the original file content.
        modified.write(comment.replace(" ", (maxLicLineLength + additionalFillerLength) * commentExtender) + "\n" + "\n")
        modified.write(data)
def addLicense(path):
    """Interactively ask whether the license should be added to *path*.

    Returns True for any non-empty answer, False for a plain ENTER.
    """
    answer = raw_input("Add license to \"" + path + "\" [ENTER for NO, anything for YES]:")
    return len(answer) > 0
def checkForLicense(path, comment):
    """Ensure *path* carries the license header, asking the user first.

    A .backup copy is kept while the file is rewritten and deleted on success.
    """
    with file(path, 'r') as original:
        data = original.read()
    if hasLicense(data):
        print path + " has a license"
        return
    if addLicense(path):
        print "Adding license to: " + path
        backupPath = path + ".backup"
        fu.copy(path, backupPath)
        prependLicense(path, comment, data)
        fu.delete(backupPath)
def checkLicenses(path):
    """Recursively offer to add the license to every known source file under *path*."""
    for root, dirs, files in os.walk(path):
        for file in files:  # NOTE(review): locally shadows the Python 2 'file' builtin
            filePath = os.path.join(root, file)
            # Skip anything under a dot-folder (e.g. \.git\, \.svn\).
            if not -1 == filePath.find("\\."):
                continue
            for lang in languages:
                for ext in lang.exts:
                    if filePath.endswith("." + ext):
                        checkForLicense(filePath, lang.comment)
# Roots processed in earlier runs; only the Libs tree is active right now.
#checkLicenses("\\Sources\\Common\\")
#checkLicenses("\\Sources\\LinkerScripts\\")
#checkLicenses("\\Sources\\Projects\\")
checkLicenses("\\Sources\\Projects\\Libs\\")
| Python |
#####################################################################################
## Copyright (c) 2012, pa.eeapai@gmail.com ##
## All rights reserved. ##
## ##
## Redistribution and use in source and binary forms, with or without ##
## modification, are permitted provided that the following conditions are met: ##
## * Redistributions of source code must retain the above copyright ##
## notice, this list of conditions and the following disclaimer. ##
## * Redistributions in binary form must reproduce the above copyright ##
## notice, this list of conditions and the following disclaimer in the ##
## documentation and/or other materials provided with the distribution. ##
## * Neither the name of the pa.eeapai@gmail.com nor the ##
## names of its contributors may be used to endorse or promote products ##
## derived from this software without specific prior written permission. ##
## ##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ##
## ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED ##
## WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE ##
## DISCLAIMED. IN NO EVENT SHALL pa.eeapai@gmail.com BE LIABLE FOR ANY ##
## DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ##
## (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; ##
## LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ##
## ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ##
## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ##
## SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ##
#####################################################################################
from zipfile import ZipFile
import FileUtils as fu
import os
def shouldUnzipPath(pathInZip, zipPath):
    """Return True when archive member *zipPath* lies under the *pathInZip* prefix."""
    return zipPath.startswith(pathInZip)
def getPathToUnzip(pathToUnzipRoot, pathInZip, zipPath):
    """Map archive member *zipPath* onto its on-disk destination.

    Strips the *pathInZip* prefix, converts separators to backslashes,
    drops a single leading backslash, and joins onto *pathToUnzipRoot*.
    """
    relative = fu.convertToBackslashes(zipPath[len(pathInZip):])
    if relative[0] == '\\':
        relative = relative[1:]
    return pathToUnzipRoot + relative
def extract(pathToZip, pathInZip, pathToUnzip):
    """Extract the *pathInZip* subtree of archive *pathToZip* into *pathToUnzip*.

    Prints per-file progress and a final SUCCESS/FAIL verdict; failures are
    swallowed.
    """
    if not pathToUnzip.endswith('\\'):
        pathToUnzip = pathToUnzip + '\\'
    print "Extracting {0} from {1} to {2}".format(pathInZip, pathToZip, pathToUnzip)
    try:
        z = ZipFile(pathToZip)
        for name in z.namelist():
            if not shouldUnzipPath(pathInZip, name):
                continue
            extractDestinationPath = getPathToUnzip(pathToUnzip, pathInZip, name)
            # Zip directory entries end with '/': just create the folder.
            if name.endswith('/'):
                if not fu.folderExists(extractDestinationPath):
                    os.makedirs(extractDestinationPath)
            else:
                print name + " -> " + extractDestinationPath
                with open(extractDestinationPath, 'wb') as destinationFile:
                    destinationFile.write(z.read(name))
        print 'SUCCESS'
    except:
        # NOTE(review): bare except hides the actual error; consider logging it.
        print 'FAIL'
| Python |
#####################################################################################
## Copyright (c) 2012, pa.eeapai@gmail.com ##
## All rights reserved. ##
## ##
## Redistribution and use in source and binary forms, with or without ##
## modification, are permitted provided that the following conditions are met: ##
## * Redistributions of source code must retain the above copyright ##
## notice, this list of conditions and the following disclaimer. ##
## * Redistributions in binary form must reproduce the above copyright ##
## notice, this list of conditions and the following disclaimer in the ##
## documentation and/or other materials provided with the distribution. ##
## * Neither the name of the pa.eeapai@gmail.com nor the ##
## names of its contributors may be used to endorse or promote products ##
## derived from this software without specific prior written permission. ##
## ##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ##
## ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED ##
## WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE ##
## DISCLAIMED. IN NO EVENT SHALL pa.eeapai@gmail.com BE LIABLE FOR ANY ##
## DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ##
## (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; ##
## LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ##
## ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ##
## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ##
## SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ##
#####################################################################################
import urllib2
import sys
import FileUtils as fu
def getFileSize(remoteFile):
    """Return the Content-Length of *remoteFile* (a urllib2 response-like
    object), or -1 when the header is missing or not an integer."""
    headers = remoteFile.info()
    try:
        return int(headers['Content-Length'])
    except:
        return -1
def downloadFile(URL, path):
    """Download *URL* into local file *path*, printing '=' per ~10% progress.

    Returns True on success; on any failure the partial file is deleted and
    False is returned.
    """
    result = False
    with open(path, 'wb') as localFile:
        try:
            print 'Downloading {0} to {1} '.format(URL, path)
            remoteFile = urllib2.urlopen(URL)
            fileSize = getFileSize(remoteFile)
            fileSizeValid = not (-1 == fileSize)
            CHUNK_SIZE = 1024
            if fileSizeValid:
                # Known size: stream in chunks and report coarse progress.
                perctangeReported = 0
                downloadedSize = 0
                while True:
                    data = remoteFile.read(CHUNK_SIZE)
                    dataSize = len(data)
                    if dataSize > 0:
                        localFile.write(data)
                        downloadedSize = downloadedSize + dataSize
                    else:
                        break
                    downloadedPercentage = ( 100 * downloadedSize) / fileSize
                    if downloadedPercentage >= perctangeReported + 10:
                        perctangeReported = perctangeReported + 10
                        sys.stdout.write('=')
            else:
                # Unknown size: read the whole body in one go.
                localFile.write(remoteFile.read())
            result = True
            print ' SUCCESS'
        except:
            # NOTE(review): bare except also swallows KeyboardInterrupt.
            result = False
            print ' FAILED'
    if not result:
        fu.delete(path)
    return result
| Python |
#####################################################################################
## Copyright (c) 2012, pa.eeapai@gmail.com ##
## All rights reserved. ##
## ##
## Redistribution and use in source and binary forms, with or without ##
## modification, are permitted provided that the following conditions are met: ##
## * Redistributions of source code must retain the above copyright ##
## notice, this list of conditions and the following disclaimer. ##
## * Redistributions in binary form must reproduce the above copyright ##
## notice, this list of conditions and the following disclaimer in the ##
## documentation and/or other materials provided with the distribution. ##
## * Neither the name of the pa.eeapai@gmail.com nor the ##
## names of its contributors may be used to endorse or promote products ##
## derived from this software without specific prior written permission. ##
## ##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ##
## ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED ##
## WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE ##
## DISCLAIMED. IN NO EVENT SHALL pa.eeapai@gmail.com BE LIABLE FOR ANY ##
## DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ##
## (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; ##
## LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ##
## ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ##
## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ##
## SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ##
#####################################################################################
import os.path
import shutil
def exists(path):
    """True when *path* names an existing file or directory."""
    return os.path.exists(path)
def fileExists(path):
    """True when *path* is an existing regular file."""
    return os.path.isfile(path)
def folderExists(path):
    """True when *path* is an existing directory."""
    return os.path.isdir(path)
def delete(path):
    """Delete *path*: unlink a file, remove a folder tree recursively;
    silently do nothing when the path does not exist."""
    if os.path.isfile(path):
        os.remove(path)
    elif os.path.isdir(path):
        shutil.rmtree(path)
def convertToBackslashes(path):
    """Normalize every forward slash in *path* to a Windows backslash."""
    return path.replace('/', '\\')
def createFolders(path):
    """Create folder *path* (including parents) unless a file or folder
    already exists at that location."""
    if os.path.isfile(path) or os.path.isdir(path):
        return
    os.makedirs(path)
def copy(src, dest):
    """Copy the file contents of *src* to *dest* (metadata is not preserved)."""
    shutil.copyfile(src, dest)
#####################################################################################
## Copyright (c) 2014, pa.eeapai@gmail.com ##
## All rights reserved. ##
## ##
## Redistribution and use in source and binary forms, with or without ##
## modification, are permitted provided that the following conditions are met: ##
## * Redistributions of source code must retain the above copyright ##
## notice, this list of conditions and the following disclaimer. ##
## * Redistributions in binary form must reproduce the above copyright ##
## notice, this list of conditions and the following disclaimer in the ##
## documentation and/or other materials provided with the distribution. ##
## * Neither the name of the pa.eeapai@gmail.com nor the ##
## names of its contributors may be used to endorse or promote products ##
## derived from this software without specific prior written permission. ##
## ##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ##
## ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED ##
## WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE ##
## DISCLAIMED. IN NO EVENT SHALL pa.eeapai@gmail.com BE LIABLE FOR ANY ##
## DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ##
## (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; ##
## LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ##
## ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ##
## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ##
## SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ##
#####################################################################################
"""
Writes files in specified folder (and subfolders recursively) to CC3200 SPI FLASH with debugger.
Needs "empty" flash image and compiled monitor image.
Debug object provides basic debug operations, e.g. run, read memory, write memory.
Monitor object uses debug object to run different operations on CPU.
Monitor image is loaded to RAM and ran.
Predefined locations and buffers in monitor image are used for monitor object to monitor
application communication. Two sets of these locations are used to speed up download.
While some chunk is being written to flash next chunk is written to CC3200 buffer.
This is possible because CortexM4 on CC3200 allows memory access with debugger while running.
Operation in CC3200 is done by writing operation index, parameters and buffer to
predefined locations and last triggering the operation with setting state to active in
predefined location. In next cycle nothing is written until state location is not indicating
idle state.
Flash is formated with operations that call low level SPI functions in peripheral library before
writing files with operations that call SimpleLink library functions for flash file access.
Formatting includes writing "empty" flash image which is read out of flash once after
formatting with Uniflash tool.
"""
import os
import isystem.connect as ic
import time
class Debugger:
    """
    Provides basic debug functions over the isystem.connect API.
    """
    def __init__(self):
        """
        Initializes debug object.
        Connects to winIDEA (most-recently-used workspace).
        """
        self.cmgr = ic.ConnectionMgr()
        self.cmgr.connectMRU()
        self.debug = ic.CDebugFacade(self.cmgr)

    def reset(self):
        """
        Resets target device.
        """
        self.debug.reset()

    def run(self):
        """
        Puts target device in running.
        """
        self.debug.run()

    def writeMemory(self, address, buff, length):
        """
        Writes a buffer to target device memory.
        fMonitor | fRealTime allows access while the CPU is running.
        @param address: Where to start writing.
        @param buff: Data to be written.
        @param length: Number of bytes to write.
        """
        icBuff = ic.VectorBYTE(buff)
        self.debug.writeMemory(ic.IConnectDebug.fMonitor | ic.IConnectDebug.fRealTime, # access mode
                               ic.maPhysicalARM, # memory area
                               address, # memory address
                               length, # num of MAUs to write
                               1, # bytes per MAU
                               icBuff) # data to write

    def readMemory(self, address, length):
        """
        Reads memory from target device.
        @param address: Where to start reading from.
        @param length: Number of bytes to read.
        @return: Buffer with read data.
        """
        mem = self.debug.readMemory(ic.IConnectDebug.fMonitor | ic.IConnectDebug.fRealTime, ic.maPhysicalARM, address, length, 1)
        return mem

    def read32(self, address):
        """
        Reads 32bit value from target memory (evaluated as unsigned long).
        @param address: Location of value to read.
        @return: Read value.
        """
        value = self.debug.evaluate(ic.IConnectDebug.fMonitor | ic.IConnectDebug.fRealTime, "*(unsigned long *){0}".format(address)).getInt()
        return value

    def write32(self, address, val):
        """
        Writes 32bit value to target memory.
        @param address: Location to write the value.
        @param val: Value to write.
        """
        # Build an explicit unsigned 32-bit typed value for the write.
        valueType = ic.SType()
        valueType.m_byType = ic.SType.tUnsigned
        valueType.m_byBitSize = 32
        value = ic.CValueType(valueType, val)
        self.debug.writeValue(ic.IConnectDebug.fMonitor | ic.IConnectDebug.fRealTime,
                              ic.maPhysicalARM,
                              address,
                              value)

    def gotoAddress(self, address):
        """
        Presets execution address (program counter).
        @param: Address of new execution address.
        """
        self.debug.gotoAddress(ic.maPhysicalARM, address)
class SPIMon:
    """
    Executes operations with monitor application running on target.

    The monitor image begins with a header of 32-bit fields (indices below).
    Two mailbox sets (A/B) of status/op/param/filename/buffer locations are
    used alternately so the next operation can be queued while the previous
    one is still executing on the target.
    """
    def __init__(self, pathToMon, dbg):
        """
        Initializes constants.
        @param pathToMon: File with binary monitor application image.
        @param dbg: Debugger object.
        """
        self.dbg = dbg
        self.monitorPath = pathToMon
        # Indices of 32-bit header fields at the start of the monitor image.
        self.monhMagic = 0
        self.monhLoadAddress = 1
        self.monhEntry = 2
        self.monhBufferSize = 3
        self.monhBufferAddress = 4
        self.monhMaxFileNameLength = 5
        self.monhFileNameAddressA = 6
        self.monhFileNameAddressB = 7
        self.monhStatusA = 8
        self.monhStatusB = 9
        self.monhOpA = 10
        self.monhOpB = 11
        self.monhParamA0 = 12
        self.monhParamB0 = 13
        self.monhParamA1 = 14
        self.monhParamB1 = 15
        self.monhParamA2 = 16
        self.monhParamB2 = 17
        self.monhParamA3 = 18
        self.monhParamB3 = 19
        # Operation codes understood by the monitor application.
        self.opInit = 0
        self.opScan = 1
        self.opStartSL = 2
        self.opCreateFile = 3
        self.opCloseFile = 4
        self.opWriteFile = 5
        self.opStopSL = 6
        self.opDeinit = 7
        # Monitor status values (written to the status mailbox).
        self.stateIDLE = 0
        self.stateACTIVE = 1
        self.stateERROR = 2
        # Which mailbox set (A or B) the next operation will use.
        self.opsrcA = 0
        self.opsrcB = 1
        self.curOpSrc = self.opsrcA
        # The fields below are populated by load() from the image header.
        self.monitorMagic = 0
        self.monitorLoadAddress = 0
        self.monitorEntry = 0
        self.monitorOpBufferSize = 0
        self.monitorBufferAddressA = 0
        self.monitorBufferAddressB = 0
        self.monitorMaxFileNameLength = 0
        self.monitorFileNameAddressA = 0
        self.monitorFileNameAddressB = 0
        self.monitorSize = 0
        self.monStatusAAddress = 0
        self.monStatusBAddress = 0
        self.monOpAAddress = 0
        self.monOpBAddress = 0
        self.monParamA0Address = 0
        self.monParamB0Address = 0
        self.monParamA1Address = 0
        self.monParamB1Address = 0
        self.monParamA2Address = 0
        self.monParamB2Address = 0
        self.monParamA3Address = 0
        self.monParamB3Address = 0
        self.outBuf = []

    def getHeaderDword(self, monhID):
        """
        Reads 32bit value from predefined address in monitor image (not from target).
        @param monhID: Predefined location index.
        @return: Value read from image.
        """
        return int.from_bytes(self.monitorImg[monhID * 4 : (monhID * 4) + 4], byteorder='little')

    def load(self):
        """
        Loads monitor application image from file to target memory and
        reads constants from predefined locations in image.
        """
        monitorFile = open(self.monitorPath, "rb")
        self.monitorImg = monitorFile.read()
        monitorFile.close()
        self.monitorMagic = self.getHeaderDword(self.monhMagic)
        self.monitorLoadAddress = self.getHeaderDword(self.monhLoadAddress)
        # Clear bit 0 of the entry address (presumably the Thumb mode bit —
        # TODO confirm against the monitor linker script).
        self.monitorEntry = (self.getHeaderDword(self.monhEntry) >> 1) << 1
        # The image buffer is split in two halves, one per mailbox set.
        self.monitorOpBufferSize = self.getHeaderDword(self.monhBufferSize) >> 1
        self.monitorBufferAddressA = self.getHeaderDword(self.monhBufferAddress)
        self.monitorBufferAddressB = self.monitorBufferAddressA + self.monitorOpBufferSize
        self.monitorMaxFileNameLength = self.getHeaderDword(self.monhMaxFileNameLength)
        self.monitorFileNameAddressA = self.getHeaderDword(self.monhFileNameAddressA)
        self.monitorFileNameAddressB = self.getHeaderDword(self.monhFileNameAddressB)
        self.monitorSize = len(self.monitorImg)
        self.outBuf = bytearray(self.monitorSize)
        # Absolute target addresses of the mailbox fields (header index * 4).
        self.monStatusAAddress = self.monitorLoadAddress + (self.monhStatusA << 2)
        self.monStatusBAddress = self.monitorLoadAddress + (self.monhStatusB << 2)
        self.monOpAAddress = self.monitorLoadAddress + (self.monhOpA << 2)
        self.monOpBAddress = self.monitorLoadAddress + (self.monhOpB << 2)
        self.monParamA0Address = self.monitorLoadAddress + (self.monhParamA0 << 2)
        self.monParamB0Address = self.monitorLoadAddress + (self.monhParamB0 << 2)
        self.monParamA1Address = self.monitorLoadAddress + (self.monhParamA1 << 2)
        self.monParamB1Address = self.monitorLoadAddress + (self.monhParamB1 << 2)
        self.monParamA2Address = self.monitorLoadAddress + (self.monhParamA2 << 2)
        self.monParamB2Address = self.monitorLoadAddress + (self.monhParamB2 << 2)
        self.monParamA3Address = self.monitorLoadAddress + (self.monhParamA3 << 2)
        self.monParamB3Address = self.monitorLoadAddress + (self.monhParamB3 << 2)

    def prepare(self):
        """
        Writes the loaded monitor image to target RAM and runs it from the
        entry address read from the image header.
        """
        self.dbg.reset()
        self.dbg.writeMemory(self.monitorLoadAddress, self.monitorImg, self.monitorSize)
        self.dbg.gotoAddress(self.monitorEntry)
        self.dbg.run()
        self.curOpSrc = self.opsrcA

    def readParam(self, monhID):
        """
        Reads 32bit value from predefined address in target memory.
        @param monhID: Predefined location index.
        @return: Value read from target.
        """
        address = self.monitorLoadAddress + (monhID << 2)
        return self.dbg.read32(address)

    def writeParam(self, monhID, val):
        """
        Writes 32bit value to predefined address in target memory.
        @param monhID: Predefined location index.
        @param val: Value to write.
        """
        address = self.monitorLoadAddress + (monhID << 2)
        self.dbg.write32(address, val)

    def doOp(self, op, filename, p0, p1, p2, p3, inBuf, outCount):
        """
        Writes parameters to predefined locations in target memory and triggers operation.
        @param op: Operation index.
        @param filename: None or name of the file.
        @param p0: Parameter 0 associated with operation.
        @param p1: Parameter 1 associated with operation.
        @param p2: Parameter 2 associated with operation.
        @param p3: Parameter 3 associated with operation.
        @param inBuf: Input data associated with operation. [] if not needed.
        @param outCount: Expected number of bytes returned from operation. If specified call blocks until operation is done.
        """
        # When no output is expected the mailbox set is toggled afterwards so
        # the next operation can be queued while this one executes.
        nextOpSrc = self.curOpSrc
        if 0 == outCount:
            if self.opsrcB == self.curOpSrc:
                nextOpSrc = self.opsrcA
            else:
                nextOpSrc = self.opsrcB
        # Select the mailbox locations for the current set (defaults: set A).
        monhStatus = self.monhStatusA
        monhOp = self.monhOpA
        monhP0 = self.monhParamA0
        monhP1 = self.monhParamA1
        monhP2 = self.monhParamA2
        monhP3 = self.monhParamA3
        monitorFileNameAddress = self.monitorFileNameAddressA
        bufAddress = self.monitorBufferAddressA
        if self.opsrcB == self.curOpSrc:
            monhStatus = self.monhStatusB
            monhOp = self.monhOpB
            monhP0 = self.monhParamB0
            monhP1 = self.monhParamB1
            monhP2 = self.monhParamB2
            monhP3 = self.monhParamB3
            monitorFileNameAddress = self.monitorFileNameAddressB
            bufAddress = self.monitorBufferAddressB
        self.curOpSrc = nextOpSrc
        # Wait until the selected mailbox is free (previous op finished).
        status = self.readParam(monhStatus)
        while self.stateACTIVE == status:
            status = self.readParam(monhStatus)
        if not self.stateIDLE == status:
            raise Exception("Monitor error {0}".format(status))
        if not filename is None:
            # Append a placeholder byte, then overwrite it with the NUL
            # terminator expected by the target.
            fileNameBuff = bytearray(filename + "0", "ascii")
            filenameLen = len(fileNameBuff)
            if filenameLen >= self.monitorMaxFileNameLength:
                raise Exception("File name too long: " + filename)
            fileNameBuff[filenameLen - 1] = 0
            self.dbg.writeMemory(monitorFileNameAddress, fileNameBuff, filenameLen)
        if len(inBuf) > 0:
            self.dbg.writeMemory(bufAddress, inBuf, len(inBuf))
        self.writeParam(monhP0, p0)
        self.writeParam(monhP1, p1)
        self.writeParam(monhP2, p2)
        self.writeParam(monhP3, p3)
        self.writeParam(monhOp, op)
        # Writing ACTIVE to the status mailbox is what triggers the op;
        # it must happen last.
        self.writeParam(monhStatus, self.stateACTIVE)
        if 0 == outCount:
            return []
        # Output requested: block until the operation completes.
        status = self.readParam(monhStatus)
        while self.stateACTIVE == status:
            status = self.readParam(monhStatus)
        if not self.stateIDLE == status:
            raise Exception("Monitor error {0}".format(status))
        return self.dbg.readMemory(bufAddress, outCount)

    def init(self):
        """
        Executes init operation in monitor application running on target.
        """
        self.doOp(self.opInit, None, 0, 0, 0, 0, [], 0)

    def _scan(self, CSEnableBefore, inBuf, count, CSDisableAfter):
        """
        Outputs provided data on SPI and returns received SPI data.
        Optionally changes SPI CE signal at the beginning and at the end.
        @param CSEnableBefore: True to drive CE low on start.
        @param inBuf: Output data.
        @param count: Requested output byte count. Unpredictable extra values are written to SPI if this is more than input data byte count.
        @param CSDisableAfter: True to drive CE high when done.
        @return: SPI received data.
        """
        scanCount = len(inBuf)
        if count > scanCount:
            scanCount = count
        # Flag bits: bit0 = assert CS before, bit1 = deassert CS after.
        flags = 0
        if CSEnableBefore:
            flags = flags | 1
        if CSDisableAfter:
            flags = flags | 2
        return self.doOp(self.opScan, None, flags, scanCount, 0, 0, inBuf, scanCount)

    def readID(self):
        """
        Reads flash ID (0x9F JEDEC read-ID command, 4 response bytes).
        @return: SPI ID.
        """
        outData = self._scan(True, bytes([0x9F]), 5, True)
        id = outData[1] << 24
        id = id | (outData[2] << 16)
        id = id | (outData[3] << 8)
        id = id | outData[4]
        return id

    def readStatus(self):
        """
        Reads flash status register (0x05 command).
        @return: Status register.
        """
        outData = self._scan(True, bytes([0x05]), 2, True)
        return outData[1]

    def writeEnable(self):
        """
        Sends SPI write enable command (0x06) and polls until the WEL bit sets.
        """
        outData = self._scan(True, bytes([0x06]), 0, True)
        status = self.readStatus()
        while 0 == (status & 2): # WEL
            status = self.readStatus()

    def writeDisable(self):
        """
        Sends SPI write disable command (0x04).
        """
        outData = self._scan(True, bytes([0x04]), 0, True)

    def eraseAll(self):
        """
        Sends SPI mass erase command (0xC7) and polls status until completed.
        Prints a '.' progress indicator while waiting.
        """
        self.writeEnable()
        outData = self._scan(True, bytes([0xC7]), 0, True)
        status = self.readStatus()
        loopCnt = 0
        while 1 == (status & 1): # WIP
            status = self.readStatus()
            print(".", end="", flush=True)
            loopCnt = loopCnt + 1
            if 64 == loopCnt:
                print("")
                loopCnt = 0
            time.sleep(0.1)
        print("")
        self.writeDisable()

    def _startRead(self, address):
        """
        Sends SPI read command (0x03 + 24-bit address); leaves CS asserted.
        @param address: Address in flash where to start reading.
        """
        cmd = bytearray(4)
        cmd[0] = 0x03
        cmd[1] = (address >> 16) & 0xFF
        cmd[2] = (address >> 8) & 0xFF
        cmd[3] = address & 0xFF
        self._scan(True, cmd, 0, False)

    def _read(self, count):
        """
        Clocks data out of flash in chunks of max size of predefined buffer size.
        @param count: Total number of bytes to read.
        @return: Data read from flash.
        """
        outData = bytearray(0)
        read = 0
        loopCnt = 0
        while read < count:
            singleCount = count - read
            if singleCount > self.monitorOpBufferSize:
                singleCount = self.monitorOpBufferSize
            singleData = self._scan(False, [], singleCount, False)
            outData[read:read + singleCount] = singleData[0:singleCount]
            print(".", end="", flush=True)
            loopCnt = loopCnt + 1
            if 64 == loopCnt:
                print("")
                loopCnt = 0
            read = read + singleCount
        return outData

    def _endRead(self):
        """
        Disables SPI CS signal.
        """
        self._scan(False, [], 0, True)

    def read(self, address, count):
        """
        Reads raw data from flash.
        @param address: Address in flash where to start reading.
        @param count: Total number of bytes to read.
        @return: Data read from flash.
        """
        self._startRead(address)
        out = self._read(count)
        self._endRead()
        return out

    def writePage(self, address, data, maxCount):
        """
        Writes one SPI page of data in flash (0x02 page program command).
        All-0xFF pages are skipped — erased flash already reads 0xFF.
        @param address: Start address of the page in flash.
        @param data: Data to write.
        @param maxCount: Max number of bytes to write. Mustn't exceed page size.
        """
        empty = True
        for i in range(maxCount):
            if not (data[i] == 0xFF):
                empty = False
                break
        if empty:
            return
        self.writeEnable()
        cmd = bytearray(maxCount + 4)
        cmd[0] = 0x02
        cmd[1] = (address >> 16) & 0xFF
        cmd[2] = (address >> 8) & 0xFF
        cmd[3] = address & 0xFF
        cmd[4:] = data[0:]
        self._scan(True, cmd, 0, True)
        status = self.readStatus()
        while 1 == (status & 1): # WIP
            status = self.readStatus()
        self.writeDisable()
        pass  # NOTE(review): leftover no-op; safe to delete

    def write(self, filePath):
        """
        Writes file to beginning of the flash, page by page.
        @param filePath: Path to flash image file.
        """
        file = open(filePath, "rb")
        img = file.read()
        file.close()
        pageSize = 256
        fullSize = len(img)
        address = 0
        loopCnt = 0
        while address < fullSize:
            size = fullSize - address
            if size > pageSize:
                size = pageSize
            buff = img[address:address + size]
            self.writePage(address, buff, size)
            address = address + size
            print(".", end="", flush=True)
            loopCnt = loopCnt + 1
            if 64 == loopCnt:
                print("")
                loopCnt = 0
        print("")

    def startSL(self):
        """
        Starts SimpleLink to enable calls to flash file functions.
        """
        self.doOp(self.opStartSL, None, 0, 0, 0, 0, [], 0)

    def stopSL(self):
        """
        Stops SimpleLink.
        """
        self.doOp(self.opStopSL, None, 0, 0, 0, 0, [], 0)

    def _createFile(self, name, size):
        """
        Creates new file with call to SimpleLink library. Handle to file is remembered in monitor.
        @param name: File name. Must start with '/'.
        @param size: File size.
        """
        self.doOp(self.opCreateFile, name, size, 0, 0, 0, [], 0)

    def _writeFile(self, buff, size):
        """
        Writes data to last created file.
        @param buff: Data to write.
        @param size: Size of data. Mustn't exceed monitor buffer size.
        """
        self.doOp(self.opWriteFile, None, size, 0, 0, 0, buff, 0)

    def _closeFile(self):
        """
        Closes last created file.
        """
        self.doOp(self.opCloseFile, None, 0, 0, 0, 0, [], 0)

    def writeFile(self, path, name):
        """
        Writes file to flash in buffer-sized chunks.
        @param path: Path to file on host.
        @param name: Name of file in flash.
        """
        print("Writing {0} to {1}".format(path, name))
        file = open(path, "rb")
        buff = file.read()
        file.close()
        size = len(buff)
        self._createFile(name, size)
        left = size
        while left > 0:
            chunkSize = left
            if chunkSize > self.monitorOpBufferSize:
                chunkSize = self.monitorOpBufferSize
            chunkBuff = buff[size - left:size - left + chunkSize]
            self.doOp(self.opWriteFile, None, chunkSize, 0, 0, 0, chunkBuff, 0)
            left = left - chunkSize
        self._closeFile()
def writeFile(mon, fsRoot, file):
    """
    Programs one host file into flash, keeping its path relative to fsRoot.
    Files under a \\sys\\ directory are reserved and skipped.
    @param mon: SPIMon instance used to write.
    @param fsRoot: Host folder the flash name is made relative to.
    @param file: Full host path of the file.
    """
    if "\\sys\\" not in file:
        relativeName = file[len(fsRoot):]
        mon.writeFile(file, relativeName)
def writeFilesIn(mon, path):
    """
    Recursively programs every file under path into flash,
    skipping any directory containing "_session" and any .ucf file.
    @param mon: SPIMon instance used to write.
    @param path: Root host folder to walk.
    """
    for dirPath, _, fileNames in os.walk(path):
        if "_session" in dirPath:
            continue
        for fileName in fileNames:
            if not fileName.endswith(".ucf"):
                writeFile(mon, path, os.path.join(dirPath, fileName))
def doMon(monPath, dumpPath, dumpSize, erase, formatPath, programPath, appPath):
    """
    Performs selected SPI flash operations in following order:
    Reads flash contents and dumps it in specified file.
    Performs raw SPI mass erase.
    Writes "empty" image from specified path.
    Downloads files from specified folder path and subfolders recursively.
    @param monPath: Monitor binary image to load to RAM and run.
    @param dumpPath: Where to dump the content of flash. Dump step is skipped when not given.
    @param dumpSize: How much to read from flash. Usually the size of flash in bytes.
                     Must be given when dumpPath is set.
    @param erase: True to erase flash. False to skip erase step.
    @param formatPath: Formatted image to program in erased flash.
                       Writing "empty" image is skipped if not provided.
                       Erase step is implied when provided regardless of erase param.
    @param programPath: Path to folder which content (with subfolders recursively) should be programmed.
                        Programming step is skipped if not provided.
    @param appPath: Application binary to be programmed as "/sys/mcuimg.bin".
    """
    print("Connecting to winIDEA... ", end="", flush=True)
    debugger = Debugger()
    print("DONE")
    mon = SPIMon(monPath, debugger)
    print("Reading monitor image from {0} ... ".format(monPath), end="", flush=True)
    mon.load()
    print("DONE")
    print("Starting debug and writing monitor image to RAM... ", end="", flush=True)
    mon.prepare()
    print("DONE")
    print("Monitor init call... ", end="", flush=True)
    mon.init()
    print("DONE")
    print("Reading flash ID... ", end="", flush=True)
    id = mon.readID()
    print("DONE. SPI FLASH ID: 0x{0:08X}".format(id))
    # Dump step requires both a destination path and a positive size.
    # (Checking only dumpSize crashed on open(None) when -S was given
    # without -D; the docstring promises the step is skipped instead.)
    if dumpPath is not None and dumpSize > 0:
        print("Creating {0} to dump raw flash image... ".format(dumpPath), end="", flush=True)
        with open(dumpPath, "wb") as imgFile:
            print("DONE")
            print("Reading flash image... ")
            img = mon.read(0, dumpSize)
            count = len(img)
            print("Read {0} Bytes".format(count))
            print("Dumping data... ", end="", flush=True)
            imgFile.write(img)
        print("DONE")
    format = formatPath is not None
    # Writing the "empty" image only makes sense into erased flash.
    erase = erase or format
    program = programPath is not None
    if erase:
        print("Erasing SPI FLASH... ")
        mon.eraseAll()
        print("DONE")
    if format:
        print("Writing \"empty\" image... ")
        mon.write(formatPath)
        print("DONE")
    progApp = appPath is not None
    if program or progApp:
        # File-level operations require the SimpleLink library running.
        print("Starting SimpleLink... ", end="", flush=True)
        mon.startSL()
        print("DONE")
    if program:
        writeFilesIn(mon, programPath)
    if progApp:
        mon.writeFile(appPath, "/sys/mcuimg.bin")
    if program or progApp:
        print("Stopping SimpleLink... ", end="", flush=True)
        mon.stopSL()
        print("DONE")
class args:
    """Parses command line options into attributes (option list: main.__doc__)."""
    def __init__(self, argv):
        """
        Fills defaults, then consumes each "-Xvalue" style argument.
        @param argv: Argument list without the script name.
        """
        scriptDir = os.path.dirname(__file__)
        self.monPath = os.path.abspath(scriptDir + "\\..\\monitor\\eclipse\\Debug\\CC3200Burn.bin")
        self.dumpPath = None
        self.dumpSize = 0
        self.erase = False
        self.formatPath = None
        self.progPath = None
        self.appPath = None
        for arg in argv:
            option, value = arg[:2], arg[2:]
            if option == "-M":
                self.monPath = os.path.abspath(value)
            elif option == "-D":
                self.dumpPath = os.path.abspath(value)
            elif option == "-S":
                # base 0: accepts decimal, 0x..., 0o..., 0b...
                self.dumpSize = int(value, 0)
            elif option == "-E":
                self.erase = True
            elif option == "-F":
                self.formatPath = os.path.abspath(value)
            elif option == "-P":
                self.progPath = os.path.abspath(value)
            elif option == "-A":
                self.appPath = os.path.abspath(value)
            else:
                print("Skipping unknown param: " + arg)
def main():
    """
    Usage: CC3200Burn.py [-M<path>] [-D<path> -S<size>] [-E] [-F<path>] [-P<path>] [-A<path>]
    -M<path> : Path to monitor image. <scriptFolder>\..\monitor\eclipse\Debug\CC3200Burn.bin is tried if None.
    -D<path> : Where to dump raw image.
    -S<size> : Number of bytes to dump. Must be given if -D.
    -E : Low level SPI flash erase.
    -F<path> : Formated image path. Dumped image after erase (-E) and format with Uniflash.
    -P<path> : Folder with files to program. /sys/mcuimg.bin is ignored.
    -A<path> : Path to application image. Programmed as "/sys/mcuimg.bin".
    """
    import sys
    # No arguments at all: show usage (the docstring above) and bail out.
    if len(sys.argv) < 2:
        print(main.__doc__)
        return
    options = args(sys.argv[1:])
    doMon(options.monPath, options.dumpPath, options.dumpSize, options.erase,
          options.formatPath, options.progPath, options.appPath)
# Script entry point: parse command line options and run flash operations.
if __name__ == "__main__":
    main()
| Python |
#####################################################################################
## Copyright (c) 2012, pa.eeapai@gmail.com ##
## All rights reserved. ##
## ##
## Redistribution and use in source and binary forms, with or without ##
## modification, are permitted provided that the following conditions are met: ##
## * Redistributions of source code must retain the above copyright ##
## notice, this list of conditions and the following disclaimer. ##
## * Redistributions in binary form must reproduce the above copyright ##
## notice, this list of conditions and the following disclaimer in the ##
## documentation and/or other materials provided with the distribution. ##
## * Neither the name of the pa.eeapai@gmail.com nor the ##
## names of its contributors may be used to endorse or promote products ##
## derived from this software without specific prior written permission. ##
## ##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ##
## ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED ##
## WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE ##
## DISCLAIMED. IN NO EVENT SHALL pa.eeapai@gmail.com BE LIABLE FOR ANY ##
## DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ##
## (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; ##
## LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ##
## ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ##
## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ##
## SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ##
#####################################################################################
import sys
import os
import shutil
# Post-build hook dispatcher: invoked by the build system with
# (configuration, project path, output name, output extension) and forwards
# them to an optional per-project PostBuildUser.py if one exists.
CONFIGURATION = sys.argv[1]
PROJECTPATH = sys.argv[2]
OUTPUTNAME = sys.argv[3]
OUTPUTEXT = sys.argv[4]
if os.path.isfile(os.path.join(PROJECTPATH, "PostBuildUser.py")):
    # NOTE(review): PostBuildUser is imported via the normal module search
    # path, not explicitly from PROJECTPATH — presumably the build invokes
    # this script with the project directory as CWD; confirm.
    import PostBuildUser
    PostBuildUser.postBuild(CONFIGURATION, PROJECTPATH, OUTPUTNAME, OUTPUTEXT)
| Python |
#####################################################################################
## Copyright (c) 2012, pa.eeapai@gmail.com ##
## All rights reserved. ##
## ##
## Redistribution and use in source and binary forms, with or without ##
## modification, are permitted provided that the following conditions are met: ##
## * Redistributions of source code must retain the above copyright ##
## notice, this list of conditions and the following disclaimer. ##
## * Redistributions in binary form must reproduce the above copyright ##
## notice, this list of conditions and the following disclaimer in the ##
## documentation and/or other materials provided with the distribution. ##
## * Neither the name of the pa.eeapai@gmail.com nor the ##
## names of its contributors may be used to endorse or promote products ##
## derived from this software without specific prior written permission. ##
## ##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ##
## ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED ##
## WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE ##
## DISCLAIMED. IN NO EVENT SHALL pa.eeapai@gmail.com BE LIABLE FOR ANY ##
## DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ##
## (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; ##
## LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ##
## ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ##
## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ##
## SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ##
#####################################################################################
import tkFileDialog
import Tkinter
import sys
import os
import shutil
import distutils.dir_util
import datetime
# Driver-package build script (Python 2): collects signing inputs, locates
# WDK tools, then (below) creates a test certificate chain, generates the
# INF/CAT files and signs them.
root = Tkinter.Tk()
root.withdraw()  # hide the empty Tk main window; Tk is only needed for the folder dialog
# Default WDK 7600 install location; prompted for below if not present.
WDKPath = "C:\\WinDDK\\7600.16385.1"
now = datetime.datetime.now()
date = now.strftime("%m/%d/%Y")  # substituted for <DATE> in the INF template
name = raw_input("Enter the name (e.g. Awesome Embedded Developer). Don't use characters that are not valid in file names!: ")
certPrepend = name.replace(' ', '')  # file-name-safe prefix for certificate files
usbVID = raw_input("Enter your USB VID (4 HEX characters, without 0x or h or hex):")
usbPID = raw_input("Enter USB PID for the device (4 HEX characters, without 0x or h or hex):")
if not os.path.isdir(WDKPath):
    WDKPath = tkFileDialog.askdirectory()
    if not os.path.isdir(WDKPath):
        print "WDK path is needed. You must have Windows Driver Kit (WDK) installed."
        sys.exit (-1)
# Output folder for the assembled driver package.
driverPath = "Driver"
driverPath = os.path.abspath(driverPath)
# Certificate / key file names derived from the entered name.
caPVK = certPrepend + "CA.pvk"
caCER = certPrepend + "CA.cer"
spcPVK = certPrepend + "SPC.pvk"
spcCER = certPrepend + "SPC.cer"
spcPFX = certPrepend + "SPC.pfx"
bitVersion = "x86"
# Full paths to the WDK signing / INF tools.
MakeCert = os.path.join(WDKPath, "bin\\" + bitVersion + "\\MakeCert.exe")
certutil = "certutil.exe"
pvk2pfx = os.path.join(WDKPath, "bin\\" + bitVersion + "\\pvk2pfx.exe")
chkinf = os.path.join(WDKPath, "Tools\\Chkinf\\chkinf.bat")
Inf2Cat = os.path.join(WDKPath, "bin\\selfsign\\Inf2Cat.exe")
SignTool = os.path.join(WDKPath, "bin\\" + bitVersion + "\\SignTool.exe")
def execute(command, args):
fullCommand = command + " " + args
print "Executing " + fullCommand
return os.system(fullCommand)
# Create certificates: a self-signed CA, an SPC issued by it, then a PFX
# bundling the SPC key+cert for SignTool. Each step is skipped when its
# output files already exist, and verified afterwards.
if (not os.path.isfile(caPVK)) or (not os.path.isfile(caCER)):
    execute(MakeCert, "-r -pe -n \"CN=" + name +" CA\" -ss CA -sr CurrentUser -a sha1 -sky signature -sv " + caPVK + " " + caCER)
if (not os.path.isfile(caPVK)) or (not os.path.isfile(caCER)):
    print "Could not create CA certificate."
    sys.exit(-1)
if (not os.path.isfile(spcPVK)) or (not os.path.isfile(spcCER)):
    execute(MakeCert, "-pe -n \"CN=" + name +" SPC\" -a sha1 -sky signature -ic " + caCER + " -iv " + caPVK + " -sv " + spcPVK + " " + spcCER)
if (not os.path.isfile(spcPVK)) or (not os.path.isfile(spcCER)):
    print "Could not create SPC certificate."
    sys.exit(-1)
if not os.path.isfile(spcPFX):
    execute(pvk2pfx, "-pvk " + spcPVK + " -spc " + spcCER + " -pfx " + spcPFX) #-po for pfx password
if not os.path.isfile(spcPFX):
    print "Could not create PFX."
    sys.exit(-1)
# Clean Driver folder
if os.path.isdir(driverPath):
    shutil.rmtree(driverPath)
# Copy redistributable files (co-installers and dpinst) into the package.
wdfCoInstPath = os.path.join(WDKPath, "redist\\wdf")
winusbCoInstPath = os.path.join(WDKPath, "redist\\winusb")
dpinstPath = os.path.join(WDKPath, "redist\\DIFx\\dpinst\\MultiLin")
distutils.dir_util.copy_tree(wdfCoInstPath, driverPath)
distutils.dir_util.copy_tree(winusbCoInstPath, driverPath)
distutils.dir_util.copy_tree(dpinstPath, driverPath)
# Derive the WdfCoInstaller version digits from the redistributable's
# DLL file name (e.g. WdfCoInstaller01009.dll -> "01009").
wdfCoInstVer = ""
wdfCoInstName = "WdfCoInstaller"
for filename in os.listdir(os.path.join(wdfCoInstPath, "x86")):
    if -1 == filename.find(wdfCoInstName):
        continue
    filename = filename.replace("_", "")
    filename = filename.replace("WdfCoInstaller", "")
    filename = filename.replace(".dll", "")
    filename = filename.replace("chk", "")
    wdfCoInstVer = filename
    break
if "" == wdfCoInstVer:
    print "Could not determine WdfCoInstaller version."
    sys.exit(-1)
# Split the version digits into "major.minor" for the INF <KMDFLIBVER>.
wdfMM = int(wdfCoInstVer[:2])
wdfmmm = int(wdfCoInstVer[2:])
kmdfLibVer = "{0}.{1}".format(wdfMM, wdfmmm)
i = int("09")  # NOTE(review): unused leftover (== 9); safe to delete
# Create INF file from the template by substituting the placeholders.
infTemplatePath =("WinUSBCommTemplate.inf")
infPath = os.path.join(driverPath, "WinUSBComm.inf")
with file(infTemplatePath, 'r') as infTemplate:
    data = infTemplate.read()
data = data.replace("<DATE>", date)
data = data.replace("<USBVID>", usbVID)
data = data.replace("<USBPID>", usbPID)
data = data.replace("<KMDFLIBVER>", kmdfLibVer)
data = data.replace("<WDFCOINSTVER>", wdfCoInstVer)
data = data.replace("<NAME>", name)
with file(infPath, 'w') as inf:
    inf.write(data)
# Create usbID.h - ask first
if len(raw_input("Create usbID.h with entered USB IDs (if file exists it will be deleted)? [ENTER for NO, anything for YES]:")):
    with file(".\\..\\usbIDTemplate.h", 'r') as usbIDhTemplate:
        data = usbIDhTemplate.read()
    data = data.replace("<USBVID>", "0x" + usbVID)
    data = data.replace("<USBPID>", "0x" + usbPID)
    data = data.replace("<NAME>", name)
    with file(".\\..\\usbID.h", 'w') as usbIDh:
        usbIDh.write(data)
# Check INF file with chkinf; its HTML report goes to .\htm\
if os.path.isdir("htm"):
    shutil.rmtree("htm")
if 0 != execute(chkinf, infPath):
    print "INF file check failed. See .\\htm\\"
    sys.exit(-1)
# Create CAT file covering all supported OS targets.
catPath = os.path.join(driverPath, "WinUSBComm.cat")
execute(Inf2Cat, "/driver:" + driverPath + " /os:XP_X86,XP_X64,Vista_X86,Server2008_X86,Vista_X64,Server2008_X64,Server2008_IA64,7_X86,7_X64,Server2008R2_X64,Server2008R2_IA64")
if not os.path.isfile(catPath):
    print "Could not create catalog file."
    sys.exit(-1)
# Sign catalog file with the SPC certificate and a timestamp.
execute(SignTool, "sign /f " + spcPFX + " /t http://timestamp.verisign.com/scripts/timstamp.dll " + catPath)
print "Driver package ready in " + driverPath
# Install driver - ask first; pick the 64-bit dpinst on 64-bit Windows.
if len(raw_input("Install driver? [ENTER for NO, anything for YES]:")):
    if 'PROGRAMFILES(X86)' in os.environ:
        bitVersion = "amd64"
    dpinstPath = os.path.join(driverPath, bitVersion + "\\dpinst.exe")
    execute(dpinstPath, "/q /se /sw /path " + driverPath)
| Python |
#####################################################################################
## Copyright (c) 2012, pa.eeapai@gmail.com ##
## All rights reserved. ##
## ##
## Redistribution and use in source and binary forms, with or without ##
## modification, are permitted provided that the following conditions are met: ##
## * Redistributions of source code must retain the above copyright ##
## notice, this list of conditions and the following disclaimer. ##
## * Redistributions in binary form must reproduce the above copyright ##
## notice, this list of conditions and the following disclaimer in the ##
## documentation and/or other materials provided with the distribution. ##
## * Neither the name of the pa.eeapai@gmail.com nor the ##
## names of its contributors may be used to endorse or promote products ##
## derived from this software without specific prior written permission. ##
## ##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ##
## ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED ##
## WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE ##
## DISCLAIMED. IN NO EVENT SHALL pa.eeapai@gmail.com BE LIABLE FOR ANY ##
## DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ##
## (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; ##
## LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ##
## ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ##
## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ##
## SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ##
#####################################################################################
import sys
import os
import shutil
# Syncs the USB mass-storage glue sources from the ST library example into
# the project directory given as argv[1].
# Fix: the original wrapped each already-concatenated path in a no-op
# single-argument os.path.join() and repeated the copy line four times;
# the resulting source/destination strings are unchanged here.
PROJECTPATH = sys.argv[1]
print(PROJECTPATH)

# Root of ST's Mass_Storage example project, relative to the project folder.
_MS_ROOT = PROJECTPATH + "\\..\\..\\Libs\\STM32\\ST\\Project\\Mass_Storage"

# (subfolder, file name) pairs to copy; each lands directly in PROJECTPATH.
for _sub, _name in (("src", "usb_istr.c"),
                    ("inc", "usb_istr.h"),
                    ("src", "usb_pwr.c"),
                    ("inc", "usb_pwr.h")):
    shutil.copyfile(_MS_ROOT + "\\" + _sub + "\\" + _name,
                    PROJECTPATH + "\\" + _name)
| Python |
#!python
"""Bootstrap setuptools installation
If you want to use setuptools in your package's setup.py, just include this
file in the same directory with it, and add this to the top of your setup.py::
from ez_setup import use_setuptools
use_setuptools()
If you want to require a specific version of setuptools, set a download
mirror, or use an alternate download directory, you can do so by supplying
the appropriate options to ``use_setuptools()``.
This file can also be run as a script to install or upgrade setuptools.
"""
import sys
# Setuptools version installed when none is present, and the PyPI
# ("cheeseshop") directory eggs are fetched from, keyed by the running
# interpreter's major.minor version.
DEFAULT_VERSION = "0.6c5"
DEFAULT_URL = "http://cheeseshop.python.org/packages/%s/s/setuptools/" % sys.version[:3]
# Known-good md5 digests of the downloadable setuptools eggs; used by
# _validate_md5() to detect corrupted or tampered downloads.
md5_data = {
    'setuptools-0.6b1-py2.3.egg': '8822caf901250d848b996b7f25c6e6ca',
    'setuptools-0.6b1-py2.4.egg': 'b79a8a403e4502fbb85ee3f1941735cb',
    'setuptools-0.6b2-py2.3.egg': '5657759d8a6d8fc44070a9d07272d99b',
    'setuptools-0.6b2-py2.4.egg': '4996a8d169d2be661fa32a6e52e4f82a',
    'setuptools-0.6b3-py2.3.egg': 'bb31c0fc7399a63579975cad9f5a0618',
    'setuptools-0.6b3-py2.4.egg': '38a8c6b3d6ecd22247f179f7da669fac',
    'setuptools-0.6b4-py2.3.egg': '62045a24ed4e1ebc77fe039aa4e6f7e5',
    'setuptools-0.6b4-py2.4.egg': '4cb2a185d228dacffb2d17f103b3b1c4',
    'setuptools-0.6c1-py2.3.egg': 'b3f2b5539d65cb7f74ad79127f1a908c',
    'setuptools-0.6c1-py2.4.egg': 'b45adeda0667d2d2ffe14009364f2a4b',
    'setuptools-0.6c2-py2.3.egg': 'f0064bf6aa2b7d0f3ba0b43f20817c27',
    'setuptools-0.6c2-py2.4.egg': '616192eec35f47e8ea16cd6a122b7277',
    'setuptools-0.6c3-py2.3.egg': 'f181fa125dfe85a259c9cd6f1d7b78fa',
    'setuptools-0.6c3-py2.4.egg': 'e0ed74682c998bfb73bf803a50e7b71e',
    'setuptools-0.6c3-py2.5.egg': 'abef16fdd61955514841c7c6bd98965e',
    'setuptools-0.6c4-py2.3.egg': 'b0b9131acab32022bfac7f44c5d7971f',
    'setuptools-0.6c4-py2.4.egg': '2a1f9656d4fbf3c97bf946c0a124e6e2',
    'setuptools-0.6c4-py2.5.egg': '8f5a052e32cdb9c72bcf4b5526f28afc',
    'setuptools-0.6c5-py2.3.egg': 'ee9fd80965da04f2f3e6b3576e9d8167',
    'setuptools-0.6c5-py2.4.egg': 'afe2adf1c01701ee841761f5bcd8aa64',
    'setuptools-0.6c5-py2.5.egg': 'a8d3f61494ccaa8714dfed37bccd3d5d',
}
import sys, os
def _validate_md5(egg_name, data):
    """Check *data* against the embedded md5 digest for *egg_name*.

    Returns *data* unchanged when the egg is unknown or the digest matches;
    aborts the whole process with exit code 2 on a mismatch (most likely a
    corrupted or tampered download).
    """
    if egg_name in md5_data:
        # NOTE: legacy Python 2 ``md5`` module (this script predates hashlib).
        from md5 import md5
        digest = md5(data).hexdigest()
        if digest != md5_data[egg_name]:
            print >>sys.stderr, (
                "md5 validation of %s failed! (Possible download problem?)"
                % egg_name
            )
            sys.exit(2)
    return data
def use_setuptools(
    version=DEFAULT_VERSION, download_base=DEFAULT_URL, to_dir=os.curdir,
    download_delay=15
):
    """Automatically find/download setuptools and make it available on sys.path
    `version` should be a valid setuptools version number that is available
    as an egg for download under the `download_base` URL (which should end with
    a '/'). `to_dir` is the directory where setuptools will be downloaded, if
    it is not already available. If `download_delay` is specified, it should
    be the number of seconds that will be paused before initiating a download,
    should one be required. If an older version of setuptools is installed,
    this routine will print a message to ``sys.stderr`` and raise SystemExit in
    an attempt to abort the calling script.
    """
    try:
        import setuptools
        # 0.0.1 is the dummy version installed by very old bootstrap scripts;
        # it cannot be upgraded in place, the user must remove it manually.
        if setuptools.__version__ == '0.0.1':
            print >>sys.stderr, (
            "You have an obsolete version of setuptools installed. Please\n"
            "remove it from your system entirely before rerunning this script."
            )
            sys.exit(2)
    except ImportError:
        # no setuptools at all: fetch the egg and activate it for this run
        egg = download_setuptools(version, download_base, to_dir, download_delay)
        sys.path.insert(0, egg)
        import setuptools; setuptools.bootstrap_install_from = egg
    import pkg_resources
    try:
        # verify the active setuptools satisfies the requested minimum
        pkg_resources.require("setuptools>="+version)
    except pkg_resources.VersionConflict, e:
        # XXX could we install in a subprocess here?
        print >>sys.stderr, (
        "The required version of setuptools (>=%s) is not available, and\n"
        "can't be installed while this script is running. Please install\n"
        " a more recent version first.\n\n(Currently using %r)"
        ) % (version, e.args[0])
        sys.exit(2)
def download_setuptools(
    version=DEFAULT_VERSION, download_base=DEFAULT_URL, to_dir=os.curdir,
    delay = 15
):
    """Download setuptools from a specified location and return its filename
    `version` should be a valid setuptools version number that is available
    as an egg for download under the `download_base` URL (which should end
    with a '/'). `to_dir` is the directory where the egg will be downloaded.
    `delay` is the number of seconds to pause before an actual download attempt.
    """
    import urllib2, shutil
    egg_name = "setuptools-%s-py%s.egg" % (version,sys.version[:3])
    url = download_base + egg_name
    saveto = os.path.join(to_dir, egg_name)
    src = dst = None
    if not os.path.exists(saveto): # Avoid repeated downloads
        try:
            from distutils import log
            if delay:
                # give the user a chance to abort / enable firewall access
                log.warn("""
---------------------------------------------------------------------------
This script requires setuptools version %s to run (even to display
help). I will attempt to download it for you (from
%s), but
you may need to enable firewall access for this script first.
I will start the download in %d seconds.
(Note: if this machine does not have network access, please obtain the file
   %s
and place it in this directory before rerunning this script.)
---------------------------------------------------------------------------""",
                    version, download_base, delay, url
                ); from time import sleep; sleep(delay)
            log.warn("Downloading %s", url)
            src = urllib2.urlopen(url)
            # Read/write all in one block, so we don't create a corrupt file
            # if the download is interrupted.
            data = _validate_md5(egg_name, src.read())
            dst = open(saveto,"wb"); dst.write(data)
        finally:
            if src: src.close()
            if dst: dst.close()
    return os.path.realpath(saveto)
def main(argv, version=DEFAULT_VERSION):
    """Install or upgrade setuptools and EasyInstall"""
    try:
        import setuptools
    except ImportError:
        # bootstrap: download a temporary egg, run easy_install from it,
        # then delete the egg again
        egg = None
        try:
            egg = download_setuptools(version, delay=0)
            sys.path.insert(0,egg)
            from setuptools.command.easy_install import main
            return main(list(argv)+[egg]) # we're done here
        finally:
            if egg and os.path.exists(egg):
                os.unlink(egg)
    else:
        if setuptools.__version__ == '0.0.1':
            # tell the user to uninstall obsolete version
            use_setuptools(version)
    req = "setuptools>="+version
    import pkg_resources
    try:
        pkg_resources.require(req)
    except pkg_resources.VersionConflict:
        # installed setuptools is too old: upgrade via a fresh download
        try:
            from setuptools.command.easy_install import main
        except ImportError:
            from easy_install import main
        main(list(argv)+[download_setuptools(delay=0)])
        sys.exit(0) # try to force an exit
    else:
        if argv:
            from setuptools.command.easy_install import main
            main(argv)
        else:
            print "Setuptools version",version,"or greater has been installed."
            print '(Run "ez_setup.py -U setuptools" to reinstall or upgrade.)'
def update_md5(filenames):
    """Update our built-in md5 registry"""
    import re
    from md5 import md5
    # hash every given egg file and merge into the in-memory table
    for name in filenames:
        base = os.path.basename(name)
        f = open(name,'rb')
        md5_data[base] = md5(f.read()).hexdigest()
        f.close()
    data = ["    %r: %r,\n" % it for it in md5_data.items()]
    data.sort()
    repl = "".join(data)
    # rewrite the md5_data literal in this script's own source file
    import inspect
    srcfile = inspect.getsourcefile(sys.modules[__name__])
    f = open(srcfile, 'rb'); src = f.read(); f.close()
    match = re.search("\nmd5_data = {\n([^}]+)}", src)
    if not match:
        print >>sys.stderr, "Internal error!"
        sys.exit(2)
    src = src[:match.start(1)] + repl + src[match.end(1):]
    f = open(srcfile,'w')
    f.write(src)
    f.close()
# Entry point: ``--md5update FILE...`` refreshes the embedded digest table;
# anything else installs or upgrades setuptools.
if __name__=='__main__':
    if len(sys.argv)>2 and sys.argv[1]=='--md5update':
        update_md5(sys.argv[2:])
    else:
        main(sys.argv[1:])
| Python |
# bootstrapping setuptools
import ez_setup
ez_setup.use_setuptools()
import os
import sys
import textwrap
from distutils.errors import *
from distutils.command.clean import clean as _clean
from distutils.cmd import Command
from setuptools import setup
from distutils import log
from distutils.core import setup
class clean(_clean):
    """Also cleanup local temp files."""
    def run(self):
        # run the standard distutils clean first
        _clean.run(self)
        import fnmatch
        # kill temporary files
        patterns = [
            # generic tempfiles
            '*~', '*.bak', '*.pyc',
            # tempfiles generated by ANTLR runs
            't[0-9]*Lexer.py', 't[0-9]*Parser.py',
            '*.tokens', '*__.g',
            ]
        for path in ('antlr3', 'unittests', 'tests'):
            path = os.path.join(os.path.dirname(__file__), path)
            if os.path.isdir(path):
                for root, dirs, files in os.walk(path, topdown=True):
                    graveyard = []
                    for pat in patterns:
                        graveyard.extend(fnmatch.filter(files, pat))
                    for name in graveyard:
                        filePath = os.path.join(root, name)
                        try:
                            log.info("removing '%s'", filePath)
                            os.unlink(filePath)
                        # deletion is best-effort; warn and continue
                        except OSError, exc:
                            log.warn(
                                "Failed to delete '%s': %s",
                                filePath, exc
                                )
class TestError(DistutilsError):
    """Raised when a unit or functional test suite run reports failures."""
    pass
# grml.. the class name appears in the --help output:
# ...
# Options for 'CmdUnitTest' command
# ...
# so I have to use a rather ugly name...
class unittest(Command):
    """Run unit tests for package"""

    description = "run unit tests for package"

    # this command takes no options
    user_options = [
        ]
    boolean_options = []

    def initialize_options(self):
        pass

    def finalize_options(self):
        pass

    def run(self):
        """Load every unittests/test*.py module and run the collected suite.

        Modules that fail to import are recorded and reported after the
        run; any load failure or test failure raises TestError.
        """
        testDir = os.path.join(os.path.dirname(__file__), 'unittests')
        if not os.path.isdir(testDir):
            # fix: was "There is not 'unittests' directory"
            raise DistutilsFileError(
                "There is no 'unittests' directory. Did you fetch the "
                "development version?",
                )

        import glob
        import imp
        import unittest
        import traceback
        import StringIO

        suite = unittest.TestSuite()
        loadFailures = []

        # collect tests from all unittests/test*.py files
        testFiles = glob.glob(os.path.join(testDir, 'test*.py'))
        testFiles.sort()
        for testPath in testFiles:
            testID = os.path.basename(testPath)[:-3]

            try:
                modFile, modPathname, modDescription \
                    = imp.find_module(testID, [testDir])

                testMod = imp.load_module(
                    testID, modFile, modPathname, modDescription
                    )

                suite.addTests(
                    unittest.defaultTestLoader.loadTestsFromModule(testMod)
                    )

            except Exception:
                # remember the traceback; report it after the run so it is
                # not drowned out by the test output
                buf = StringIO.StringIO()
                traceback.print_exc(file=buf)

                loadFailures.append(
                    (os.path.basename(testPath), buf.getvalue())
                    )

        runner = unittest.TextTestRunner(verbosity=2)
        result = runner.run(suite)

        for testName, error in loadFailures:
            sys.stderr.write('\n' + '='*70 + '\n')
            sys.stderr.write(
                "Failed to load test module %s\n" % testName
                )
            sys.stderr.write(error)
            sys.stderr.write('\n')

        if not result.wasSuccessful() or loadFailures:
            raise TestError(
                "Unit test suite failed!",
                )
class functest(Command):
    """Run functional tests for package"""

    description = "run functional tests for package"

    user_options = [
        ('testcase=', None,
         "testcase to run [default: run all]"),
        ('antlr-version=', None,
         "ANTLR version to use [default: HEAD (in ../../build)]"),
        ]

    boolean_options = []

    def initialize_options(self):
        self.testcase = None
        self.antlr_version = 'HEAD'

    def finalize_options(self):
        pass

    def run(self):
        """Run the functional tests in tests/t*.py against an ANTLR build.

        Sets CLASSPATH/ANTLRVERSION for the testcases, loads the selected
        test modules and runs them; raises TestError on any load or test
        failure.
        """
        import glob
        import imp
        import unittest
        import traceback
        import StringIO

        testDir = os.path.join(os.path.dirname(__file__), 'tests')
        if not os.path.isdir(testDir):
            # fix: was "There is not 'tests' directory"
            raise DistutilsFileError(
                "There is no 'tests' directory. Did you fetch the "
                "development version?",
                )

        # make sure, relative imports from testcases work
        sys.path.insert(0, testDir)

        rootDir = os.path.abspath(
            os.path.join(os.path.dirname(__file__), '..', '..'))

        # pick the ANTLR tool classes: a fresh build or an archived jar
        if self.antlr_version == 'HEAD':
            classpath = [
                os.path.join(rootDir, 'build', 'classes'),
                os.path.join(rootDir, 'build', 'rtclasses')
                ]
        else:
            classpath = [
                os.path.join(rootDir, 'archive',
                             'antlr-%s.jar' % self.antlr_version)
                ]

        classpath.extend([
            os.path.join(rootDir, 'lib', 'antlr-2.7.7.jar'),
            os.path.join(rootDir, 'lib', 'stringtemplate-3.2.jar'),
            os.path.join(rootDir, 'lib', 'junit-4.2.jar')
            ])
        os.environ['CLASSPATH'] = ':'.join(classpath)

        os.environ['ANTLRVERSION'] = self.antlr_version

        suite = unittest.TestSuite()
        loadFailures = []

        # collect tests from all tests/t*.py files
        testFiles = []
        for testPath in glob.glob(os.path.join(testDir, 't*.py')):
            # generated lexers/parsers are build products, not testcases
            if (testPath.endswith('Lexer.py')
                or testPath.endswith('Parser.py')
                ):
                continue

            # if a single testcase has been selected, filter out all other
            # tests
            if (self.testcase is not None
                and os.path.basename(testPath)[:-3] != self.testcase
                ):
                continue

            testFiles.append(testPath)

        testFiles.sort()
        for testPath in testFiles:
            testID = os.path.basename(testPath)[:-3]

            try:
                modFile, modPathname, modDescription \
                    = imp.find_module(testID, [testDir])

                testMod = imp.load_module(
                    testID, modFile, modPathname, modDescription
                    )

                suite.addTests(
                    unittest.defaultTestLoader.loadTestsFromModule(testMod)
                    )

            except Exception:
                # remember the traceback; report it after the run
                buf = StringIO.StringIO()
                traceback.print_exc(file=buf)

                loadFailures.append(
                    (os.path.basename(testPath), buf.getvalue())
                    )

        runner = unittest.TextTestRunner(verbosity=2)
        result = runner.run(suite)

        for testName, error in loadFailures:
            sys.stderr.write('\n' + '='*70 + '\n')
            sys.stderr.write(
                "Failed to load test module %s\n" % testName
                )
            sys.stderr.write(error)
            sys.stderr.write('\n')

        if not result.wasSuccessful() or loadFailures:
            raise TestError(
                "Functional test suite failed!",
                )
setup(name='antlr_python_runtime',
version='3.1',
packages=['antlr3'],
author="Benjamin Niemann",
author_email="pink@odahoda.de",
url="http://www.antlr.org/",
download_url="http://www.antlr.org/download.html",
license="BSD",
description="Runtime package for ANTLR3",
long_description=textwrap.dedent('''\
This is the runtime package for ANTLR3, which is required to use parsers
generated by ANTLR3.
'''),
cmdclass={'unittest': unittest,
'functest': functest,
'clean': clean
},
)
| Python |
import antlr3
import testbase
import unittest
class t025lexerRulePropertyRef(testbase.ANTLRTest):
    """Check the rule property references recorded by the t025 lexer."""

    def setUp(self):
        self.compileGrammar()

    def testValid1(self):
        stream = antlr3.StringStream('foobar _Ab98 \n A12sdf')
        lexer = self.getLexer(stream)

        # drain the token stream; lexer.properties is filled as a side effect
        while lexer.nextToken().type != antlr3.EOF:
            pass

        assert len(lexer.properties) == 3, lexer.properties

        # (text, line, pos, index, start, stop) for each identifier
        expected = [
            ('foobar', 1, 0, -1, 0, 5),
            ('_Ab98', 1, 7, -1, 7, 11),
            ('A12sdf', 2, 1, -1, 15, 20),
            ]
        for props, (xText, xLine, xPos, xIndex, xStart, xStop) in zip(
            lexer.properties, expected):
            text, type, line, pos, index, channel, start, stop = props
            assert text == xText, props
            assert type == self.lexerModule.IDENTIFIER, props
            assert line == xLine, props
            assert pos == xPos, props
            assert index == xIndex, props
            assert channel == antlr3.DEFAULT_CHANNEL, props
            assert start == xStart, props
            assert stop == xStop, props
# Allow this test module to be run directly.
if __name__ == '__main__':
    unittest.main()
| Python |
import antlr3
import testbase
import unittest
import textwrap
class t022scopes(testbase.ANTLRTest):
    """Tests for dynamically scoped rule attributes (grammar t022scopes)."""
    def setUp(self):
        self.compileGrammar()
    def parserClass(self, base):
        # subclass the generated parser so errors crash instead of recovering
        class TParser(base):
            def emitErrorMessage(self, msg):
                # report errors to /dev/null
                pass
            def reportError(self, re):
                # no error recovery yet, just crash!
                raise re
        return TParser
    def testa1(self):
        cStream = antlr3.StringStream('foobar')
        lexer = self.getLexer(cStream)
        tStream = antlr3.CommonTokenStream(lexer)
        parser = self.getParser(tStream)
        parser.a()
    def testb1(self):
        # rule b with a False argument must reject the input
        cStream = antlr3.StringStream('foobar')
        lexer = self.getLexer(cStream)
        tStream = antlr3.CommonTokenStream(lexer)
        parser = self.getParser(tStream)
        try:
            parser.b(False)
            self.fail()
        except antlr3.RecognitionException:
            pass
    def testb2(self):
        cStream = antlr3.StringStream('foobar')
        lexer = self.getLexer(cStream)
        tStream = antlr3.CommonTokenStream(lexer)
        parser = self.getParser(tStream)
        parser.b(True)
    def testc1(self):
        # declarations are collected into the scope and returned
        cStream = antlr3.StringStream(
            textwrap.dedent('''\
            {
                int i;
                int j;
                i = 0;
            }
            '''))
        lexer = self.getLexer(cStream)
        tStream = antlr3.CommonTokenStream(lexer)
        parser = self.getParser(tStream)
        symbols = parser.c()
        self.failUnlessEqual(
            symbols,
            set(['i', 'j'])
            )
    def testc2(self):
        # use of the undeclared name 'x' must raise
        cStream = antlr3.StringStream(
            textwrap.dedent('''\
            {
                int i;
                int j;
                i = 0;
                x = 4;
            }
            '''))
        lexer = self.getLexer(cStream)
        tStream = antlr3.CommonTokenStream(lexer)
        parser = self.getParser(tStream)
        try:
            parser.c()
            self.fail()
        except RuntimeError, exc:
            self.failUnlessEqual(exc.args[0], 'x')
    def testd1(self):
        # names declared in the inner block must not leak into the outer scope
        cStream = antlr3.StringStream(
            textwrap.dedent('''\
            {
                int i;
                int j;
                i = 0;
                {
                    int i;
                    int x;
                    x = 5;
                }
            }
            '''))
        lexer = self.getLexer(cStream)
        tStream = antlr3.CommonTokenStream(lexer)
        parser = self.getParser(tStream)
        symbols = parser.d()
        self.failUnlessEqual(
            symbols,
            set(['i', 'j'])
            )
    def teste1(self):
        cStream = antlr3.StringStream(
            textwrap.dedent('''\
            { { { { 12 } } } }
            '''))
        lexer = self.getLexer(cStream)
        tStream = antlr3.CommonTokenStream(lexer)
        parser = self.getParser(tStream)
        res = parser.e()
        self.failUnlessEqual(res, 12)
    def testf1(self):
        cStream = antlr3.StringStream(
            textwrap.dedent('''\
            { { { { 12 } } } }
            '''))
        lexer = self.getLexer(cStream)
        tStream = antlr3.CommonTokenStream(lexer)
        parser = self.getParser(tStream)
        res = parser.f()
        self.failUnlessEqual(res, None)
    def testf2(self):
        cStream = antlr3.StringStream(
            textwrap.dedent('''\
            { { 12 } }
            '''))
        lexer = self.getLexer(cStream)
        tStream = antlr3.CommonTokenStream(lexer)
        parser = self.getParser(tStream)
        res = parser.f()
        self.failUnlessEqual(res, None)
# Allow this test module to be run directly.
if __name__ == '__main__':
    unittest.main()
| Python |
# -*- coding: utf-8 -*-
import unittest
import textwrap
import antlr3
import antlr3.tree
import testbase
import sys
from StringIO import StringIO
class T(testbase.ANTLRTest):
    """Tests for the generated main() entry points of lexers, parsers and
    tree walkers (--rule/--parser/--lexer/--encoding options, stdin/file
    input, @main overrides and grammar imports)."""
    def setUp(self):
        # generated modules are imported from the testcase base directory
        self.oldPath = sys.path[:]
        sys.path.insert(0, self.baseDir)
    def tearDown(self):
        sys.path = self.oldPath
    def testOverrideMain(self):
        # an @main action must replace the generated main() entirely
        # NOTE(review): grammar name T3 is reused by testLexerEncoding below --
        # presumably harmless because each test recompiles, but verify.
        grammar = textwrap.dedent(
            r"""lexer grammar T3;
            options {
              language = Python;
            }
            @main {
            def main(argv):
                raise RuntimeError("no")
            }
            ID: ('a'..'z' | '\u00c0'..'\u00ff')+;
            WS: ' '+ { $channel = HIDDEN; };
            """)
        stdout = StringIO()
        lexerMod = self.compileInlineGrammar(grammar, returnModule=True)
        try:
            lexerMod.main(
                ['lexer.py']
                )
            self.fail()
        except RuntimeError:
            pass
    def testLexerFromFile(self):
        input = "foo bar"
        inputPath = self.writeFile("input.txt", input)
        grammar = textwrap.dedent(
            r"""lexer grammar T1;
            options {
              language = Python;
            }
            ID: 'a'..'z'+;
            WS: ' '+ { $channel = HIDDEN; };
            """)
        stdout = StringIO()
        lexerMod = self.compileInlineGrammar(grammar, returnModule=True)
        lexerMod.main(
            ['lexer.py', inputPath],
            stdout=stdout
            )
        # two ID tokens plus the hidden WS token
        self.failUnlessEqual(len(stdout.getvalue().splitlines()), 3)
    def testLexerFromStdIO(self):
        input = "foo bar"
        grammar = textwrap.dedent(
            r"""lexer grammar T2;
            options {
              language = Python;
            }
            ID: 'a'..'z'+;
            WS: ' '+ { $channel = HIDDEN; };
            """)
        stdout = StringIO()
        lexerMod = self.compileInlineGrammar(grammar, returnModule=True)
        lexerMod.main(
            ['lexer.py'],
            stdin=StringIO(input),
            stdout=stdout
            )
        self.failUnlessEqual(len(stdout.getvalue().splitlines()), 3)
    def testLexerEncoding(self):
        # non-ASCII input decoded via the --encoding option
        input = u"föö bär".encode('utf-8')
        grammar = textwrap.dedent(
            r"""lexer grammar T3;
            options {
              language = Python;
            }
            ID: ('a'..'z' | '\u00c0'..'\u00ff')+;
            WS: ' '+ { $channel = HIDDEN; };
            """)
        stdout = StringIO()
        lexerMod = self.compileInlineGrammar(grammar, returnModule=True)
        lexerMod.main(
            ['lexer.py', '--encoding', 'utf-8'],
            stdin=StringIO(input),
            stdout=stdout
            )
        self.failUnlessEqual(len(stdout.getvalue().splitlines()), 3)
    def testCombined(self):
        input = "foo bar"
        grammar = textwrap.dedent(
            r"""grammar T4;
            options {
              language = Python;
            }
            r returns [res]: (ID)+ EOF { $res = $text; };
            ID: 'a'..'z'+;
            WS: ' '+ { $channel = HIDDEN; };
            """)
        stdout = StringIO()
        lexerMod, parserMod = self.compileInlineGrammar(grammar, returnModule=True)
        parserMod.main(
            ['combined.py', '--rule', 'r'],
            stdin=StringIO(input),
            stdout=stdout
            )
        stdout = stdout.getvalue()
        self.failUnlessEqual(len(stdout.splitlines()), 1, stdout)
    def testCombinedOutputAST(self):
        # main() of an AST-producing parser prints the tree
        input = "foo + bar"
        grammar = textwrap.dedent(
            r"""grammar T5;
            options {
              language = Python;
              output = AST;
            }
            r: ID OP^ ID EOF!;
            ID: 'a'..'z'+;
            OP: '+';
            WS: ' '+ { $channel = HIDDEN; };
            """)
        stdout = StringIO()
        lexerMod, parserMod = self.compileInlineGrammar(grammar, returnModule=True)
        parserMod.main(
            ['combined.py', '--rule', 'r'],
            stdin=StringIO(input),
            stdout=stdout
            )
        stdout = stdout.getvalue().strip()
        self.failUnlessEqual(stdout, "(+ foo bar)")
    def testTreeParser(self):
        grammar = textwrap.dedent(
            r'''grammar T6;
            options {
              language = Python;
              output = AST;
            }
            r: ID OP^ ID EOF!;
            ID: 'a'..'z'+;
            OP: '+';
            WS: ' '+ { $channel = HIDDEN; };
            ''')
        treeGrammar = textwrap.dedent(
            r'''tree grammar T6Walker;
            options {
              language=Python;
              ASTLabelType=CommonTree;
              tokenVocab=T6;
            }
            r returns [res]: ^(OP a=ID b=ID)
              { $res = "\%s \%s \%s" \% ($a.text, $OP.text, $b.text) }
              ;
            ''')
        lexerMod, parserMod = self.compileInlineGrammar(grammar, returnModule=True)
        walkerMod = self.compileInlineGrammar(treeGrammar, returnModule=True)
        stdout = StringIO()
        # walker main() drives lexer + parser + tree walker in one go
        walkerMod.main(
            ['walker.py', '--rule', 'r', '--parser', 'T6Parser', '--parser-rule', 'r', '--lexer', 'T6Lexer'],
            stdin=StringIO("a+b"),
            stdout=stdout
            )
        stdout = stdout.getvalue().strip()
        self.failUnlessEqual(stdout, "u'a + b'")
    def testTreeParserRewrite(self):
        grammar = textwrap.dedent(
            r'''grammar T7;
            options {
              language = Python;
              output = AST;
            }
            r: ID OP^ ID EOF!;
            ID: 'a'..'z'+;
            OP: '+';
            WS: ' '+ { $channel = HIDDEN; };
            ''')
        treeGrammar = textwrap.dedent(
            r'''tree grammar T7Walker;
            options {
              language=Python;
              ASTLabelType=CommonTree;
              tokenVocab=T7;
              output=AST;
            }
            tokens {
              ARG;
            }
            r: ^(OP a=ID b=ID) -> ^(OP ^(ARG ID) ^(ARG ID));
            ''')
        lexerMod, parserMod = self.compileInlineGrammar(grammar, returnModule=True)
        walkerMod = self.compileInlineGrammar(treeGrammar, returnModule=True)
        stdout = StringIO()
        walkerMod.main(
            ['walker.py', '--rule', 'r', '--parser', 'T7Parser', '--parser-rule', 'r', '--lexer', 'T7Lexer'],
            stdin=StringIO("a+b"),
            stdout=stdout
            )
        stdout = stdout.getvalue().strip()
        self.failUnlessEqual(stdout, "(+ (ARG a) (ARG b))")
    def testGrammarImport(self):
        slave = textwrap.dedent(
            r'''
            parser grammar T8S;
            options {
              language=Python;
            }
            a : B;
            ''')
        parserName = self.writeInlineGrammar(slave)[0]
        # slave parsers are imported as normal python modules
        # to force reloading current version, purge module from sys.modules
        try:
            del sys.modules[parserName+'Parser']
        except KeyError:
            pass
        master = textwrap.dedent(
            r'''
            grammar T8M;
            options {
              language=Python;
            }
            import T8S;
            s returns [res]: a { $res = $a.text };
            B : 'b' ; // defines B from inherited token space
            WS : (' '|'\n') {self.skip()} ;
            ''')
        stdout = StringIO()
        lexerMod, parserMod = self.compileInlineGrammar(master, returnModule=True)
        parserMod.main(
            ['import.py', '--rule', 's'],
            stdin=StringIO("b"),
            stdout=stdout
            )
        stdout = stdout.getvalue().strip()
        self.failUnlessEqual(stdout, "u'b'")
# Allow this test module to be run directly.
if __name__ == '__main__':
    unittest.main()
| Python |
import antlr3
import testbase
import unittest
class t004lexer(testbase.ANTLRTest):
    """Tests for a lexer rule with a greedy closure ('f' 'o'*)."""
    def setUp(self):
        self.compileGrammar()
    def lexerClass(self, base):
        # subclass the generated lexer so errors crash instead of recovering
        class TLexer(base):
            def emitErrorMessage(self, msg):
                # report errors to /dev/null
                pass
            def reportError(self, re):
                # no error recovery yet, just crash!
                raise re
        return TLexer
    def testValid(self):
        # 'ffofoofooo' lexes into four FOO tokens of increasing length
        stream = antlr3.StringStream('ffofoofooo')
        lexer = self.getLexer(stream)
        token = lexer.nextToken()
        assert token.type == self.lexerModule.FOO
        assert token.start == 0, token.start
        assert token.stop == 0, token.stop
        assert token.text == 'f', token.text
        token = lexer.nextToken()
        assert token.type == self.lexerModule.FOO
        assert token.start == 1, token.start
        assert token.stop == 2, token.stop
        assert token.text == 'fo', token.text
        token = lexer.nextToken()
        assert token.type == self.lexerModule.FOO
        assert token.start == 3, token.start
        assert token.stop == 5, token.stop
        assert token.text == 'foo', token.text
        token = lexer.nextToken()
        assert token.type == self.lexerModule.FOO
        assert token.start == 6, token.start
        assert token.stop == 9, token.stop
        assert token.text == 'fooo', token.text
        token = lexer.nextToken()
        assert token.type == self.lexerModule.EOF
    def testMalformedInput(self):
        # input not starting with 'f' must raise a mismatch
        stream = antlr3.StringStream('2')
        lexer = self.getLexer(stream)
        try:
            token = lexer.nextToken()
            self.fail()
        except antlr3.MismatchedTokenException, exc:
            self.failUnlessEqual(exc.expecting, 'f')
            self.failUnlessEqual(exc.unexpectedType, '2')
# Allow this test module to be run directly.
if __name__ == '__main__':
    unittest.main()
| Python |
import antlr3
import testbase
import unittest
class t043synpred(testbase.ANTLRTest):
    """Smoke test for syntactic predicates (grammar t043synpred)."""

    def setUp(self):
        self.compileGrammar()

    def lexerClass(self, base):
        # fail hard on lexer errors instead of recovering
        class TLexer(base):
            def recover(self, input, re):
                raise
        return TLexer

    def parserClass(self, base):
        # fail hard on parser errors instead of recovering
        class TParser(base):
            def recover(self, input, re):
                raise
        return TParser

    def testValid1(self):
        stream = antlr3.StringStream(' +foo>')
        tokens = antlr3.CommonTokenStream(self.getLexer(stream))
        self.getParser(tokens).a()
# Allow this test module to be run directly.
if __name__ == '__main__':
    unittest.main()
| Python |
import antlr3
import testbase
import unittest
class t023scopes(testbase.ANTLRTest):
    """Smoke test: rule 'prog' accepts a single identifier."""

    def setUp(self):
        self.compileGrammar()

    def testValid1(self):
        stream = antlr3.StringStream('foobar')
        tokens = antlr3.CommonTokenStream(self.getLexer(stream))
        self.getParser(tokens).prog()
# Allow this test module to be run directly.
if __name__ == '__main__':
    unittest.main()
| Python |
import os
import sys
import antlr3
import testbase
import unittest
class t021hoist(testbase.ANTLRTest):
    """Hoisted semantic predicates: 'enum' is a keyword only when enabled."""

    def setUp(self):
        self.compileGrammar()

    def _stat(self, enableEnum):
        # Parse the input 'enum' with the predicate flag set as requested
        # and return the classification produced by rule 'stat'.
        stream = antlr3.StringStream('enum')
        tokens = antlr3.CommonTokenStream(self.getLexer(stream))
        parser = self.getParser(tokens)
        parser.enableEnum = enableEnum
        return parser.stat()

    def testValid1(self):
        result = self._stat(True)
        assert result == 'keyword', repr(result)

    def testValid2(self):
        result = self._stat(False)
        assert result == 'ID', repr(result)
# Allow this test module to be run directly.
if __name__ == '__main__':
    unittest.main()
| Python |
import antlr3
import testbase
import unittest
class t002lexer(testbase.ANTLRTest):
    """Tests for a two-token lexer (ZERO: '0', ONE: '1')."""
    def setUp(self):
        self.compileGrammar()
    def lexerClass(self, base):
        # subclass the generated lexer so errors crash instead of recovering
        class TLexer(base):
            def emitErrorMessage(self, msg):
                # report errors to /dev/null
                pass
            def reportError(self, re):
                # no error recovery yet, just crash!
                raise re
        return TLexer
    def testValid(self):
        stream = antlr3.StringStream('01')
        lexer = self.getLexer(stream)
        token = lexer.nextToken()
        self.failUnlessEqual(token.type, self.lexerModule.ZERO)
        token = lexer.nextToken()
        self.failUnlessEqual(token.type, self.lexerModule.ONE)
        token = lexer.nextToken()
        self.failUnlessEqual(token.type, self.lexerModule.EOF)
    def testMalformedInput(self):
        # '2' matches neither alternative -> NoViableAltException
        stream = antlr3.StringStream('2')
        lexer = self.getLexer(stream)
        try:
            token = lexer.nextToken()
            self.fail()
        except antlr3.NoViableAltException, exc:
            self.failUnlessEqual(exc.unexpectedType, '2')
# Allow this test module to be run directly.
if __name__ == '__main__':
    unittest.main()
| Python |
import antlr3
import testbase
import unittest
class T(testbase.ANTLRTest):
    """Tests for the -trace option and the rule invocation stack."""

    def setUp(self):
        self.compileGrammar(options='-trace')

    def lexerClass(self, base):
        # record rule entry/exit in .traces; crash on errors
        class TLexer(base):
            def __init__(self, *args, **kwargs):
                base.__init__(self, *args, **kwargs)
                self.traces = []

            def traceIn(self, ruleName, ruleIndex):
                self.traces.append('>%s' % ruleName)

            def traceOut(self, ruleName, ruleIndex):
                self.traces.append('<%s' % ruleName)

            def recover(self, input, re):
                # no error recovery yet, just crash!
                raise
        return TLexer

    def parserClass(self, base):
        # record rule entry/exit in .traces; crash on errors
        class TParser(base):
            def __init__(self, *args, **kwargs):
                base.__init__(self, *args, **kwargs)
                self.traces = []

            def traceIn(self, ruleName, ruleIndex):
                self.traces.append('>%s' % ruleName)

            def traceOut(self, ruleName, ruleIndex):
                self.traces.append('<%s' % ruleName)

            def recover(self, input, re):
                # no error recovery yet, just crash!
                raise

            def getRuleInvocationStack(self):
                return self._getRuleInvocationStack(base.__module__)
        return TParser

    def _setup(self, text):
        # Build lexer + parser for *text*; return both for inspection.
        lexer = self.getLexer(antlr3.StringStream(text))
        parser = self.getParser(antlr3.CommonTokenStream(lexer))
        return lexer, parser

    def testTrace(self):
        lexer, parser = self._setup('< 1 + 2 + 3 >')
        parser.a()
        self.failUnlessEqual(
            lexer.traces,
            [ '>T__6', '<T__6', '>WS', '<WS', '>INT', '<INT', '>WS', '<WS',
              '>T__8', '<T__8', '>WS', '<WS', '>INT', '<INT', '>WS', '<WS',
              '>T__8', '<T__8', '>WS', '<WS', '>INT', '<INT', '>WS', '<WS',
              '>T__7', '<T__7']
            )
        self.failUnlessEqual(
            parser.traces,
            [ '>a', '>synpred1_t044trace_fragment', '<synpred1_t044trace_fragment', '>b', '>c',
              '<c', '>c', '<c', '>c', '<c', '<b', '<a' ]
            )

    def testInvokationStack(self):
        _, parser = self._setup('< 1 + 2 + 3 >')
        parser.a()
        self.failUnlessEqual(
            parser._stack,
            ['a', 'b', 'c']
            )
# Allow this test module to be run directly.
if __name__ == '__main__':
    unittest.main()
| Python |
import antlr3
import testbase
import unittest
class t039labels(testbase.ANTLRTest):
    """Tests for list labels (ids+=...) and plain labels (w=...)."""

    def setUp(self):
        self.compileGrammar()

    def lexerClass(self, base):
        # fail hard on lexer errors instead of recovering
        class TLexer(base):
            def recover(self, input, re):
                raise
        return TLexer

    def parserClass(self, base):
        # fail hard on parser errors instead of recovering
        class TParser(base):
            def recover(self, input, re):
                raise
        return TParser

    def testValid1(self):
        stream = antlr3.StringStream(
            'a, b, c, 1, 2 A FOOBAR GNU1 A BLARZ'
            )
        tokens = antlr3.CommonTokenStream(self.getLexer(stream))
        ids, w = self.getParser(tokens).a()

        expectedTexts = ['a', 'b', 'c', '1', '2', 'A']
        assert len(ids) == len(expectedTexts), ids
        for token, expectedText in zip(ids, expectedTexts):
            assert token.text == expectedText, token
        assert w.text == 'GNU1', w
# Allow this test module to be run directly.
if __name__ == '__main__':
    unittest.main()
| Python |
import os
import antlr3
import testbase
import unittest
class t019lexer(testbase.ANTLRTest):
    """Lex the companion .input file end to end without errors."""

    def setUp(self):
        self.compileGrammar()

    def testValid(self):
        # the test input lives next to this script, with a .input suffix
        inputPath = os.path.splitext(__file__)[0] + '.input'
        # fix: the file handle was never closed (open(...).read());
        # close it explicitly (try/finally keeps Python 2.3 compatibility)
        f = open(inputPath)
        try:
            stream = antlr3.StringStream(f.read())
        finally:
            f.close()
        lexer = self.getLexer(stream)

        # drain the whole token stream; any lexer error would raise
        while True:
            token = lexer.nextToken()
            if token.type == antlr3.EOF:
                break
# Allow this test module to be run directly.
if __name__ == '__main__':
    unittest.main()
| Python |
import antlr3
import testbase
import unittest
class t035ruleLabelPropertyRef(testbase.ANTLRTest):
    """Tests for $label.start/$label.stop/$label.text property references."""

    def setUp(self):
        self.compileGrammar()

    def lexerClass(self, base):
        # fail hard on lexer errors instead of recovering
        class TLexer(base):
            def recover(self, input, re):
                raise
        return TLexer

    def parserClass(self, base):
        # fail hard on parser errors instead of recovering
        class TParser(base):
            def recover(self, input, re):
                raise
        return TParser

    def testValid1(self):
        stream = antlr3.StringStream(' a a a a ')
        tokens = antlr3.CommonTokenStream(self.getLexer(stream))
        start, stop, text = self.getParser(tokens).a()

        # first token of rule b is the 2nd token (counting hidden tokens)
        assert start.index == 1, start
        # last token of rule b is the 7th token (counting hidden tokens)
        assert stop.index == 7, stop
        assert text == "a a a a", text
# Allow this test module to be run directly.
if __name__ == '__main__':
    unittest.main()
| Python |
import antlr3
import testbase
import unittest
class t005lexer(testbase.ANTLRTest):
    """Tests for a lexer rule with a positive closure ('f' 'o'+)."""
    def setUp(self):
        self.compileGrammar()
    def lexerClass(self, base):
        # subclass the generated lexer so errors crash instead of recovering
        class TLexer(base):
            def emitErrorMessage(self, msg):
                # report errors to /dev/null
                pass
            def reportError(self, re):
                # no error recovery yet, just crash!
                raise re
        return TLexer
    def testValid(self):
        # 'fofoofooo' lexes into three FOO tokens of increasing length
        stream = antlr3.StringStream('fofoofooo')
        lexer = self.getLexer(stream)
        token = lexer.nextToken()
        assert token.type == self.lexerModule.FOO
        assert token.start == 0, token.start
        assert token.stop == 1, token.stop
        assert token.text == 'fo', token.text
        token = lexer.nextToken()
        assert token.type == self.lexerModule.FOO
        assert token.start == 2, token.start
        assert token.stop == 4, token.stop
        assert token.text == 'foo', token.text
        token = lexer.nextToken()
        assert token.type == self.lexerModule.FOO
        assert token.start == 5, token.start
        assert token.stop == 8, token.stop
        assert token.text == 'fooo', token.text
        token = lexer.nextToken()
        assert token.type == self.lexerModule.EOF
    def testMalformedInput1(self):
        # input not starting with 'f' must raise a mismatch
        stream = antlr3.StringStream('2')
        lexer = self.getLexer(stream)
        try:
            token = lexer.nextToken()
            raise AssertionError
        except antlr3.MismatchedTokenException, exc:
            assert exc.expecting == 'f', repr(exc.expecting)
            assert exc.unexpectedType == '2', repr(exc.unexpectedType)
    def testMalformedInput2(self):
        # 'f' without at least one 'o' must fail the (+) loop
        stream = antlr3.StringStream('f')
        lexer = self.getLexer(stream)
        try:
            token = lexer.nextToken()
            raise AssertionError
        except antlr3.EarlyExitException, exc:
            assert exc.unexpectedType == antlr3.EOF, repr(exc.unexpectedType)
# Allow this test module to be run directly.
if __name__ == '__main__':
    unittest.main()
| Python |
import unittest
import textwrap
import antlr3
import testbase
class t042ast(testbase.ANTLRTest):
## def lexerClass(self, base):
## class TLexer(base):
## def reportError(self, re):
## # no error recovery yet, just crash!
## raise re
## return TLexer
    def parserClass(self, base):
        # subclass the generated parser so errors crash instead of recovering
        class TParser(base):
            def recover(self, input, re):
                # no error recovery yet, just crash!
                raise
        return TParser
def parse(self, text, method, rArgs=[], **kwargs):
self.compileGrammar() #options='-trace')
cStream = antlr3.StringStream(text)
self.lexer = self.getLexer(cStream)
tStream = antlr3.CommonTokenStream(self.lexer)
self.parser = self.getParser(tStream)
for attr, val in kwargs.items():
setattr(self.parser, attr, val)
return getattr(self.parser, method)(*rArgs)
    # --- one test per grammar rule r1..r14; each parses a snippet and
    # --- checks the string form of the resulting AST ---
    def testR1(self):
        r = self.parse("1 + 2", 'r1')
        self.failUnlessEqual(
            r.tree.toStringTree(),
            '(+ 1 2)'
            )
    def testR2a(self):
        r = self.parse("assert 2+3;", 'r2')
        self.failUnlessEqual(
            r.tree.toStringTree(),
            '(assert (+ 2 3))'
            )
    def testR2b(self):
        r = self.parse("assert 2+3 : 5;", 'r2')
        self.failUnlessEqual(
            r.tree.toStringTree(),
            '(assert (+ 2 3) 5)'
            )
    def testR3a(self):
        r = self.parse("if 1 fooze", 'r3')
        self.failUnlessEqual(
            r.tree.toStringTree(),
            '(if 1 fooze)'
            )
    def testR3b(self):
        r = self.parse("if 1 fooze else fooze", 'r3')
        self.failUnlessEqual(
            r.tree.toStringTree(),
            '(if 1 fooze fooze)'
            )
    def testR4a(self):
        r = self.parse("while 2 fooze", 'r4')
        self.failUnlessEqual(
            r.tree.toStringTree(),
            '(while 2 fooze)'
            )
    def testR5a(self):
        r = self.parse("return;", 'r5')
        self.failUnlessEqual(
            r.tree.toStringTree(),
            'return'
            )
    def testR5b(self):
        r = self.parse("return 2+3;", 'r5')
        self.failUnlessEqual(
            r.tree.toStringTree(),
            '(return (+ 2 3))'
            )
    def testR6a(self):
        r = self.parse("3", 'r6')
        self.failUnlessEqual(
            r.tree.toStringTree(),
            '3'
            )
    def testR6b(self):
        # two statements produce a flat tree (nil root)
        r = self.parse("3 a", 'r6')
        self.failUnlessEqual(
            r.tree.toStringTree(),
            '3 a'
            )
    def testR7(self):
        # r7 suppresses tree construction entirely
        r = self.parse("3", 'r7')
        self.failUnless(
            r.tree is None
            )
    def testR8(self):
        # rewrite rule reorders type and name
        r = self.parse("var foo:bool", 'r8')
        self.failUnlessEqual(
            r.tree.toStringTree(),
            '(var bool foo)'
            )
    def testR9(self):
        # imaginary VARDEF token as root
        r = self.parse("int foo;", 'r9')
        self.failUnlessEqual(
            r.tree.toStringTree(),
            '(VARDEF int foo)'
            )
    def testR10(self):
        # action replaces the node payload (10 -> 10.0)
        r = self.parse("10", 'r10')
        self.failUnlessEqual(
            r.tree.toStringTree(),
            '10.0'
            )
    def testR11a(self):
        r = self.parse("1+2", 'r11')
        self.failUnlessEqual(
            r.tree.toStringTree(),
            '(EXPR (+ 1 2))'
            )
    def testR11b(self):
        # empty input still yields the imaginary EXPR root
        r = self.parse("", 'r11')
        self.failUnlessEqual(
            r.tree.toStringTree(),
            'EXPR'
            )
    def testR12a(self):
        r = self.parse("foo", 'r12')
        self.failUnlessEqual(
            r.tree.toStringTree(),
            'foo'
            )
    def testR12b(self):
        r = self.parse("foo, bar, gnurz", 'r12')
        self.failUnlessEqual(
            r.tree.toStringTree(),
            'foo bar gnurz'
            )
    def testR13a(self):
        r = self.parse("int foo;", 'r13')
        self.failUnlessEqual(
            r.tree.toStringTree(),
            '(int foo)'
            )
    def testR13b(self):
        r = self.parse("bool foo, bar, gnurz;", 'r13')
        self.failUnlessEqual(
            r.tree.toStringTree(),
            '(bool foo bar gnurz)'
            )
    def testR14a(self):
        r = self.parse("1+2 int", 'r14')
        self.failUnlessEqual(
            r.tree.toStringTree(),
            '(EXPR (+ 1 2) int)'
            )
def testR14b(self):
r = self.parse("1+2 int bool", 'r14')
self.failUnlessEqual(
r.tree.toStringTree(),
'(EXPR (+ 1 2) int bool)'
)
def testR14c(self):
r = self.parse("int bool", 'r14')
self.failUnlessEqual(
r.tree.toStringTree(),
'(EXPR int bool)'
)
def testR14d(self):
r = self.parse("fooze fooze int bool", 'r14')
self.failUnlessEqual(
r.tree.toStringTree(),
'(EXPR fooze fooze int bool)'
)
def testR14e(self):
r = self.parse("7+9 fooze fooze int bool", 'r14')
self.failUnlessEqual(
r.tree.toStringTree(),
'(EXPR (+ 7 9) fooze fooze int bool)'
)
def testR15(self):
r = self.parse("7", 'r15')
self.failUnlessEqual(
r.tree.toStringTree(),
'7 7'
)
def testR16a(self):
r = self.parse("int foo", 'r16')
self.failUnlessEqual(
r.tree.toStringTree(),
'(int foo)'
)
def testR16b(self):
r = self.parse("int foo, bar, gnurz", 'r16')
self.failUnlessEqual(
r.tree.toStringTree(),
'(int foo) (int bar) (int gnurz)'
)
def testR17a(self):
r = self.parse("for ( fooze ; 1 + 2 ; fooze ) fooze", 'r17')
self.failUnlessEqual(
r.tree.toStringTree(),
'(for fooze (+ 1 2) fooze fooze)'
)
def testR18a(self):
r = self.parse("for", 'r18')
self.failUnlessEqual(
r.tree.toStringTree(),
'BLOCK'
)
def testR19a(self):
r = self.parse("for", 'r19')
self.failUnlessEqual(
r.tree.toStringTree(),
'for'
)
def testR20a(self):
r = self.parse("for", 'r20')
self.failUnlessEqual(
r.tree.toStringTree(),
'FOR'
)
def testR21a(self):
r = self.parse("for", 'r21')
self.failUnlessEqual(
r.tree.toStringTree(),
'BLOCK'
)
def testR22a(self):
r = self.parse("for", 'r22')
self.failUnlessEqual(
r.tree.toStringTree(),
'for'
)
def testR23a(self):
r = self.parse("for", 'r23')
self.failUnlessEqual(
r.tree.toStringTree(),
'FOR'
)
def testR24a(self):
r = self.parse("fooze 1 + 2", 'r24')
self.failUnlessEqual(
r.tree.toStringTree(),
'(fooze (+ 1 2))'
)
def testR25a(self):
r = self.parse("fooze, fooze2 1 + 2", 'r25')
self.failUnlessEqual(
r.tree.toStringTree(),
'(fooze (+ 1 2))'
)
def testR26a(self):
r = self.parse("fooze, fooze2", 'r26')
self.failUnlessEqual(
r.tree.toStringTree(),
'(BLOCK fooze fooze2)'
)
def testR27a(self):
r = self.parse("fooze 1 + 2", 'r27')
self.failUnlessEqual(
r.tree.toStringTree(),
'(fooze (fooze (+ 1 2)))'
)
def testR28(self):
r = self.parse("foo28a", 'r28')
self.failUnless(
r.tree is None
)
def testR29(self):
try:
r = self.parse("", 'r29')
self.fail()
except RuntimeError:
pass
# FIXME: broken upstream?
## def testR30(self):
## try:
## r = self.parse("fooze fooze", 'r30')
## self.fail(r.tree.toStringTree())
## except RuntimeError:
## pass
def testR31a(self):
r = self.parse("public int gnurz = 1 + 2;", 'r31', flag=0)
self.failUnlessEqual(
r.tree.toStringTree(),
'(VARDEF gnurz public int (+ 1 2))'
)
def testR31b(self):
r = self.parse("public int gnurz = 1 + 2;", 'r31', flag=1)
self.failUnlessEqual(
r.tree.toStringTree(),
'(VARIABLE gnurz public int (+ 1 2))'
)
def testR31c(self):
r = self.parse("public int gnurz = 1 + 2;", 'r31', flag=2)
self.failUnlessEqual(
r.tree.toStringTree(),
'(FIELD gnurz public int (+ 1 2))'
)
def testR32a(self):
r = self.parse("gnurz 32", 'r32', [1], flag=2)
self.failUnlessEqual(
r.tree.toStringTree(),
'gnurz'
)
def testR32b(self):
r = self.parse("gnurz 32", 'r32', [2], flag=2)
self.failUnlessEqual(
r.tree.toStringTree(),
'32'
)
def testR32c(self):
r = self.parse("gnurz 32", 'r32', [3], flag=2)
self.failUnless(
r.tree is None
)
def testR33a(self):
r = self.parse("public private fooze", 'r33')
self.failUnlessEqual(
r.tree.toStringTree(),
'fooze'
)
def testR34a(self):
r = self.parse("public class gnurz { fooze fooze2 }", 'r34')
self.failUnlessEqual(
r.tree.toStringTree(),
'(class gnurz public fooze fooze2)'
)
def testR34b(self):
r = self.parse("public class gnurz extends bool implements int, bool { fooze fooze2 }", 'r34')
self.failUnlessEqual(
r.tree.toStringTree(),
'(class gnurz public (extends bool) (implements int bool) fooze fooze2)'
)
def testR35(self):
try:
r = self.parse("{ extends }", 'r35')
self.fail()
except RuntimeError:
pass
def testR36a(self):
r = self.parse("if ( 1 + 2 ) fooze", 'r36')
self.failUnlessEqual(
r.tree.toStringTree(),
'(if (EXPR (+ 1 2)) fooze)'
)
def testR36b(self):
r = self.parse("if ( 1 + 2 ) fooze else fooze2", 'r36')
self.failUnlessEqual(
r.tree.toStringTree(),
'(if (EXPR (+ 1 2)) fooze fooze2)'
)
def testR37(self):
r = self.parse("1 + 2 + 3", 'r37')
self.failUnlessEqual(
r.tree.toStringTree(),
'(+ (+ 1 2) 3)'
)
def testR38(self):
r = self.parse("1 + 2 + 3", 'r38')
self.failUnlessEqual(
r.tree.toStringTree(),
'(+ (+ 1 2) 3)'
)
def testR39a(self):
r = self.parse("gnurz[1]", 'r39')
self.failUnlessEqual(
r.tree.toStringTree(),
'(INDEX gnurz 1)'
)
def testR39b(self):
r = self.parse("gnurz(2)", 'r39')
self.failUnlessEqual(
r.tree.toStringTree(),
'(CALL gnurz 2)'
)
def testR39c(self):
r = self.parse("gnurz.gnarz", 'r39')
self.failUnlessEqual(
r.tree.toStringTree(),
'(FIELDACCESS gnurz gnarz)'
)
def testR39d(self):
r = self.parse("gnurz.gnarz.gnorz", 'r39')
self.failUnlessEqual(
r.tree.toStringTree(),
'(FIELDACCESS (FIELDACCESS gnurz gnarz) gnorz)'
)
def testR40(self):
r = self.parse("1 + 2 + 3;", 'r40')
self.failUnlessEqual(
r.tree.toStringTree(),
'(+ 1 2 3)'
)
def testR41(self):
r = self.parse("1 + 2 + 3;", 'r41')
self.failUnlessEqual(
r.tree.toStringTree(),
'(3 (2 1))'
)
def testR42(self):
r = self.parse("gnurz, gnarz, gnorz", 'r42')
self.failUnlessEqual(
r.tree.toStringTree(),
'gnurz gnarz gnorz'
)
def testR43(self):
r = self.parse("gnurz, gnarz, gnorz", 'r43')
self.failUnless(
r.tree is None
)
self.failUnlessEqual(
r.res,
['gnurz', 'gnarz', 'gnorz']
)
def testR44(self):
r = self.parse("gnurz, gnarz, gnorz", 'r44')
self.failUnlessEqual(
r.tree.toStringTree(),
'(gnorz (gnarz gnurz))'
)
def testR45(self):
r = self.parse("gnurz", 'r45')
self.failUnlessEqual(
r.tree.toStringTree(),
'gnurz'
)
def testR46(self):
r = self.parse("gnurz, gnarz, gnorz", 'r46')
self.failUnless(
r.tree is None
)
self.failUnlessEqual(
r.res,
['gnurz', 'gnarz', 'gnorz']
)
def testR47(self):
r = self.parse("gnurz, gnarz, gnorz", 'r47')
self.failUnlessEqual(
r.tree.toStringTree(),
'gnurz gnarz gnorz'
)
def testR48(self):
r = self.parse("gnurz, gnarz, gnorz", 'r48')
self.failUnlessEqual(
r.tree.toStringTree(),
'gnurz gnarz gnorz'
)
def testR49(self):
r = self.parse("gnurz gnorz", 'r49')
self.failUnlessEqual(
r.tree.toStringTree(),
'(gnurz gnorz)'
)
def testR50(self):
r = self.parse("gnurz", 'r50')
self.failUnlessEqual(
r.tree.toStringTree(),
'(1.0 gnurz)'
)
def testR51(self):
r = self.parse("gnurza gnurzb gnurzc", 'r51')
self.failUnlessEqual(
r.res.toStringTree(),
'gnurzb'
)
def testR52(self):
r = self.parse("gnurz", 'r52')
self.failUnlessEqual(
r.res.toStringTree(),
'gnurz'
)
def testR53(self):
r = self.parse("gnurz", 'r53')
self.failUnlessEqual(
r.res.toStringTree(),
'gnurz'
)
def testR54(self):
r = self.parse("gnurza 1 + 2 gnurzb", 'r54')
self.failUnlessEqual(
r.tree.toStringTree(),
'(+ 1 2)'
)
def testR55a(self):
r = self.parse("public private 1 + 2", 'r55')
self.failUnlessEqual(
r.tree.toStringTree(),
'public private (+ 1 2)'
)
def testR55b(self):
r = self.parse("public fooze", 'r55')
self.failUnlessEqual(
r.tree.toStringTree(),
'public fooze'
)
def testR56(self):
r = self.parse("a b c d", 'r56')
self.failUnlessEqual(
r.tree.toStringTree(),
'foo'
)
def testR57(self):
r = self.parse("a b c d", 'r57')
self.failUnlessEqual(
r.tree.toStringTree(),
'foo'
)
def testR59(self):
r = self.parse("a b c fooze", 'r59')
self.failUnlessEqual(
r.tree.toStringTree(),
'(a fooze) (b fooze) (c fooze)'
)
if __name__ == '__main__':
    # Run this module's tests directly.
    unittest.main()
| Python |
import antlr3
import testbase
import unittest
class t010lexer(testbase.ANTLRTest):
def setUp(self):
self.compileGrammar()
def lexerClass(self, base):
class TLexer(base):
def emitErrorMessage(self, msg):
# report errors to /dev/null
pass
def reportError(self, re):
# no error recovery yet, just crash!
raise re
return TLexer
def testValid(self):
stream = antlr3.StringStream('foobar _Ab98 \n A12sdf')
lexer = self.getLexer(stream)
token = lexer.nextToken()
assert token.type == self.lexerModule.IDENTIFIER
assert token.start == 0, token.start
assert token.stop == 5, token.stop
assert token.text == 'foobar', token.text
token = lexer.nextToken()
assert token.type == self.lexerModule.WS
assert token.start == 6, token.start
assert token.stop == 6, token.stop
assert token.text == ' ', token.text
token = lexer.nextToken()
assert token.type == self.lexerModule.IDENTIFIER
assert token.start == 7, token.start
assert token.stop == 11, token.stop
assert token.text == '_Ab98', token.text
token = lexer.nextToken()
assert token.type == self.lexerModule.WS
assert token.start == 12, token.start
assert token.stop == 14, token.stop
assert token.text == ' \n ', token.text
token = lexer.nextToken()
assert token.type == self.lexerModule.IDENTIFIER
assert token.start == 15, token.start
assert token.stop == 20, token.stop
assert token.text == 'A12sdf', token.text
token = lexer.nextToken()
assert token.type == self.lexerModule.EOF
def testMalformedInput(self):
stream = antlr3.StringStream('a-b')
lexer = self.getLexer(stream)
lexer.nextToken()
try:
token = lexer.nextToken()
raise AssertionError, token
except antlr3.NoViableAltException, exc:
assert exc.unexpectedType == '-', repr(exc.unexpectedType)
assert exc.charPositionInLine == 1, repr(exc.charPositionInLine)
assert exc.line == 1, repr(exc.line)
if __name__ == '__main__':
    # Run this module's tests directly.
    unittest.main()
| Python |
import antlr3
import testbase
import unittest
class t003lexer(testbase.ANTLRTest):
def setUp(self):
self.compileGrammar()
def lexerClass(self, base):
class TLexer(base):
def emitErrorMessage(self, msg):
# report errors to /dev/null
pass
def reportError(self, re):
# no error recovery yet, just crash!
raise re
return TLexer
def testValid(self):
stream = antlr3.StringStream('0fooze1')
lexer = self.getLexer(stream)
token = lexer.nextToken()
self.failUnlessEqual(token.type, self.lexerModule.ZERO)
token = lexer.nextToken()
self.failUnlessEqual(token.type, self.lexerModule.FOOZE)
token = lexer.nextToken()
self.failUnlessEqual(token.type, self.lexerModule.ONE)
token = lexer.nextToken()
self.failUnlessEqual(token.type, self.lexerModule.EOF)
def testMalformedInput(self):
stream = antlr3.StringStream('2')
lexer = self.getLexer(stream)
try:
token = lexer.nextToken()
self.fail()
except antlr3.NoViableAltException, exc:
self.failUnlessEqual(exc.unexpectedType, '2')
if __name__ == '__main__':
    # Run this module's tests directly.
    unittest.main()
| Python |
import unittest
import textwrap
import antlr3
import testbase
class T(testbase.ANTLRTest):
    """Regression test: parsing 'public fooze' with rule r must not raise."""

    def testbug(self):
        self.compileGrammar()

        lexer = self.getLexer(antlr3.StringStream("public fooze"))
        tokens = antlr3.CommonTokenStream(lexer)
        self.getParser(tokens).r()
if __name__ == '__main__':
    # Run this module's tests directly.
    unittest.main()
| Python |
import antlr3
import testbase
import unittest
class t038lexerRuleLabel(testbase.ANTLRTest):
def setUp(self):
self.compileGrammar()
def lexerClass(self, base):
class TLexer(base):
def recover(self, input, re):
# no error recovery yet, just crash!
raise
return TLexer
def testValid1(self):
cStream = antlr3.StringStream('a 2')
lexer = self.getLexer(cStream)
while True:
t = lexer.nextToken()
if t.type == antlr3.EOF:
break
print t
if __name__ == '__main__':
    # Run this module's tests directly.
    unittest.main()
| Python |
import unittest
import textwrap
import antlr3
import antlr3.tree
import testbase
import sys
class T(testbase.ANTLRTest):
    """Composite ("import") grammar tests.

    A master grammar imports one or more slave grammars; the tests check
    that rules, rule arguments, members and token types are correctly
    delegated between master and slaves, for both parsers and lexers.
    """

    def setUp(self):
        # Slave grammars are generated into self.baseDir and imported as
        # plain python modules, so that directory must be on sys.path.
        self.oldPath = sys.path[:]
        sys.path.insert(0, self.baseDir)

    def tearDown(self):
        # Undo the sys.path manipulation from setUp.
        sys.path = self.oldPath

    def parserClass(self, base):
        """Wrap the generated parser: capture() collects action output."""
        class TParser(base):
            def __init__(self, *args, **kwargs):
                base.__init__(self, *args, **kwargs)

                self._output = ""

            def capture(self, t):
                # Grammar actions call this to record their output.
                self._output += t

            def traceIn(self, ruleName, ruleIndex):
                # NOTE(review): assumes self.traces is provided by the base
                # class or test harness -- confirm.
                self.traces.append('>'+ruleName)

            def traceOut(self, ruleName, ruleIndex):
                self.traces.append('<'+ruleName)

            def recover(self, input, re):
                # no error recovery yet, just crash!
                raise

        return TParser

    def lexerClass(self, base):
        """Wrap the generated lexer: capture() collects action output."""
        class TLexer(base):
            def __init__(self, *args, **kwargs):
                base.__init__(self, *args, **kwargs)

                self._output = ""

            def capture(self, t):
                self._output += t

            def traceIn(self, ruleName, ruleIndex):
                self.traces.append('>'+ruleName)

            def traceOut(self, ruleName, ruleIndex):
                self.traces.append('<'+ruleName)

            def recover(self, input):
                # no error recovery yet, just crash!
                # NOTE(review): signature differs from the parser's
                # recover(input, re) -- confirm against the runtime API.
                raise

        return TLexer

    def execParser(self, grammar, grammarEntry, slaves, input):
        """Compile slaves and master, parse `input` with rule `grammarEntry`.

        Returns the text collected via capture().
        """
        for slave in slaves:
            parserName = self.writeInlineGrammar(slave)[0]

            # slave parsers are imported as normal python modules
            # to force reloading current version, purge module from sys.modules
            try:
                del sys.modules[parserName+'Parser']
            except KeyError:
                pass

        lexerCls, parserCls = self.compileInlineGrammar(grammar)

        cStream = antlr3.StringStream(input)
        lexer = lexerCls(cStream)
        tStream = antlr3.CommonTokenStream(lexer)
        parser = parserCls(tStream)
        getattr(parser, grammarEntry)()

        return parser._output

    def execLexer(self, grammar, slaves, input):
        """Compile slaves and master lexer grammar, then tokenize `input`.

        Returns the captured action output followed by the token texts.
        """
        for slave in slaves:
            parserName = self.writeInlineGrammar(slave)[0]

            # slave parsers are imported as normal python modules
            # to force reloading current version, purge module from sys.modules
            # NOTE(review): purges '<name>Parser' even for lexer slaves --
            # presumably matching the generated module name; confirm.
            try:
                del sys.modules[parserName+'Parser']
            except KeyError:
                pass

        lexerCls = self.compileInlineGrammar(grammar)

        cStream = antlr3.StringStream(input)
        lexer = lexerCls(cStream)

        while True:
            token = lexer.nextToken()
            if token is None or token.type == antlr3.EOF:
                break

            lexer._output += token.text

        return lexer._output

    def testDelegatorInvokesDelegateRule(self):
        # Master rule s delegates to slave rule a.
        slave = textwrap.dedent(
            r'''
            parser grammar S1;
            options {
            language=Python;
            }
            @members {
            def capture(self, t):
                self.gM1.capture(t)
            }
            a : B { self.capture("S.a") } ;
            ''')

        master = textwrap.dedent(
            r'''
            grammar M1;
            options {
            language=Python;
            }
            import S1;
            s : a ;
            B : 'b' ; // defines B from inherited token space
            WS : (' '|'\n') {self.skip()} ;
            ''')

        found = self.execParser(
            master, 's',
            slaves=[slave],
            input="b"
            )

        self.failUnlessEqual("S.a", found)

    def testDelegatorInvokesDelegateRuleWithArgs(self):
        # Delegate rule takes an argument and returns a value.
        slave = textwrap.dedent(
            r'''
            parser grammar S2;
            options {
            language=Python;
            }
            @members {
            def capture(self, t):
                self.gM2.capture(t)
            }
            a[x] returns [y] : B {self.capture("S.a"); $y="1000";} ;
            ''')

        master = textwrap.dedent(
            r'''
            grammar M2;
            options {
            language=Python;
            }
            import S2;
            s : label=a[3] {self.capture($label.y);} ;
            B : 'b' ; // defines B from inherited token space
            WS : (' '|'\n') {self.skip()} ;
            ''')

        found = self.execParser(
            master, 's',
            slaves=[slave],
            input="b"
            )

        self.failUnlessEqual("S.a1000", found)

    def testDelegatorAccessesDelegateMembers(self):
        # Master action calls a @members method defined in the slave.
        slave = textwrap.dedent(
            r'''
            parser grammar S3;
            options {
            language=Python;
            }
            @members {
            def capture(self, t):
                self.gM3.capture(t)

            def foo(self):
                self.capture("foo")
            }
            a : B ;
            ''')

        master = textwrap.dedent(
            r'''
            grammar M3; // uses no rules from the import
            options {
            language=Python;
            }
            import S3;
            s : 'b' {self.gS3.foo();} ; // gS is import pointer
            WS : (' '|'\n') {self.skip()} ;
            ''')

        found = self.execParser(
            master, 's',
            slaves=[slave],
            input="b"
            )

        self.failUnlessEqual("foo", found)

    def testDelegatorInvokesFirstVersionOfDelegateRule(self):
        # With two slaves defining rule a, the first import wins.
        slave = textwrap.dedent(
            r'''
            parser grammar S4;
            options {
            language=Python;
            }
            @members {
            def capture(self, t):
                self.gM4.capture(t)
            }
            a : b {self.capture("S.a");} ;
            b : B ;
            ''')

        slave2 = textwrap.dedent(
            r'''
            parser grammar T4;
            options {
            language=Python;
            }
            @members {
            def capture(self, t):
                self.gM4.capture(t)
            }
            a : B {self.capture("T.a");} ; // hidden by S.a
            ''')

        master = textwrap.dedent(
            r'''
            grammar M4;
            options {
            language=Python;
            }
            import S4,T4;
            s : a ;
            B : 'b' ;
            WS : (' '|'\n') {self.skip()} ;
            ''')

        found = self.execParser(
            master, 's',
            slaves=[slave, slave2],
            input="b"
            )

        self.failUnlessEqual("S.a", found)

    def testDelegatesSeeSameTokenType(self):
        # Token types must be unified even when slaves declare them in
        # different orders.
        slave = textwrap.dedent(
            r'''
            parser grammar S5; // A, B, C token type order
            options {
            language=Python;
            }
            tokens { A; B; C; }
            @members {
            def capture(self, t):
                self.gM5.capture(t)
            }
            x : A {self.capture("S.x ");} ;
            ''')

        slave2 = textwrap.dedent(
            r'''
            parser grammar T5;
            options {
            language=Python;
            }
            tokens { C; B; A; } /// reverse order
            @members {
            def capture(self, t):
                self.gM5.capture(t)
            }
            y : A {self.capture("T.y");} ;
            ''')

        master = textwrap.dedent(
            r'''
            grammar M5;
            options {
            language=Python;
            }
            import S5,T5;
            s : x y ; // matches AA, which should be "aa"
            B : 'b' ; // another order: B, A, C
            A : 'a' ;
            C : 'c' ;
            WS : (' '|'\n') {self.skip()} ;
            ''')

        found = self.execParser(
            master, 's',
            slaves=[slave, slave2],
            input="aa"
            )

        self.failUnlessEqual("S.x T.y", found)

    def testDelegatorRuleOverridesDelegate(self):
        # Master's rule b overrides the slave's b, so 'c' is accepted.
        slave = textwrap.dedent(
            r'''
            parser grammar S6;
            options {
            language=Python;
            }
            @members {
            def capture(self, t):
                self.gM6.capture(t)
            }
            a : b {self.capture("S.a");} ;
            b : B ;
            ''')

        master = textwrap.dedent(
            r'''
            grammar M6;
            options {
            language=Python;
            }
            import S6;
            b : 'b'|'c' ;
            WS : (' '|'\n') {self.skip()} ;
            ''')

        found = self.execParser(
            master, 'a',
            slaves=[slave],
            input="c"
            )

        self.failUnlessEqual("S.a", found)

    # LEXER INHERITANCE

    def testLexerDelegatorInvokesDelegateRule(self):
        slave = textwrap.dedent(
            r'''
            lexer grammar S7;
            options {
            language=Python;
            }
            @members {
            def capture(self, t):
                self.gM7.capture(t)
            }
            A : 'a' {self.capture("S.A ");} ;
            C : 'c' ;
            ''')

        master = textwrap.dedent(
            r'''
            lexer grammar M7;
            options {
            language=Python;
            }
            import S7;
            B : 'b' ;
            WS : (' '|'\n') {self.skip()} ;
            ''')

        found = self.execLexer(
            master,
            slaves=[slave],
            input="abc"
            )

        self.failUnlessEqual("S.A abc", found)

    def testLexerDelegatorRuleOverridesDelegate(self):
        slave = textwrap.dedent(
            r'''
            lexer grammar S8;
            options {
            language=Python;
            }
            @members {
            def capture(self, t):
                self.gM8.capture(t)
            }
            A : 'a' {self.capture("S.A")} ;
            ''')

        master = textwrap.dedent(
            r'''
            lexer grammar M8;
            options {
            language=Python;
            }
            import S8;
            A : 'a' {self.capture("M.A ");} ;
            WS : (' '|'\n') {self.skip()} ;
            ''')

        found = self.execLexer(
            master,
            slaves=[slave],
            input="a"
            )

        self.failUnlessEqual("M.A a", found)
if __name__ == '__main__':
    # Run this module's tests directly.
    unittest.main()
| Python |
import unittest
import textwrap
import antlr3
import testbase
class T(testbase.ANTLRTest):
    """TokenRewriteStream test: the parser's rewrite actions wrap the
    parsed 'method' declarations into a Java-style wrapper class and
    hoist variable declarations to the top of each method.
    """

    def testRewrite(self):
        self.compileGrammar()

        # NOTE(review): indentation inside these literals may have been
        # normalized in this copy; the assertion only requires input and
        # expectedOutput to stay mutually consistent.
        input = textwrap.dedent(
            '''\
            method foo() {
            i = 3;
            k = i;
            i = k*4;
            }
            method bar() {
            j = i*2;
            }
            ''')

        cStream = antlr3.StringStream(input)
        lexer = self.getLexer(cStream)
        # TokenRewriteStream records the parser's insert/replace edits.
        tStream = antlr3.TokenRewriteStream(lexer)
        parser = self.getParser(tStream)
        parser.program()

        expectedOutput = textwrap.dedent('''\
        public class Wrapper {
        public void foo() {
        int k;
        int i;
        i = 3;
        k = i;
        i = k*4;
        }
        public void bar() {
        int j;
        j = i*2;
        }
        }
        ''')

        self.failUnlessEqual(
            str(tStream),
            expectedOutput
            )
if __name__ == '__main__':
    # Run this module's tests directly.
    unittest.main()
| Python |
import antlr3
import testbase
import unittest
class t032subrulePredict(testbase.ANTLRTest):
    """Subrule prediction tests for rules a and b."""

    def setUp(self):
        self.compileGrammar()

    def parserClass(self, base):
        class TParser(base):
            def recover(self, input, re):
                # no error recovery yet, just crash!
                raise

        return TParser

    def _run(self, text, rule):
        # Build the lexer/parser chain and invoke the given start rule.
        lexer = self.getLexer(antlr3.StringStream(text))
        tokens = antlr3.CommonTokenStream(lexer)
        parser = self.getParser(tokens)
        return getattr(parser, rule)()

    def testValid1(self):
        events = self._run('BEGIN A END', 'a')

    @testbase.broken("DFA tries to look beyond end of rule b", Exception)
    def testValid2(self):
        events = self._run(' A', 'b')
if __name__ == '__main__':
    # Run this module's tests directly.
    unittest.main()
| Python |
import antlr3
import testbase
import unittest
class t014parser(testbase.ANTLRTest):
    """Parser tests: collected decl/call events and error reporting."""

    def setUp(self):
        self.compileGrammar()

    def _parse(self, text):
        # Build the lexer/parser chain, run the 'document' start rule and
        # return the parser so callers can inspect its collected state.
        lexer = self.getLexer(antlr3.StringStream(text))
        tokens = antlr3.CommonTokenStream(lexer)
        parser = self.getParser(tokens)
        parser.document()
        return parser

    def testValid(self):
        parser = self._parse('var foobar; gnarz(); var blupp; flupp ( ) ;')

        assert len(parser.reportedErrors) == 0, parser.reportedErrors
        assert parser.events == [
            ('decl', 'foobar'),
            ('call', 'gnarz'),
            ('decl', 'blupp'),
            ('call', 'flupp')
            ], parser.events

    def testMalformedInput1(self):
        parser = self._parse('var; foo();')

        # FIXME: currently strings with formatted errors are collected
        # can't check error locations yet
        assert len(parser.reportedErrors) == 1, parser.reportedErrors
        assert parser.events == [], parser.events

    def testMalformedInput2(self):
        parser = self._parse('var foobar(); gnarz();')

        # FIXME: currently strings with formatted errors are collected
        # can't check error locations yet
        assert len(parser.reportedErrors) == 1, parser.reportedErrors
        assert parser.events == [
            ('call', 'gnarz'),
            ], parser.events

    def testMalformedInput3(self):
        parser = self._parse('gnarz(; flupp();')

        # FIXME: currently strings with formatted errors are collected
        # can't check error locations yet
        assert len(parser.reportedErrors) == 1, parser.reportedErrors
        assert parser.events == [
            ('call', 'gnarz'),
            ('call', 'flupp'),
            ], parser.events
if __name__ == '__main__':
    # Run this module's tests directly.
    unittest.main()
| Python |
import antlr3
import testbase
import unittest
class t041parameters(testbase.ANTLRTest):
    """Rule-parameter test: rule a() returns the arguments it was given."""

    def setUp(self):
        self.compileGrammar()

    def lexerClass(self, base):
        class TLexer(base):
            def recover(self, input, re):
                # no error recovery yet, just crash!
                raise

        return TLexer

    def parserClass(self, base):
        class TParser(base):
            def recover(self, input, re):
                # no error recovery yet, just crash!
                raise

        return TParser

    def testValid1(self):
        lexer = self.getLexer(antlr3.StringStream('a a a'))
        tokens = antlr3.CommonTokenStream(lexer)
        result = self.getParser(tokens).a('foo', 'bar')
        assert result == ('foo', 'bar'), result
if __name__ == '__main__':
    # Run this module's tests directly.
    unittest.main()
| Python |
import antlr3
import testbase
import unittest
class t027eof(testbase.ANTLRTest):
    """EOF-token handling test (marked broken upstream)."""

    def setUp(self):
        self.compileGrammar()

    @testbase.broken("That's not how EOF is supposed to be used", Exception)
    def testValid1(self):
        lexer = self.getLexer(antlr3.StringStream(' '))

        token = lexer.nextToken()
        assert token.type == self.lexerModule.SPACE, token

        token = lexer.nextToken()
        assert token.type == self.lexerModule.END, token
if __name__ == '__main__':
    # Run this module's tests directly.
    unittest.main()
| Python |
import antlr3
import testbase
import unittest
class t016actions(testbase.ANTLRTest):
    """Grammar-action test: declaration() must return the declared name."""

    def setUp(self):
        self.compileGrammar()

    def testValid(self):
        lexer = self.getLexer(antlr3.StringStream("int foo;"))
        tokens = antlr3.CommonTokenStream(lexer)
        declared = self.getParser(tokens).declaration()
        assert declared == 'foo', declared
if __name__ == '__main__':
    # Run this module's tests directly.
    unittest.main()
| Python |
import antlr3
import testbase
import unittest
class t011lexer(testbase.ANTLRTest):
def setUp(self):
self.compileGrammar()
def lexerClass(self, base):
class TLexer(base):
def emitErrorMessage(self, msg):
# report errors to /dev/null
pass
def reportError(self, re):
# no error recovery yet, just crash!
raise re
return TLexer
def testValid(self):
stream = antlr3.StringStream('foobar _Ab98 \n A12sdf')
lexer = self.getLexer(stream)
token = lexer.nextToken()
assert token.type == self.lexerModule.IDENTIFIER
assert token.start == 0, token.start
assert token.stop == 5, token.stop
assert token.text == 'foobar', token.text
token = lexer.nextToken()
assert token.type == self.lexerModule.WS
assert token.start == 6, token.start
assert token.stop == 6, token.stop
assert token.text == ' ', token.text
token = lexer.nextToken()
assert token.type == self.lexerModule.IDENTIFIER
assert token.start == 7, token.start
assert token.stop == 11, token.stop
assert token.text == '_Ab98', token.text
token = lexer.nextToken()
assert token.type == self.lexerModule.WS
assert token.start == 12, token.start
assert token.stop == 14, token.stop
assert token.text == ' \n ', token.text
token = lexer.nextToken()
assert token.type == self.lexerModule.IDENTIFIER
assert token.start == 15, token.start
assert token.stop == 20, token.stop
assert token.text == 'A12sdf', token.text
token = lexer.nextToken()
assert token.type == self.lexerModule.EOF
def testMalformedInput(self):
stream = antlr3.StringStream('a-b')
lexer = self.getLexer(stream)
lexer.nextToken()
try:
token = lexer.nextToken()
raise AssertionError, token
except antlr3.NoViableAltException, exc:
assert exc.unexpectedType == '-', repr(exc.unexpectedType)
assert exc.charPositionInLine == 1, repr(exc.charPositionInLine)
assert exc.line == 1, repr(exc.line)
if __name__ == '__main__':
    # Run this module's tests directly.
    unittest.main()
| Python |
import antlr3
import testbase
import unittest
class t024finally(testbase.ANTLRTest):
    """Checks that a rule's catch and finally actions both run."""

    def setUp(self):
        self.compileGrammar()

    def testValid1(self):
        lexer = self.getLexer(antlr3.StringStream('foobar'))
        tokens = antlr3.CommonTokenStream(lexer)
        events = self.getParser(tokens).prog()
        assert events == ['catch', 'finally'], events
if __name__ == '__main__':
    # Run this module's tests directly.
    unittest.main()
| Python |
import unittest
import textwrap
import antlr3
import antlr3.tree
import stringtemplate3
import testbase
import sys
import os
from StringIO import StringIO
class T(testbase.ANTLRTest):
    """Tests for output=template: rules that produce StringTemplate
    objects, including external template groups, predicates, actions,
    backtracking and rewrite mode.
    """

    def execParser(self, grammar, grammarEntry, input, group=None):
        """Compile `grammar`, run rule `grammarEntry` on `input`.

        group: optional StringTemplateGroup installed as the parser's
        template library. Returns the rendered template or None when the
        rule produced no template.
        """
        lexerCls, parserCls = self.compileInlineGrammar(grammar)

        cStream = antlr3.StringStream(input)
        lexer = lexerCls(cStream)
        tStream = antlr3.CommonTokenStream(lexer)
        parser = parserCls(tStream)
        if group is not None:
            parser.templateLib = group
        result = getattr(parser, grammarEntry)()
        if result.st is not None:
            return result.st.toString()
        return None

    def testInlineTemplate(self):
        grammar = textwrap.dedent(
            r'''grammar T;
            options {
            language=Python;
            output=template;
            }
            a : ID INT
            -> template(id={$ID.text}, int={$INT.text})
            "id=<id>, int=<int>"
            ;
            ID : 'a'..'z'+;
            INT : '0'..'9'+;
            WS : (' '|'\n') {$channel=HIDDEN;} ;
            '''
            )

        found = self.execParser(
            grammar, 'a',
            "abc 34"
            )

        self.failUnlessEqual("id=abc, int=34", found)

    def testExternalTemplate(self):
        templates = textwrap.dedent(
            '''\
            group T;
            expr(args, op) ::= <<
            [<args; separator={<op>}>]
            >>
            '''
            )

        group = stringtemplate3.StringTemplateGroup(
            file=StringIO(templates),
            lexer='angle-bracket'
            )

        grammar = textwrap.dedent(
            r'''grammar T2;
            options {
            language=Python;
            output=template;
            }
            a : r+=arg OP r+=arg
            -> expr(op={$OP.text}, args={$r})
            ;
            arg: ID -> template(t={$ID.text}) "<t>";
            ID : 'a'..'z'+;
            OP: '+';
            WS : (' '|'\n') {$channel=HIDDEN;} ;
            '''
            )

        found = self.execParser(
            grammar, 'a',
            "a + b",
            group
            )

        self.failUnlessEqual("[a+b]", found)

    def testEmptyTemplate(self):
        # A bare '->' rewrite yields no template; execParser returns None.
        grammar = textwrap.dedent(
            r'''grammar T;
            options {
            language=Python;
            output=template;
            }
            a : ID INT
            ->
            ;
            ID : 'a'..'z'+;
            INT : '0'..'9'+;
            WS : (' '|'\n') {$channel=HIDDEN;} ;
            '''
            )

        found = self.execParser(
            grammar, 'a',
            "abc 34"
            )

        self.failUnless(found is None)

    def testList(self):
        grammar = textwrap.dedent(
            r'''grammar T;
            options {
            language=Python;
            output=template;
            }
            a: (r+=b)* EOF
            -> template(r={$r})
            "<r; separator=\",\">"
            ;
            b: ID
            -> template(t={$ID.text}) "<t>"
            ;
            ID : 'a'..'z'+;
            WS : (' '|'\n') {$channel=HIDDEN;} ;
            '''
            )

        found = self.execParser(
            grammar, 'a',
            "abc def ghi"
            )

        self.failUnlessEqual("abc,def,ghi", found)

    def testAction(self):
        # Template constructed directly in an action.
        grammar = textwrap.dedent(
            r'''grammar T;
            options {
            language=Python;
            output=template;
            }
            a: ID
            -> { stringtemplate3.StringTemplate("hello") }
            ;
            ID : 'a'..'z'+;
            WS : (' '|'\n') {$channel=HIDDEN;} ;
            '''
            )

        found = self.execParser(
            grammar, 'a',
            "abc"
            )

        self.failUnlessEqual("hello", found)

    def testTemplateExpressionInAction(self):
        # %{...} template expression assigned to $st.
        grammar = textwrap.dedent(
            r'''grammar T;
            options {
            language=Python;
            output=template;
            }
            a: ID
            { $st = %{"hello"} }
            ;
            ID : 'a'..'z'+;
            WS : (' '|'\n') {$channel=HIDDEN;} ;
            '''
            )

        found = self.execParser(
            grammar, 'a',
            "abc"
            )

        self.failUnlessEqual("hello", found)

    def testTemplateExpressionInAction2(self):
        # %res.attr = value; sets a template attribute from an action.
        grammar = textwrap.dedent(
            r'''grammar T;
            options {
            language=Python;
            output=template;
            }
            a: ID
            {
            res = %{"hello <foo>"}
            %res.foo = "world";
            }
            -> { res }
            ;
            ID : 'a'..'z'+;
            WS : (' '|'\n') {$channel=HIDDEN;} ;
            '''
            )

        found = self.execParser(
            grammar, 'a',
            "abc"
            )

        self.failUnlessEqual("hello world", found)

    def testIndirectTemplateConstructor(self):
        templates = textwrap.dedent(
            '''\
            group T;
            expr(args, op) ::= <<
            [<args; separator={<op>}>]
            >>
            '''
            )

        group = stringtemplate3.StringTemplateGroup(
            file=StringIO(templates),
            lexer='angle-bracket'
            )

        # %({"expr"})(...) looks the template up by name at runtime.
        grammar = textwrap.dedent(
            r'''grammar T;
            options {
            language=Python;
            output=template;
            }
            a: ID
            {
            $st = %({"expr"})(args={[1, 2, 3]}, op={"+"})
            }
            ;
            ID : 'a'..'z'+;
            WS : (' '|'\n') {$channel=HIDDEN;} ;
            '''
            )

        found = self.execParser(
            grammar, 'a',
            "abc",
            group
            )

        self.failUnlessEqual("[1+2+3]", found)

    def testPredicates(self):
        # Predicated rewrites: the first matching {pred}? alternative wins.
        grammar = textwrap.dedent(
            r'''grammar T3;
            options {
            language=Python;
            output=template;
            }
            a : ID INT
            -> {$ID.text=='a'}? template(int={$INT.text})
            "A: <int>"
            -> {$ID.text=='b'}? template(int={$INT.text})
            "B: <int>"
            -> template(int={$INT.text})
            "C: <int>"
            ;
            ID : 'a'..'z'+;
            INT : '0'..'9'+;
            WS : (' '|'\n') {$channel=HIDDEN;} ;
            '''
            )

        found = self.execParser(
            grammar, 'a',
            "b 34"
            )

        self.failUnlessEqual("B: 34", found)

    def testBacktrackingMode(self):
        grammar = textwrap.dedent(
            r'''grammar T4;
            options {
            language=Python;
            output=template;
            backtrack=true;
            }
            a : (ID INT)=> ID INT
            -> template(id={$ID.text}, int={$INT.text})
            "id=<id>, int=<int>"
            ;
            ID : 'a'..'z'+;
            INT : '0'..'9'+;
            WS : (' '|'\n') {$channel=HIDDEN;} ;
            '''
            )

        found = self.execParser(
            grammar, 'a',
            "abc 34"
            )

        self.failUnlessEqual("id=abc, int=34", found)

    def testRewrite(self):
        # rewrite=true: template output is patched into the token stream,
        # preserving whitespace and comments outside the rewritten rule.
        grammar = textwrap.dedent(
            r'''grammar T5;
            options {
            language=Python;
            output=template;
            rewrite=true;
            }
            prog: stat+;
            stat
            : 'if' '(' expr ')' stat
            | 'return' return_expr ';'
            | '{' stat* '}'
            | ID '=' expr ';'
            ;
            return_expr
            : expr
            -> template(t={$text}) <<boom(<t>)>>
            ;
            expr
            : ID
            | INT
            ;
            ID: 'a'..'z'+;
            INT: '0'..'9'+;
            WS: (' '|'\n')+ {$channel=HIDDEN;} ;
            COMMENT: '/*' (options {greedy=false;} : .)* '*/' {$channel = HIDDEN;} ;
            '''
            )

        input = textwrap.dedent(
            '''\
            if ( foo ) {
            b = /* bla */ 2;
            return 1 /* foo */;
            }
            /* gnurz */
            return 12;
            '''
            )

        lexerCls, parserCls = self.compileInlineGrammar(grammar)

        cStream = antlr3.StringStream(input)
        lexer = lexerCls(cStream)
        tStream = antlr3.TokenRewriteStream(lexer)
        parser = parserCls(tStream)
        result = parser.prog()

        found = tStream.toString()

        expected = textwrap.dedent(
            '''\
            if ( foo ) {
            b = /* bla */ 2;
            return boom(1) /* foo */;
            }
            /* gnurz */
            return boom(12);
            '''
            )

        self.failUnlessEqual(expected, found)

    def testTreeRewrite(self):
        # Same rewrite, but driven by a tree grammar over the parser's AST.
        grammar = textwrap.dedent(
            r'''grammar T6;
            options {
            language=Python;
            output=AST;
            }
            tokens {
            BLOCK;
            ASSIGN;
            }
            prog: stat+;
            stat
            : IF '(' e=expr ')' s=stat
            -> ^(IF $e $s)
            | RETURN expr ';'
            -> ^(RETURN expr)
            | '{' stat* '}'
            -> ^(BLOCK stat*)
            | ID '=' expr ';'
            -> ^(ASSIGN ID expr)
            ;
            expr
            : ID
            | INT
            ;
            IF: 'if';
            RETURN: 'return';
            ID: 'a'..'z'+;
            INT: '0'..'9'+;
            WS: (' '|'\n')+ {$channel=HIDDEN;} ;
            COMMENT: '/*' (options {greedy=false;} : .)* '*/' {$channel = HIDDEN;} ;
            '''
            )

        treeGrammar = textwrap.dedent(
            r'''tree grammar T6Walker;
            options {
            language=Python;
            tokenVocab=T6;
            ASTLabelType=CommonTree;
            output=template;
            rewrite=true;
            }
            prog: stat+;
            stat
            : ^(IF expr stat)
            | ^(RETURN return_expr)
            | ^(BLOCK stat*)
            | ^(ASSIGN ID expr)
            ;
            return_expr
            : expr
            -> template(t={$text}) <<boom(<t>)>>
            ;
            expr
            : ID
            | INT
            ;
            '''
            )

        input = textwrap.dedent(
            '''\
            if ( foo ) {
            b = /* bla */ 2;
            return 1 /* foo */;
            }
            /* gnurz */
            return 12;
            '''
            )

        lexerCls, parserCls = self.compileInlineGrammar(grammar)
        walkerCls = self.compileInlineGrammar(treeGrammar)

        cStream = antlr3.StringStream(input)
        lexer = lexerCls(cStream)
        tStream = antlr3.TokenRewriteStream(lexer)
        parser = parserCls(tStream)
        tree = parser.prog().tree
        nodes = antlr3.tree.CommonTreeNodeStream(tree)
        # The node stream needs the token stream so rewrites map back to text.
        nodes.setTokenStream(tStream)
        walker = walkerCls(nodes)
        walker.prog()

        found = tStream.toString()

        expected = textwrap.dedent(
            '''\
            if ( foo ) {
            b = /* bla */ 2;
            return boom(1) /* foo */;
            }
            /* gnurz */
            return boom(12);
            '''
            )

        self.failUnlessEqual(expected, found)
if __name__ == '__main__':
unittest.main()
| Python |
import antlr3
import testbase
import unittest
class t037rulePropertyRef(testbase.ANTLRTest):
    """Checks $start/$stop/$text rule property references exposed by an
    action in the grammar (returned via the 'bla' attribute)."""

    def setUp(self):
        self.compileGrammar()

    def lexerClass(self, base):
        class TLexer(base):
            def recover(self, input, re):
                # no error recovery yet, just crash!
                raise
        return TLexer

    def parserClass(self, base):
        class TParser(base):
            def recover(self, input, re):
                # no error recovery yet, just crash!
                raise
        return TParser

    def testValid1(self):
        cStream = antlr3.StringStream(' a a a a ')
        lexer = self.getLexer(cStream)
        tStream = antlr3.CommonTokenStream(lexer)
        parser = self.getParser(tStream)
        start, stop, text = parser.a().bla

        # Use assertEqual rather than bare asserts: the checks survive
        # python -O and report both values on failure.
        # first token of rule b is the 2nd token (counting hidden tokens)
        self.assertEqual(start.index, 1, start)
        # last token of rule b is the 8th token (counting hidden tokens);
        # the original comment said "first token ... 7th" by copy-paste.
        self.assertEqual(stop.index, 7, stop)
        self.assertEqual(text, "a a a a")
if __name__ == '__main__':
unittest.main()
| Python |
import antlr3
import testbase
import unittest
class t009lexer(testbase.ANTLRTest):
    """Lexer with a character-range rule (DIGIT : '0'..'9')."""

    def setUp(self):
        self.compileGrammar()

    def lexerClass(self, base):
        class TLexer(base):
            def emitErrorMessage(self, msg):
                # report errors to /dev/null
                pass

            def reportError(self, re):
                # no error recovery yet, just crash!
                raise re
        return TLexer

    def _checkToken(self, token, ttype, start, stop, text):
        # One DIGIT token: verify its type and character span/text.
        self.assertEqual(token.type, ttype)
        self.assertEqual(token.start, start, token.start)
        self.assertEqual(token.stop, stop, token.stop)
        self.assertEqual(token.text, text, token.text)

    def testValid(self):
        stream = antlr3.StringStream('085')
        lexer = self.getLexer(stream)

        DIGIT = self.lexerModule.DIGIT
        self._checkToken(lexer.nextToken(), DIGIT, 0, 0, '0')
        self._checkToken(lexer.nextToken(), DIGIT, 1, 1, '8')
        self._checkToken(lexer.nextToken(), DIGIT, 2, 2, '5')

        self.assertEqual(lexer.nextToken().type, self.lexerModule.EOF)

    def testMalformedInput(self):
        stream = antlr3.StringStream('2a')
        lexer = self.getLexer(stream)

        lexer.nextToken()
        try:
            token = lexer.nextToken()
            # Py2/Py3-compatible raise (was: raise AssertionError, token)
            raise AssertionError(token)
        except antlr3.MismatchedRangeException as exc:
            self.assertEqual(exc.a, '0', repr(exc.a))
            self.assertEqual(exc.b, '9', repr(exc.b))
            self.assertEqual(exc.unexpectedType, 'a', repr(exc.unexpectedType))
            self.assertEqual(exc.charPositionInLine, 1, repr(exc.charPositionInLine))
            self.assertEqual(exc.line, 1, repr(exc.line))
if __name__ == '__main__':
unittest.main()
| Python |
import antlr3
import testbase
import unittest
class t008lexer(testbase.ANTLRTest):
    """Lexer with an optional subrule (FOO : 'f' 'a'?)."""

    def setUp(self):
        self.compileGrammar()

    def lexerClass(self, base):
        class TLexer(base):
            def emitErrorMessage(self, msg):
                # report errors to /dev/null
                pass

            def reportError(self, re):
                # no error recovery yet, just crash!
                raise re
        return TLexer

    def _checkToken(self, token, ttype, start, stop, text):
        # One FOO token: verify its type and character span/text.
        self.assertEqual(token.type, ttype)
        self.assertEqual(token.start, start, token.start)
        self.assertEqual(token.stop, stop, token.stop)
        self.assertEqual(token.text, text, token.text)

    def testValid(self):
        stream = antlr3.StringStream('ffaf')
        lexer = self.getLexer(stream)

        FOO = self.lexerModule.FOO
        self._checkToken(lexer.nextToken(), FOO, 0, 0, 'f')
        self._checkToken(lexer.nextToken(), FOO, 1, 2, 'fa')
        self._checkToken(lexer.nextToken(), FOO, 3, 3, 'f')

        self.assertEqual(lexer.nextToken().type, self.lexerModule.EOF)

    def testMalformedInput(self):
        stream = antlr3.StringStream('fafb')
        lexer = self.getLexer(stream)

        lexer.nextToken()
        lexer.nextToken()
        try:
            token = lexer.nextToken()
            # Py2/Py3-compatible raise (was: raise AssertionError, token)
            raise AssertionError(token)
        except antlr3.MismatchedTokenException as exc:
            self.assertEqual(exc.unexpectedType, 'b', repr(exc.unexpectedType))
            self.assertEqual(exc.charPositionInLine, 3, repr(exc.charPositionInLine))
            self.assertEqual(exc.line, 1, repr(exc.line))
if __name__ == '__main__':
unittest.main()
| Python |
import antlr3
import testbase
import unittest
class t007lexer(testbase.ANTLRTest):
    """Lexer with a closure subrule; greedy matching consumes the longest
    possible FOO token."""

    def setUp(self):
        self.compileGrammar()

    def lexerClass(self, base):
        class TLexer(base):
            def emitErrorMessage(self, msg):
                # report errors to /dev/null
                pass

            def reportError(self, re):
                # no error recovery yet, just crash!
                raise re
        return TLexer

    def _checkToken(self, token, ttype, start, stop, text):
        # One FOO token: verify its type and character span/text.
        self.assertEqual(token.type, ttype)
        self.assertEqual(token.start, start, token.start)
        self.assertEqual(token.stop, stop, token.stop)
        self.assertEqual(token.text, text, token.text)

    def testValid(self):
        stream = antlr3.StringStream('fofababbooabb')
        lexer = self.getLexer(stream)

        FOO = self.lexerModule.FOO
        self._checkToken(lexer.nextToken(), FOO, 0, 1, 'fo')
        self._checkToken(lexer.nextToken(), FOO, 2, 12, 'fababbooabb')

        self.assertEqual(lexer.nextToken().type, self.lexerModule.EOF)

    def testMalformedInput(self):
        stream = antlr3.StringStream('foaboao')
        lexer = self.getLexer(stream)

        try:
            token = lexer.nextToken()
            # Py2/Py3-compatible raise (was: raise AssertionError, token)
            raise AssertionError(token)
        except antlr3.EarlyExitException as exc:
            self.assertEqual(exc.unexpectedType, 'o', repr(exc.unexpectedType))
            self.assertEqual(exc.charPositionInLine, 6, repr(exc.charPositionInLine))
            self.assertEqual(exc.line, 1, repr(exc.line))
if __name__ == '__main__':
unittest.main()
| Python |
import antlr3
import testbase
import unittest
class t036multipleReturnValues(testbase.ANTLRTest):
    """A rule with multiple named return values exposes each as an
    attribute on the returned object."""

    def setUp(self):
        self.compileGrammar()

    def lexerClass(self, base):
        class TLexer(base):
            def recover(self, input, re):
                # no error recovery yet, just crash!
                raise
        return TLexer

    def parserClass(self, base):
        class TParser(base):
            def recover(self, input, re):
                # no error recovery yet, just crash!
                raise
        return TParser

    def testValid1(self):
        cStream = antlr3.StringStream(' a')

        lexer = self.getLexer(cStream)
        tStream = antlr3.CommonTokenStream(lexer)
        parser = self.getParser(tStream)
        ret = parser.a()
        # assertEqual instead of bare asserts: survives python -O and
        # reports both values on failure.
        self.assertEqual(ret.foo, 'foo', ret.foo)
        self.assertEqual(ret.bar, 'bar', ret.bar)
if __name__ == '__main__':
unittest.main()
| Python |
import unittest
import textwrap
import antlr3
import antlr3.tree
import stringtemplate3
import testbase
import sys
import os
from StringIO import StringIO
# FIXME: port other tests from TestLexer.java
class T(testbase.ANTLRTest):
    """Checks that a lexer rule reference does not leak $channel: WS sets
    HIDDEN for itself only, so token A stays on the default channel."""

    def execParser(self, grammar, grammarEntry, input):
        # Compile the inline grammar, lex+parse input and invoke the given
        # start rule, returning its return value.
        lexerCls, parserCls = self.compileInlineGrammar(grammar)

        cStream = antlr3.StringStream(input)
        lexer = lexerCls(cStream)
        tStream = antlr3.CommonTokenStream(lexer)
        parser = parserCls(tStream)
        result = getattr(parser, grammarEntry)()
        return result

    def testRefToRuleDoesNotSetChannel(self):
        # this must set channel of A to HIDDEN. $channel is local to rule
        # like $type.
        grammar = textwrap.dedent(
            r'''
            grammar P;
            options {
              language=Python;
            }
            a returns [foo]: A EOF { $foo = '\%s, channel=\%d' \% ($A.text, $A.channel); } ;
            A : '-' WS I ;
            I : '0'..'9'+ ;
            WS : (' '|'\n') {$channel=HIDDEN;} ;
            ''')

        found = self.execParser(
            grammar, 'a',
            "- 34"
            )
        # assertEqual replaces the deprecated failUnlessEqual alias.
        self.assertEqual("- 34, channel=0", found)
if __name__ == '__main__':
unittest.main()
| Python |
import antlr3
import testbase
import unittest
class t026actions(testbase.ANTLRTest):
    """Checks that @init/@after/@finally parser actions and per-token lexer
    actions fire, collecting their output via capture()."""

    def parserClass(self, base):
        class TParser(base):
            def __init__(self, *args, **kwargs):
                base.__init__(self, *args, **kwargs)
                self._errors = []
                self._output = ""

            def capture(self, t):
                # grammar actions funnel their output through here
                self._output += t

            def emitErrorMessage(self, msg):
                # collect instead of printing to stderr
                self._errors.append(msg)

        return TParser

    def lexerClass(self, base):
        class TLexer(base):
            def __init__(self, *args, **kwargs):
                base.__init__(self, *args, **kwargs)
                self._errors = []
                self._output = ""

            def capture(self, t):
                # grammar actions funnel their output through here
                self._output += t

            def emitErrorMessage(self, msg):
                # collect instead of printing to stderr
                self._errors.append(msg)

        return TLexer

    def setUp(self):
        self.compileGrammar()

    def testValid1(self):
        char_stream = antlr3.StringStream('foobar _Ab98 \n A12sdf')
        lexer = self.getLexer(char_stream)
        token_stream = antlr3.CommonTokenStream(lexer)
        parser = self.getParser(token_stream)
        parser.prog()

        self.assertEqual(parser._output, 'init;after;finally;')
        self.assertEqual(
            lexer._output,
            'action;u\'foobar\' 4 1 0 -1 0 0 5;attribute;action;u\'_Ab98\' 4 1 7 -1 0 7 11;attribute;action;u\'A12sdf\' 4 2 1 -1 0 15 20;attribute;')
if __name__ == '__main__':
unittest.main()
| Python |
import antlr3
import testbase
import unittest
class t029synpredgate(testbase.ANTLRTest):
    """Smoke test for a lexer using a gated syntactic predicate."""

    def setUp(self):
        self.compileGrammar()

    def testValid1(self):
        # Only checks that the first token can be fetched without raising;
        # no assertions are made on the token itself.
        input_stream = antlr3.StringStream('ac')
        lexer = self.getLexer(input_stream)
        lexer.nextToken()
if __name__ == '__main__':
unittest.main()
| Python |
import antlr3
import testbase
import unittest
class t030specialStates(testbase.ANTLRTest):
    """Smoke tests for DFA special states; each case just has to parse
    without raising. The four tests shared identical boilerplate, now
    factored into _parse()."""

    def setUp(self):
        self.compileGrammar()

    def _parse(self, input, cond=None):
        # Build lexer/parser for `input`, optionally override parser.cond
        # before running the start rule 'r'.
        cStream = antlr3.StringStream(input)
        lexer = self.getLexer(cStream)
        tStream = antlr3.CommonTokenStream(lexer)
        parser = self.getParser(tStream)
        if cond is not None:
            parser.cond = cond
        return parser.r()

    def testValid1(self):
        self._parse('foo')

    def testValid2(self):
        self._parse('foo name1')

    def testValid3(self):
        self._parse('bar name1', cond=False)

    def testValid4(self):
        self._parse('bar name1 name2', cond=False)
if __name__ == '__main__':
unittest.main()
| Python |
import antlr3
import testbase
import unittest
class t013parser(testbase.ANTLRTest):
    """Parser actions record 'before'/'after' events around the rule."""

    def setUp(self):
        self.compileGrammar()

    def testValid(self):
        cStream = antlr3.StringStream('foobar')
        lexer = self.getLexer(cStream)
        tStream = antlr3.CommonTokenStream(lexer)
        parser = self.getParser(tStream)
        parser.document()

        # assertEqual instead of a bare assert: survives python -O and
        # shows the actual event list on failure.
        self.assertEqual(parser.events, ['before', 'after'])
if __name__ == '__main__':
unittest.main()
| Python |
import antlr3
import testbase
import unittest
import os
import sys
from cStringIO import StringIO
import difflib
import textwrap
class t012lexerXML(testbase.ANTLRTest):
    """XML lexer test: lexes a companion .input file and compares against
    a companion .output file; malformed inputs must raise specific
    recognition exceptions."""

    def setUp(self):
        self.compileGrammar('t012lexerXMLLexer.g')

    def lexerClass(self, base):
        class TLexer(base):
            def emitErrorMessage(self, msg):
                # report errors to /dev/null
                pass

            def reportError(self, re):
                # no error recovery yet, just crash!
                raise re
        return TLexer

    def _lexAll(self, lexer):
        # Drain the lexer completely; any lexing error surfaces as an
        # exception (see reportError above).
        while True:
            token = lexer.nextToken()
            if token.type == antlr3.EOF:
                break

    def testValid(self):
        inputPath = os.path.splitext(__file__)[0] + '.input'
        # Py2 unicode(): decode the UTF-8 test file before lexing.
        stream = antlr3.StringStream(unicode(open(inputPath).read(), 'utf-8'))
        lexer = self.getLexer(stream)

        while True:
            token = lexer.nextToken()
            if token.type == self.lexerModule.EOF:
                break

        output = unicode(lexer.outbuf.getvalue(), 'utf-8')

        outputPath = os.path.splitext(__file__)[0] + '.output'
        testOutput = unicode(open(outputPath).read(), 'utf-8')

        if output != testOutput:
            d = difflib.Differ()
            r = d.compare(output.splitlines(1), testOutput.splitlines(1))
            self.fail(
                ''.join([l.encode('ascii', 'backslashreplace') for l in r])
                )

    def testMalformedInput1(self):
        input = textwrap.dedent("""\
            <?xml version='1.0'?>
            <document d>
            </document>
            """)

        stream = antlr3.StringStream(input)
        lexer = self.getLexer(stream)

        try:
            self._lexAll(lexer)
            raise AssertionError
        except antlr3.NoViableAltException as exc:
            self.assertEqual(exc.unexpectedType, '>', repr(exc.unexpectedType))
            self.assertEqual(exc.charPositionInLine, 11, repr(exc.charPositionInLine))
            self.assertEqual(exc.line, 2, repr(exc.line))

    def testMalformedInput2(self):
        input = textwrap.dedent("""\
            <?tml version='1.0'?>
            <document>
            </document>
            """)

        stream = antlr3.StringStream(input)
        lexer = self.getLexer(stream)

        try:
            self._lexAll(lexer)
            raise AssertionError
        except antlr3.MismatchedSetException as exc:
            self.assertEqual(exc.unexpectedType, 't', repr(exc.unexpectedType))
            self.assertEqual(exc.charPositionInLine, 2, repr(exc.charPositionInLine))
            self.assertEqual(exc.line, 1, repr(exc.line))

    def testMalformedInput3(self):
        input = textwrap.dedent("""\
            <?xml version='1.0'?>
            <docu ment attr="foo">
            </document>
            """)

        stream = antlr3.StringStream(input)
        lexer = self.getLexer(stream)

        try:
            self._lexAll(lexer)
            raise AssertionError
        except antlr3.NoViableAltException as exc:
            self.assertEqual(exc.unexpectedType, 'a', repr(exc.unexpectedType))
            self.assertEqual(exc.charPositionInLine, 11, repr(exc.charPositionInLine))
            self.assertEqual(exc.line, 2, repr(exc.line))
if __name__ == '__main__':
unittest.main()
## # run an infinite loop with randomly mangled input
## while True:
## print "ping"
## input = """\
## <?xml version='1.0'?>
## <!DOCTYPE component [
## <!ELEMENT component (PCDATA|sub)*>
## <!ATTLIST component
## attr CDATA #IMPLIED
## attr2 CDATA #IMPLIED
## >
## <!ELMENT sub EMPTY>
## ]>
## <component attr="val'ue" attr2='val"ue'>
## <!-- This is a comment -->
## Text
## <![CDATA[huhu]]>
## &
## <
## <?xtal cursor='11'?>
## <sub/>
## <sub></sub>
## </component>
## """
## import random
## input = list(input) # make it mutable
## for _ in range(3):
## p1 = random.randrange(len(input))
## p2 = random.randrange(len(input))
## c1 = input[p1]
## input[p1] = input[p2]
## input[p2] = c1
## input = ''.join(input) # back to string
## stream = antlr3.StringStream(input)
## lexer = Lexer(stream)
## try:
## while True:
## token = lexer.nextToken()
## if token.type == EOF:
## break
## except antlr3.RecognitionException, exc:
## print exc
## for l in input.splitlines()[0:exc.line]:
## print l
## print ' '*exc.charPositionInLine + '^'
## except BaseException, exc:
## print '\n'.join(['%02d: %s' % (idx+1, l) for idx, l in enumerate(input.splitlines())])
## print "%s at %d:%d" % (exc, stream.line, stream.charPositionInLine)
## print
## raise
| Python |
import unittest
import imp
import os
import errno
import sys
import glob
import re
import tempfile
import shutil
import inspect
from distutils.errors import *
import antlr3
def unlink(path):
    """Remove *path*, silently ignoring the case where it does not exist.

    Any OSError other than ENOENT (e.g. permission denied) is re-raised.
    """
    try:
        os.unlink(path)
    except OSError as exc:  # 'as' form works on Py2.6+ and Py3
        if exc.errno != errno.ENOENT:
            raise
# At least on MacOSX tempdir (/tmp) is a symlink. It's sometimes dereferences,
# sometimes not, breaking the inspect.getmodule() function.
testbasedir = os.path.join(
os.path.realpath(tempfile.gettempdir()),
'antlr3-test')
class BrokenTest(unittest.TestCase.failureException):
    """Raised when a test marked @broken unexpectedly succeeds."""

    def __repr__(self):
        name, reason = self.args
        return '{0}: {1}: {2} works now'.format(
            self.__class__.__name__, name, reason)


def broken(reason, *exceptions):
    '''Indicates a failing (or erroneous) test case fails that should succeed.
    If the test fails with an exception, list the exception type in args'''
    def wrapper(test_method):
        # Fall back to the generic failure exception when no explicit
        # exception types were given.
        expected = exceptions or unittest.TestCase.failureException

        def replacement(*args, **kwargs):
            try:
                test_method(*args, **kwargs)
            except expected:
                # Still broken, as documented: swallow the failure.
                pass
            else:
                # The test passed although it is marked broken.
                raise BrokenTest(test_method.__name__, reason)

        replacement.__doc__ = test_method.__doc__
        replacement.__name__ = 'XXX_' + test_method.__name__
        replacement.todo = reason
        return replacement
    return wrapper
# Caches shared by all ANTLRTest instances: computed grammar dependencies
# and grammars that already failed to compile (so we don't retry).
dependencyCache = {}
compileErrorCache = {}

# setup java CLASSPATH
if 'CLASSPATH' not in os.environ:
    cp = []

    baseDir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..'))
    libDir = os.path.join(baseDir, 'lib')

    # All jars required to run the ANTLR tool; the three copy-pasted
    # existence checks are folded into one loop. ("Grab" typo fixed.)
    for jarName in ('stringtemplate-3.1b1.jar',
                    'antlr-2.7.7.jar',
                    'junit-4.2.jar'):
        jar = os.path.join(libDir, jarName)
        if not os.path.isfile(jar):
            raise DistutilsFileError(
                "Missing file '%s'. Grab it from a distribution package."
                % jar,
                )
        cp.append(jar)

    cp.append(os.path.join(baseDir, 'runtime', 'Python', 'build'))

    classpath = '-cp "' + ':'.join([os.path.abspath(p) for p in cp]) + '"'

else:
    classpath = ''
class ANTLRTest(unittest.TestCase):
    """Base class for the Python runtime tests.

    Handles invoking the ANTLR tool (via java) on grammar files, rebuilds
    driven by a cached dependency analysis, and loading the generated
    lexer / parser / walker modules from a per-test scratch directory.
    """

    def __init__(self, *args, **kwargs):
        unittest.TestCase.__init__(self, *args, **kwargs)

        # Module/class names are used to build the scratch directory path.
        self.moduleName = os.path.splitext(os.path.basename(sys.modules[self.__module__].__file__))[0]
        self.className = self.__class__.__name__
        self._baseDir = None

        self.lexerModule = None
        self.parserModule = None

        self.grammarName = None
        self.grammarType = None

        # Version of the ANTLR tool under test, taken from $ANTLRVERSION.
        self.antlr_version = antlr3.version_str_to_tuple(
            os.environ.get('ANTLRVERSION', 'HEAD'))

    @property
    def baseDir(self):
        """Scratch directory for the currently running test, lazily created.

        The current test name is discovered by walking the stack for the
        first frame of this test's module whose code name starts with
        'test'.
        """
        if self._baseDir is None:
            testName = 'unknownTest'
            for frame in inspect.stack():
                code = frame[0].f_code
                codeMod = inspect.getmodule(code)
                if codeMod is None:
                    continue

                # skip frames not in requested module
                if codeMod is not sys.modules[self.__module__]:
                    continue

                # skip some unwanted names
                if code.co_name in ('nextToken', '<module>'):
                    continue

                if code.co_name.startswith('test'):
                    testName = code.co_name
                    break

            self._baseDir = os.path.join(
                testbasedir,
                self.moduleName, self.className, testName)
            if not os.path.isdir(self._baseDir):
                os.makedirs(self._baseDir)

        return self._baseDir

    def _invokeantlr(self, dir, file, options, javaOptions=''):
        """Run the ANTLR tool on `file` inside `dir`; raise on any error.

        Failure is detected both from lines starting with 'error(' and
        from a non-zero exit status.
        """
        cmd = 'cd %s; java %s %s org.antlr.Tool -o . %s %s 2>&1' % (
            dir, javaOptions, classpath, options, file
            )
        fp = os.popen(cmd)
        output = ''
        failed = False
        for line in fp:
            output += line

            if line.startswith('error('):
                failed = True

        rc = fp.close()
        if rc is not None:
            failed = True

        if failed:
            raise RuntimeError(
                "Failed to compile grammar '%s':\n%s\n\n" % (file, cmd)
                + output
                )

    def compileGrammar(self, grammarName=None, options='', javaOptions=''):
        """Compile a grammar file shipped next to the test module, rebuilding
        only when the ANTLR-reported dependencies are out of date."""
        if grammarName is None:
            grammarName = self.moduleName + '.g'

        self._baseDir = os.path.join(
            testbasedir,
            self.moduleName)
        if not os.path.isdir(self._baseDir):
            os.makedirs(self._baseDir)

        if self.grammarName is None:
            self.grammarName = os.path.splitext(grammarName)[0]

        grammarPath = os.path.join(os.path.dirname(os.path.abspath(__file__)), grammarName)

        # get type and name from first grammar line
        grammar = open(grammarPath, 'r').read()
        m = re.match(r'\s*((lexer|parser|tree)\s+|)grammar\s+(\S+);', grammar, re.MULTILINE)
        assert m is not None, grammar
        self.grammarType = m.group(2)
        if self.grammarType is None:
            self.grammarType = 'combined'

        # Bug fix: this sanity check was nested under a second
        # 'if self.grammarType is None:' and therefore never executed
        # (cf. the correct version in writeInlineGrammar below).
        assert self.grammarType in ('lexer', 'parser', 'tree', 'combined'), self.grammarType

        # don't try to rebuild grammar, if it already failed
        if grammarName in compileErrorCache:
            return

        try:
            # get dependencies from antlr
            if grammarName in dependencyCache:
                dependencies = dependencyCache[grammarName]

            else:
                dependencies = []
                cmd = ('cd %s; java %s %s org.antlr.Tool -o . -depend %s 2>&1'
                       % (self.baseDir, javaOptions, classpath, grammarPath)
                       )

                output = ""
                failed = False

                fp = os.popen(cmd)
                for line in fp:
                    output += line

                    if line.startswith('error('):
                        failed = True
                    elif ':' in line:
                        # each output line is '<target>: <source>'
                        a, b = line.strip().split(':', 1)
                        dependencies.append(
                            (os.path.join(self.baseDir, a.strip()),
                             [os.path.join(self.baseDir, b.strip())])
                            )

                rc = fp.close()
                if rc is not None:
                    failed = True

                if failed:
                    raise RuntimeError(
                        "antlr -depend failed with code %s on grammar '%s':\n\n"
                        % (rc, grammarName)
                        + cmd
                        + "\n"
                        + output
                        )

                # add dependencies to my .stg files
                templateDir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..', 'src', 'org', 'antlr', 'codegen', 'templates', 'Python'))
                templates = glob.glob(os.path.join(templateDir, '*.stg'))

                for dst, src in dependencies:
                    src.extend(templates)

                dependencyCache[grammarName] = dependencies

            # rebuild if any target is missing or older than any source
            rebuild = False
            for dest, sources in dependencies:
                if not os.path.isfile(dest):
                    rebuild = True
                    break

                for source in sources:
                    if os.path.getmtime(source) > os.path.getmtime(dest):
                        rebuild = True
                        break

            if rebuild:
                self._invokeantlr(self.baseDir, grammarPath, options, javaOptions)

        except:
            # mark grammar as broken, then let the error propagate
            compileErrorCache[grammarName] = True
            raise

    def lexerClass(self, base):
        """Optionally build a subclass of generated lexer class"""
        return base

    def parserClass(self, base):
        """Optionally build a subclass of generated parser class"""
        return base

    def walkerClass(self, base):
        """Optionally build a subclass of generated walker class"""
        return base

    def __load_module(self, name):
        modFile, modPathname, modDescription \
                 = imp.find_module(name, [self.baseDir])

        return imp.load_module(
            name, modFile, modPathname, modDescription
            )

    def getLexer(self, *args, **kwargs):
        """Build lexer instance. Arguments are passed to lexer.__init__()."""
        # Since ANTLR 3.1 a pure lexer grammar generates '<Name>.py'
        # instead of '<Name>Lexer.py'.
        if self.grammarType == 'lexer' and self.antlr_version >= (3, 1, 0, 0):
            self.lexerModule = self.__load_module(self.grammarName)
            cls = getattr(self.lexerModule, self.grammarName)
        else:
            self.lexerModule = self.__load_module(self.grammarName + 'Lexer')
            cls = getattr(self.lexerModule, self.grammarName + 'Lexer')

        cls = self.lexerClass(cls)

        lexer = cls(*args, **kwargs)

        return lexer

    def getParser(self, *args, **kwargs):
        """Build parser instance. Arguments are passed to parser.__init__()."""
        if self.grammarType == 'parser':
            # NOTE(review): the module is stored in self.lexerModule here,
            # not self.parserModule — looks like a copy-paste slip, but
            # existing tests may rely on it, so behavior is kept.
            self.lexerModule = self.__load_module(self.grammarName)
            cls = getattr(self.lexerModule, self.grammarName)
        else:
            self.parserModule = self.__load_module(self.grammarName + 'Parser')
            cls = getattr(self.parserModule, self.grammarName + 'Parser')
        cls = self.parserClass(cls)

        parser = cls(*args, **kwargs)

        return parser

    def getWalker(self, *args, **kwargs):
        """Build walker instance. Arguments are passed to walker.__init__()."""
        self.walkerModule = self.__load_module(self.grammarName + 'Walker')
        cls = getattr(self.walkerModule, self.grammarName + 'Walker')
        cls = self.walkerClass(cls)

        walker = cls(*args, **kwargs)

        return walker

    def writeInlineGrammar(self, grammar):
        """Write an inline grammar string to a temp .g file; returns
        (grammarName, grammarPath, grammarType)."""
        # get type and name from first grammar line
        m = re.match(r'\s*((lexer|parser|tree)\s+|)grammar\s+(\S+);', grammar, re.MULTILINE)
        assert m is not None, grammar
        grammarType = m.group(2)
        if grammarType is None:
            grammarType = 'combined'
        grammarName = m.group(3)

        assert grammarType in ('lexer', 'parser', 'tree', 'combined'), grammarType

        grammarPath = os.path.join(self.baseDir, grammarName + '.g')

        # dump temp grammar file
        fp = open(grammarPath, 'w')
        fp.write(grammar)
        fp.close()

        return grammarName, grammarPath, grammarType

    def writeFile(self, name, contents):
        """Write `contents` to `name` inside baseDir; returns the path."""
        # (removed an unused local that computed the test module's dir)
        path = os.path.join(self.baseDir, name)

        fp = open(path, 'w')
        fp.write(contents)
        fp.close()

        return path

    def compileInlineGrammar(self, grammar, options='', javaOptions='',
                             returnModule=False):
        """Compile an inline grammar string and return the generated
        class(es) — or module(s) when returnModule is true.

        combined -> (lexerCls, parserCls); lexer/parser/tree -> single class.
        """
        # write grammar file
        grammarName, grammarPath, grammarType = self.writeInlineGrammar(grammar)

        # compile it
        self._invokeantlr(
            os.path.dirname(grammarPath),
            os.path.basename(grammarPath),
            options,
            javaOptions
            )

        if grammarType == 'combined':
            lexerMod = self.__load_module(grammarName + 'Lexer')
            parserMod = self.__load_module(grammarName + 'Parser')
            if returnModule:
                return lexerMod, parserMod

            lexerCls = getattr(lexerMod, grammarName + 'Lexer')
            lexerCls = self.lexerClass(lexerCls)
            parserCls = getattr(parserMod, grammarName + 'Parser')
            parserCls = self.parserClass(parserCls)

            return lexerCls, parserCls

        if grammarType == 'lexer':
            lexerMod = self.__load_module(grammarName)
            if returnModule:
                return lexerMod

            lexerCls = getattr(lexerMod, grammarName)
            lexerCls = self.lexerClass(lexerCls)

            return lexerCls

        if grammarType == 'parser':
            parserMod = self.__load_module(grammarName)
            if returnModule:
                return parserMod

            parserCls = getattr(parserMod, grammarName)
            parserCls = self.parserClass(parserCls)

            return parserCls

        if grammarType == 'tree':
            walkerMod = self.__load_module(grammarName)
            if returnModule:
                return walkerMod

            walkerCls = getattr(walkerMod, grammarName)
            walkerCls = self.walkerClass(walkerCls)

            return walkerCls
| Python |
import os
import sys
import antlr3
import testbase
import unittest
from cStringIO import StringIO
import difflib
class t020fuzzy(testbase.ANTLRTest):
    """Fuzzy lexer test: lex the companion .input file and compare the
    text collected in lexer.output against the companion .output file."""

    def setUp(self):
        self.compileGrammar('t020fuzzyLexer.g')

    def testValid(self):
        base = os.path.splitext(__file__)[0]

        stream = antlr3.StringStream(open(base + '.input').read())
        lexer = self.getLexer(stream)

        # drain the token stream; lexer actions write into lexer.output
        token = lexer.nextToken()
        while token.type != antlr3.EOF:
            token = lexer.nextToken()

        output = lexer.output.getvalue()
        testOutput = open(base + '.output').read()

        if output != testOutput:
            # show a unified diff of got vs. expected output
            lines = difflib.Differ().compare(
                output.splitlines(1), testOutput.splitlines(1))
            self.fail(
                ''.join([l.encode('ascii', 'backslashreplace') for l in lines])
                )
if __name__ == '__main__':
unittest.main()
| Python |
import antlr3
import testbase
import unittest
import os
import sys
from cStringIO import StringIO
import difflib
class t018llstar(testbase.ANTLRTest):
    """LL(*) parser test: parse the companion .input file and compare the
    text collected in parser.output against the companion .output file."""

    def setUp(self):
        self.compileGrammar()

    def testValid(self):
        base = os.path.splitext(__file__)[0]

        cStream = antlr3.StringStream(open(base + '.input').read())
        lexer = self.getLexer(cStream)
        tStream = antlr3.CommonTokenStream(lexer)
        parser = self.getParser(tStream)
        parser.program()

        output = parser.output.getvalue()
        testOutput = open(base + '.output').read()

        if output != testOutput:
            # show a diff of got vs. expected output
            lines = difflib.Differ().compare(
                output.splitlines(1), testOutput.splitlines(1))
            self.fail(
                ''.join([l.encode('ascii', 'backslashreplace') for l in lines])
                )
if __name__ == '__main__':
unittest.main()
## # run an infinite loop with randomly mangled input
## while True:
## print "ping"
## input = open(inputPath).read()
## import random
## input = list(input) # make it mutable
## for _ in range(3):
## p1 = random.randrange(len(input))
## p2 = random.randrange(len(input))
## c1 = input[p1]
## input[p1] = input[p2]
## input[p2] = c1
## input = ''.join(input) # back to string
## try:
## cStream = antlr3.StringStream(input)
## lexer = Lexer(cStream)
## tStream = antlr3.CommonTokenStream(lexer)
## parser = TestParser(tStream)
## parser.program()
## except antlr3.RecognitionException, exc:
## print exc
## for l in input.splitlines()[0:exc.line]:
## print l
## print ' '*exc.charPositionInLine + '^'
## except BaseException, exc:
## print '\n'.join(['%02d: %s' % (idx+1, l) for idx, l in enumerate(input.splitlines())])
## print "%s at %d:%d" % (exc, cStream.line, cStream.charPositionInLine)
## print
## raise
| Python |
import antlr3
import testbase
import unittest
class t017parser(testbase.ANTLRTest):
    """Error reporting test: the parser collects (rather than prints)
    error messages; tests count them per input."""

    def setUp(self):
        self.compileGrammar()

    def parserClass(self, base):
        class TestParser(base):
            def __init__(self, *args, **kwargs):
                base.__init__(self, *args, **kwargs)
                self.reportedErrors = []

            def emitErrorMessage(self, msg):
                self.reportedErrors.append(msg)
        return TestParser

    def _parse(self, input):
        # Run the 'program' rule over `input`; returns the parser so the
        # caller can inspect reportedErrors.
        cStream = antlr3.StringStream(input)
        lexer = self.getLexer(cStream)
        tStream = antlr3.CommonTokenStream(lexer)
        parser = self.getParser(tStream)
        parser.program()
        return parser

    def testValid(self):
        parser = self._parse("int foo;")
        self.assertEqual(len(parser.reportedErrors), 0, parser.reportedErrors)

    def testMalformedInput1(self):
        parser = self._parse('int foo() { 1+2 }')
        # FIXME: currently strings with formatted errors are collected
        # can't check error locations yet
        self.assertEqual(len(parser.reportedErrors), 1, parser.reportedErrors)

    def testMalformedInput2(self):
        parser = self._parse('int foo() { 1+; 1+2 }')
        # FIXME: currently strings with formatted errors are collected
        # can't check error locations yet
        self.assertEqual(len(parser.reportedErrors), 2, parser.reportedErrors)
if __name__ == '__main__':
unittest.main()
| Python |
import unittest
import textwrap
import antlr3
import antlr3.tree
import testbase
class T(testbase.ANTLRTest):
    # Tree-parser test (t047): parse C-like input into an AST with the
    # combined grammar, then walk the AST with a -trace enabled tree
    # grammar and check the recorded rule enter/exit events.

    def walkerClass(self, base):
        # Wrap the generated walker to record trace events in self.traces.
        class TWalker(base):
            def __init__(self, *args, **kwargs):
                base.__init__(self, *args, **kwargs)
                # rule enter/exit events appended by the -trace hooks below
                self.traces = []

            def traceIn(self, ruleName, ruleIndex):
                self.traces.append('>'+ruleName)

            def traceOut(self, ruleName, ruleIndex):
                self.traces.append('<'+ruleName)

            def recover(self, input, re):
                # no error recovery yet, just crash!
                raise
        return TWalker

    def setUp(self):
        # Compile both the combined grammar and the tree grammar; the
        # walker is compiled with -trace so traceIn/traceOut are called.
        self.compileGrammar()
        self.compileGrammar('t047treeparserWalker.g', options='-trace')

    def testWalker(self):
        input = textwrap.dedent(
            '''\
            char c;
            int x;

            void bar(int x);

            int foo(int y, char d) {
              int i;
              for (i=0; i<3; i=i+1) {
                x=3;
                y=5;
              }
            }
            ''')

        cStream = antlr3.StringStream(input)
        lexer = self.getLexer(cStream)
        tStream = antlr3.CommonTokenStream(lexer)
        parser = self.getParser(tStream)
        r = parser.program()

        # sanity-check the AST built by the combined grammar
        self.failUnlessEqual(
            r.tree.toStringTree(),
            "(VAR_DEF char c) (VAR_DEF int x) (FUNC_DECL (FUNC_HDR void bar (ARG_DEF int x))) (FUNC_DEF (FUNC_HDR int foo (ARG_DEF int y) (ARG_DEF char d)) (BLOCK (VAR_DEF int i) (for (= i 0) (< i 3) (= i (+ i 1)) (BLOCK (= x 3) (= y 5)))))"
            )

        nodes = antlr3.tree.CommonTreeNodeStream(r.tree)
        nodes.setTokenStream(tStream)
        walker = self.getWalker(nodes)
        walker.program()

        # FIXME: need to crosscheck with Java target (compile walker with
        # -trace option), if this is the real list. For now I'm happy that
        # it does not crash ;)
        self.failUnlessEqual(
            walker.traces,
            [ '>program', '>declaration', '>variable', '>type', '<type',
              '>declarator', '<declarator', '<variable', '<declaration',
              '>declaration', '>variable', '>type', '<type', '>declarator',
              '<declarator', '<variable', '<declaration', '>declaration',
              '>functionHeader', '>type', '<type', '>formalParameter',
              '>type', '<type', '>declarator', '<declarator',
              '<formalParameter', '<functionHeader', '<declaration',
              '>declaration', '>functionHeader', '>type', '<type',
              '>formalParameter', '>type', '<type', '>declarator',
              '<declarator', '<formalParameter', '>formalParameter', '>type',
              '<type', '>declarator', '<declarator', '<formalParameter',
              '<functionHeader', '>block', '>variable', '>type', '<type',
              '>declarator', '<declarator', '<variable', '>stat', '>forStat',
              '>expr', '>expr', '>atom', '<atom', '<expr', '<expr', '>expr',
              '>expr', '>atom', '<atom', '<expr', '>expr', '>atom', '<atom',
              '<expr', '<expr', '>expr', '>expr', '>expr', '>atom', '<atom',
              '<expr', '>expr', '>atom', '<atom', '<expr', '<expr', '<expr',
              '>block', '>stat', '>expr', '>expr', '>atom', '<atom', '<expr',
              '<expr', '<stat', '>stat', '>expr', '>expr', '>atom', '<atom',
              '<expr', '<expr', '<stat', '<block', '<forStat', '<stat',
              '<block', '<declaration', '<program'
              ]
            )

    def testRuleLabelPropertyRefText(self):
        # NOTE(review): setUp() already compiles both grammars; these two
        # calls look redundant (the dependency cache makes them cheap) —
        # confirm before removing.
        self.compileGrammar()
        self.compileGrammar('t047treeparserWalker.g', options='-trace')

        input = textwrap.dedent(
            '''\
            char c;
            ''')

        cStream = antlr3.StringStream(input)
        lexer = self.getLexer(cStream)
        tStream = antlr3.CommonTokenStream(lexer)
        parser = self.getParser(tStream)
        r = parser.variable()

        nodes = antlr3.tree.CommonTreeNodeStream(r.tree)
        nodes.setTokenStream(tStream)
        walker = self.getWalker(nodes)
        # the walker rule returns $text of the matched subtree
        r = walker.variable()

        self.failUnlessEqual(r, 'c')
if __name__ == '__main__':
unittest.main()
| Python |
import unittest
import textwrap
import antlr3
import antlr3.tree
import testbase
import sys
class TestAutoAST(testbase.ANTLRTest):
    """Functional tests for ANTLR3's automatic AST construction (output=AST).

    Each test compiles a small inline grammar, parses an input string, and
    compares the resulting tree (via toStringTree()) against the expected
    LISP-style rendering.
    """

    def parserClass(self, base):
        """Return a parser subclass that records errors, output, and traces."""
        class TParser(base):
            def __init__(self, *args, **kwargs):
                base.__init__(self, *args, **kwargs)

                self._errors = []
                self._output = ""
                # Fix: traceIn/traceOut append to self.traces, but it was
                # never initialized, so running a grammar compiled with
                # -trace raised AttributeError.
                self.traces = []

            def capture(self, t):
                self._output += t

            def traceIn(self, ruleName, ruleIndex):
                self.traces.append('>'+ruleName)

            def traceOut(self, ruleName, ruleIndex):
                self.traces.append('<'+ruleName)

            def emitErrorMessage(self, msg):
                # Collect error messages instead of printing, so tests can
                # assert on them.
                self._errors.append(msg)

        return TParser

    def lexerClass(self, base):
        """Return a lexer subclass that records output and traces."""
        class TLexer(base):
            def __init__(self, *args, **kwargs):
                base.__init__(self, *args, **kwargs)

                self._output = ""
                # Fix: see TParser.__init__ -- traces must exist before
                # traceIn/traceOut are ever called.
                self.traces = []

            def capture(self, t):
                self._output += t

            def traceIn(self, ruleName, ruleIndex):
                self.traces.append('>'+ruleName)

            def traceOut(self, ruleName, ruleIndex):
                self.traces.append('<'+ruleName)

            def recover(self, input, re):
                # no error recovery yet, just crash!
                raise

        return TLexer

    def execParser(self, grammar, grammarEntry, input, expectErrors=False):
        """Compile *grammar*, parse *input* from rule *grammarEntry*.

        Returns the stringified AST, or an (ast, errors) pair when
        expectErrors is True.
        """
        lexerCls, parserCls = self.compileInlineGrammar(grammar)

        cStream = antlr3.StringStream(input)
        lexer = lexerCls(cStream)
        tStream = antlr3.CommonTokenStream(lexer)
        parser = parserCls(tStream)
        r = getattr(parser, grammarEntry)()

        if not expectErrors:
            self.assertEquals(len(parser._errors), 0, parser._errors)

        result = ""

        if r is not None:
            if hasattr(r, 'result'):
                result += r.result

            if r.tree is not None:
                result += r.tree.toStringTree()

        if not expectErrors:
            return result

        else:
            return result, parser._errors

    def execTreeParser(self, grammar, grammarEntry, treeGrammar, treeEntry, input):
        """Parse *input*, then run the tree grammar over the resulting AST."""
        lexerCls, parserCls = self.compileInlineGrammar(grammar)
        walkerCls = self.compileInlineGrammar(treeGrammar)

        cStream = antlr3.StringStream(input)
        lexer = lexerCls(cStream)
        tStream = antlr3.CommonTokenStream(lexer)
        parser = parserCls(tStream)
        r = getattr(parser, grammarEntry)()
        nodes = antlr3.tree.CommonTreeNodeStream(r.tree)
        nodes.setTokenStream(tStream)
        walker = walkerCls(nodes)
        r = getattr(walker, treeEntry)()

        if r is not None:
            return r.tree.toStringTree()

        return ""

    def testTokenList(self):
        grammar = textwrap.dedent(
            r'''
            grammar foo;
            options {language=Python;output=AST;}
            a : ID INT ;
            ID : 'a'..'z'+ ;
            INT : '0'..'9'+;
            WS : (' '|'\n') {$channel=HIDDEN;};
            ''')

        found = self.execParser(grammar, "a", "abc 34")
        self.assertEquals("abc 34", found)

    def testTokenListInSingleAltBlock(self):
        grammar = textwrap.dedent(
            r'''
            grammar foo;
            options {language=Python;output=AST;}
            a : (ID INT) ;
            ID : 'a'..'z'+ ;
            INT : '0'..'9'+;
            WS : (' '|'\n') {$channel=HIDDEN;} ;
            ''')

        found = self.execParser(grammar, "a", "abc 34")
        self.assertEquals("abc 34", found)

    def testSimpleRootAtOuterLevel(self):
        grammar = textwrap.dedent(
            r'''
            grammar foo;
            options {language=Python;output=AST;}
            a : ID^ INT ;
            ID : 'a'..'z'+ ;
            INT : '0'..'9'+;
            WS : (' '|'\n') {$channel=HIDDEN;} ;
            ''')

        found = self.execParser(grammar, "a", "abc 34")
        self.assertEquals("(abc 34)", found)

    def testSimpleRootAtOuterLevelReverse(self):
        grammar = textwrap.dedent(
            r'''
            grammar T;
            options {language=Python;output=AST;}
            a : INT ID^ ;
            ID : 'a'..'z'+ ;
            INT : '0'..'9'+;
            WS : (' '|'\n') {$channel=HIDDEN;} ;
            ''')

        found = self.execParser(grammar, "a", "34 abc")
        self.assertEquals("(abc 34)", found)

    def testBang(self):
        grammar = textwrap.dedent(
            r'''
            grammar T;
            options {language=Python;output=AST;}
            a : ID INT! ID! INT ;
            ID : 'a'..'z'+ ;
            INT : '0'..'9'+;
            WS : (' '|'\n') {$channel=HIDDEN;} ;
            ''')

        found = self.execParser(grammar, "a", "abc 34 dag 4532")
        self.assertEquals("abc 4532", found)

    def testOptionalThenRoot(self):
        grammar = textwrap.dedent(
            r'''
            grammar T;
            options {language=Python;output=AST;}
            a : ( ID INT )? ID^ ;
            ID : 'a'..'z'+ ;
            INT : '0'..'9'+;
            WS : (' '|'\n') {$channel=HIDDEN;} ;
            ''')

        found = self.execParser(grammar, "a", "a 1 b")
        self.assertEquals("(b a 1)", found)

    def testLabeledStringRoot(self):
        grammar = textwrap.dedent(
            r'''
            grammar T;
            options {language=Python;output=AST;}
            a : v='void'^ ID ';' ;
            ID : 'a'..'z'+ ;
            INT : '0'..'9'+;
            WS : (' '|'\n') {$channel=HIDDEN;} ;
            ''')

        found = self.execParser(grammar, "a", "void foo;")
        self.assertEquals("(void foo ;)", found)

    def testWildcard(self):
        grammar = textwrap.dedent(
            r'''
            grammar T;
            options {language=Python;output=AST;}
            a : v='void'^ . ';' ;
            ID : 'a'..'z'+ ;
            INT : '0'..'9'+;
            WS : (' '|'\n') {$channel=HIDDEN;} ;
            ''')

        found = self.execParser(grammar, "a", "void foo;")
        self.assertEquals("(void foo ;)", found)

    def testWildcardRoot(self):
        grammar = textwrap.dedent(
            r'''
            grammar T;
            options {language=Python;output=AST;}
            a : v='void' .^ ';' ;
            ID : 'a'..'z'+ ;
            INT : '0'..'9'+;
            WS : (' '|'\n') {$channel=HIDDEN;} ;
            ''')

        found = self.execParser(grammar, "a", "void foo;")
        self.assertEquals("(foo void ;)", found)

    def testWildcardRootWithLabel(self):
        grammar = textwrap.dedent(
            r'''
            grammar T;
            options {language=Python;output=AST;}
            a : v='void' x=.^ ';' ;
            ID : 'a'..'z'+ ;
            INT : '0'..'9'+;
            WS : (' '|'\n') {$channel=HIDDEN;} ;
            ''')

        found = self.execParser(grammar, "a", "void foo;")
        self.assertEquals("(foo void ;)", found)

    def testWildcardRootWithListLabel(self):
        grammar = textwrap.dedent(
            r'''
            grammar T;
            options {language=Python;output=AST;}
            a : v='void' x=.^ ';' ;
            ID : 'a'..'z'+ ;
            INT : '0'..'9'+;
            WS : (' '|'\n') {$channel=HIDDEN;} ;
            ''')

        found = self.execParser(grammar, "a", "void foo;")
        self.assertEquals("(foo void ;)", found)

    def testRootRoot(self):
        grammar = textwrap.dedent(
            r'''
            grammar T;
            options {language=Python;output=AST;}
            a : ID^ INT^ ID ;
            ID : 'a'..'z'+ ;
            INT : '0'..'9'+;
            WS : (' '|'\n') {$channel=HIDDEN;} ;
            ''')

        found = self.execParser(grammar, "a", "a 34 c")
        self.assertEquals("(34 a c)", found)

    def testRootRoot2(self):
        grammar = textwrap.dedent(
            r'''
            grammar T;
            options {language=Python;output=AST;}
            a : ID INT^ ID^ ;
            ID : 'a'..'z'+ ;
            INT : '0'..'9'+;
            WS : (' '|'\n') {$channel=HIDDEN;} ;
            ''')

        found = self.execParser(grammar, "a", "a 34 c")
        self.assertEquals("(c (34 a))", found)

    def testRootThenRootInLoop(self):
        grammar = textwrap.dedent(
            r'''
            grammar T;
            options {language=Python;output=AST;}
            a : ID^ (INT '*'^ ID)+ ;
            ID : 'a'..'z'+ ;
            INT : '0'..'9'+;
            WS : (' '|'\n') {$channel=HIDDEN;} ;
            ''')

        found = self.execParser(grammar, "a", "a 34 * b 9 * c")
        self.assertEquals("(* (* (a 34) b 9) c)", found)

    def testNestedSubrule(self):
        grammar = textwrap.dedent(
            r'''
            grammar T;
            options {language=Python;output=AST;}
            a : 'void' (({pass}ID|INT) ID | 'null' ) ';' ;
            ID : 'a'..'z'+ ;
            INT : '0'..'9'+;
            WS : (' '|'\n') {$channel=HIDDEN;} ;
            ''')

        found = self.execParser(grammar, "a", "void a b;")
        self.assertEquals("void a b ;", found)

    def testInvokeRule(self):
        grammar = textwrap.dedent(
            r'''
            grammar T;
            options {language=Python;output=AST;}
            a  : type ID ;
            type : {pass}'int' | 'float' ;
            ID : 'a'..'z'+ ;
            INT : '0'..'9'+;
            WS : (' '|'\n') {$channel=HIDDEN;} ;
            ''')

        found = self.execParser(grammar, "a", "int a")
        self.assertEquals("int a", found)

    def testInvokeRuleAsRoot(self):
        grammar = textwrap.dedent(
            r'''
            grammar T;
            options {language=Python;output=AST;}
            a  : type^ ID ;
            type : {pass}'int' | 'float' ;
            ID : 'a'..'z'+ ;
            INT : '0'..'9'+;
            WS : (' '|'\n') {$channel=HIDDEN;} ;
            ''')

        found = self.execParser(grammar, "a", "int a")
        self.assertEquals("(int a)", found)

    def testInvokeRuleAsRootWithLabel(self):
        grammar = textwrap.dedent(
            r'''
            grammar T;
            options {language=Python;output=AST;}
            a  : x=type^ ID ;
            type : {pass}'int' | 'float' ;
            ID : 'a'..'z'+ ;
            INT : '0'..'9'+;
            WS : (' '|'\n') {$channel=HIDDEN;} ;
            ''')

        found = self.execParser(grammar, "a", "int a")
        self.assertEquals("(int a)", found)

    def testInvokeRuleAsRootWithListLabel(self):
        grammar = textwrap.dedent(
            r'''
            grammar T;
            options {language=Python;output=AST;}
            a  : x+=type^ ID ;
            type : {pass}'int' | 'float' ;
            ID : 'a'..'z'+ ;
            INT : '0'..'9'+;
            WS : (' '|'\n') {$channel=HIDDEN;} ;
            ''')

        found = self.execParser(grammar, "a", "int a")
        self.assertEquals("(int a)", found)

    def testRuleRootInLoop(self):
        grammar = textwrap.dedent(
            r'''
            grammar T;
            options {language=Python;output=AST;}
            a : ID ('+'^ ID)* ;
            ID : 'a'..'z'+ ;
            INT : '0'..'9'+;
            WS : (' '|'\n') {$channel=HIDDEN;} ;
            ''')

        found = self.execParser(grammar, "a", "a+b+c+d")
        self.assertEquals("(+ (+ (+ a b) c) d)", found)

    def testRuleInvocationRuleRootInLoop(self):
        grammar = textwrap.dedent(
            r'''
            grammar T;
            options {language=Python;output=AST;}
            a : ID (op^ ID)* ;
            op : {pass}'+' | '-' ;
            ID : 'a'..'z'+ ;
            INT : '0'..'9'+;
            WS : (' '|'\n') {$channel=HIDDEN;} ;
            ''')

        found = self.execParser(grammar, "a", "a+b+c-d")
        self.assertEquals("(- (+ (+ a b) c) d)", found)

    def testTailRecursion(self):
        grammar = textwrap.dedent(
            r'''
            grammar T;
            options {language=Python;output=AST;}
            s : a ;
            a : atom ('exp'^ a)? ;
            atom : INT ;
            ID : 'a'..'z'+ ;
            INT : '0'..'9'+;
            WS : (' '|'\n') {$channel=HIDDEN;} ;
            ''')

        found = self.execParser(grammar, "s", "3 exp 4 exp 5")
        self.assertEquals("(exp 3 (exp 4 5))", found)

    def testSet(self):
        grammar = textwrap.dedent(
            r'''
            grammar T;
            options {language=Python;output=AST;}
            a : ID|INT ;
            ID : 'a'..'z'+ ;
            INT : '0'..'9'+;
            WS : (' '|'\n') {$channel=HIDDEN;} ;
            ''')

        found = self.execParser(grammar, "a", "abc")
        self.assertEquals("abc", found)

    def testSetRoot(self):
        grammar = textwrap.dedent(
            r'''
            grammar T;
            options {language=Python;output=AST;}
            a : ('+' | '-')^ ID ;
            ID : 'a'..'z'+ ;
            INT : '0'..'9'+;
            WS : (' '|'\n') {$channel=HIDDEN;} ;
            ''')

        found = self.execParser(grammar, "a", "+abc")
        self.assertEquals("(+ abc)", found)

    @testbase.broken("FAILS until antlr.g rebuilt in v3", RuntimeError)
    def testSetRootWithLabel(self):
        grammar = textwrap.dedent(
            r'''
            grammar T;
            options {language=Python;output=AST;}
            a : x=('+' | '-')^ ID ;
            ID : 'a'..'z'+ ;
            INT : '0'..'9'+;
            WS : (' '|'\n') {$channel=HIDDEN;} ;
            ''')

        found = self.execParser(grammar, "a", "+abc")
        self.assertEquals("(+ abc)", found)

    def testSetAsRuleRootInLoop(self):
        grammar = textwrap.dedent(
            r'''
            grammar T;
            options {language=Python;output=AST;}
            a : ID (('+'|'-')^ ID)* ;
            ID : 'a'..'z'+ ;
            INT : '0'..'9'+;
            WS : (' '|'\n') {$channel=HIDDEN;} ;
            ''')

        found = self.execParser(grammar, "a", "a+b-c")
        self.assertEquals("(- (+ a b) c)", found)

    def testNotSet(self):
        grammar = textwrap.dedent(
            r'''
            grammar T;
            options {language=Python;output=AST;}
            a : ~ID '+' INT ;
            ID : 'a'..'z'+ ;
            INT : '0'..'9'+;
            WS : (' '|'\n') {$channel=HIDDEN;} ;
            ''')

        found = self.execParser(grammar, "a", "34+2")
        self.assertEquals("34 + 2", found)

    def testNotSetWithLabel(self):
        grammar = textwrap.dedent(
            r'''
            grammar T;
            options {language=Python;output=AST;}
            a : x=~ID '+' INT ;
            ID : 'a'..'z'+ ;
            INT : '0'..'9'+;
            WS : (' '|'\n') {$channel=HIDDEN;} ;
            ''')

        found = self.execParser(grammar, "a", "34+2")
        self.assertEquals("34 + 2", found)

    def testNotSetWithListLabel(self):
        grammar = textwrap.dedent(
            r'''
            grammar T;
            options {language=Python;output=AST;}
            a : x=~ID '+' INT ;
            ID : 'a'..'z'+ ;
            INT : '0'..'9'+;
            WS : (' '|'\n') {$channel=HIDDEN;} ;
            ''')

        found = self.execParser(grammar, "a", "34+2")
        self.assertEquals("34 + 2", found)

    def testNotSetRoot(self):
        grammar = textwrap.dedent(
            r'''
            grammar T;
            options {language=Python;output=AST;}
            a : ~'+'^ INT ;
            ID : 'a'..'z'+ ;
            INT : '0'..'9'+;
            WS : (' '|'\n') {$channel=HIDDEN;} ;
            ''')

        found = self.execParser(grammar, "a", "34 55")
        self.assertEquals("(34 55)", found)

    def testNotSetRootWithLabel(self):
        grammar = textwrap.dedent(
            r'''
            grammar T;
            options {language=Python;output=AST;}
            a : ~'+'^ INT ;
            ID : 'a'..'z'+ ;
            INT : '0'..'9'+;
            WS : (' '|'\n') {$channel=HIDDEN;} ;
            ''')

        found = self.execParser(grammar, "a", "34 55")
        self.assertEquals("(34 55)", found)

    def testNotSetRootWithListLabel(self):
        grammar = textwrap.dedent(
            r'''
            grammar T;
            options {language=Python;output=AST;}
            a : ~'+'^ INT ;
            ID : 'a'..'z'+ ;
            INT : '0'..'9'+;
            WS : (' '|'\n') {$channel=HIDDEN;} ;
            ''')

        found = self.execParser(grammar, "a", "34 55")
        self.assertEquals("(34 55)", found)

    def testNotSetRuleRootInLoop(self):
        grammar = textwrap.dedent(
            r'''
            grammar T;
            options {language=Python;output=AST;}
            a : INT (~INT^ INT)* ;
            blort : '+' ;
            ID : 'a'..'z'+ ;
            INT : '0'..'9'+;
            WS : (' '|'\n') {$channel=HIDDEN;} ;
            ''')

        found = self.execParser(grammar, "a", "3+4+5")
        self.assertEquals("(+ (+ 3 4) 5)", found)

    @testbase.broken("FIXME: What happened to the semicolon?", AssertionError)
    def testTokenLabelReuse(self):
        # check for compilation problem due to multiple defines
        grammar = textwrap.dedent(
            r'''
            grammar T;
            options {language=Python;output=AST;}
            a returns [result] : id=ID id=ID {$result = "2nd id="+$id.text+";";} ;
            ID : 'a'..'z'+ ;
            INT : '0'..'9'+;
            WS : (' '|'\n') {$channel=HIDDEN;} ;
            ''')

        found = self.execParser(grammar, "a", "a b")
        self.assertEquals("2nd id=b;a b", found)

    def testTokenLabelReuse2(self):
        # check for compilation problem due to multiple defines
        grammar = textwrap.dedent(
            r'''
            grammar T;
            options {language=Python;output=AST;}
            a returns [result]: id=ID id=ID^ {$result = "2nd id="+$id.text+',';} ;
            ID : 'a'..'z'+ ;
            INT : '0'..'9'+;
            WS : (' '|'\n') {$channel=HIDDEN;} ;
            ''')

        found = self.execParser(grammar, "a", "a b")
        self.assertEquals("2nd id=b,(b a)", found)

    def testTokenListLabelReuse(self):
        # check for compilation problem due to multiple defines
        # make sure ids has both ID tokens
        grammar = textwrap.dedent(
            r'''
            grammar T;
            options {language=Python;output=AST;}
            a returns [result] : ids+=ID ids+=ID {$result = "id list=["+",".join([t.text for t in $ids])+'],';} ;
            ID : 'a'..'z'+ ;
            INT : '0'..'9'+;
            WS : (' '|'\n') {$channel=HIDDEN;} ;
            ''')

        found = self.execParser(grammar, "a", "a b")
        expecting = "id list=[a,b],a b"
        self.assertEquals(expecting, found)

    def testTokenListLabelReuse2(self):
        # check for compilation problem due to multiple defines
        # make sure ids has both ID tokens
        grammar = textwrap.dedent(
            r'''
            grammar T;
            options {language=Python;output=AST;}
            a returns [result] : ids+=ID^ ids+=ID {$result = "id list=["+",".join([t.text for t in $ids])+'],';} ;
            ID : 'a'..'z'+ ;
            INT : '0'..'9'+;
            WS : (' '|'\n') {$channel=HIDDEN;} ;
            ''')

        found = self.execParser(grammar, "a", "a b")
        expecting = "id list=[a,b],(a b)"
        self.assertEquals(expecting, found)

    def testTokenListLabelRuleRoot(self):
        grammar = textwrap.dedent(
            r'''
            grammar T;
            options {language=Python;output=AST;}
            a : id+=ID^ ;
            ID : 'a'..'z'+ ;
            INT : '0'..'9'+;
            WS : (' '|'\n') {$channel=HIDDEN;} ;
            ''')

        found = self.execParser(grammar, "a", "a")
        self.assertEquals("a", found)

    def testTokenListLabelBang(self):
        grammar = textwrap.dedent(
            r'''
            grammar T;
            options {language=Python;output=AST;}
            a : id+=ID! ;
            ID : 'a'..'z'+ ;
            INT : '0'..'9'+;
            WS : (' '|'\n') {$channel=HIDDEN;} ;
            ''')

        found = self.execParser(grammar, "a", "a")
        self.assertEquals("", found)

    def testRuleListLabel(self):
        grammar = textwrap.dedent(
            r'''
            grammar T;
            options {language=Python;output=AST;}
            a returns [result]: x+=b x+=b {
            t=$x[1]
            $result = "2nd x="+t.toStringTree()+',';
            };
            b : ID;
            ID : 'a'..'z'+ ;
            INT : '0'..'9'+;
            WS : (' '|'\n') {$channel=HIDDEN;} ;
            ''')

        found = self.execParser(grammar, "a", "a b")
        self.assertEquals("2nd x=b,a b", found)

    def testRuleListLabelRuleRoot(self):
        grammar = textwrap.dedent(
            r'''
            grammar T;
            options {language=Python;output=AST;}
            a returns [result] : ( x+=b^ )+ {
            $result = "x="+$x[1].toStringTree()+',';
            } ;
            b : ID;
            ID : 'a'..'z'+ ;
            INT : '0'..'9'+;
            WS : (' '|'\n') {$channel=HIDDEN;} ;
            ''')

        found = self.execParser(grammar, "a", "a b")
        self.assertEquals("x=(b a),(b a)", found)

    def testRuleListLabelBang(self):
        grammar = textwrap.dedent(
            r'''
            grammar T;
            options {language=Python;output=AST;}
            a returns [result] : x+=b! x+=b {
            $result = "1st x="+$x[0].toStringTree()+',';
            } ;
            b : ID;
            ID : 'a'..'z'+ ;
            INT : '0'..'9'+;
            WS : (' '|'\n') {$channel=HIDDEN;} ;
            ''')

        found = self.execParser(grammar, "a", "a b")
        self.assertEquals("1st x=a,b", found)

    def testComplicatedMelange(self):
        # check for compilation problem
        grammar = textwrap.dedent(
            r'''
            grammar T;
            options {language=Python;output=AST;}
            a : A b=B b=B c+=C c+=C D {s = $D.text} ;
            A : 'a' ;
            B : 'b' ;
            C : 'c' ;
            D : 'd' ;
            WS : (' '|'\n') {$channel=HIDDEN;} ;
            ''')

        found = self.execParser(grammar, "a", "a b b c c d")
        self.assertEquals("a b b c c d", found)

    def testReturnValueWithAST(self):
        grammar = textwrap.dedent(
            r'''
            grammar foo;
            options {language=Python;output=AST;}
            a returns [result] : ID b { $result = str($b.i) + '\n';} ;
            b returns [i] : INT {$i=int($INT.text);} ;
            ID : 'a'..'z'+ ;
            INT : '0'..'9'+;
            WS : (' '|'\n') {$channel=HIDDEN;} ;
            ''')

        found = self.execParser(grammar, "a", "abc 34")
        self.assertEquals("34\nabc 34", found)

    def testSetLoop(self):
        grammar = textwrap.dedent(
            r'''
            grammar T;
            options { language=Python;output=AST; }
            r : (INT|ID)+ ;
            ID : 'a'..'z' + ;
            INT : '0'..'9' +;
            WS: (' ' | '\n' | '\\t')+ {$channel = HIDDEN;};
            ''')

        found = self.execParser(grammar, "r", "abc 34 d")
        self.assertEquals("abc 34 d", found)

    def testExtraTokenInSimpleDecl(self):
        grammar = textwrap.dedent(
            r'''
            grammar foo;
            options {language=Python;output=AST;}
            decl : type^ ID '='! INT ';'! ;
            type : 'int' | 'float' ;
            ID : 'a'..'z'+ ;
            INT : '0'..'9'+;
            WS : (' '|'\n') {$channel=HIDDEN;} ;
            ''')

        found, errors = self.execParser(grammar, "decl", "int 34 x=1;",
                                        expectErrors=True)
        self.assertEquals(["line 1:4 extraneous input u'34' expecting ID"],
                          errors)
        self.assertEquals("(int x 1)", found) # tree gets correct x and 1 tokens

    def testMissingIDInSimpleDecl(self):
        grammar = textwrap.dedent(
            r'''
            grammar foo;
            options {language=Python;output=AST;}
            tokens {EXPR;}
            decl : type^ ID '='! INT ';'! ;
            type : 'int' | 'float' ;
            ID : 'a'..'z'+ ;
            INT : '0'..'9'+;
            WS : (' '|'\n') {$channel=HIDDEN;} ;
            ''')

        found, errors = self.execParser(grammar, "decl", "int =1;",
                                        expectErrors=True)
        self.assertEquals(["line 1:4 missing ID at u'='"], errors)
        self.assertEquals("(int <missing ID> 1)", found) # tree gets invented ID token

    def testMissingSetInSimpleDecl(self):
        grammar = textwrap.dedent(
            r'''
            grammar foo;
            options {language=Python;output=AST;}
            tokens {EXPR;}
            decl : type^ ID '='! INT ';'! ;
            type : 'int' | 'float' ;
            ID : 'a'..'z'+ ;
            INT : '0'..'9'+;
            WS : (' '|'\n') {$channel=HIDDEN;} ;
            ''')

        found, errors = self.execParser(grammar, "decl", "x=1;",
                                        expectErrors=True)
        self.assertEquals(["line 1:0 mismatched input u'x' expecting set None"], errors)
        self.assertEquals("(<error: x> x 1)", found) # tree gets invented ID token

    def testMissingTokenGivesErrorNode(self):
        grammar = textwrap.dedent(
            r'''
            grammar foo;
            options {language=Python;output=AST;}
            a : ID INT ; // follow is EOF
            ID : 'a'..'z'+ ;
            INT : '0'..'9'+;
            WS : (' '|'\n') {$channel=HIDDEN;} ;
            ''')

        found, errors = self.execParser(grammar, "a", "abc", expectErrors=True)
        self.assertEquals(["line 0:-1 missing INT at '<EOF>'"], errors)
        self.assertEquals("abc <missing INT>", found)

    def testMissingTokenGivesErrorNodeInInvokedRule(self):
        grammar = textwrap.dedent(
            r'''
            grammar foo;
            options {language=Python;output=AST;}
            a : b ;
            b : ID INT ; // follow should see EOF
            ID : 'a'..'z'+ ;
            INT : '0'..'9'+;
            WS : (' '|'\n') {$channel=HIDDEN;} ;
            ''')

        found, errors = self.execParser(grammar, "a", "abc", expectErrors=True)
        self.assertEquals(["line 0:-1 missing INT at '<EOF>'"], errors)
        self.assertEquals("abc <missing INT>", found)

    def testExtraTokenGivesErrorNode(self):
        grammar = textwrap.dedent(
            r'''
            grammar foo;
            options {language=Python;output=AST;}
            a : b c ;
            b : ID ;
            c : INT ;
            ID : 'a'..'z'+ ;
            INT : '0'..'9'+;
            WS : (' '|'\n') {$channel=HIDDEN;} ;
            ''')

        found, errors = self.execParser(grammar, "a", "abc ick 34",
                                        expectErrors=True)
        self.assertEquals(["line 1:4 extraneous input u'ick' expecting INT"],
                          errors)
        self.assertEquals("abc 34", found)

    def testMissingFirstTokenGivesErrorNode(self):
        grammar = textwrap.dedent(
            r'''
            grammar foo;
            options {language=Python;output=AST;}
            a : ID INT ;
            ID : 'a'..'z'+ ;
            INT : '0'..'9'+;
            WS : (' '|'\n') {$channel=HIDDEN;} ;
            ''')

        found, errors = self.execParser(grammar, "a", "34", expectErrors=True)
        self.assertEquals(["line 1:0 missing ID at u'34'"], errors)
        self.assertEquals("<missing ID> 34", found)

    def testMissingFirstTokenGivesErrorNode2(self):
        grammar = textwrap.dedent(
            r'''
            grammar foo;
            options {language=Python;output=AST;}
            a : b c ;
            b : ID ;
            c : INT ;
            ID : 'a'..'z'+ ;
            INT : '0'..'9'+;
            WS : (' '|'\n') {$channel=HIDDEN;} ;
            ''')

        found, errors = self.execParser(grammar, "a", "34", expectErrors=True)

        # finds an error at the first token, 34, and re-syncs.
        # re-synchronizing does not consume a token because 34 follows
        # ref to rule b (start of c). It then matches 34 in c.
        self.assertEquals(["line 1:0 missing ID at u'34'"], errors)
        self.assertEquals("<missing ID> 34", found)

    def testNoViableAltGivesErrorNode(self):
        grammar = textwrap.dedent(
            r'''
            grammar foo;
            options {language=Python;output=AST;}
            a : b | c ;
            b : ID ;
            c : INT ;
            ID : 'a'..'z'+ ;
            S : '*' ;
            INT : '0'..'9'+;
            WS : (' '|'\n') {$channel=HIDDEN;} ;
            ''')

        found, errors = self.execParser(grammar, "a", "*", expectErrors=True)
        self.assertEquals(["line 1:0 no viable alternative at input u'*'"],
                          errors)
        self.assertEquals("<unexpected: [@0,0:0=u'*',<6>,1:0], resync=*>",
                          found)
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
| Python |
import antlr3
import testbase
import unittest
class t013parser(testbase.ANTLRTest):
    """Functional tests for the t013parser grammar (identifier collection)."""

    def setUp(self):
        # Build the lexer/parser from the grammar before every test.
        self.compileGrammar()

    def testValid(self):
        """Well-formed input parses cleanly and records the identifier."""
        cStream = antlr3.StringStream('foobar')
        lexer = self.getLexer(cStream)
        tStream = antlr3.CommonTokenStream(lexer)
        parser = self.getParser(tStream)
        parser.document()

        # Bare `assert` statements are stripped under `python -O`, which
        # would silently disable these checks; use unittest assertions.
        self.assertEqual(len(parser.reportedErrors), 0, parser.reportedErrors)
        self.assertEqual(parser.identifiers, ['foobar'])

    def testMalformedInput1(self):
        """Empty input must produce exactly one reported error."""
        cStream = antlr3.StringStream('')
        lexer = self.getLexer(cStream)
        tStream = antlr3.CommonTokenStream(lexer)
        parser = self.getParser(tStream)
        parser.document()

        # FIXME: currently strings with formatted errors are collected
        # can't check error locations yet
        self.assertEqual(len(parser.reportedErrors), 1, parser.reportedErrors)
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
| Python |
import antlr3
import testbase
import unittest
class t031emptyAlt(testbase.ANTLRTest):
    """Functional test for a grammar rule containing an empty alternative."""

    def setUp(self):
        # Build the lexer/parser from the grammar before every test.
        self.compileGrammar()

    def testValid1(self):
        """Rule r must accept 'foo' without raising."""
        cStream = antlr3.StringStream('foo')
        lexer = self.getLexer(cStream)
        tStream = antlr3.CommonTokenStream(lexer)
        parser = self.getParser(tStream)
        # The return value was previously bound to an unused local
        # (`events`); the dead assignment is dropped.
        parser.r()
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
| Python |
import unittest
import textwrap
import antlr3
import antlr3.tree
import testbase
import sys
class TestRewriteAST(testbase.ANTLRTest):
def parserClass(self, base):
class TParser(base):
def __init__(self, *args, **kwargs):
base.__init__(self, *args, **kwargs)
self._errors = []
self._output = ""
def capture(self, t):
self._output += t
def traceIn(self, ruleName, ruleIndex):
self.traces.append('>'+ruleName)
def traceOut(self, ruleName, ruleIndex):
self.traces.append('<'+ruleName)
def emitErrorMessage(self, msg):
self._errors.append(msg)
return TParser
def lexerClass(self, base):
class TLexer(base):
def __init__(self, *args, **kwargs):
base.__init__(self, *args, **kwargs)
self._output = ""
def capture(self, t):
self._output += t
def traceIn(self, ruleName, ruleIndex):
self.traces.append('>'+ruleName)
def traceOut(self, ruleName, ruleIndex):
self.traces.append('<'+ruleName)
def recover(self, input, re):
# no error recovery yet, just crash!
raise
return TLexer
def execParser(self, grammar, grammarEntry, input, expectErrors=False):
lexerCls, parserCls = self.compileInlineGrammar(grammar)
cStream = antlr3.StringStream(input)
lexer = lexerCls(cStream)
tStream = antlr3.CommonTokenStream(lexer)
parser = parserCls(tStream)
r = getattr(parser, grammarEntry)()
if not expectErrors:
self.assertEquals(len(parser._errors), 0, parser._errors)
result = ""
if r is not None:
if hasattr(r, 'result'):
result += r.result
if r.tree is not None:
result += r.tree.toStringTree()
if not expectErrors:
return result
else:
return result, parser._errors
def execTreeParser(self, grammar, grammarEntry, treeGrammar, treeEntry, input):
lexerCls, parserCls = self.compileInlineGrammar(grammar)
walkerCls = self.compileInlineGrammar(treeGrammar)
cStream = antlr3.StringStream(input)
lexer = lexerCls(cStream)
tStream = antlr3.CommonTokenStream(lexer)
parser = parserCls(tStream)
r = getattr(parser, grammarEntry)()
nodes = antlr3.tree.CommonTreeNodeStream(r.tree)
nodes.setTokenStream(tStream)
walker = walkerCls(nodes)
r = getattr(walker, treeEntry)()
if r is not None:
return r.tree.toStringTree()
return ""
def testDelete(self):
grammar = textwrap.dedent(
r'''
grammar T;
options {language=Python;output=AST;}
a : ID INT -> ;
ID : 'a'..'z'+ ;
INT : '0'..'9'+;
WS : (' '|'\n') {$channel=HIDDEN;} ;
''')
found = self.execParser(grammar, "a", "abc 34")
self.assertEquals("", found)
def testSingleToken(self):
grammar = textwrap.dedent(
r'''
grammar T;
options {language=Python;output=AST;}
a : ID -> ID;
ID : 'a'..'z'+ ;
INT : '0'..'9'+;
WS : (' '|'\n') {$channel=HIDDEN;} ;
''')
found = self.execParser(grammar, "a", "abc")
self.assertEquals("abc", found)
def testSingleTokenToNewNode(self):
grammar = textwrap.dedent(
r'''
grammar T;
options {language=Python;output=AST;}
a : ID -> ID["x"];
ID : 'a'..'z'+ ;
INT : '0'..'9'+;
WS : (' '|'\n') {$channel=HIDDEN;} ;
''')
found = self.execParser(grammar, "a", "abc")
self.assertEquals("x", found)
def testSingleTokenToNewNodeRoot(self):
grammar = textwrap.dedent(
r'''
grammar T;
options {language=Python;output=AST;}
a : ID -> ^(ID["x"] INT);
ID : 'a'..'z'+ ;
INT : '0'..'9'+;
WS : (' '|'\n') {$channel=HIDDEN;} ;
''')
found = self.execParser(grammar, "a", "abc")
self.assertEquals("(x INT)", found)
def testSingleTokenToNewNode2(self):
# Allow creation of new nodes w/o args.
grammar = textwrap.dedent(
r'''
grammar TT;
options {language=Python;output=AST;}
a : ID -> ID[ ];
ID : 'a'..'z'+ ;
INT : '0'..'9'+;
WS : (' '|'\n') {$channel=HIDDEN;} ;
''')
found = self.execParser(grammar, "a", "abc")
self.assertEquals("ID", found)
def testSingleCharLiteral(self):
grammar = textwrap.dedent(
r'''
grammar T;
options {language=Python;output=AST;}
a : 'c' -> 'c';
ID : 'a'..'z'+ ;
INT : '0'..'9'+;
WS : (' '|'\n') {$channel=HIDDEN;} ;
''')
found = self.execParser(grammar, "a", "c")
self.assertEquals("c", found)
def testSingleStringLiteral(self):
grammar = textwrap.dedent(
r'''
grammar T;
options {language=Python;output=AST;}
a : 'ick' -> 'ick';
ID : 'a'..'z'+ ;
INT : '0'..'9'+;
WS : (' '|'\n') {$channel=HIDDEN;} ;
''')
found = self.execParser(grammar, "a", "ick")
self.assertEquals("ick", found)
def testSingleRule(self):
grammar = textwrap.dedent(
r'''
grammar T;
options {language=Python;output=AST;}
a : b -> b;
b : ID ;
ID : 'a'..'z'+ ;
INT : '0'..'9'+;
WS : (' '|'\n') {$channel=HIDDEN;} ;
''')
found = self.execParser(grammar, "a", "abc")
self.assertEquals("abc", found)
def testReorderTokens(self):
grammar = textwrap.dedent(
r'''
grammar T;
options {language=Python;output=AST;}
a : ID INT -> INT ID;
ID : 'a'..'z'+ ;
INT : '0'..'9'+;
WS : (' '|'\n') {$channel=HIDDEN;} ;
''')
found = self.execParser(grammar, "a", "abc 34")
self.assertEquals("34 abc", found)
def testReorderTokenAndRule(self):
grammar = textwrap.dedent(
r'''
grammar T;
options {language=Python;output=AST;}
a : b INT -> INT b;
b : ID ;
ID : 'a'..'z'+ ;
INT : '0'..'9'+;
WS : (' '|'\n') {$channel=HIDDEN;} ;
''')
found = self.execParser(grammar, "a", "abc 34")
self.assertEquals("34 abc", found)
def testTokenTree(self):
grammar = textwrap.dedent(
r'''
grammar T;
options {language=Python;output=AST;}
a : ID INT -> ^(INT ID);
ID : 'a'..'z'+ ;
INT : '0'..'9'+;
WS : (' '|'\n') {$channel=HIDDEN;} ;
''')
found = self.execParser(grammar, "a", "abc 34")
self.assertEquals("(34 abc)", found)
def testTokenTreeAfterOtherStuff(self):
grammar = textwrap.dedent(
r'''
grammar T;
options {language=Python;output=AST;}
a : 'void' ID INT -> 'void' ^(INT ID);
ID : 'a'..'z'+ ;
INT : '0'..'9'+;
WS : (' '|'\n') {$channel=HIDDEN;} ;
''')
found = self.execParser(grammar, "a", "void abc 34")
self.assertEquals("void (34 abc)", found)
def testNestedTokenTreeWithOuterLoop(self):
# verify that ID and INT both iterate over outer index variable
grammar = textwrap.dedent(
r'''
grammar T;
options {language=Python;output=AST;}
tokens {DUH;}
a : ID INT ID INT -> ^( DUH ID ^( DUH INT) )+ ;
ID : 'a'..'z'+ ;
INT : '0'..'9'+;
WS : (' '|'\n') {$channel=HIDDEN;} ;
''')
found = self.execParser(grammar, "a", "a 1 b 2")
self.assertEquals("(DUH a (DUH 1)) (DUH b (DUH 2))", found)
def testOptionalSingleToken(self):
grammar = textwrap.dedent(
r'''
grammar T;
options {language=Python;output=AST;}
a : ID -> ID? ;
ID : 'a'..'z'+ ;
INT : '0'..'9'+;
WS : (' '|'\n') {$channel=HIDDEN;} ;
''')
found = self.execParser(grammar, "a", "abc")
self.assertEquals("abc", found)
def testClosureSingleToken(self):
grammar = textwrap.dedent(
r'''
grammar T;
options {language=Python;output=AST;}
a : ID ID -> ID* ;
ID : 'a'..'z'+ ;
INT : '0'..'9'+;
WS : (' '|'\n') {$channel=HIDDEN;} ;
''')
found = self.execParser(grammar, "a", "a b")
self.assertEquals("a b", found)
def testPositiveClosureSingleToken(self):
grammar = textwrap.dedent(
r'''
grammar T;
options {language=Python;output=AST;}
a : ID ID -> ID+ ;
ID : 'a'..'z'+ ;
INT : '0'..'9'+;
WS : (' '|'\n') {$channel=HIDDEN;} ;
''')
found = self.execParser(grammar, "a", "a b")
self.assertEquals("a b", found)
def testOptionalSingleRule(self):
grammar = textwrap.dedent(
r'''
grammar T;
options {language=Python;output=AST;}
a : b -> b?;
b : ID ;
ID : 'a'..'z'+ ;
INT : '0'..'9'+;
WS : (' '|'\n') {$channel=HIDDEN;} ;
''')
found = self.execParser(grammar, "a", "abc")
self.assertEquals("abc", found)
def testClosureSingleRule(self):
grammar = textwrap.dedent(
r'''
grammar T;
options {language=Python;output=AST;}
a : b b -> b*;
b : ID ;
ID : 'a'..'z'+ ;
INT : '0'..'9'+;
WS : (' '|'\n') {$channel=HIDDEN;} ;
''')
found = self.execParser(grammar, "a", "a b")
self.assertEquals("a b", found)
def testClosureOfLabel(self):
grammar = textwrap.dedent(
r'''
grammar T;
options {language=Python;output=AST;}
a : x+=b x+=b -> $x*;
b : ID ;
ID : 'a'..'z'+ ;
INT : '0'..'9'+;
WS : (' '|'\n') {$channel=HIDDEN;} ;
''')
found = self.execParser(grammar, "a", "a b")
self.assertEquals("a b", found)
def testOptionalLabelNoListLabel(self):
grammar = textwrap.dedent(
r'''
grammar T;
options {language=Python;output=AST;}
a : (x=ID)? -> $x?;
ID : 'a'..'z'+ ;
INT : '0'..'9'+;
WS : (' '|'\n') {$channel=HIDDEN;} ;
''')
found = self.execParser(grammar, "a", "a")
self.assertEquals("a", found)
def testPositiveClosureSingleRule(self):
grammar = textwrap.dedent(
r'''
grammar T;
options {language=Python;output=AST;}
a : b b -> b+;
b : ID ;
ID : 'a'..'z'+ ;
INT : '0'..'9'+;
WS : (' '|'\n') {$channel=HIDDEN;} ;
''')
found = self.execParser(grammar, "a", "a b")
self.assertEquals("a b", found)
def testSinglePredicateT(self):
grammar = textwrap.dedent(
r'''
grammar T;
options {language=Python;output=AST;}
a : ID -> {True}? ID -> ;
ID : 'a'..'z'+ ;
INT : '0'..'9'+;
WS : (' '|'\n') {$channel=HIDDEN;} ;
''')
found = self.execParser(grammar, "a", "abc")
self.assertEquals("abc", found)
def testSinglePredicateF(self):
grammar = textwrap.dedent(
r'''
grammar T;
options {language=Python;output=AST;}
a : ID -> {False}? ID -> ;
ID : 'a'..'z'+ ;
INT : '0'..'9'+;
WS : (' '|'\n') {$channel=HIDDEN;} ;
''')
found = self.execParser(grammar, "a", "abc")
self.assertEquals("", found)
def testMultiplePredicate(self):
grammar = textwrap.dedent(
r'''
grammar T;
options {language=Python;output=AST;}
a : ID INT -> {False}? ID
-> {True}? INT
->
;
ID : 'a'..'z'+ ;
INT : '0'..'9'+;
WS : (' '|'\n') {$channel=HIDDEN;} ;
''')
found = self.execParser(grammar, "a", "a 2")
self.assertEquals("2", found)
def testMultiplePredicateTrees(self):
grammar = textwrap.dedent(
r'''
grammar T;
options {language=Python;output=AST;}
a : ID INT -> {False}? ^(ID INT)
-> {True}? ^(INT ID)
-> ID
;
ID : 'a'..'z'+ ;
INT : '0'..'9'+;
WS : (' '|'\n') {$channel=HIDDEN;} ;
''')
found = self.execParser(grammar, "a", "a 2")
self.assertEquals("(2 a)", found)
def testSimpleTree(self):
grammar = textwrap.dedent(
r'''
grammar T;
options {language=Python;output=AST;}
a : op INT -> ^(op INT);
op : '+'|'-' ;
ID : 'a'..'z'+ ;
INT : '0'..'9'+;
WS : (' '|'\n') {$channel=HIDDEN;} ;
''')
found = self.execParser(grammar, "a", "-34")
self.assertEquals("(- 34)", found)
def testSimpleTree2(self):
grammar = textwrap.dedent(
r'''
grammar T;
options {language=Python;output=AST;}
a : op INT -> ^(INT op);
op : '+'|'-' ;
ID : 'a'..'z'+ ;
INT : '0'..'9'+;
WS : (' '|'\n') {$channel=HIDDEN;} ;
''')
found = self.execParser(grammar, "a", "+ 34")
self.assertEquals("(34 +)", found)
def testNestedTrees(self):
grammar = textwrap.dedent(
r'''
grammar T;
options {language=Python;output=AST;}
a : 'var' (ID ':' type ';')+ -> ^('var' ^(':' ID type)+) ;
type : 'int' | 'float' ;
ID : 'a'..'z'+ ;
INT : '0'..'9'+;
WS : (' '|'\n') {$channel=HIDDEN;} ;
''')
found = self.execParser(grammar, "a", "var a:int; b:float;")
self.assertEquals("(var (: a int) (: b float))", found)
def testImaginaryTokenCopy(self):
grammar = textwrap.dedent(
r'''
grammar T;
options {language=Python;output=AST;}
tokens {VAR;}
a : ID (',' ID)*-> ^(VAR ID)+ ;
type : 'int' | 'float' ;
ID : 'a'..'z'+ ;
INT : '0'..'9'+;
WS : (' '|'\n') {$channel=HIDDEN;} ;
''')
found = self.execParser(grammar, "a", "a,b,c")
self.assertEquals("(VAR a) (VAR b) (VAR c)", found)
def testTokenUnreferencedOnLeftButDefined(self):
grammar = textwrap.dedent(
r'''
grammar T;
options {language=Python;output=AST;}
tokens {VAR;}
a : b -> ID ;
b : ID ;
ID : 'a'..'z'+ ;
INT : '0'..'9'+;
WS : (' '|'\n') {$channel=HIDDEN;} ;
''')
found = self.execParser(grammar, "a", "a")
self.assertEquals("ID", found)
def testImaginaryTokenCopySetText(self):
grammar = textwrap.dedent(
r'''
grammar T;
options {language=Python;output=AST;}
tokens {VAR;}
a : ID (',' ID)*-> ^(VAR["var"] ID)+ ;
type : 'int' | 'float' ;
ID : 'a'..'z'+ ;
INT : '0'..'9'+;
WS : (' '|'\n') {$channel=HIDDEN;} ;
''')
found = self.execParser(grammar, "a", "a,b,c")
self.assertEquals("(var a) (var b) (var c)", found)
def testImaginaryTokenNoCopyFromToken(self):
grammar = textwrap.dedent(
r'''
grammar T;
options {language=Python;output=AST;}
tokens {BLOCK;}
a : lc='{' ID+ '}' -> ^(BLOCK[$lc] ID+) ;
type : 'int' | 'float' ;
ID : 'a'..'z'+ ;
INT : '0'..'9'+;
WS : (' '|'\n') {$channel=HIDDEN;} ;
''')
found = self.execParser(grammar, "a", "{a b c}")
self.assertEquals("({ a b c)", found)
def testImaginaryTokenNoCopyFromTokenSetText(self):
grammar = textwrap.dedent(
r'''
grammar T;
options {language=Python;output=AST;}
tokens {BLOCK;}
a : lc='{' ID+ '}' -> ^(BLOCK[$lc,"block"] ID+) ;
type : 'int' | 'float' ;
ID : 'a'..'z'+ ;
INT : '0'..'9'+;
WS : (' '|'\n') {$channel=HIDDEN;} ;
''')
found = self.execParser(grammar, "a", "{a b c}")
self.assertEquals("(block a b c)", found)
def testMixedRewriteAndAutoAST(self):
grammar = textwrap.dedent(
r'''
grammar T;
options {language=Python;output=AST;}
tokens {BLOCK;}
a : b b^ ; // 2nd b matches only an INT; can make it root
b : ID INT -> INT ID
| INT
;
ID : 'a'..'z'+ ;
INT : '0'..'9'+;
WS : (' '|'\n') {$channel=HIDDEN;} ;
''')
found = self.execParser(grammar, "a", "a 1 2")
self.assertEquals("(2 1 a)", found)
def testSubruleWithRewrite(self):
grammar = textwrap.dedent(
r'''
grammar T;
options {language=Python;output=AST;}
tokens {BLOCK;}
a : b b ;
b : (ID INT -> INT ID | INT INT -> INT+ )
;
ID : 'a'..'z'+ ;
INT : '0'..'9'+;
WS : (' '|'\n') {$channel=HIDDEN;} ;
''')
found = self.execParser(grammar, "a", "a 1 2 3")
self.assertEquals("1 a 2 3", found)
def testSubruleWithRewrite2(self):
grammar = textwrap.dedent(
r'''
grammar T;
options {language=Python;output=AST;}
tokens {TYPE;}
a : b b ;
b : 'int'
( ID -> ^(TYPE 'int' ID)
| ID '=' INT -> ^(TYPE 'int' ID INT)
)
';'
;
ID : 'a'..'z'+ ;
INT : '0'..'9'+;
WS : (' '|'\n') {$channel=HIDDEN;} ;
''')
found = self.execParser(grammar, "a", "int a; int b=3;")
self.assertEquals("(TYPE int a) (TYPE int b 3)", found)
def testNestedRewriteShutsOffAutoAST(self):
grammar = textwrap.dedent(
r'''
grammar T;
options {language=Python;output=AST;}
tokens {BLOCK;}
a : b b ;
b : ID ( ID (last=ID -> $last)+ ) ';' // get last ID
| INT // should still get auto AST construction
;
ID : 'a'..'z'+ ;
INT : '0'..'9'+;
WS : (' '|'\n') {$channel=HIDDEN;} ;
''')
found = self.execParser(grammar, "a", "a b c d; 42")
self.assertEquals("d 42", found)
def testRewriteActions(self):
grammar = textwrap.dedent(
r'''
grammar T;
options {language=Python;output=AST;}
a : atom -> ^({self.adaptor.create(INT,"9")} atom) ;
atom : INT ;
ID : 'a'..'z'+ ;
INT : '0'..'9'+;
WS : (' '|'\n') {$channel=HIDDEN;} ;
''')
found = self.execParser(grammar, "a", "3")
self.assertEquals("(9 3)", found)
def testRewriteActions2(self):
grammar = textwrap.dedent(
r'''
grammar T;
options {language=Python;output=AST;}
a : atom -> {self.adaptor.create(INT,"9")} atom ;
atom : INT ;
ID : 'a'..'z'+ ;
INT : '0'..'9'+;
WS : (' '|'\n') {$channel=HIDDEN;} ;
''')
found = self.execParser(grammar, "a", "3")
self.assertEquals("9 3", found)
def testRefToOldValue(self):
grammar = textwrap.dedent(
r'''
grammar T;
options {language=Python;output=AST;}
tokens {BLOCK;}
a : (atom -> atom) (op='+' r=atom -> ^($op $a $r) )* ;
atom : INT ;
ID : 'a'..'z'+ ;
INT : '0'..'9'+;
WS : (' '|'\n') {$channel=HIDDEN;} ;
''')
found = self.execParser(grammar, "a", "3+4+5")
self.assertEquals("(+ (+ 3 4) 5)", found)
def testCopySemanticsForRules(self):
grammar = textwrap.dedent(
r'''
grammar T;
options {language=Python;output=AST;}
tokens {BLOCK;}
a : atom -> ^(atom atom) ; // NOT CYCLE! (dup atom)
atom : INT ;
ID : 'a'..'z'+ ;
INT : '0'..'9'+;
WS : (' '|'\n') {$channel=HIDDEN;} ;
''')
found = self.execParser(grammar, "a", "3")
self.assertEquals("(3 3)", found)
def testCopySemanticsForRules2(self):
# copy type as a root for each invocation of (...)+ in rewrite
grammar = textwrap.dedent(
r'''
grammar T;
options {language=Python;output=AST;}
a : type ID (',' ID)* ';' -> ^(type ID)+ ;
type : 'int' ;
ID : 'a'..'z'+ ;
WS : (' '|'\n') {$channel=HIDDEN;} ;
''')
found = self.execParser(grammar, "a", "int a,b,c;")
self.assertEquals("(int a) (int b) (int c)", found)
def testCopySemanticsForRules3(self):
# copy type *and* modifier even though it's optional
# for each invocation of (...)+ in rewrite
grammar = textwrap.dedent(
r'''
grammar T;
options {language=Python;output=AST;}
a : modifier? type ID (',' ID)* ';' -> ^(type modifier? ID)+ ;
type : 'int' ;
modifier : 'public' ;
ID : 'a'..'z'+ ;
WS : (' '|'\n') {$channel=HIDDEN;} ;
''')
found = self.execParser(grammar, "a", "public int a,b,c;")
self.assertEquals("(int public a) (int public b) (int public c)", found)
def testCopySemanticsForRules3Double(self):
# copy type *and* modifier even though it's optional
# for each invocation of (...)+ in rewrite
grammar = textwrap.dedent(
r'''
grammar T;
options {language=Python;output=AST;}
a : modifier? type ID (',' ID)* ';' -> ^(type modifier? ID)+ ^(type modifier? ID)+ ;
type : 'int' ;
modifier : 'public' ;
ID : 'a'..'z'+ ;
WS : (' '|'\n') {$channel=HIDDEN;} ;
''')
found = self.execParser(grammar, "a", "public int a,b,c;")
self.assertEquals("(int public a) (int public b) (int public c) (int public a) (int public b) (int public c)", found)
def testCopySemanticsForRules4(self):
# copy type *and* modifier even though it's optional
# for each invocation of (...)+ in rewrite
grammar = textwrap.dedent(
r'''
grammar T;
options {language=Python;output=AST;}
tokens {MOD;}
a : modifier? type ID (',' ID)* ';' -> ^(type ^(MOD modifier)? ID)+ ;
type : 'int' ;
modifier : 'public' ;
ID : 'a'..'z'+ ;
WS : (' '|'\n') {$channel=HIDDEN;} ;
''')
found = self.execParser(grammar, "a", "public int a,b,c;")
self.assertEquals("(int (MOD public) a) (int (MOD public) b) (int (MOD public) c)", found)
def testCopySemanticsLists(self):
grammar = textwrap.dedent(
r'''
grammar T;
options {language=Python;output=AST;}
tokens {MOD;}
a : ID (',' ID)* ';' -> ID+ ID+ ;
ID : 'a'..'z'+ ;
WS : (' '|'\n') {$channel=HIDDEN;} ;
''')
found = self.execParser(grammar, "a", "a,b,c;")
self.assertEquals("a b c a b c", found)
def testCopyRuleLabel(self):
grammar = textwrap.dedent(
r'''
grammar T;
options {language=Python;output=AST;}
tokens {BLOCK;}
a : x=b -> $x $x;
b : ID ;
ID : 'a'..'z'+ ;
WS : (' '|'\n') {$channel=HIDDEN;} ;
''')
found = self.execParser(grammar, "a", "a")
self.assertEquals("a a", found)
def testCopyRuleLabel2(self):
grammar = textwrap.dedent(
r'''
grammar T;
options {language=Python;output=AST;}
tokens {BLOCK;}
a : x=b -> ^($x $x);
b : ID ;
ID : 'a'..'z'+ ;
WS : (' '|'\n') {$channel=HIDDEN;} ;
''')
found = self.execParser(grammar, "a", "a")
self.assertEquals("(a a)", found)
def testQueueingOfTokens(self):
grammar = textwrap.dedent(
r'''
grammar T;
options {language=Python;output=AST;}
a : 'int' ID (',' ID)* ';' -> ^('int' ID+) ;
op : '+'|'-' ;
ID : 'a'..'z'+ ;
INT : '0'..'9'+;
WS : (' '|'\n') {$channel=HIDDEN;} ;
''')
found = self.execParser(grammar, "a", "int a,b,c;")
self.assertEquals("(int a b c)", found)
def testCopyOfTokens(self):
grammar = textwrap.dedent(
r'''
grammar T;
options {language=Python;output=AST;}
a : 'int' ID ';' -> 'int' ID 'int' ID ;
op : '+'|'-' ;
ID : 'a'..'z'+ ;
INT : '0'..'9'+;
WS : (' '|'\n') {$channel=HIDDEN;} ;
''')
found = self.execParser(grammar, "a", "int a;")
self.assertEquals("int a int a", found)
def testTokenCopyInLoop(self):
grammar = textwrap.dedent(
r'''
grammar T;
options {language=Python;output=AST;}
a : 'int' ID (',' ID)* ';' -> ^('int' ID)+ ;
op : '+'|'-' ;
ID : 'a'..'z'+ ;
INT : '0'..'9'+;
WS : (' '|'\n') {$channel=HIDDEN;} ;
''')
found = self.execParser(grammar, "a", "int a,b,c;")
self.assertEquals("(int a) (int b) (int c)", found)
def testTokenCopyInLoopAgainstTwoOthers(self):
# must smear 'int' copies across as root of multiple trees
grammar = textwrap.dedent(
r'''
grammar T;
options {language=Python;output=AST;}
a : 'int' ID ':' INT (',' ID ':' INT)* ';' -> ^('int' ID INT)+ ;
op : '+'|'-' ;
ID : 'a'..'z'+ ;
INT : '0'..'9'+;
WS : (' '|'\n') {$channel=HIDDEN;} ;
''')
found = self.execParser(grammar, "a", "int a:1,b:2,c:3;")
self.assertEquals("(int a 1) (int b 2) (int c 3)", found)
def testListRefdOneAtATime(self):
grammar = textwrap.dedent(
r'''
grammar T;
options {language=Python;output=AST;}
a : ID+ -> ID ID ID ; // works if 3 input IDs
op : '+'|'-' ;
ID : 'a'..'z'+ ;
INT : '0'..'9'+;
WS : (' '|'\n') {$channel=HIDDEN;} ;
''')
found = self.execParser(grammar, "a", "a b c")
self.assertEquals("a b c", found)
def testSplitListWithLabels(self):
grammar = textwrap.dedent(
r'''
grammar T;
options {language=Python;output=AST;}
tokens {VAR;}
a : first=ID others+=ID* -> $first VAR $others+ ;
op : '+'|'-' ;
ID : 'a'..'z'+ ;
INT : '0'..'9'+;
WS : (' '|'\n') {$channel=HIDDEN;} ;
''')
found = self.execParser(grammar, "a", "a b c")
self.assertEquals("a VAR b c", found)
def testComplicatedMelange(self):
grammar = textwrap.dedent(
r'''
grammar T;
options {language=Python;output=AST;}
tokens {BLOCK;}
a : A A b=B B b=B c+=C C c+=C D {s=$D.text} -> A+ B+ C+ D ;
type : 'int' | 'float' ;
A : 'a' ;
B : 'b' ;
C : 'c' ;
D : 'd' ;
WS : (' '|'\n') {$channel=HIDDEN;} ;
''')
found = self.execParser(grammar, "a", "a a b b b c c c d")
self.assertEquals("a a b b b c c c d", found)
def testRuleLabel(self):
grammar = textwrap.dedent(
r'''
grammar T;
options {language=Python;output=AST;}
tokens {BLOCK;}
a : x=b -> $x;
b : ID ;
ID : 'a'..'z'+ ;
WS : (' '|'\n') {$channel=HIDDEN;} ;
''')
found = self.execParser(grammar, "a", "a")
self.assertEquals("a", found)
def testAmbiguousRule(self):
grammar = textwrap.dedent(
r'''
grammar T;
options {language=Python;output=AST;}
a : ID a -> a | INT ;
ID : 'a'..'z'+ ;
INT: '0'..'9'+ ;
WS : (' '|'\n') {$channel=HIDDEN;} ;
''')
found = self.execParser(grammar,
"a", "abc 34")
self.assertEquals("34", found)
def testRuleListLabel(self):
grammar = textwrap.dedent(
r'''
grammar T;
options {language=Python;output=AST;}
tokens {BLOCK;}
a : x+=b x+=b -> $x+;
b : ID ;
ID : 'a'..'z'+ ;
WS : (' '|'\n') {$channel=HIDDEN;} ;
''')
found = self.execParser(grammar, "a", "a b")
self.assertEquals("a b", found)
def testRuleListLabel2(self):
grammar = textwrap.dedent(
r'''
grammar T;
options {language=Python;output=AST;}
tokens {BLOCK;}
a : x+=b x+=b -> $x $x*;
b : ID ;
ID : 'a'..'z'+ ;
WS : (' '|'\n') {$channel=HIDDEN;} ;
''')
found = self.execParser(grammar, "a", "a b")
self.assertEquals("a b", found)
def testOptional(self):
grammar = textwrap.dedent(
r'''
grammar T;
options {language=Python;output=AST;}
tokens {BLOCK;}
a : x=b (y=b)? -> $x $y?;
b : ID ;
ID : 'a'..'z'+ ;
WS : (' '|'\n') {$channel=HIDDEN;} ;
''')
found = self.execParser(grammar, "a", "a")
self.assertEquals("a", found)
def testOptional2(self):
grammar = textwrap.dedent(
r'''
grammar T;
options {language=Python;output=AST;}
tokens {BLOCK;}
a : x=ID (y=b)? -> $x $y?;
b : ID ;
ID : 'a'..'z'+ ;
WS : (' '|'\n') {$channel=HIDDEN;} ;
''')
found = self.execParser(grammar, "a", "a b")
self.assertEquals("a b", found)
def testOptional3(self):
grammar = textwrap.dedent(
r'''
grammar T;
options {language=Python;output=AST;}
tokens {BLOCK;}
a : x=ID (y=b)? -> ($x $y)?;
b : ID ;
ID : 'a'..'z'+ ;
WS : (' '|'\n') {$channel=HIDDEN;} ;
''')
found = self.execParser(grammar, "a", "a b")
self.assertEquals("a b", found)
def testOptional4(self):
grammar = textwrap.dedent(
r'''
grammar T;
options {language=Python;output=AST;}
tokens {BLOCK;}
a : x+=ID (y=b)? -> ($x $y)?;
b : ID ;
ID : 'a'..'z'+ ;
WS : (' '|'\n') {$channel=HIDDEN;} ;
''')
found = self.execParser(grammar, "a", "a b")
self.assertEquals("a b", found)
def testOptional5(self):
    # An optional rewrite element ('ID?') emits the token when it was
    # matched; here it was, so the output is the single ID.
    grammar = textwrap.dedent(
        r'''
        grammar T;
        options {language=Python;output=AST;}
        tokens {BLOCK;}
        a : ID -> ID? ; // match an ID to optional ID
        b : ID ;
        ID : 'a'..'z'+ ;
        WS : (' '|'\n') {$channel=HIDDEN;} ;
        ''')

    found = self.execParser(grammar, "a", "a")
    self.assertEquals("a", found)
def testArbitraryExprType(self):
grammar = textwrap.dedent(
r'''
grammar T;
options {language=Python;output=AST;}
tokens {BLOCK;}
a : x+=b x+=b -> {CommonTree(None)};
b : ID ;
ID : 'a'..'z'+ ;
WS : (' '|'\n') {$channel=HIDDEN;} ;
''')
found = self.execParser(grammar, "a", "a b")
self.assertEquals("", found)
def testSet(self):
grammar = textwrap.dedent(
r'''
grammar T;
options {language=Python;output=AST;}
a: (INT|ID)+ -> INT+ ID+ ;
INT: '0'..'9'+;
ID : 'a'..'z'+;
WS : (' '|'\n') {$channel=HIDDEN;} ;
''')
found = self.execParser(grammar, "a", "2 a 34 de")
self.assertEquals("2 34 a de", found)
def testSet2(self):
grammar = textwrap.dedent(
r'''
grammar T;
options {language=Python;output=AST;}
a: (INT|ID) -> INT? ID? ;
INT: '0'..'9'+;
ID : 'a'..'z'+;
WS : (' '|'\n') {$channel=HIDDEN;} ;
''')
found = self.execParser(grammar, "a", "2")
self.assertEquals("2", found)
@testbase.broken("http://www.antlr.org:8888/browse/ANTLR-162",
antlr3.tree.RewriteEmptyStreamException)
def testSetWithLabel(self):
grammar = textwrap.dedent(
r'''
grammar T;
options {language=Python;output=AST;}
a : x=(INT|ID) -> $x ;
INT: '0'..'9'+;
ID : 'a'..'z'+;
WS : (' '|'\n') {$channel=HIDDEN;} ;
''')
found = self.execParser(grammar, "a", "2")
self.assertEquals("2", found)
def testRewriteAction(self):
    # A rewrite action in {...} may build an arbitrary node: the matched
    # INT is replaced by an imaginary FLOAT token with ".0" appended.
    grammar = textwrap.dedent(
        r'''
        grammar T;
        options {language=Python;output=AST;}
        tokens { FLOAT; }
        r
            : INT -> {CommonTree(CommonToken(type=FLOAT, text=$INT.text+".0"))}
            ;
        INT : '0'..'9'+;
        WS: (' ' | '\n' | '\t')+ {$channel = HIDDEN;};
        ''')

    found = self.execParser(grammar, "r", "25")
    self.assertEquals("25.0", found)
def testOptionalSubruleWithoutRealElements(self):
# copy type *and* modifier even though it's optional
# for each invocation of (...)+ in rewrite
grammar = textwrap.dedent(
r"""
grammar T;
options {language=Python;output=AST;}
tokens {PARMS;}
modulo
: 'modulo' ID ('(' parms+ ')')? -> ^('modulo' ID ^(PARMS parms+)?)
;
parms : '#'|ID;
ID : ('a'..'z' | 'A'..'Z')+;
WS : (' '|'\n') {$channel=HIDDEN;} ;
""")
found = self.execParser(grammar, "modulo", "modulo abc (x y #)")
self.assertEquals("(modulo abc (PARMS x y #))", found)
## C A R D I N A L I T Y I S S U E S
def testCardinality(self):
    """A (ID INT)+ rewrite needs matching element counts.

    Input supplies 2 IDs but 3 INTs, so constructing the rewrite must
    fail with a RewriteCardinalityException.
    """
    grammar = textwrap.dedent(
        r'''
        grammar T;
        options {language=Python;output=AST;}
        tokens {BLOCK;}
        a : ID ID INT INT INT -> (ID INT)+;
        ID : 'a'..'z'+ ;
        INT : '0'..'9'+;
        WS : (' '|'\n') {$channel=HIDDEN;} ;
        ''')

    # assertRaises replaces the manual try/self.fail()/except idiom;
    # any unexpected exception type still propagates and fails the test.
    self.assertRaises(antlr3.tree.RewriteCardinalityException,
                      self.execParser, grammar, "a", "a b 3 4 5")
def testCardinality2(self):
    """Referencing ID three times with only 2 input IDs must fail."""
    grammar = textwrap.dedent(
        r'''
        grammar T;
        options {language=Python;output=AST;}
        a : ID+ -> ID ID ID ; // only 2 input IDs
        op : '+'|'-' ;
        ID : 'a'..'z'+ ;
        INT : '0'..'9'+;
        WS : (' '|'\n') {$channel=HIDDEN;} ;
        ''')

    # assertRaises replaces the manual try/self.fail()/except idiom.
    self.assertRaises(antlr3.tree.RewriteCardinalityException,
                      self.execParser, grammar, "a", "a b")
def testCardinality3(self):
    """A mandatory rewrite ref to an optional, unmatched ID must fail.

    ID? matched nothing, so the rewrite's bare ID reference reads from
    an empty stream -> RewriteEmptyStreamException.
    """
    grammar = textwrap.dedent(
        r'''
        grammar T;
        options {language=Python;output=AST;}
        a : ID? INT -> ID INT ;
        op : '+'|'-' ;
        ID : 'a'..'z'+ ;
        INT : '0'..'9'+;
        WS : (' '|'\n') {$channel=HIDDEN;} ;
        ''')

    # assertRaises replaces the manual try/self.fail()/except idiom.
    self.assertRaises(antlr3.tree.RewriteEmptyStreamException,
                      self.execParser, grammar, "a", "3")
def testLoopCardinality(self):
    """An ID+ rewrite loop over an empty ID stream must fail.

    ID? matched nothing, so the '+' loop in the rewrite cannot execute
    even once -> RewriteEarlyExitException.
    """
    grammar = textwrap.dedent(
        r'''
        grammar T;
        options {language=Python;output=AST;}
        a : ID? INT -> ID+ INT ;
        op : '+'|'-' ;
        ID : 'a'..'z'+ ;
        INT : '0'..'9'+;
        WS : (' '|'\n') {$channel=HIDDEN;} ;
        ''')

    # assertRaises replaces the manual try/self.fail()/except idiom.
    self.assertRaises(antlr3.tree.RewriteEarlyExitException,
                      self.execParser, grammar, "a", "3")
def testWildcard(self):
    # '.' matches any single token; the label $c lets the rewrite keep
    # only the wildcard-matched token (the INT here).
    grammar = textwrap.dedent(
        r'''
        grammar T;
        options {language=Python;output=AST;}
        a : ID c=. -> $c;
        ID : 'a'..'z'+ ;
        INT : '0'..'9'+;
        WS : (' '|'\n') {$channel=HIDDEN;} ;
        ''')

    found = self.execParser(grammar, "a", "abc 34")
    self.assertEquals("34", found)
# E R R O R S
def testExtraTokenInSimpleDecl(self):
grammar = textwrap.dedent(
r'''
grammar foo;
options {language=Python;output=AST;}
tokens {EXPR;}
decl : type ID '=' INT ';' -> ^(EXPR type ID INT) ;
type : 'int' | 'float' ;
ID : 'a'..'z'+ ;
INT : '0'..'9'+;
WS : (' '|'\n') {$channel=HIDDEN;} ;
''')
found, errors = self.execParser(grammar, "decl", "int 34 x=1;",
expectErrors=True)
self.assertEquals(["line 1:4 extraneous input u'34' expecting ID"],
errors)
self.assertEquals("(EXPR int x 1)", found) # tree gets correct x and 1 tokens
#@testbase.broken("FIXME", AssertionError)
def testMissingIDInSimpleDecl(self):
grammar = textwrap.dedent(
r'''
grammar foo;
options {language=Python;output=AST;}
tokens {EXPR;}
decl : type ID '=' INT ';' -> ^(EXPR type ID INT) ;
type : 'int' | 'float' ;
ID : 'a'..'z'+ ;
INT : '0'..'9'+;
WS : (' '|'\n') {$channel=HIDDEN;} ;
''')
found, errors = self.execParser(grammar, "decl", "int =1;",
expectErrors=True)
self.assertEquals(["line 1:4 missing ID at u'='"], errors)
self.assertEquals("(EXPR int <missing ID> 1)", found) # tree gets invented ID token
def testMissingSetInSimpleDecl(self):
    """A missing set member (the type keyword) yields an error node."""
    grammar = textwrap.dedent(
        r'''
        grammar foo;
        options {language=Python;output=AST;}
        tokens {EXPR;}
        decl : type ID '=' INT ';' -> ^(EXPR type ID INT) ;
        type : 'int' | 'float' ;
        ID : 'a'..'z'+ ;
        INT : '0'..'9'+;
        WS : (' '|'\n') {$channel=HIDDEN;} ;
        ''')

    found, errors = self.execParser(grammar, "decl", "x=1;",
                                    expectErrors=True)
    self.assertEqual(["line 1:0 mismatched input u'x' expecting set None"],
                     errors)
    # no in-line recovery for sets: the missing type token becomes an
    # error node in the tree rather than an invented token
    self.assertEqual("(EXPR <error: x> x 1)", found)
#@testbase.broken("FIXME", AssertionError)
def testMissingTokenGivesErrorNode(self):
grammar = textwrap.dedent(
r'''
grammar foo;
options {language=Python;output=AST;}
a : ID INT -> ID INT ;
ID : 'a'..'z'+ ;
INT : '0'..'9'+;
WS : (' '|'\n') {$channel=HIDDEN;} ;
''')
found, errors = self.execParser(grammar, "a", "abc",
expectErrors=True)
self.assertEquals(["line 0:-1 missing INT at '<EOF>'"], errors)
# doesn't do in-line recovery for sets (yet?)
self.assertEquals("abc <missing INT>", found)
def testExtraTokenGivesErrorNode(self):
grammar = textwrap.dedent(
r'''
grammar foo;
options {language=Python;output=AST;}
a : b c -> b c;
b : ID -> ID ;
c : INT -> INT ;
ID : 'a'..'z'+ ;
INT : '0'..'9'+;
WS : (' '|'\n') {$channel=HIDDEN;} ;
''')
found, errors = self.execParser(grammar, "a", "abc ick 34",
expectErrors=True)
self.assertEquals(["line 1:4 extraneous input u'ick' expecting INT"],
errors)
self.assertEquals("abc 34", found)
#@testbase.broken("FIXME", AssertionError)
def testMissingFirstTokenGivesErrorNode(self):
grammar = textwrap.dedent(
r'''
grammar foo;
options {language=Python;output=AST;}
a : ID INT -> ID INT ;
ID : 'a'..'z'+ ;
INT : '0'..'9'+;
WS : (' '|'\n') {$channel=HIDDEN;} ;
''')
found, errors = self.execParser(grammar, "a", "34", expectErrors=True)
self.assertEquals(["line 1:0 missing ID at u'34'"], errors)
self.assertEquals("<missing ID> 34", found)
#@testbase.broken("FIXME", AssertionError)
def testMissingFirstTokenGivesErrorNode2(self):
grammar = textwrap.dedent(
r'''
grammar foo;
options {language=Python;output=AST;}
a : b c -> b c;
b : ID -> ID ;
c : INT -> INT ;
ID : 'a'..'z'+ ;
INT : '0'..'9'+;
WS : (' '|'\n') {$channel=HIDDEN;} ;
''')
found, errors = self.execParser(grammar, "a", "34", expectErrors=True)
# finds an error at the first token, 34, and re-syncs.
# re-synchronizing does not consume a token because 34 follows
# ref to rule b (start of c). It then matches 34 in c.
self.assertEquals(["line 1:0 missing ID at u'34'"], errors)
self.assertEquals("<missing ID> 34", found)
def testNoViableAltGivesErrorNode(self):
    """Input matching no alternative yields an error node in the AST."""
    grammar = textwrap.dedent(
        r'''
        grammar foo;
        options {language=Python;output=AST;}
        a : b -> b | c -> c;
        b : ID -> ID ;
        c : INT -> INT ;
        ID : 'a'..'z'+ ;
        S : '*' ;
        INT : '0'..'9'+;
        WS : (' '|'\n') {$channel=HIDDEN;} ;
        ''')

    found, errors = self.execParser(grammar, "a", "*", expectErrors=True)
    # '*' (token S) starts neither alternative, so the parser reports a
    # no-viable-alternative error and wraps the offending token in an
    # error node while resynchronizing.
    self.assertEqual(["line 1:0 no viable alternative at input u'*'"],
                     errors)
    self.assertEqual("<unexpected: [@0,0:0=u'*',<6>,1:0], resync=*>",
                     found)
if __name__ == '__main__':
unittest.main()
| Python |
import antlr3
import testbase
import unittest
class t040bug80(testbase.ANTLRTest):
    """Regression test for ANTLR bug #80: lex 'defined' through to EOF."""

    def setUp(self):
        self.compileGrammar()

    def lexerClass(self, base):
        # Specialize the generated lexer so any recognition error crashes
        # the test immediately instead of being silently recovered.
        class TLexer(base):
            def recover(self, input, re):
                # no error recovery yet, just crash!
                raise

        return TLexer

    def testValid1(self):
        cStream = antlr3.StringStream('defined')
        lexer = self.getLexer(cStream)

        # Drain the token stream; lexing must reach EOF without raising.
        while True:
            t = lexer.nextToken()
            if t.type == antlr3.EOF:
                break
            print t
if __name__ == '__main__':
unittest.main()
| Python |
"""Testsuite for TokenRewriteStream class."""
# don't care about docstrings
# pylint: disable-msg=C0111
import unittest
import antlr3
import testbase
class T1(testbase.ANTLRTest):
def setUp(self):
self.compileGrammar()
def _parse(self, input):
cStream = antlr3.StringStream(input)
lexer = self.getLexer(cStream)
tStream = antlr3.TokenRewriteStream(lexer)
tStream.LT(1) # fill buffer
return tStream
def testInsertBeforeIndex0(self):
tokens = self._parse("abc")
tokens.insertBefore(0, "0")
result = tokens.toString()
expecting = "0abc"
self.failUnlessEqual(result, expecting)
def testInsertAfterLastIndex(self):
tokens = self._parse("abc")
tokens.insertAfter(2, "x")
result = tokens.toString()
expecting = "abcx"
self.failUnlessEqual(result, expecting)
def test2InsertBeforeAfterMiddleIndex(self):
tokens = self._parse("abc")
tokens.insertBefore(1, "x")
tokens.insertAfter(1, "x")
result = tokens.toString()
expecting = "axbxc"
self.failUnlessEqual(result, expecting)
def testReplaceIndex0(self):
tokens = self._parse("abc")
tokens.replace(0, "x")
result = tokens.toString()
expecting = "xbc"
self.failUnlessEqual(result, expecting)
def testReplaceLastIndex(self):
tokens = self._parse("abc")
tokens.replace(2, "x")
result = tokens.toString()
expecting = "abx"
self.failUnlessEqual(result, expecting)
def testReplaceMiddleIndex(self):
tokens = self._parse("abc")
tokens.replace(1, "x")
result = tokens.toString()
expecting = "axc"
self.failUnlessEqual(result, expecting)
def test2ReplaceMiddleIndex(self):
tokens = self._parse("abc")
tokens.replace(1, "x")
tokens.replace(1, "y")
result = tokens.toString()
expecting = "ayc"
self.failUnlessEqual(result, expecting)
def test2ReplaceMiddleIndex1InsertBefore(self):
tokens = self._parse("abc")
tokens.insertBefore(0, "_")
tokens.replace(1, "x")
tokens.replace(1, "y")
result = tokens.toString()
expecting = "_ayc"
self.failUnlessEqual(expecting, result)
def testReplaceThenDeleteMiddleIndex(self):
tokens = self._parse("abc")
tokens.replace(1, "x")
tokens.delete(1)
result = tokens.toString()
expecting = "ac"
self.failUnlessEqual(result, expecting)
def testInsertInPriorReplace(self):
tokens = self._parse("abc")
tokens.replace(0, 2, "x")
tokens.insertBefore(1, "0")
try:
tokens.toString()
self.fail()
except ValueError, exc:
self.failUnlessEqual(
str(exc),
"insert op <InsertBeforeOp@1:\"0\"> within boundaries of "
"previous <ReplaceOp@0..2:\"x\">"
)
def testInsertThenReplaceSameIndex(self):
tokens = self._parse("abc")
tokens.insertBefore(0, "0")
tokens.replace(0, "x") # supercedes insert at 0
result = tokens.toString()
expecting = "xbc"
self.failUnlessEqual(result, expecting)
def test2InsertMiddleIndex(self):
tokens = self._parse("abc")
tokens.insertBefore(1, "x")
tokens.insertBefore(1, "y")
result = tokens.toString()
expecting = "ayxbc"
self.failUnlessEqual(result, expecting)
def test2InsertThenReplaceIndex0(self):
tokens = self._parse("abc")
tokens.insertBefore(0, "x")
tokens.insertBefore(0, "y")
tokens.replace(0, "z")
result = tokens.toString()
expecting = "zbc"
self.failUnlessEqual(result, expecting)
def testReplaceThenInsertBeforeLastIndex(self):
tokens = self._parse("abc")
tokens.replace(2, "x")
tokens.insertBefore(2, "y")
result = tokens.toString()
expecting = "abyx"
self.failUnlessEqual(result, expecting)
def testInsertThenReplaceLastIndex(self):
tokens = self._parse("abc")
tokens.insertBefore(2, "y")
tokens.replace(2, "x")
result = tokens.toString()
expecting = "abx"
self.failUnlessEqual(result, expecting)
def testReplaceThenInsertAfterLastIndex(self):
tokens = self._parse("abc")
tokens.replace(2, "x")
tokens.insertAfter(2, "y")
result = tokens.toString()
expecting = "abxy"
self.failUnlessEqual(result, expecting)
def testReplaceRangeThenInsertAtLeftEdge(self):
tokens = self._parse("abcccba")
tokens.replace(2, 4, "x")
tokens.insertBefore(2, "y")
result = tokens.toString()
expecting = "abyxba"
self.failUnlessEqual(result, expecting)
def testReplaceRangeThenInsertAtRightEdge(self):
tokens = self._parse("abcccba")
tokens.replace(2, 4, "x")
tokens.insertBefore(4, "y") # no effect; within range of a replace
try:
tokens.toString()
self.fail()
except ValueError, exc:
self.failUnlessEqual(
str(exc),
"insert op <InsertBeforeOp@4:\"y\"> within boundaries of "
"previous <ReplaceOp@2..4:\"x\">")
def testReplaceRangeThenInsertAfterRightEdge(self):
tokens = self._parse("abcccba")
tokens.replace(2, 4, "x")
tokens.insertAfter(4, "y")
result = tokens.toString()
expecting = "abxyba"
self.failUnlessEqual(result, expecting)
def testReplaceAll(self):
tokens = self._parse("abcccba")
tokens.replace(0, 6, "x")
result = tokens.toString()
expecting = "x"
self.failUnlessEqual(result, expecting)
def testReplaceSubsetThenFetch(self):
tokens = self._parse("abcccba")
tokens.replace(2, 4, "xyz")
result = tokens.toString(0, 6)
expecting = "abxyzba"
self.failUnlessEqual(result, expecting)
def testReplaceThenReplaceSuperset(self):
tokens = self._parse("abcccba")
tokens.replace(2, 4, "xyz")
tokens.replace(3, 5, "foo") # overlaps, error
try:
tokens.toString()
self.fail()
except ValueError, exc:
self.failUnlessEqual(
str(exc),
"replace op boundaries of <ReplaceOp@3..5:\"foo\"> overlap "
"with previous <ReplaceOp@2..4:\"xyz\">")
def testReplaceThenReplaceLowerIndexedSuperset(self):
tokens = self._parse("abcccba")
tokens.replace(2, 4, "xyz")
tokens.replace(1, 3, "foo") # overlap, error
try:
tokens.toString()
self.fail()
except ValueError, exc:
self.failUnlessEqual(
str(exc),
"replace op boundaries of <ReplaceOp@1..3:\"foo\"> overlap "
"with previous <ReplaceOp@2..4:\"xyz\">")
def testReplaceSingleMiddleThenOverlappingSuperset(self):
tokens = self._parse("abcba")
tokens.replace(2, 2, "xyz")
tokens.replace(0, 3, "foo")
result = tokens.toString()
expecting = "fooa"
self.failUnlessEqual(result, expecting)
def testCombineInserts(self):
tokens = self._parse("abc")
tokens.insertBefore(0, "x")
tokens.insertBefore(0, "y")
result = tokens.toString()
expecting = "yxabc"
self.failUnlessEqual(expecting, result)
def testCombine3Inserts(self):
tokens = self._parse("abc")
tokens.insertBefore(1, "x")
tokens.insertBefore(0, "y")
tokens.insertBefore(1, "z")
result = tokens.toString()
expecting = "yazxbc"
self.failUnlessEqual(expecting, result)
def testCombineInsertOnLeftWithReplace(self):
tokens = self._parse("abc")
tokens.replace(0, 2, "foo")
tokens.insertBefore(0, "z") # combine with left edge of rewrite
result = tokens.toString()
expecting = "zfoo"
self.failUnlessEqual(expecting, result)
def testCombineInsertOnLeftWithDelete(self):
tokens = self._parse("abc")
tokens.delete(0, 2)
tokens.insertBefore(0, "z") # combine with left edge of rewrite
result = tokens.toString()
expecting = "z" # make sure combo is not znull
self.failUnlessEqual(expecting, result)
def testDisjointInserts(self):
tokens = self._parse("abc")
tokens.insertBefore(1, "x")
tokens.insertBefore(2, "y")
tokens.insertBefore(0, "z")
result = tokens.toString()
expecting = "zaxbyc"
self.failUnlessEqual(expecting, result)
def testOverlappingReplace(self):
tokens = self._parse("abcc")
tokens.replace(1, 2, "foo")
tokens.replace(0, 3, "bar") # wipes prior nested replace
result = tokens.toString()
expecting = "bar"
self.failUnlessEqual(expecting, result)
def testOverlappingReplace2(self):
tokens = self._parse("abcc")
tokens.replace(0, 3, "bar")
tokens.replace(1, 2, "foo") # cannot split earlier replace
try:
tokens.toString()
self.fail()
except ValueError, exc:
self.failUnlessEqual(
str(exc),
"replace op boundaries of <ReplaceOp@1..2:\"foo\"> overlap "
"with previous <ReplaceOp@0..3:\"bar\">")
def testOverlappingReplace3(self):
tokens = self._parse("abcc")
tokens.replace(1, 2, "foo")
tokens.replace(0, 2, "bar") # wipes prior nested replace
result = tokens.toString()
expecting = "barc"
self.failUnlessEqual(expecting, result)
def testOverlappingReplace4(self):
tokens = self._parse("abcc")
tokens.replace(1, 2, "foo")
tokens.replace(1, 3, "bar") # wipes prior nested replace
result = tokens.toString()
expecting = "abar"
self.failUnlessEqual(expecting, result)
def testDropIdenticalReplace(self):
tokens = self._parse("abcc")
tokens.replace(1, 2, "foo")
tokens.replace(1, 2, "foo") # drop previous, identical
result = tokens.toString()
expecting = "afooc"
self.failUnlessEqual(expecting, result)
def testDropPrevCoveredInsert(self):
tokens = self._parse("abcc")
tokens.insertBefore(1, "foo")
tokens.replace(1, 2, "foo") # kill prev insert
result = tokens.toString()
expecting = "afooc"
self.failUnlessEqual(expecting, result)
def testLeaveAloneDisjointInsert(self):
tokens = self._parse("abcc")
tokens.insertBefore(1, "x")
tokens.replace(2, 3, "foo")
result = tokens.toString()
expecting = "axbfoo"
self.failUnlessEqual(expecting, result)
def testLeaveAloneDisjointInsert2(self):
tokens = self._parse("abcc")
tokens.replace(2, 3, "foo")
tokens.insertBefore(1, "x")
result = tokens.toString()
expecting = "axbfoo"
self.failUnlessEqual(expecting, result)
class T2(testbase.ANTLRTest):
    """Tests for TokenRewriteStream.toString() with start/stop indexes."""

    def setUp(self):
        self.compileGrammar('t048rewrite2.g')

    def _parse(self, input):
        # Lex the input into a TokenRewriteStream and force it to buffer
        # all tokens so rewrite operations can be applied afterwards.
        cStream = antlr3.StringStream(input)
        lexer = self.getLexer(cStream)
        tStream = antlr3.TokenRewriteStream(lexer)
        tStream.LT(1) # fill buffer
        return tStream

    def testToStringStartStop(self):
        # Tokens: 0123456789
        # Input:  x = 3 * 0
        tokens = self._parse("x = 3 * 0;")
        tokens.replace(4, 8, "0") # replace 3 * 0 with 0

        # toOriginalString ignores rewrites entirely
        result = tokens.toOriginalString()
        expecting = "x = 3 * 0;"
        self.failUnlessEqual(expecting, result)

        result = tokens.toString()
        expecting = "x = 0;"
        self.failUnlessEqual(expecting, result)

        result = tokens.toString(0, 9)
        expecting = "x = 0;"
        self.failUnlessEqual(expecting, result)

        # a range equal to the replaced span renders just the replacement
        result = tokens.toString(4, 8)
        expecting = "0"
        self.failUnlessEqual(expecting, result)

    def testToStringStartStop2(self):
        # Tokens: 012345678901234567
        # Input:  x = 3 * 0 + 2 * 0
        tokens = self._parse("x = 3 * 0 + 2 * 0;")

        result = tokens.toOriginalString()
        expecting = "x = 3 * 0 + 2 * 0;"
        self.failUnlessEqual(expecting, result)

        tokens.replace(4, 8, "0") # replace 3 * 0 with 0
        result = tokens.toString()
        expecting = "x = 0 + 2 * 0;"
        self.failUnlessEqual(expecting, result)

        result = tokens.toString(0, 17)
        expecting = "x = 0 + 2 * 0;"
        self.failUnlessEqual(expecting, result)

        result = tokens.toString(4, 8)
        expecting = "0"
        self.failUnlessEqual(expecting, result)

        result = tokens.toString(0, 8)
        expecting = "x = 0"
        self.failUnlessEqual(expecting, result)

        result = tokens.toString(12, 16)
        expecting = "2 * 0"
        self.failUnlessEqual(expecting, result)

        tokens.insertAfter(17, "// comment")
        result = tokens.toString(12, 17)
        expecting = "2 * 0;// comment"
        self.failUnlessEqual(expecting, result)

        result = tokens.toString(0, 8) # try again after insert at end
        expecting = "x = 0"
        self.failUnlessEqual(expecting, result)
# Run the suite when this file is executed directly.
if __name__ == '__main__':
    unittest.main()
| Python |
import unittest
import textwrap
import antlr3
import antlr3.tree
import testbase
import sys
class T(testbase.ANTLRTest):
    """Heterogeneous-AST tests: nodes built via the <NodeType> token option."""

    def parserClass(self, base):
        """Wrap the generated parser with capture/trace hooks for the tests."""
        class TParser(base):
            def __init__(self, *args, **kwargs):
                base.__init__(self, *args, **kwargs)
                self._output = ""

            def capture(self, t):
                self._output += t

            def traceIn(self, ruleName, ruleIndex):
                self.traces.append('>'+ruleName)

            def traceOut(self, ruleName, ruleIndex):
                self.traces.append('<'+ruleName)

            def recover(self, input, re):
                # no error recovery yet, just crash!
                raise

        return TParser

    def lexerClass(self, base):
        """Wrap the generated lexer with capture/trace hooks for the tests."""
        class TLexer(base):
            def __init__(self, *args, **kwargs):
                base.__init__(self, *args, **kwargs)
                self._output = ""

            def capture(self, t):
                self._output += t

            def traceIn(self, ruleName, ruleIndex):
                self.traces.append('>'+ruleName)

            def traceOut(self, ruleName, ruleIndex):
                self.traces.append('<'+ruleName)

            def recover(self, input, re):
                # no error recovery yet, just crash!
                raise

        return TLexer

    def execParser(self, grammar, grammarEntry, input):
        """Compile *grammar*, parse *input* via rule *grammarEntry*, return AST text."""
        lexerCls, parserCls = self.compileInlineGrammar(grammar)

        cStream = antlr3.StringStream(input)
        lexer = lexerCls(cStream)
        tStream = antlr3.CommonTokenStream(lexer)
        parser = parserCls(tStream)
        r = getattr(parser, grammarEntry)()

        if r is not None:
            return r.tree.toStringTree()

        return ""

    def execTreeParser(self, grammar, grammarEntry, treeGrammar, treeEntry, input):
        """Parse *input*, then walk the resulting tree; return the walker's AST text."""
        lexerCls, parserCls = self.compileInlineGrammar(grammar)
        walkerCls = self.compileInlineGrammar(treeGrammar)

        cStream = antlr3.StringStream(input)
        lexer = lexerCls(cStream)
        tStream = antlr3.CommonTokenStream(lexer)
        parser = parserCls(tStream)
        r = getattr(parser, grammarEntry)()
        nodes = antlr3.tree.CommonTreeNodeStream(r.tree)
        nodes.setTokenStream(tStream)
        walker = walkerCls(nodes)
        r = getattr(walker, treeEntry)()

        if r is not None:
            return r.tree.toStringTree()

        return ""
    # PARSERS -- AUTO AST

    def testToken(self):
        """ID<V> builds a V node in auto-AST mode."""
        grammar = textwrap.dedent(
            r'''
            grammar T1;
            options {
                language=Python;
                output=AST;
            }
            @header {
            class V(CommonTree):
                def toString(self):
                    return self.token.text + "<V>"
                __str__ = toString
            }
            a : ID<V> ;
            ID : 'a'..'z'+ ;
            WS : (' '|'\n') {$channel=HIDDEN;} ;
            ''')

        found = self.execParser(
            grammar, 'a',
            input="a")
        self.failUnlessEqual("a<V>", found)
    def testTokenWithQualifiedType(self):
        """The node type may be a qualified name (TParser.V) from @members."""
        grammar = textwrap.dedent(
            r'''
            grammar T;
            options {
                language=Python;
                output=AST;
            }
            @members {
            class V(CommonTree):
                def toString(self):
                    return self.token.text + "<V>"
                __str__ = toString
            }
            a : ID<TParser.V> ; // TParser.V is qualified name
            ID : 'a'..'z'+ ;
            WS : (' '|'\n') {$channel=HIDDEN;} ;
            ''')

        found = self.execParser(
            grammar, 'a',
            input="a")
        self.failUnlessEqual("a<V>", found)
    def testTokenWithLabel(self):
        """A labeled token (x=ID<V>) still builds a V node."""
        grammar = textwrap.dedent(
            r'''
            grammar T2;
            options {
                language=Python;
                output=AST;
            }
            @header {
            class V(CommonTree):
                def toString(self):
                    return self.token.text + "<V>"
                __str__ = toString
            }
            a : x=ID<V> ;
            ID : 'a'..'z'+ ;
            WS : (' '|'\n') {$channel=HIDDEN;} ;
            ''')

        found = self.execParser(
            grammar, 'a',
            input="a")
        self.failUnlessEqual("a<V>", found)
    def testTokenWithListLabel(self):
        """A list-labeled token (x+=ID<V>) still builds a V node."""
        grammar = textwrap.dedent(
            r'''
            grammar T3;
            options {
                language=Python;
                output=AST;
            }
            @header {
            class V(CommonTree):
                def toString(self):
                    return self.token.text + "<V>"
                __str__ = toString
            }
            a : x+=ID<V> ;
            ID : 'a'..'z'+ ;
            WS : (' '|'\n') {$channel=HIDDEN;} ;
            ''')

        found = self.execParser(
            grammar, 'a',
            input="a")
        self.failUnlessEqual("a<V>", found)
    def testTokenRoot(self):
        """A root-marked token (ID<V>^) builds a V node."""
        grammar = textwrap.dedent(
            r'''
            grammar T4;
            options {
                language=Python;
                output=AST;
            }
            @header {
            class V(CommonTree):
                def toString(self):
                    return self.token.text + "<V>"
                __str__ = toString
            }
            a : ID<V>^ ;
            ID : 'a'..'z'+ ;
            WS : (' '|'\n') {$channel=HIDDEN;} ;
            ''')

        found = self.execParser(
            grammar, 'a',
            input="a")
        self.failUnlessEqual("a<V>", found)
    def testTokenRootWithListLabel(self):
        """A list-labeled root token (x+=ID<V>^) builds a V node."""
        grammar = textwrap.dedent(
            r'''
            grammar T5;
            options {
                language=Python;
                output=AST;
            }
            @header {
            class V(CommonTree):
                def toString(self):
                    return self.token.text + "<V>"
                __str__ = toString
            }
            a : x+=ID<V>^ ;
            ID : 'a'..'z'+ ;
            WS : (' '|'\n') {$channel=HIDDEN;} ;
            ''')

        found = self.execParser(
            grammar, 'a',
            input="a")
        self.failUnlessEqual("a<V>", found)
    def testString(self):
        """A string literal ('begin'<V>) builds a V node."""
        grammar = textwrap.dedent(
            r'''
            grammar T6;
            options {
                language=Python;
                output=AST;
            }
            @header {
            class V(CommonTree):
                def toString(self):
                    return self.token.text + "<V>"
                __str__ = toString
            }
            a : 'begin'<V> ;
            ID : 'a'..'z'+ ;
            WS : (' '|'\n') {$channel=HIDDEN;} ;
            ''')

        found = self.execParser(
            grammar, 'a',
            input="begin")
        self.failUnlessEqual("begin<V>", found)
    def testStringRoot(self):
        """A root-marked string literal ('begin'<V>^) builds a V node."""
        grammar = textwrap.dedent(
            r'''
            grammar T7;
            options {
                language=Python;
                output=AST;
            }
            @header {
            class V(CommonTree):
                def toString(self):
                    return self.token.text + "<V>"
                __str__ = toString
            }
            a : 'begin'<V>^ ;
            ID : 'a'..'z'+ ;
            WS : (' '|'\n') {$channel=HIDDEN;} ;
            ''')

        found = self.execParser(
            grammar, 'a',
            input="begin")
        self.failUnlessEqual("begin<V>", found)
    # PARSERS -- REWRITE AST

    def testRewriteToken(self):
        """A rewrite rule (-> ID<V>) builds a V node."""
        grammar = textwrap.dedent(
            r'''
            grammar T8;
            options {
                language=Python;
                output=AST;
            }
            @header {
            class V(CommonTree):
                def toString(self):
                    return self.token.text + "<V>"
                __str__ = toString
            }
            a : ID -> ID<V> ;
            ID : 'a'..'z'+ ;
            WS : (' '|'\n') {$channel=HIDDEN;} ;
            ''')

        found = self.execParser(
            grammar, 'a',
            input="a")
        self.failUnlessEqual("a<V>", found)
    def testRewriteTokenWithArgs(self):
        """Rewrite node constructors may receive extra arguments (ID<V>[...])."""
        # NOTE: \% inside the grammar action escapes '%' for the ANTLR
        # StringTemplate layer; the generated code uses plain '%'.
        grammar = textwrap.dedent(
            r'''
            grammar T9;
            options {
                language=Python;
                output=AST;
            }
            @header {
            class V(CommonTree):
                def __init__(self, *args):
                    if len(args) == 4:
                        ttype = args[0]
                        x = args[1]
                        y = args[2]
                        z = args[3]
                        token = CommonToken(type=ttype, text="")

                    elif len(args) == 3:
                        ttype = args[0]
                        token = args[1]
                        x = args[2]
                        y, z = 0, 0

                    else:
                        raise TypeError("Invalid args \%r" \% (args,))

                    CommonTree.__init__(self, token)
                    self.x = x
                    self.y = y
                    self.z = z

                def toString(self):
                    txt = ""
                    if self.token is not None:
                        txt += self.token.text
                    txt +="<V>;\%d\%d\%d" \% (self.x, self.y, self.z)
                    return txt
                __str__ = toString

            }
            a : ID -> ID<V>[42,19,30] ID<V>[$ID,99];
            ID : 'a'..'z'+ ;
            WS : (' '|'\n') {$channel=HIDDEN;} ;
            ''')

        found = self.execParser(
            grammar, 'a',
            input="a")
        self.failUnlessEqual("<V>;421930 a<V>;9900", found)
    def testRewriteTokenRoot(self):
        """A rewrite tree root (-> ^(ID<V> INT)) builds a V root node."""
        grammar = textwrap.dedent(
            r'''
            grammar T10;
            options {
                language=Python;
                output=AST;
            }
            @header {
            class V(CommonTree):
                def toString(self):
                    return self.token.text + "<V>"
                __str__ = toString
            }
            a : ID INT -> ^(ID<V> INT) ;
            ID : 'a'..'z'+ ;
            INT : '0'..'9'+ ;
            WS : (' '|'\n') {$channel=HIDDEN;} ;
            ''')

        found = self.execParser(
            grammar, 'a',
            input="a 2")
        self.failUnlessEqual("(a<V> 2)", found)
    def testRewriteString(self):
        """A rewritten string literal (-> 'begin'<V>) builds a V node."""
        grammar = textwrap.dedent(
            r'''
            grammar T11;
            options {
                language=Python;
                output=AST;
            }
            @header {
            class V(CommonTree):
                def toString(self):
                    return self.token.text + "<V>"
                __str__ = toString
            }
            a : 'begin' -> 'begin'<V> ;
            ID : 'a'..'z'+ ;
            WS : (' '|'\n') {$channel=HIDDEN;} ;
            ''')

        found = self.execParser(
            grammar, 'a',
            input="begin")
        self.failUnlessEqual("begin<V>", found)
    def testRewriteStringRoot(self):
        """A rewritten string root (-> ^('begin'<V> INT)) builds a V root."""
        grammar = textwrap.dedent(
            r'''
            grammar T12;
            options {
                language=Python;
                output=AST;
            }
            @header {
            class V(CommonTree):
                def toString(self):
                    return self.token.text + "<V>"
                __str__ = toString
            }
            a : 'begin' INT -> ^('begin'<V> INT) ;
            ID : 'a'..'z'+ ;
            INT : '0'..'9'+ ;
            WS : (' '|'\n') {$channel=HIDDEN;} ;
            ''')

        found = self.execParser(
            grammar, 'a',
            input="begin 2")
        self.failUnlessEqual("(begin<V> 2)", found)
    # TREE PARSERS -- REWRITE AST

    def testTreeParserRewriteFlatList(self):
        """Tree-parser rewrite to a flat list with hetero node types V and W."""
        grammar = textwrap.dedent(
            r'''
            grammar T13;
            options {
                language=Python;
                output=AST;
            }
            a : ID INT;
            ID : 'a'..'z'+ ;
            INT : '0'..'9'+;
            WS : (' '|'\n') {$channel=HIDDEN;} ;
            ''')

        treeGrammar = textwrap.dedent(
            r'''
            tree grammar TP13;
            options {
                language=Python;
                output=AST;
                ASTLabelType=CommonTree;
                tokenVocab=T13;
            }
            @header {
            class V(CommonTree):
                def toString(self):
                    return self.token.text + "<V>"
                __str__ = toString

            class W(CommonTree):
                def toString(self):
                    return self.token.text + "<W>"
                __str__ = toString
            }
            a : ID INT -> INT<V> ID<W>
              ;
            ''')

        found = self.execTreeParser(
            grammar, 'a',
            treeGrammar, 'a',
            input="abc 34")
        self.failUnlessEqual("34<V> abc<W>", found)
    def testTreeParserRewriteTree(self):
        """Tree-parser rewrite to a tree with hetero node types V and W."""
        grammar = textwrap.dedent(
            r'''
            grammar T14;
            options {
                language=Python;
                output=AST;
            }
            a : ID INT;
            ID : 'a'..'z'+ ;
            INT : '0'..'9'+;
            WS : (' '|'\n') {$channel=HIDDEN;} ;
            ''')

        treeGrammar = textwrap.dedent(
            r'''
            tree grammar TP14;
            options {
                language=Python;
                output=AST;
                ASTLabelType=CommonTree;
                tokenVocab=T14;
            }
            @header {
            class V(CommonTree):
                def toString(self):
                    return self.token.text + "<V>"
                __str__ = toString

            class W(CommonTree):
                def toString(self):
                    return self.token.text + "<W>"
                __str__ = toString
            }
            a : ID INT -> ^(INT<V> ID<W>)
              ;
            ''')

        found = self.execTreeParser(
            grammar, 'a',
            treeGrammar, 'a',
            input="abc 34")
        self.failUnlessEqual("(34<V> abc<W>)", found)
    def testTreeParserRewriteImaginary(self):
        """Rewrite creating an imaginary token (ROOT<V>) as a hetero node."""
        grammar = textwrap.dedent(
            r'''
            grammar T15;
            options {
                language=Python;
                output=AST;
            }
            a : ID ;
            ID : 'a'..'z'+ ;
            INT : '0'..'9'+;
            WS : (' '|'\n') {$channel=HIDDEN;} ;
            ''')

        treeGrammar = textwrap.dedent(
            r'''
            tree grammar TP15;
            options {
                language=Python;
                output=AST;
                ASTLabelType=CommonTree;
                tokenVocab=T15;
            }
            tokens { ROOT; }
            @header {
            class V(CommonTree):
                def __init__(self, tokenType):
                    CommonTree.__init__(self, CommonToken(tokenType))

                def toString(self):
                    return tokenNames[self.token.type] + "<V>"
                __str__ = toString

            }
            a : ID -> ROOT<V> ID
              ;
            ''')

        found = self.execTreeParser(
            grammar, 'a',
            treeGrammar, 'a',
            input="abc")
        self.failUnlessEqual("ROOT<V> abc", found)
    def testTreeParserRewriteImaginaryWithArgs(self):
        """Imaginary hetero node with a constructor argument (ROOT<V>[42])."""
        grammar = textwrap.dedent(
            r'''
            grammar T16;
            options {
                language=Python;
                output=AST;
            }
            a : ID ;
            ID : 'a'..'z'+ ;
            INT : '0'..'9'+;
            WS : (' '|'\n') {$channel=HIDDEN;} ;
            ''')

        treeGrammar = textwrap.dedent(
            r'''
            tree grammar TP16;
            options {
                language=Python;
                output=AST;
                ASTLabelType=CommonTree;
                tokenVocab=T16;
            }
            tokens { ROOT; }
            @header {
            class V(CommonTree):
                def __init__(self, tokenType, x):
                    CommonTree.__init__(self, CommonToken(tokenType))
                    self.x = x

                def toString(self):
                    return tokenNames[self.token.type] + "<V>;" + str(self.x)
                __str__ = toString

            }
            a : ID -> ROOT<V>[42] ID
              ;
            ''')

        found = self.execTreeParser(
            grammar, 'a',
            treeGrammar, 'a',
            input="abc")
        self.failUnlessEqual("ROOT<V>;42 abc", found)
    def testTreeParserRewriteImaginaryRoot(self):
        """Imaginary hetero node used as a tree root (^(ROOT<V> ID))."""
        grammar = textwrap.dedent(
            r'''
            grammar T17;
            options {
                language=Python;
                output=AST;
            }
            a : ID ;
            ID : 'a'..'z'+ ;
            INT : '0'..'9'+;
            WS : (' '|'\n') {$channel=HIDDEN;} ;
            ''')

        treeGrammar = textwrap.dedent(
            r'''
            tree grammar TP17;
            options {
                language=Python;
                output=AST;
                ASTLabelType=CommonTree;
                tokenVocab=T17;
            }
            tokens { ROOT; }
            @header {
            class V(CommonTree):
                def __init__(self, tokenType):
                    CommonTree.__init__(self, CommonToken(tokenType))

                def toString(self):
                    return tokenNames[self.token.type] + "<V>"
                __str__ = toString

            }
            a : ID -> ^(ROOT<V> ID)
              ;
            ''')

        found = self.execTreeParser(
            grammar, 'a',
            treeGrammar, 'a',
            input="abc")
        self.failUnlessEqual("(ROOT<V> abc)", found)
    def testTreeParserRewriteImaginaryFromReal(self):
        """Imaginary node copied from a real token (ROOT<V>[$ID]) keeps its line."""
        grammar = textwrap.dedent(
            r'''
            grammar T18;
            options {
                language=Python;
                output=AST;
            }
            a : ID ;
            ID : 'a'..'z'+ ;
            INT : '0'..'9'+;
            WS : (' '|'\n') {$channel=HIDDEN;} ;
            ''')

        treeGrammar = textwrap.dedent(
            r'''
            tree grammar TP18;
            options {
                language=Python;
                output=AST;
                ASTLabelType=CommonTree;
                tokenVocab=T18;
            }
            tokens { ROOT; }
            @header {
            class V(CommonTree):
                def __init__(self, tokenType, tree=None):
                    if tree is None:
                        CommonTree.__init__(self, CommonToken(tokenType))
                    else:
                        CommonTree.__init__(self, tree)
                        self.token.type = tokenType

                def toString(self):
                    return tokenNames[self.token.type]+"<V>@"+str(self.token.line)
                __str__ = toString

            }
            a : ID -> ROOT<V>[$ID]
              ;
            ''')

        found = self.execTreeParser(
            grammar, 'a',
            treeGrammar, 'a',
            input="abc")
        self.failUnlessEqual("ROOT<V>@1", found)
    def testTreeParserAutoHeteroAST(self):
        """Auto-AST mode in a tree parser with hetero nodes on token and literal."""
        grammar = textwrap.dedent(
            r'''
            grammar T;
            options {
                language=Python;
                output=AST;
            }
            a : ID ';' ;
            ID : 'a'..'z'+ ;
            INT : '0'..'9'+;
            WS : (' '|'\n') {$channel=HIDDEN;} ;
            ''')

        treeGrammar = textwrap.dedent(
            r'''
            tree grammar TP;
            options {
                language=Python;
                output=AST;
                ASTLabelType=CommonTree;
                tokenVocab=T;
            }
            tokens { ROOT; }
            @header {
            class V(CommonTree):
                def toString(self):
                    return CommonTree.toString(self) + "<V>"
                __str__ = toString

            }
            a : ID<V> ';'<V>;
            ''')

        found = self.execTreeParser(
            grammar, 'a',
            treeGrammar, 'a',
            input="abc;")
        self.failUnlessEqual("abc<V> ;<V>", found)
# Run the suite when this file is executed directly.
if __name__ == '__main__':
    unittest.main()
| Python |
import unittest
import textwrap
import antlr3
import antlr3.tree
import testbase
class T(testbase.ANTLRTest):
    """Tree-parser AST construction and rewrite-mode tests."""

    def walkerClass(self, base):
        """Wrap the generated tree walker with trace hooks for the tests."""
        class TWalker(base):
            def __init__(self, *args, **kwargs):
                base.__init__(self, *args, **kwargs)
                self.buf = ""

            def traceIn(self, ruleName, ruleIndex):
                self.traces.append('>'+ruleName)

            def traceOut(self, ruleName, ruleIndex):
                self.traces.append('<'+ruleName)

            def recover(self, input, re):
                # no error recovery yet, just crash!
                raise

        return TWalker

    def execTreeParser(self, grammar, grammarEntry, treeGrammar, treeEntry, input):
        """Parse *input*, then walk the resulting tree; return the walker's AST text."""
        lexerCls, parserCls = self.compileInlineGrammar(grammar)
        walkerCls = self.compileInlineGrammar(treeGrammar)

        cStream = antlr3.StringStream(input)
        lexer = lexerCls(cStream)
        tStream = antlr3.CommonTokenStream(lexer)
        parser = parserCls(tStream)
        r = getattr(parser, grammarEntry)()
        nodes = antlr3.tree.CommonTreeNodeStream(r.tree)
        nodes.setTokenStream(tStream)
        walker = walkerCls(nodes)
        r = getattr(walker, treeEntry)()

        # NOTE: unlike the sibling harnesses, this one checks r.tree (not r)
        # before stringifying.
        if r.tree is not None:
            return r.tree.toStringTree()

        return ""
    def testFlatList(self):
        """Rewrite a flat token list, swapping the element order."""
        grammar = textwrap.dedent(
            r'''
            grammar T1;
            options {
                language=Python;
                output=AST;
            }
            a : ID INT;
            ID : 'a'..'z'+ ;
            INT : '0'..'9'+;
            WS : (' '|'\\n') {$channel=HIDDEN;} ;
            ''')

        treeGrammar = textwrap.dedent(
            r'''
            tree grammar TP1;
            options {
                language=Python;
                output=AST;
                ASTLabelType=CommonTree;
                tokenVocab=T1;
            }
            a : ID INT -> INT ID;
            ''')

        found = self.execTreeParser(
            grammar, 'a',
            treeGrammar, 'a',
            "abc 34")
        self.failUnlessEqual("34 abc", found)
    def testSimpleTree(self):
        """Rewrite a simple tree, swapping root and child."""
        grammar = textwrap.dedent(
            r'''
            grammar T2;
            options {
                language=Python;
                output=AST;
            }
            a : ID INT -> ^(ID INT);
            ID : 'a'..'z'+ ;
            INT : '0'..'9'+;
            WS : (' '|'\\n') {$channel=HIDDEN;} ;
            ''')

        treeGrammar = textwrap.dedent(
            r'''
            tree grammar TP2;
            options {
                language=Python;
                output=AST;
                ASTLabelType=CommonTree;
                tokenVocab=T2;
            }
            a : ^(ID INT) -> ^(INT ID);
            ''')

        found = self.execTreeParser(
            grammar, 'a',
            treeGrammar, 'a',
            "abc 34")
        self.failUnlessEqual("(34 abc)", found)
    def testCombinedRewriteAndAuto(self):
        """Mix rewrite and auto-AST alternatives in the same rule."""
        grammar = textwrap.dedent(
            r'''
            grammar T3;
            options {
                language=Python;
                output=AST;
            }
            a : ID INT -> ^(ID INT) | INT ;
            ID : 'a'..'z'+ ;
            INT : '0'..'9'+;
            WS : (' '|'\\n') {$channel=HIDDEN;} ;
            ''')

        treeGrammar = textwrap.dedent(
            r'''
            tree grammar TP3;
            options {
                language=Python;
                output=AST;
                ASTLabelType=CommonTree;
                tokenVocab=T3;
            }
            a : ^(ID INT) -> ^(INT ID) | INT;
            ''')

        found = self.execTreeParser(
            grammar, 'a',
            treeGrammar, 'a',
            "abc 34")
        self.failUnlessEqual("(34 abc)", found)

        found = self.execTreeParser(
            grammar, 'a',
            treeGrammar, 'a',
            "34")
        self.failUnlessEqual("34", found)
    def testAvoidDup(self):
        """A node referenced twice in a rewrite is duplicated safely."""
        grammar = textwrap.dedent(
            r'''
            grammar T4;
            options {
                language=Python;
                output=AST;
            }
            a : ID ;
            ID : 'a'..'z'+ ;
            INT : '0'..'9'+;
            WS : (' '|'\\n') {$channel=HIDDEN;} ;
            ''')

        treeGrammar = textwrap.dedent(
            r'''
            tree grammar TP4;
            options {
                language=Python;
                output=AST;
                ASTLabelType=CommonTree;
                tokenVocab=T4;
            }
            a : ID -> ^(ID ID);
            ''')

        found = self.execTreeParser(
            grammar, 'a',
            treeGrammar, 'a',
            "abc")
        self.failUnlessEqual("(abc abc)", found)
    def testLoop(self):
        """Rewrite with repeated subtrees flattened back to token lists."""
        grammar = textwrap.dedent(
            r'''
            grammar T5;
            options {
                language=Python;
                output=AST;
            }
            a : ID+ INT+ -> (^(ID INT))+ ;
            ID : 'a'..'z'+ ;
            INT : '0'..'9'+;
            WS : (' '|'\\n') {$channel=HIDDEN;} ;
            ''')

        treeGrammar = textwrap.dedent(
            r'''
            tree grammar TP5;
            options {
                language=Python;
                output=AST;
                ASTLabelType=CommonTree;
                tokenVocab=T5;
            }
            a : (^(ID INT))+ -> INT+ ID+;
            ''')

        found = self.execTreeParser(
            grammar, 'a',
            treeGrammar, 'a',
            "a b c 3 4 5")
        self.failUnlessEqual("3 4 5 a b c", found)
    def testAutoDup(self):
        """Auto-AST mode duplicates a single matched token."""
        grammar = textwrap.dedent(
            r'''
            grammar T6;
            options {
                language=Python;
                output=AST;
            }
            a : ID ;
            ID : 'a'..'z'+ ;
            INT : '0'..'9'+;
            WS : (' '|'\\n') {$channel=HIDDEN;} ;
            ''')

        treeGrammar = textwrap.dedent(
            r'''
            tree grammar TP6;
            options {
                language=Python;
                output=AST;
                ASTLabelType=CommonTree;
                tokenVocab=T6;
            }
            a : ID;
            ''')

        found = self.execTreeParser(
            grammar, 'a',
            treeGrammar, 'a',
            "abc")
        self.failUnlessEqual("abc", found)
    def testAutoDupRule(self):
        """Auto-AST mode duplicates across rule references."""
        grammar = textwrap.dedent(
            r'''
            grammar T7;
            options {
                language=Python;
                output=AST;
            }
            a : ID INT ;
            ID : 'a'..'z'+ ;
            INT : '0'..'9'+;
            WS : (' '|'\\n') {$channel=HIDDEN;} ;
            ''')

        treeGrammar = textwrap.dedent(
            r'''
            tree grammar TP7;
            options {
                language=Python;
                output=AST;
                ASTLabelType=CommonTree;
                tokenVocab=T7;
            }
            a : b c ;
            b : ID ;
            c : INT ;
            ''')

        found = self.execTreeParser(
            grammar, 'a',
            treeGrammar, 'a',
            "a 1")
        self.failUnlessEqual("a 1", found)
    def testAutoDupMultiple(self):
        """Auto-AST mode duplicates multiple sibling tokens."""
        grammar = textwrap.dedent(
            r'''
            grammar T8;
            options {
                language=Python;
                output=AST;
            }
            a : ID ID INT;
            ID : 'a'..'z'+ ;
            INT : '0'..'9'+;
            WS : (' '|'\\n') {$channel=HIDDEN;} ;
            ''')

        treeGrammar = textwrap.dedent(
            r'''
            tree grammar TP8;
            options {
                language=Python;
                output=AST;
                ASTLabelType=CommonTree;
                tokenVocab=T8;
            }
            a : ID ID INT
              ;
            ''')

        found = self.execTreeParser(
            grammar, 'a',
            treeGrammar, 'a',
            "a b 3")
        self.failUnlessEqual("a b 3", found)
    def testAutoDupTree(self):
        """Auto-AST mode duplicates a matched tree."""
        grammar = textwrap.dedent(
            r'''
            grammar T9;
            options {
                language=Python;
                output=AST;
            }
            a : ID INT -> ^(ID INT);
            ID : 'a'..'z'+ ;
            INT : '0'..'9'+;
            WS : (' '|'\\n') {$channel=HIDDEN;} ;
            ''')

        treeGrammar = textwrap.dedent(
            r'''
            tree grammar TP9;
            options {
                language=Python;
                output=AST;
                ASTLabelType=CommonTree;
                tokenVocab=T9;
            }
            a : ^(ID INT)
              ;
            ''')

        found = self.execTreeParser(
            grammar, 'a',
            treeGrammar, 'a',
            "a 3")
        self.failUnlessEqual("(a 3)", found)
    def testAutoDupTreeWithLabels(self):
        """Auto-dup of a tree whose elements carry plain labels."""
        grammar = textwrap.dedent(
            r'''
            grammar T10;
            options {
                language=Python;
                output=AST;
            }
            a : ID INT -> ^(ID INT);
            ID : 'a'..'z'+ ;
            INT : '0'..'9'+;
            WS : (' '|'\\n') {$channel=HIDDEN;} ;
            ''')

        treeGrammar = textwrap.dedent(
            r'''
            tree grammar TP10;
            options {
                language=Python;
                output=AST;
                ASTLabelType=CommonTree;
                tokenVocab=T10;
            }
            a : ^(x=ID y=INT)
              ;
            ''')

        found = self.execTreeParser(
            grammar, 'a',
            treeGrammar, 'a',
            "a 3")
        self.failUnlessEqual("(a 3)", found)
    def testAutoDupTreeWithListLabels(self):
        """Auto-dup of a tree whose elements carry list labels."""
        grammar = textwrap.dedent(
            r'''
            grammar T11;
            options {
                language=Python;
                output=AST;
            }
            a : ID INT -> ^(ID INT);
            ID : 'a'..'z'+ ;
            INT : '0'..'9'+;
            WS : (' '|'\\n') {$channel=HIDDEN;} ;
            ''')

        treeGrammar = textwrap.dedent(
            r'''
            tree grammar TP11;
            options {
                language=Python;
                output=AST;
                ASTLabelType=CommonTree;
                tokenVocab=T11;
            }
            a : ^(x+=ID y+=INT)
              ;
            ''')

        found = self.execTreeParser(
            grammar, 'a',
            treeGrammar, 'a',
            "a 3")
        self.failUnlessEqual("(a 3)", found)
    def testAutoDupTreeWithRuleRoot(self):
        """Auto-dup of a tree whose root is matched by a rule reference."""
        grammar = textwrap.dedent(
            r'''
            grammar T12;
            options {
                language=Python;
                output=AST;
            }
            a : ID INT -> ^(ID INT);
            ID : 'a'..'z'+ ;
            INT : '0'..'9'+;
            WS : (' '|'\\n') {$channel=HIDDEN;} ;
            ''')

        treeGrammar = textwrap.dedent(
            r'''
            tree grammar TP12;
            options {
                language=Python;
                output=AST;
                ASTLabelType=CommonTree;
                tokenVocab=T12;
            }
            a : ^(b INT) ;
            b : ID ;
            ''')

        found = self.execTreeParser(
            grammar, 'a',
            treeGrammar, 'a',
            "a 3")
        self.failUnlessEqual("(a 3)", found)
    def testAutoDupTreeWithRuleRootAndLabels(self):
        """Auto-dup with a labeled rule reference as the tree root."""
        grammar = textwrap.dedent(
            r'''
            grammar T13;
            options {
                language=Python;
                output=AST;
            }
            a : ID INT -> ^(ID INT);
            ID : 'a'..'z'+ ;
            INT : '0'..'9'+;
            WS : (' '|'\\n') {$channel=HIDDEN;} ;
            ''')

        treeGrammar = textwrap.dedent(
            r'''
            tree grammar TP13;
            options {
                language=Python;
                output=AST;
                ASTLabelType=CommonTree;
                tokenVocab=T13;
            }
            a : ^(x=b INT) ;
            b : ID ;
            ''')

        found = self.execTreeParser(
            grammar, 'a',
            treeGrammar, 'a',
            "a 3")
        self.failUnlessEqual("(a 3)", found)
    def testAutoDupTreeWithRuleRootAndListLabels(self):
        """Auto-dup with list-labeled rule references for root and child."""
        grammar = textwrap.dedent(
            r'''
            grammar T14;
            options {
                language=Python;
                output=AST;
            }
            a : ID INT -> ^(ID INT);
            ID : 'a'..'z'+ ;
            INT : '0'..'9'+;
            WS : (' '|'\\n') {$channel=HIDDEN;} ;
            ''')

        treeGrammar = textwrap.dedent(
            r'''
            tree grammar TP14;
            options {
                language=Python;
                output=AST;
                ASTLabelType=CommonTree;
                tokenVocab=T14;
            }
            a : ^(x+=b y+=c) ;
            b : ID ;
            c : INT ;
            ''')

        found = self.execTreeParser(
            grammar, 'a',
            treeGrammar, 'a',
            "a 3")
        self.failUnlessEqual("(a 3)", found)
    def testAutoDupNestedTree(self):
        """Auto-dup of a nested tree structure."""
        grammar = textwrap.dedent(
            r'''
            grammar T15;
            options {
                language=Python;
                output=AST;
            }
            a : x=ID y=ID INT -> ^($x ^($y INT));
            ID : 'a'..'z'+ ;
            INT : '0'..'9'+;
            WS : (' '|'\\n') {$channel=HIDDEN;} ;
            ''')

        treeGrammar = textwrap.dedent(
            r'''
            tree grammar TP15;
            options {
                language=Python;
                output=AST;
                ASTLabelType=CommonTree;
                tokenVocab=T15;
            }
            a : ^(ID ^(ID INT))
              ;
            ''')

        found = self.execTreeParser(
            grammar, 'a',
            treeGrammar, 'a',
            "a b 3")
        self.failUnlessEqual("(a (b 3))", found)
    def testDelete(self):
        """An empty rewrite (-> ) deletes the matched subtree."""
        grammar = textwrap.dedent(
            r'''
            grammar T16;
            options {
                language=Python;
                output=AST;
            }
            a : ID ;
            ID : 'a'..'z'+ ;
            INT : '0'..'9'+;
            WS : (' '|'\\n') {$channel=HIDDEN;} ;
            ''')

        treeGrammar = textwrap.dedent(
            r'''
            tree grammar TP16;
            options {
                language=Python;
                output=AST;
                ASTLabelType=CommonTree;
                tokenVocab=T16;
            }
            a : ID ->
              ;
            ''')

        found = self.execTreeParser(
            grammar, 'a',
            treeGrammar, 'a',
            "abc")
        self.failUnlessEqual("", found)
    def testSetMatchNoRewrite(self):
        """Matching via a token set (ID | INT) without a rewrite dups correctly."""
        grammar = textwrap.dedent(
            r'''
            grammar T;
            options {
                language=Python;
                output=AST;
            }
            a : ID INT ;
            ID : 'a'..'z'+ ;
            INT : '0'..'9'+;
            WS : (' '|'\n') {$channel=HIDDEN;} ;
            ''')

        treeGrammar = textwrap.dedent(
            r'''
            tree grammar TP;
            options {
                language=Python;
                output=AST;
                ASTLabelType=CommonTree;
                tokenVocab=T;
            }
            a : b INT;
            b : ID | INT;
            ''')

        found = self.execTreeParser(
            grammar, 'a',
            treeGrammar, 'a',
            "abc 34")
        self.failUnlessEqual("abc 34", found)
    def testSetOptionalMatchNoRewrite(self):
        """An optional token set ((ID|INT)?) dups correctly without a rewrite."""
        grammar = textwrap.dedent(
            r'''
            grammar T;
            options {
                language=Python;
                output=AST;
            }
            a : ID INT ;
            ID : 'a'..'z'+ ;
            INT : '0'..'9'+;
            WS : (' '|'\n') {$channel=HIDDEN;} ;
            ''')

        treeGrammar = textwrap.dedent(
            r'''
            tree grammar TP;
            options {
                language=Python;
                output=AST;
                ASTLabelType=CommonTree;
                tokenVocab=T;
            }
            a : (ID|INT)? INT ;
            ''')

        found = self.execTreeParser(
            grammar, 'a',
            treeGrammar, 'a',
            "abc 34")
        self.failUnlessEqual("abc 34", found)
    def testSetMatchNoRewriteLevel2(self):
        """Token set matching inside a tree level dups correctly."""
        grammar = textwrap.dedent(
            r'''
            grammar T;
            options {
                language=Python;
                output=AST;
            }
            a : x=ID INT -> ^($x INT);
            ID : 'a'..'z'+ ;
            INT : '0'..'9'+;
            WS : (' '|'\n') {$channel=HIDDEN;} ;
            ''')

        treeGrammar = textwrap.dedent(
            r'''
            tree grammar TP;
            options {
                language=Python;
                output=AST;
                ASTLabelType=CommonTree;
                tokenVocab=T;
            }
            a : ^(ID (ID | INT) ) ;
            ''')

        found = self.execTreeParser(
            grammar, 'a',
            treeGrammar, 'a',
            "abc 34")
        self.failUnlessEqual("(abc 34)", found)
    def testSetMatchNoRewriteLevel2Root(self):
        """Token set matching at a tree root dups correctly."""
        grammar = textwrap.dedent(
            r'''
            grammar T;
            options {
                language=Python;
                output=AST;
            }
            a : x=ID INT -> ^($x INT);
            ID : 'a'..'z'+ ;
            INT : '0'..'9'+;
            WS : (' '|'\n') {$channel=HIDDEN;} ;
            ''')

        treeGrammar = textwrap.dedent(
            r'''
            tree grammar TP;
            options {
                language=Python;
                output=AST;
                ASTLabelType=CommonTree;
                tokenVocab=T;
            }
            a : ^((ID | INT) INT) ;
            ''')

        found = self.execTreeParser(
            grammar, 'a',
            treeGrammar, 'a',
            "abc 34")
        self.failUnlessEqual("(abc 34)", found)
    ## REWRITE MODE

    def testRewriteModeCombinedRewriteAndAuto(self):
        """rewrite=true: rewritten alt replaces the tree, plain alt leaves it."""
        grammar = textwrap.dedent(
            r'''
            grammar T17;
            options {
                language=Python;
                output=AST;
            }
            a : ID INT -> ^(ID INT) | INT ;
            ID : 'a'..'z'+ ;
            INT : '0'..'9'+;
            WS : (' '|'\\n') {$channel=HIDDEN;} ;
            ''')

        treeGrammar = textwrap.dedent(
            r'''
            tree grammar TP17;
            options {
                language=Python;
                output=AST;
                ASTLabelType=CommonTree;
                tokenVocab=T17;
                rewrite=true;
            }
            a : ^(ID INT) -> ^(ID["ick"] INT)
              | INT // leaves it alone, returning $a.start
              ;
            ''')

        found = self.execTreeParser(
            grammar, 'a',
            treeGrammar, 'a',
            "abc 34")
        self.failUnlessEqual("(ick 34)", found)

        found = self.execTreeParser(
            grammar, 'a',
            treeGrammar, 'a',
            "34")
        self.failUnlessEqual("34", found)
    def testRewriteModeFlatTree(self):
        """rewrite=true: rewriting inside a flat list patches it in place."""
        grammar = textwrap.dedent(
            r'''
            grammar T18;
            options {
                language=Python;
                output=AST;
            }
            a : ID INT -> ID INT | INT ;
            ID : 'a'..'z'+ ;
            INT : '0'..'9'+;
            WS : (' '|'\n') {$channel=HIDDEN;} ;
            ''')

        treeGrammar = textwrap.dedent(
            r'''
            tree grammar TP18;
            options {
                language=Python;
                output=AST;
                ASTLabelType=CommonTree;
                tokenVocab=T18;
                rewrite=true;
            }
            s : ID a ;
            a : INT -> INT["1"]
              ;
            ''')

        found = self.execTreeParser(
            grammar, 'a',
            treeGrammar, 's',
            "abc 34")
        self.assertEquals("abc 1", found)
    def testRewriteModeChainRuleFlatTree(self):
        """rewrite=true: a chain of rules propagates the rewritten flat list."""
        grammar = textwrap.dedent(
            r'''
            grammar T;
            options {language=Python; output=AST;}
            a : ID INT -> ID INT | INT ;
            ID : 'a'..'z'+ ;
            INT : '0'..'9'+;
            WS : (' '|'\n') {$channel=HIDDEN;} ;
            ''')

        treeGrammar = textwrap.dedent(
            r'''
            tree grammar TP;
            options {language=Python; output=AST; ASTLabelType=CommonTree; tokenVocab=T; rewrite=true;}
            s : a ;
            a : b ;
            b : ID INT -> INT ID
              ;
            ''')

        found = self.execTreeParser(
            grammar, 'a',
            treeGrammar, 's',
            "abc 34")
        self.assertEquals("34 abc", found)
    def testRewriteModeChainRuleTree(self):
        """rewrite=true: a chain of rules propagates a rewritten tree."""
        grammar = textwrap.dedent(
            r'''
            grammar T;
            options {language=Python; output=AST;}
            a : ID INT -> ^(ID INT) ;
            ID : 'a'..'z'+ ;
            INT : '0'..'9'+;
            WS : (' '|'\n') {$channel=HIDDEN;} ;
            ''')

        treeGrammar = textwrap.dedent(
            r'''
            tree grammar TP;
            options {language=Python; output=AST; ASTLabelType=CommonTree; tokenVocab=T; rewrite=true;}
            s : a ;
            a : b ; // a.tree must become b.tree
            b : ^(ID INT) -> INT
              ;
            ''')

        found = self.execTreeParser(
            grammar, 'a',
            treeGrammar, 's',
            "abc 34")
        self.assertEquals("34", found)
    def testRewriteModeChainRuleTree2(self):
        """rewrite=true: empty a* before b still lets b's tree become s.tree."""
        grammar = textwrap.dedent(
            r'''
            grammar T;
            options {language=Python; output=AST;}
            a : ID INT -> ^(ID INT) ;
            ID : 'a'..'z'+ ;
            INT : '0'..'9'+;
            WS : (' '|'\n') {$channel=HIDDEN;} ;
            ''')

        treeGrammar = textwrap.dedent(
            r"""
            tree grammar TP;
            options {language=Python; output=AST; ASTLabelType=CommonTree; tokenVocab=T; rewrite=true;}
            tokens { X; }
            s : a* b ; // only b contributes to tree, but it's after a*; s.tree = b.tree
            a : X ;
            b : ^(ID INT) -> INT
              ;
            """)

        found = self.execTreeParser(
            grammar, 'a',
            treeGrammar, 's',
            "abc 34")
        self.assertEquals("34", found)
    def testRewriteModeChainRuleTree3(self):
        """rewrite=true: a leading token keeps s.tree from collapsing to b.tree."""
        grammar = textwrap.dedent(
            r'''
            grammar T;
            options {language=Python; output=AST;}
            a : 'boo' ID INT -> 'boo' ^(ID INT) ;
            ID : 'a'..'z'+ ;
            INT : '0'..'9'+;
            WS : (' '|'\n') {$channel=HIDDEN;} ;
            ''')

        treeGrammar = textwrap.dedent(
            r"""
            tree grammar TP;
            options {language=Python; output=AST; ASTLabelType=CommonTree; tokenVocab=T; rewrite=true;}
            tokens { X; }
            s : 'boo' a* b ; // don't reset s.tree to b.tree due to 'boo'
            a : X ;
            b : ^(ID INT) -> INT
              ;
            """)

        found = self.execTreeParser(
            grammar, 'a',
            treeGrammar, 's',
            "boo abc 34")
        self.assertEquals("boo 34", found)
    def testRewriteModeChainRuleTree4(self):
        """rewrite=true: same as Tree3 but with 'boo' as the tree root."""
        grammar = textwrap.dedent(
            r"""
            grammar T;
            options {language=Python; output=AST;}
            a : 'boo' ID INT -> ^('boo' ^(ID INT)) ;
            ID : 'a'..'z'+ ;
            INT : '0'..'9'+;
            WS : (' '|'\n') {$channel=HIDDEN;} ;
            """)

        treeGrammar = textwrap.dedent(
            r"""
            tree grammar TP;
            options {language=Python; output=AST; ASTLabelType=CommonTree; tokenVocab=T; rewrite=true;}
            tokens { X; }
            s : ^('boo' a* b) ; // don't reset s.tree to b.tree due to 'boo'
            a : X ;
            b : ^(ID INT) -> INT
              ;
            """)

        found = self.execTreeParser(
            grammar, 'a',
            treeGrammar, 's',
            "boo abc 34")
        self.assertEquals("(boo 34)", found)
    def testRewriteModeChainRuleTree5(self):
        """rewrite=true: rule-matched root keeps s.tree anchored at a.tree."""
        grammar = textwrap.dedent(
            r"""
            grammar T;
            options {language=Python; output=AST;}
            a : 'boo' ID INT -> ^('boo' ^(ID INT)) ;
            ID : 'a'..'z'+ ;
            INT : '0'..'9'+;
            WS : (' '|'\n') {$channel=HIDDEN;} ;
            """)

        treeGrammar = textwrap.dedent(
            r"""
            tree grammar TP;
            options {language=Python; output=AST; ASTLabelType=CommonTree; tokenVocab=T; rewrite=true;}
            tokens { X; }
            s : ^(a b) ; // s.tree is a.tree
            a : 'boo' ;
            b : ^(ID INT) -> INT
              ;
            """)

        found = self.execTreeParser(
            grammar, 'a',
            treeGrammar, 's',
            "boo abc 34")
        self.assertEquals("(boo 34)", found)
    def testRewriteOfRuleRef(self):
        """rewrite=true: rewriting to a rule reference (-> a) keeps its tree."""
        grammar = textwrap.dedent(
            r"""
            grammar T;
            options {language=Python; output=AST;}
            a : ID INT -> ID INT | INT ;
            ID : 'a'..'z'+ ;
            INT : '0'..'9'+;
            WS : (' '|'\n') {$channel=HIDDEN;} ;
            """)

        treeGrammar = textwrap.dedent(
            r"""
            tree grammar TP;
            options {language=Python; output=AST; ASTLabelType=CommonTree; tokenVocab=T; rewrite=true;}
            s : a -> a ;
            a : ID INT -> ID INT ;
            """)

        found = self.execTreeParser(
            grammar, 'a',
            treeGrammar, 's',
            "abc 34")
        self.failUnlessEqual("abc 34", found)
    def testRewriteOfRuleRefRoot(self):
        """rewrite=true: -> a where a matched the root emits the whole tree."""
        grammar = textwrap.dedent(
            r"""
            grammar T;
            options {language=Python; output=AST;}
            a : ID INT INT -> ^(INT ^(ID INT));
            ID : 'a'..'z'+ ;
            INT : '0'..'9'+;
            WS : (' '|'\n') {$channel=HIDDEN;} ;
            """)

        treeGrammar = textwrap.dedent(
            r"""
            tree grammar TP;
            options {language=Python; output=AST; ASTLabelType=CommonTree; tokenVocab=T; rewrite=true;}
            s : ^(a ^(ID INT)) -> a ;
            a : INT ;
            """)

        found = self.execTreeParser(
            grammar, 'a',
            treeGrammar, 's',
            "abc 12 34")
        # emits whole tree when you ref the root since I can't know whether
        # you want the children or not.  You might be returning a whole new
        # tree.  Hmm...still seems weird.  oh well.
        self.failUnlessEqual("(12 (abc 34))", found)
    def testRewriteOfRuleRefRootLabeled(self):
        """Same as testRewriteOfRuleRefRoot but with a plain label on the rule."""
        grammar = textwrap.dedent(
            r"""
            grammar T;
            options {language=Python; output=AST;}
            a : ID INT INT -> ^(INT ^(ID INT));
            ID : 'a'..'z'+ ;
            INT : '0'..'9'+;
            WS : (' '|'\n') {$channel=HIDDEN;} ;
            """)

        treeGrammar = textwrap.dedent(
            r"""
            tree grammar TP;
            options {language=Python; output=AST; ASTLabelType=CommonTree; tokenVocab=T; rewrite=true;}
            s : ^(label=a ^(ID INT)) -> a ;
            a : INT ;
            """)

        found = self.execTreeParser(
            grammar, 'a',
            treeGrammar, 's',
            "abc 12 34")
        # emits whole tree when you ref the root since I can't know whether
        # you want the children or not.  You might be returning a whole new
        # tree.  Hmm...still seems weird.  oh well.
        self.failUnlessEqual("(12 (abc 34))", found)
    def testRewriteOfRuleRefRootListLabeled(self):
        """Same as testRewriteOfRuleRefRoot but with a list label on the rule."""
        grammar = textwrap.dedent(
            r"""
            grammar T;
            options {language=Python; output=AST;}
            a : ID INT INT -> ^(INT ^(ID INT));
            ID : 'a'..'z'+ ;
            INT : '0'..'9'+;
            WS : (' '|'\n') {$channel=HIDDEN;} ;
            """)

        treeGrammar = textwrap.dedent(
            r"""
            tree grammar TP;
            options {language=Python; output=AST; ASTLabelType=CommonTree; tokenVocab=T; rewrite=true;}
            s : ^(label+=a ^(ID INT)) -> a ;
            a : INT ;
            """)

        found = self.execTreeParser(
            grammar, 'a',
            treeGrammar, 's',
            "abc 12 34")
        # emits whole tree when you ref the root since I can't know whether
        # you want the children or not.  You might be returning a whole new
        # tree.  Hmm...still seems weird.  oh well.
        self.failUnlessEqual("(12 (abc 34))", found)
def testRewriteOfRuleRefChild(self):
grammar = textwrap.dedent(
r"""
grammar T;
options {language=Python; output=AST;}
a : ID INT -> ^(ID ^(INT INT));
ID : 'a'..'z'+ ;
INT : '0'..'9'+;
WS : (' '|'\n') {$channel=HIDDEN;} ;
""")
treeGrammar = textwrap.dedent(
r"""
tree grammar TP;
options {language=Python; output=AST; ASTLabelType=CommonTree; tokenVocab=T; rewrite=true;}
s : ^(ID a) -> a ;
a : ^(INT INT) ;
""")
found = self.execTreeParser(
grammar, 'a',
treeGrammar, 's',
"abc 34")
self.failUnlessEqual("(34 34)", found)
def testRewriteOfRuleRefLabel(self):
grammar = textwrap.dedent(
r"""
grammar T;
options {language=Python; output=AST;}
a : ID INT -> ^(ID ^(INT INT));
ID : 'a'..'z'+ ;
INT : '0'..'9'+;
WS : (' '|'\n') {$channel=HIDDEN;} ;
""")
treeGrammar = textwrap.dedent(
r"""
tree grammar TP;
options {language=Python; output=AST; ASTLabelType=CommonTree; tokenVocab=T; rewrite=true;}
s : ^(ID label=a) -> a ;
a : ^(INT INT) ;
""")
found = self.execTreeParser(
grammar, 'a',
treeGrammar, 's',
"abc 34")
self.failUnlessEqual("(34 34)", found)
def testRewriteOfRuleRefListLabel(self):
grammar = textwrap.dedent(
r"""
grammar T;
options {language=Python; output=AST;}
a : ID INT -> ^(ID ^(INT INT));
ID : 'a'..'z'+ ;
INT : '0'..'9'+;
WS : (' '|'\n') {$channel=HIDDEN;} ;
""")
treeGrammar = textwrap.dedent(
r"""
tree grammar TP;
options {language=Python; output=AST; ASTLabelType=CommonTree; tokenVocab=T; rewrite=true;}
s : ^(ID label+=a) -> a ;
a : ^(INT INT) ;
""")
found = self.execTreeParser(
grammar, 'a',
treeGrammar, 's',
"abc 34")
self.failUnlessEqual("(34 34)", found)
def testRewriteModeWithPredicatedRewrites(self):
grammar = textwrap.dedent(
r'''
grammar T19;
options {
language=Python;
output=AST;
}
a : ID INT -> ^(ID["root"] ^(ID INT)) | INT -> ^(ID["root"] INT) ;
ID : 'a'..'z'+ ;
INT : '0'..'9'+;
WS : (' '|'\n') {$channel=HIDDEN;} ;
''')
treeGrammar = textwrap.dedent(
r'''
tree grammar TP19;
options {
language=Python;
output=AST;
ASTLabelType=CommonTree;
tokenVocab=T19;
rewrite=true;
}
s : ^(ID a) { self.buf += $s.start.toStringTree() };
a : ^(ID INT) -> {True}? ^(ID["ick"] INT)
-> INT
;
''')
found = self.execTreeParser(
grammar, 'a',
treeGrammar, 's',
"abc 34"
)
self.failUnlessEqual("(root (ick 34))", found)
def testWildcard(self):
grammar = textwrap.dedent(
r'''
grammar T;
options {
language=Python;
output=AST;
}
a : ID INT -> ^(ID["root"] INT);
ID : 'a'..'z'+ ;
INT : '0'..'9'+;
WS : (' '|'\n') {$channel=HIDDEN;} ;
''')
treeGrammar = textwrap.dedent(
r'''
tree grammar TP;
options {
language=Python;
output=AST;
ASTLabelType=CommonTree;
tokenVocab=T;
}
s : ^(ID c=.) -> $c
;
''')
found = self.execTreeParser(
grammar, 'a',
treeGrammar, 's',
"abc 34"
)
self.failUnlessEqual("34", found)
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
| Python |
import antlr3
import testbase
import unittest
class t033backtracking(testbase.ANTLRTest):
    """Smoke test for backtracking support: parse a C-style declaration."""

    def setUp(self):
        self.compileGrammar()

    def parserClass(self, base):
        class TParser(base):
            def recover(self, input, re):
                # No error recovery yet -- re-raise so failures surface.
                raise

        return TParser

    def testValid1(self):
        charStream = antlr3.StringStream('int a;')
        lexer = self.getLexer(charStream)
        tokenStream = antlr3.CommonTokenStream(lexer)
        parser = self.getParser(tokenStream)
        parser.translation_unit()
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
| Python |
import antlr3
import testbase
import unittest
class t001lexer(testbase.ANTLRTest):
def setUp(self):
self.compileGrammar()
def lexerClass(self, base):
class TLexer(base):
def emitErrorMessage(self, msg):
# report errors to /dev/null
pass
def reportError(self, re):
# no error recovery yet, just crash!
raise re
return TLexer
def testValid(self):
stream = antlr3.StringStream('0')
lexer = self.getLexer(stream)
token = lexer.nextToken()
self.failUnlessEqual(token.type, self.lexerModule.ZERO)
token = lexer.nextToken()
self.failUnlessEqual(token.type, self.lexerModule.EOF)
def testIteratorInterface(self):
stream = antlr3.StringStream('0')
lexer = self.getLexer(stream)
types = [token.type for token in lexer]
self.failUnlessEqual(types, [self.lexerModule.ZERO])
def testMalformedInput(self):
stream = antlr3.StringStream('1')
lexer = self.getLexer(stream)
try:
token = lexer.nextToken()
self.fail()
except antlr3.MismatchedTokenException, exc:
self.failUnlessEqual(exc.expecting, '0')
self.failUnlessEqual(exc.unexpectedType, '1')
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
| Python |
import antlr3
import testbase
import unittest
class t006lexer(testbase.ANTLRTest):
def setUp(self):
self.compileGrammar()
def lexerClass(self, base):
class TLexer(base):
def emitErrorMessage(self, msg):
# report errors to /dev/null
pass
def reportError(self, re):
# no error recovery yet, just crash!
raise re
return TLexer
def testValid(self):
stream = antlr3.StringStream('fofaaooa')
lexer = self.getLexer(stream)
token = lexer.nextToken()
assert token.type == self.lexerModule.FOO
assert token.start == 0, token.start
assert token.stop == 1, token.stop
assert token.text == 'fo', token.text
token = lexer.nextToken()
assert token.type == self.lexerModule.FOO
assert token.start == 2, token.start
assert token.stop == 7, token.stop
assert token.text == 'faaooa', token.text
token = lexer.nextToken()
assert token.type == self.lexerModule.EOF
def testMalformedInput(self):
stream = antlr3.StringStream('fofoaooaoa2')
lexer = self.getLexer(stream)
lexer.nextToken()
lexer.nextToken()
try:
token = lexer.nextToken()
raise AssertionError, token
except antlr3.MismatchedTokenException, exc:
assert exc.expecting == 'f', repr(exc.expecting)
assert exc.unexpectedType == '2', repr(exc.unexpectedType)
assert exc.charPositionInLine == 10, repr(exc.charPositionInLine)
assert exc.line == 1, repr(exc.line)
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
| Python |
import antlr3
import testbase
import unittest
class t034tokenLabelPropertyRef(testbase.ANTLRTest):
    """Checks $label.property references on token labels in parser actions."""

    def setUp(self):
        self.compileGrammar()

    def lexerClass(self, base):
        class TLexer(base):
            def recover(self, input, re):
                # No error recovery yet -- re-raise so failures surface.
                raise

        return TLexer

    def parserClass(self, base):
        class TParser(base):
            def recover(self, input, re):
                # No error recovery yet -- re-raise so failures surface.
                raise

        return TParser

    def testValid1(self):
        charStream = antlr3.StringStream(' a')
        lexer = self.getLexer(charStream)
        tokenStream = antlr3.CommonTokenStream(lexer)
        parser = self.getParser(tokenStream)
        parser.a()
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
| Python |
import antlr3
import testbase
import unittest
class t015calc(testbase.ANTLRTest):
    """Tests for a simple calculator grammar with an `evaluate` entry rule."""

    def setUp(self):
        self.compileGrammar()

    def _evaluate(self, expr, expected, errors=()):
        """Parse *expr* and check the numeric result and the error count.

        @param expr     expression source string
        @param expected expected numeric result of parser.evaluate()
        @param errors   expected reported errors; only its length is compared

        NOTE: the default was a shared mutable list ([]); it is now an
        immutable tuple. Only len() is taken, so behavior is unchanged.
        """
        cStream = antlr3.StringStream(expr)
        lexer = self.getLexer(cStream)
        tStream = antlr3.CommonTokenStream(lexer)
        parser = self.getParser(tStream)
        result = parser.evaluate()
        assert result == expected, "%r != %r" % (result, expected)
        assert len(parser.reportedErrors) == len(errors), parser.reportedErrors

    def testValid01(self):
        self._evaluate("1 + 2", 3)

    def testValid02(self):
        self._evaluate("1 + 2 * 3", 7)

    def testValid03(self):
        self._evaluate("10 / 2", 5)

    def testValid04(self):
        self._evaluate("6 + 2*(3+1) - 4", 10)

    def testMalformedInput(self):
        self._evaluate("6 - (2*1", 4, ["mismatched token at pos 8"])

    # FIXME: most parse errors result in TypeErrors in action code, because
    # rules return None, which is then added/multiplied... to integers.
    # evaluate("6 - foo 2", 4, ["some error"])
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
| Python |
import unittest
import textwrap
import antlr3
import antlr3.tree
import testbase
class T(testbase.ANTLRTest):
    """Tree-parser regression tests: each test compiles an inline parser
    grammar plus a tree grammar, walks the parser's AST with the tree
    parser, and compares what the walker's actions captured."""

    def walkerClass(self, base):
        """Return a tree-walker subclass that captures action output into
        self._output and crashes (instead of recovering) on errors."""
        class TWalker(base):
            def __init__(self, *args, **kwargs):
                base.__init__(self, *args, **kwargs)
                self._output = ""

            def capture(self, t):
                # accumulate text emitted by grammar actions
                self._output += t

            def traceIn(self, ruleName, ruleIndex):
                # NOTE(review): self.traces is not initialized here;
                # presumably testbase sets it up for trace tests -- confirm.
                self.traces.append('>'+ruleName)

            def traceOut(self, ruleName, ruleIndex):
                self.traces.append('<'+ruleName)

            def recover(self, input, re):
                # no error recovery yet, just crash!
                raise

        return TWalker

    def execTreeParser(self, grammar, grammarEntry, treeGrammar, treeEntry, input):
        """Compile both grammars, parse *input* with grammarEntry, walk the
        resulting AST with treeEntry, and return the captured output."""
        lexerCls, parserCls = self.compileInlineGrammar(grammar)
        walkerCls = self.compileInlineGrammar(treeGrammar)

        cStream = antlr3.StringStream(input)
        lexer = lexerCls(cStream)
        tStream = antlr3.CommonTokenStream(lexer)
        parser = parserCls(tStream)
        r = getattr(parser, grammarEntry)()
        nodes = antlr3.tree.CommonTreeNodeStream(r.tree)
        nodes.setTokenStream(tStream)
        walker = walkerCls(nodes)
        getattr(walker, treeEntry)()

        return walker._output

    def testFlatList(self):
        """A flat (rootless) node list can be matched and its tokens read."""
        grammar = textwrap.dedent(
            r'''grammar T;
            options {
                language=Python;
                output=AST;
            }
            a : ID INT;
            ID : 'a'..'z'+ ;
            INT : '0'..'9'+;
            WS : (' '|'\n') {$channel=HIDDEN;} ;
            ''')

        treeGrammar = textwrap.dedent(
            r'''tree grammar TP;
            options {
                language=Python;
                ASTLabelType=CommonTree;
            }
            a : ID INT
                {self.capture("\%s, \%s" \% ($ID, $INT))}
              ;
            ''')

        found = self.execTreeParser(
            grammar, 'a',
            treeGrammar, 'a',
            "abc 34"
            )
        self.failUnlessEqual("abc, 34", found)

    def testSimpleTree(self):
        """A ^(root child) tree can be matched with the ^(...) pattern."""
        grammar = textwrap.dedent(
            r'''grammar T;
            options {
                language=Python;
                output=AST;
            }
            a : ID INT -> ^(ID INT);
            ID : 'a'..'z'+ ;
            INT : '0'..'9'+;
            WS : (' '|'\\n') {$channel=HIDDEN;} ;
            ''')

        treeGrammar = textwrap.dedent(
            r'''tree grammar TP;
            options {
                language=Python;
                ASTLabelType=CommonTree;
            }
            a : ^(ID INT)
                {self.capture(str($ID)+", "+str($INT))}
              ;
            ''')

        found = self.execTreeParser(
            grammar, 'a',
            treeGrammar, 'a',
            "abc 34"
            )
        self.failUnlessEqual("abc, 34", found)

    def testFlatVsTreeDecision(self):
        """The walker can decide between a flat alt and a tree alt."""
        grammar = textwrap.dedent(
            r'''grammar T;
            options {
                language=Python;
                output=AST;
            }
            a : b c ;
            b : ID INT -> ^(ID INT);
            c : ID INT;
            ID : 'a'..'z'+ ;
            INT : '0'..'9'+;
            WS : (' '|'\\n') {$channel=HIDDEN;} ;
            ''')

        treeGrammar = textwrap.dedent(
            r'''tree grammar TP;
            options {
                language=Python;
                ASTLabelType=CommonTree;
            }
            a : b b ;
            b : ID INT    {self.capture(str($ID)+" "+str($INT)+'\n')}
              | ^(ID INT) {self.capture("^("+str($ID)+" "+str($INT)+')');}
              ;
            ''')

        found = self.execTreeParser(
            grammar, 'a',
            treeGrammar, 'a',
            "a 1 b 2"
            )
        self.failUnlessEqual("^(a 1)b 2\n", found)

    def testFlatVsTreeDecision2(self):
        """Same decision, but with token lists (INT+) on both alts."""
        grammar = textwrap.dedent(
            r"""grammar T;
            options {
                language=Python;
                output=AST;
            }
            a : b c ;
            b : ID INT+ -> ^(ID INT+);
            c : ID INT+;
            ID : 'a'..'z'+ ;
            INT : '0'..'9'+;
            WS : (' '|'\n') {$channel=HIDDEN;} ;
            """)

        treeGrammar = textwrap.dedent(
            r'''tree grammar TP;
            options {
                language=Python;
                ASTLabelType=CommonTree;
            }
            a : b b ;
            b : ID INT+    {self.capture(str($ID)+" "+str($INT)+"\n")}
              | ^(x=ID (y=INT)+) {self.capture("^("+str($x)+' '+str($y)+')')}
              ;
            ''')

        found = self.execTreeParser(
            grammar, 'a',
            treeGrammar, 'a',
            "a 1 2 3 b 4 5"
            )
        self.failUnlessEqual("^(a 3)b 5\n", found)

    def testCyclicDFALookahead(self):
        """Alternatives distinguished only after INT+ need cyclic DFA lookahead."""
        grammar = textwrap.dedent(
            r'''grammar T;
            options {
                language=Python;
                output=AST;
            }
            a : ID INT+ PERIOD;
            ID : 'a'..'z'+ ;
            INT : '0'..'9'+;
            SEMI : ';' ;
            PERIOD : '.' ;
            WS : (' '|'\n') {$channel=HIDDEN;} ;
            ''')

        treeGrammar = textwrap.dedent(
            r'''tree grammar TP;
            options {
                language=Python;
                ASTLabelType=CommonTree;
            }
            a : ID INT+ PERIOD {self.capture("alt 1")}
              | ID INT+ SEMI   {self.capture("alt 2")}
              ;
            ''')

        found = self.execTreeParser(
            grammar, 'a',
            treeGrammar, 'a',
            "a 1 2 3."
            )
        self.failUnlessEqual("alt 1", found)

##     def testTemplateOutput(self):
##         String grammar =
##             "grammar T;\n" +
##             "options {output=AST;}\n" +
##             "a : ID INT;\n" +
##             "ID : 'a'..'z'+ ;\n" +
##             "INT : '0'..'9'+;\n" +
##             "WS : (' '|'\\n') {$channel=HIDDEN;} ;\n";

##         String treeGrammar =
##             "tree grammar TP;\n" +
##             "options {output=template; ASTLabelType=CommonTree;}\n" +
##             "s : a {System.out.println($a.st);};\n" +
##             "a : ID INT -> {new StringTemplate($INT.text)}\n" +
##             "  ;\n";

##         String found = execTreeParser("T.g", grammar, "TParser", "TP.g",
##                     treeGrammar, "TP", "TLexer", "a", "s", "abc 34");
##         assertEquals("34\n", found);
##     }

    def testNullableChildList(self):
        """An optional child (INT?) may be entirely absent."""
        grammar = textwrap.dedent(
            r'''grammar T;
            options {
                language=Python;
                output=AST;
            }
            a : ID INT? -> ^(ID INT?);
            ID : 'a'..'z'+ ;
            INT : '0'..'9'+;
            WS : (' '|'\\n') {$channel=HIDDEN;} ;
            ''')

        treeGrammar = textwrap.dedent(
            r'''tree grammar TP;
            options {
                language=Python;
                ASTLabelType=CommonTree;
            }
            a : ^(ID INT?)
                {self.capture(str($ID))}
              ;
            ''')

        found = self.execTreeParser(
            grammar, 'a',
            treeGrammar, 'a',
            "abc"
            )
        self.failUnlessEqual("abc", found)

    def testNullableChildList2(self):
        """Optional child absent, with a following sibling token (SEMI)."""
        grammar = textwrap.dedent(
            r'''grammar T;
            options {
                language=Python;
                output=AST;
            }
            a : ID INT? SEMI -> ^(ID INT?) SEMI ;
            ID : 'a'..'z'+ ;
            INT : '0'..'9'+;
            SEMI : ';' ;
            WS : (' '|'\n') {$channel=HIDDEN;} ;
            ''')

        treeGrammar = textwrap.dedent(
            r'''tree grammar TP;
            options {
                language=Python;
                ASTLabelType=CommonTree;
            }
            a : ^(ID INT?) SEMI
                {self.capture(str($ID))}
              ;
            ''')

        found = self.execTreeParser(
            grammar, 'a',
            treeGrammar, 'a',
            "abc;"
            )
        self.failUnlessEqual("abc", found)

    def testNullableChildList3(self):
        """Several optional children; the tree rule delegates to a sub-rule."""
        grammar = textwrap.dedent(
            r'''grammar T;
            options {
                language=Python;
                output=AST;
            }
            a : x=ID INT? (y=ID)? SEMI -> ^($x INT? $y?) SEMI ;
            ID : 'a'..'z'+ ;
            INT : '0'..'9'+;
            SEMI : ';' ;
            WS : (' '|'\\n') {$channel=HIDDEN;} ;
            ''')

        treeGrammar = textwrap.dedent(
            r'''tree grammar TP;
            options {
                language=Python;
                ASTLabelType=CommonTree;
            }
            a : ^(ID INT? b) SEMI
                {self.capture(str($ID)+", "+str($b.text))}
              ;
            b : ID? ;
            ''')

        found = self.execTreeParser(
            grammar, 'a',
            treeGrammar, 'a',
            "abc def;"
            )
        self.failUnlessEqual("abc, def", found)

    def testActionsAfterRoot(self):
        """Actions placed after the root token inside ^(...) all run."""
        grammar = textwrap.dedent(
            r'''grammar T;
            options {
                language=Python;
                output=AST;
            }
            a : x=ID INT? SEMI -> ^($x INT?) ;
            ID : 'a'..'z'+ ;
            INT : '0'..'9'+;
            SEMI : ';' ;
            WS : (' '|'\n') {$channel=HIDDEN;} ;
            ''')

        treeGrammar = textwrap.dedent(
            r'''tree grammar TP;
            options {
                language=Python;
                ASTLabelType=CommonTree;
            }
            a @init {x=0} : ^(ID {x=1} {x=2} INT?)
                {self.capture(str($ID)+", "+str(x))}
              ;
            ''')

        found = self.execTreeParser(
            grammar, 'a',
            treeGrammar, 'a',
            "abc;"
            )
        # BUGFIX: the original called self.failUnless("abc, 2\n", found),
        # which passes a truthy string as the condition and therefore can
        # never fail.  The capture action builds "abc, 2" (no newline), so
        # compare for equality against that.
        self.failUnlessEqual("abc, 2", found)
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
| Python |
#!python
"""Bootstrap setuptools installation
If you want to use setuptools in your package's setup.py, just include this
file in the same directory with it, and add this to the top of your setup.py::
from ez_setup import use_setuptools
use_setuptools()
If you want to require a specific version of setuptools, set a download
mirror, or use an alternate download directory, you can do so by supplying
the appropriate options to ``use_setuptools()``.
This file can also be run as a script to install or upgrade setuptools.
"""
import sys
# Version of setuptools to fetch when none is installed, and the base URL
# of the Python-version-specific download directory on the cheeseshop.
DEFAULT_VERSION = "0.6c5"
DEFAULT_URL = "http://cheeseshop.python.org/packages/%s/s/setuptools/" % sys.version[:3]
# Known-good md5 checksums of the setuptools eggs, keyed by egg file name.
# Maintained via the --md5update command line option (see update_md5()).
md5_data = {
    'setuptools-0.6b1-py2.3.egg': '8822caf901250d848b996b7f25c6e6ca',
    'setuptools-0.6b1-py2.4.egg': 'b79a8a403e4502fbb85ee3f1941735cb',
    'setuptools-0.6b2-py2.3.egg': '5657759d8a6d8fc44070a9d07272d99b',
    'setuptools-0.6b2-py2.4.egg': '4996a8d169d2be661fa32a6e52e4f82a',
    'setuptools-0.6b3-py2.3.egg': 'bb31c0fc7399a63579975cad9f5a0618',
    'setuptools-0.6b3-py2.4.egg': '38a8c6b3d6ecd22247f179f7da669fac',
    'setuptools-0.6b4-py2.3.egg': '62045a24ed4e1ebc77fe039aa4e6f7e5',
    'setuptools-0.6b4-py2.4.egg': '4cb2a185d228dacffb2d17f103b3b1c4',
    'setuptools-0.6c1-py2.3.egg': 'b3f2b5539d65cb7f74ad79127f1a908c',
    'setuptools-0.6c1-py2.4.egg': 'b45adeda0667d2d2ffe14009364f2a4b',
    'setuptools-0.6c2-py2.3.egg': 'f0064bf6aa2b7d0f3ba0b43f20817c27',
    'setuptools-0.6c2-py2.4.egg': '616192eec35f47e8ea16cd6a122b7277',
    'setuptools-0.6c3-py2.3.egg': 'f181fa125dfe85a259c9cd6f1d7b78fa',
    'setuptools-0.6c3-py2.4.egg': 'e0ed74682c998bfb73bf803a50e7b71e',
    'setuptools-0.6c3-py2.5.egg': 'abef16fdd61955514841c7c6bd98965e',
    'setuptools-0.6c4-py2.3.egg': 'b0b9131acab32022bfac7f44c5d7971f',
    'setuptools-0.6c4-py2.4.egg': '2a1f9656d4fbf3c97bf946c0a124e6e2',
    'setuptools-0.6c4-py2.5.egg': '8f5a052e32cdb9c72bcf4b5526f28afc',
    'setuptools-0.6c5-py2.3.egg': 'ee9fd80965da04f2f3e6b3576e9d8167',
    'setuptools-0.6c5-py2.4.egg': 'afe2adf1c01701ee841761f5bcd8aa64',
    'setuptools-0.6c5-py2.5.egg': 'a8d3f61494ccaa8714dfed37bccd3d5d',
}
import sys, os
def _validate_md5(egg_name, data):
    """Verify *data* against the registered md5 sum for *egg_name*.

    Eggs without a registered checksum pass through unchecked.  On a
    checksum mismatch an error is printed to stderr and the script exits.
    """
    if egg_name not in md5_data:
        return data
    from md5 import md5
    digest = md5(data).hexdigest()
    if digest != md5_data[egg_name]:
        print >>sys.stderr, (
            "md5 validation of %s failed! (Possible download problem?)"
            % egg_name
        )
        sys.exit(2)
    return data
def use_setuptools(
    version=DEFAULT_VERSION, download_base=DEFAULT_URL, to_dir=os.curdir,
    download_delay=15
):
    """Automatically find/download setuptools and make it available on sys.path
    `version` should be a valid setuptools version number that is available
    as an egg for download under the `download_base` URL (which should end with
    a '/'). `to_dir` is the directory where setuptools will be downloaded, if
    it is not already available. If `download_delay` is specified, it should
    be the number of seconds that will be paused before initiating a download,
    should one be required. If an older version of setuptools is installed,
    this routine will print a message to ``sys.stderr`` and raise SystemExit in
    an attempt to abort the calling script.
    """
    try:
        import setuptools
        # 0.0.1 is a known-broken dummy release; refuse to continue with it.
        if setuptools.__version__ == '0.0.1':
            print >>sys.stderr, (
            "You have an obsolete version of setuptools installed. Please\n"
            "remove it from your system entirely before rerunning this script."
            )
            sys.exit(2)
    except ImportError:
        # No setuptools at all: fetch the egg and make it importable, and
        # remember it so setuptools can install itself from it later.
        egg = download_setuptools(version, download_base, to_dir, download_delay)
        sys.path.insert(0, egg)
        import setuptools; setuptools.bootstrap_install_from = egg
    import pkg_resources
    try:
        pkg_resources.require("setuptools>="+version)
    except pkg_resources.VersionConflict, e:
        # XXX could we install in a subprocess here?
        # An older setuptools is already imported; upgrading in-process is
        # unsafe, so abort and ask the user to upgrade first.
        print >>sys.stderr, (
        "The required version of setuptools (>=%s) is not available, and\n"
        "can't be installed while this script is running. Please install\n"
        " a more recent version first.\n\n(Currently using %r)"
        ) % (version, e.args[0])
        sys.exit(2)
def download_setuptools(
    version=DEFAULT_VERSION, download_base=DEFAULT_URL, to_dir=os.curdir,
    delay = 15
):
    """Download setuptools from a specified location and return its filename
    `version` should be a valid setuptools version number that is available
    as an egg for download under the `download_base` URL (which should end
    with a '/'). `to_dir` is the directory where the egg will be downloaded.
    `delay` is the number of seconds to pause before an actual download attempt.
    """
    import urllib2, shutil
    egg_name = "setuptools-%s-py%s.egg" % (version,sys.version[:3])
    url = download_base + egg_name
    saveto = os.path.join(to_dir, egg_name)
    src = dst = None
    if not os.path.exists(saveto): # Avoid repeated downloads
        try:
            from distutils import log
            # Give the user a chance to abort (and a pointer for offline use)
            # before touching the network.
            if delay:
                log.warn("""
---------------------------------------------------------------------------
This script requires setuptools version %s to run (even to display
help). I will attempt to download it for you (from
%s), but
you may need to enable firewall access for this script first.
I will start the download in %d seconds.
(Note: if this machine does not have network access, please obtain the file
%s
and place it in this directory before rerunning this script.)
---------------------------------------------------------------------------""",
                    version, download_base, delay, url
                ); from time import sleep; sleep(delay)
            log.warn("Downloading %s", url)
            src = urllib2.urlopen(url)
            # Read/write all in one block, so we don't create a corrupt file
            # if the download is interrupted.
            data = _validate_md5(egg_name, src.read())
            dst = open(saveto,"wb"); dst.write(data)
        finally:
            if src: src.close()
            if dst: dst.close()
    return os.path.realpath(saveto)
def main(argv, version=DEFAULT_VERSION):
    """Install or upgrade setuptools and EasyInstall"""
    try:
        import setuptools
    except ImportError:
        # Bootstrap case: no setuptools at all.  Download an egg, use it to
        # install itself, and always remove the temporary egg afterwards.
        egg = None
        try:
            egg = download_setuptools(version, delay=0)
            sys.path.insert(0,egg)
            from setuptools.command.easy_install import main
            return main(list(argv)+[egg])   # we're done here
        finally:
            if egg and os.path.exists(egg):
                os.unlink(egg)
    else:
        if setuptools.__version__ == '0.0.1':
            # tell the user to uninstall obsolete version
            use_setuptools(version)
    req = "setuptools>="+version
    import pkg_resources
    try:
        pkg_resources.require(req)
    except pkg_resources.VersionConflict:
        # Installed setuptools is too old: upgrade it from a fresh egg.
        try:
            from setuptools.command.easy_install import main
        except ImportError:
            from easy_install import main
        main(list(argv)+[download_setuptools(delay=0)])
        sys.exit(0) # try to force an exit
    else:
        if argv:
            from setuptools.command.easy_install import main
            main(argv)
        else:
            print "Setuptools version",version,"or greater has been installed."
            print '(Run "ez_setup.py -U setuptools" to reinstall or upgrade.)'
def update_md5(filenames):
    """Update our built-in md5 registry"""
    import re
    from md5 import md5
    # Compute the checksum of each given egg file.
    for name in filenames:
        base = os.path.basename(name)
        f = open(name,'rb')
        md5_data[base] = md5(f.read()).hexdigest()
        f.close()
    # Render the updated table as source text, sorted by egg name.
    data = [" %r: %r,\n" % it for it in md5_data.items()]
    data.sort()
    repl = "".join(data)
    # Rewrite this script's own source, splicing the new table into the
    # body of the 'md5_data = {...}' literal.
    import inspect
    srcfile = inspect.getsourcefile(sys.modules[__name__])
    f = open(srcfile, 'rb'); src = f.read(); f.close()
    match = re.search("\nmd5_data = {\n([^}]+)}", src)
    if not match:
        print >>sys.stderr, "Internal error!"
        sys.exit(2)
    src = src[:match.start(1)] + repl + src[match.end(1):]
    f = open(srcfile,'w')
    f.write(src)
    f.close()
if __name__=='__main__':
    # '--md5update FILE...' refreshes the embedded checksum table;
    # any other arguments are passed on to the install/upgrade logic.
    if len(sys.argv)>2 and sys.argv[1]=='--md5update':
        update_md5(sys.argv[2:])
    else:
        main(sys.argv[1:])
| Python |
#!/usr/bin/env python
""" @namespace doxypy
doxypy is an input filter for Doxygen. It preprocesses python
files so that docstrings of classes and functions are extracted
as Doxygen's special Python documentation blocks. It can be found
at <http://code.foosel.net/doxypy>.
Copyright (C) 2006
Gina Haeussge (gina at foosel dot net),
Philippe Neumann (phil at foosel dot net)
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
""" @mainpage
@author Gina Haeussge
@author Philippe Neumann
"""
import sys
import re
from optparse import OptionParser, OptionGroup
def makeCommentBlock(commentLines, indent=""):
""" Converts the given $commentLines into a comment block.
@param commentLines The lines of the block comment
@param indent The indentation of the block
@return The indented doxygen comment block containing the
given comment lines.
"""
joinStr = "\n%s# " % indent
if options.strip:
commentLines = map(lambda x: x.strip(), commentLines)
lines = joinStr.join(commentLines)
return "%s##%s%s" % (indent, joinStr, lines)
def parse(input):
    """ Searches for def and class definitions in the source, then moves
        existing docstrings as special doxygen block comments in front of
        the definitions.

        @param input The input source to process
        @return The processed input.
    """
    output = []

    # Comment delimiter of the docstring
    commentDelim = '"""'

    # Pre-compiled regexes for the different kinds of lines.
    triggerRe = re.compile("^(\s*)(def .+:|class .+:)")
    commentStartRe = re.compile('^\s*(%s)' % commentDelim)
    commentEndRe = re.compile('(%s)\s*$' % commentDelim)
    emptyRe = re.compile("^\s*$")
    hashLineRe = re.compile("^\s*#.*$")
    importLineRe = re.compile("^\s*(import |from .+ import)")

    lines = input.split("\n")

    # parser state
    fileHeadFlag = True       # still scanning the file header?
    triggerWordFlag = False   # saw a def/class and now awaiting its docstring?
    commentFlag = False       # currently inside a multiline docstring?
    comment = []              # collected docstring lines
    triggerWs = ""            # indentation of the pending def/class
    triggerLines = None       # buffered def/class (plus blank) lines

    for text in lines:
        match = triggerRe.search(text)
        if match:
            # A new def/class; flush any pending one that had no docstring.
            if triggerWordFlag and triggerLines:
                output.append("\n".join(triggerLines))
            triggerWordFlag = True
            triggerWs = match.group(1)
            fileHeadFlag = False
            triggerLines = [text]
            continue

        # file header or active keyword trigger?
        if fileHeadFlag or triggerWordFlag:
            if commentEndRe.search(text) and commentFlag:
                # end of a multiline docstring: emit it as a doxygen block
                comment.append(text[:text.rfind(commentDelim)])
                output.append(makeCommentBlock(comment, triggerWs))
                if triggerLines:
                    output.append("\n".join(triggerLines))
                comment = []
                commentFlag = False
                triggerWs = ""
                triggerLines = None
                triggerWordFlag = False
            elif commentStartRe.search(text):
                if commentEndRe.search(text[text.find(commentDelim) + len(commentDelim):]):
                    # docstring opens and closes on the same line
                    comment.append(text[text.find(commentDelim) + len(commentDelim):text.rfind(commentDelim)])
                    output.append(makeCommentBlock(comment, triggerWs))
                    if triggerLines:
                        output.append("\n".join(triggerLines))
                    comment = []
                    commentFlag = False
                    triggerWs = ""
                    triggerLines = None
                    triggerWordFlag = False
                else:
                    # multiline docstring begins
                    commentFlag = True
                    comment.append(text[text.find(commentDelim) + len(commentDelim):])
            elif commentFlag:
                # inside a multiline docstring -> collect the line
                comment.append(text)
            elif emptyRe.search(text):
                # blank line while still searching for the docstring
                if triggerLines:
                    triggerLines.append(text)
                else:
                    output.append(text)
            elif fileHeadFlag:
                # non-comment / non-import line terminates the header search
                if not (hashLineRe.search(text) or emptyRe.search(text) or importLineRe.search(text)):
                    fileHeadFlag = False
                output.append(text)
            else:
                # no docstring followed the def/class; stop searching
                triggerWordFlag = False
                if triggerLines:
                    output.append("\n".join(triggerLines))
                triggerLines = None
                output.append(text)
        else:
            # ordinary line: copy through unchanged
            output.append(text)

    return "\n".join(output)
def loadFile(filename):
    """ Loads file $filename and returns the content.

        @param filename The name of the file to load
        @return The content of the file.
    """
    f = open(filename, 'r')
    try:
        return f.read()
    finally:
        f.close()
def optParse():
    """Parses the command line options; returns the input filename."""
    parser = OptionParser(prog="doxypy", version="%prog 0.2.1")
    parser.set_usage("%prog [options] filename")
    parser.add_option(
        "--trim", "--strip",
        action="store_true", dest="strip",
        help="enables trimming of docstrings, might be useful if you get oddly spaced output"
    )

    # the parsed options are published as a module-level global because
    # makeCommentBlock() consults them
    global options
    options, args = parser.parse_args()

    if not args:
        print >>sys.stderr, "No filename given."
        sys.exit(-1)

    return args[0]
def main():
""" Opens the file given as first commandline argument and processes it,
then prints out the processed file contents.
"""
filename = optParse()
try:
input = loadFile(filename)
except IOError, (errno, msg):
print >>sys.stderr, msg
sys.exit(-1)
output = parse(input)
print output
if __name__ == "__main__":
main() | Python |
# -*- coding: utf-8 -*-
import os
import unittest
from StringIO import StringIO
import textwrap
import stringtemplate3
from antlr3.dottreegen import toDOT
from antlr3.treewizard import TreeWizard
from antlr3.tree import CommonTreeAdaptor
class TestToDOT(unittest.TestCase):
    """Test case for the toDOT function."""

    def setUp(self):
        # Adaptor, token-name table, and a TreeWizard used to build the
        # input trees from pattern strings.
        self.adaptor = CommonTreeAdaptor()
        self.tokens = [
            "", "", "", "", "", "A", "B", "C", "D", "E", "ID", "VAR"
        ]
        self.wiz = TreeWizard(self.adaptor, self.tokens)

    def testNone(self):
        """toDOT()"""
        # Templates for the graph skeleton and for one parent->child edge.
        treeST = stringtemplate3.StringTemplate(
            template=(
            "digraph {\n" +
            " $nodes$\n" +
            " $edges$\n" +
            "}\n")
            )

        edgeST = stringtemplate3.StringTemplate(
            template="$parent$ -> $child$\n"
            )

        tree = self.wiz.create("(A B (B C C) (B (C D D)))")
        st = toDOT(tree, self.adaptor, treeST, edgeST)
        result = st.toString()
        # Nodes are numbered in depth-first order; edges follow the tree.
        expected = textwrap.dedent(
            '''\
            digraph {
            n0 [label="A"];
            n1 [label="B"];
            n2 [label="B"];
            n3 [label="C"];
            n4 [label="C"];
            n5 [label="B"];
            n6 [label="C"];
            n7 [label="D"];
            n8 [label="D"];
            n0 -> n1
            n0 -> n2
            n2 -> n3
            n2 -> n4
            n0 -> n5
            n5 -> n6
            n6 -> n7
            n6 -> n8
            }
            '''
            )
        self.assertEqual(result, expected)
if __name__ == "__main__":
unittest.main(testRunner=unittest.TextTestRunner(verbosity=2))
| Python |
# -*- coding: utf-8 -*-
import os
import unittest
from StringIO import StringIO
import antlr3
class TestStringStream(unittest.TestCase):
"""Test case for the StringStream class."""
def testSize(self):
"""StringStream.size()"""
stream = antlr3.StringStream('foo')
self.failUnlessEqual(stream.size(), 3)
def testIndex(self):
"""StringStream.index()"""
stream = antlr3.StringStream('foo')
self.failUnlessEqual(stream.index(), 0)
def testConsume(self):
"""StringStream.consume()"""
stream = antlr3.StringStream('foo\nbar')
stream.consume() # f
self.failUnlessEqual(stream.index(), 1)
self.failUnlessEqual(stream.charPositionInLine, 1)
self.failUnlessEqual(stream.line, 1)
stream.consume() # o
self.failUnlessEqual(stream.index(), 2)
self.failUnlessEqual(stream.charPositionInLine, 2)
self.failUnlessEqual(stream.line, 1)
stream.consume() # o
self.failUnlessEqual(stream.index(), 3)
self.failUnlessEqual(stream.charPositionInLine, 3)
self.failUnlessEqual(stream.line, 1)
stream.consume() # \n
self.failUnlessEqual(stream.index(), 4)
self.failUnlessEqual(stream.charPositionInLine, 0)
self.failUnlessEqual(stream.line, 2)
stream.consume() # b
self.failUnlessEqual(stream.index(), 5)
self.failUnlessEqual(stream.charPositionInLine, 1)
self.failUnlessEqual(stream.line, 2)
stream.consume() # a
self.failUnlessEqual(stream.index(), 6)
self.failUnlessEqual(stream.charPositionInLine, 2)
self.failUnlessEqual(stream.line, 2)
stream.consume() # r
self.failUnlessEqual(stream.index(), 7)
self.failUnlessEqual(stream.charPositionInLine, 3)
self.failUnlessEqual(stream.line, 2)
stream.consume() # EOF
self.failUnlessEqual(stream.index(), 7)
self.failUnlessEqual(stream.charPositionInLine, 3)
self.failUnlessEqual(stream.line, 2)
stream.consume() # EOF
self.failUnlessEqual(stream.index(), 7)
self.failUnlessEqual(stream.charPositionInLine, 3)
self.failUnlessEqual(stream.line, 2)
def testReset(self):
"""StringStream.reset()"""
stream = antlr3.StringStream('foo')
stream.consume()
stream.consume()
stream.reset()
self.failUnlessEqual(stream.index(), 0)
self.failUnlessEqual(stream.line, 1)
self.failUnlessEqual(stream.charPositionInLine, 0)
self.failUnlessEqual(stream.LT(1), 'f')
def testLA(self):
"""StringStream.LA()"""
stream = antlr3.StringStream('foo')
self.failUnlessEqual(stream.LT(1), 'f')
self.failUnlessEqual(stream.LT(2), 'o')
self.failUnlessEqual(stream.LT(3), 'o')
stream.consume()
stream.consume()
self.failUnlessEqual(stream.LT(1), 'o')
self.failUnlessEqual(stream.LT(2), antlr3.EOF)
self.failUnlessEqual(stream.LT(3), antlr3.EOF)
def testSubstring(self):
"""StringStream.substring()"""
stream = antlr3.StringStream('foobar')
self.failUnlessEqual(stream.substring(0, 0), 'f')
self.failUnlessEqual(stream.substring(0, 1), 'fo')
self.failUnlessEqual(stream.substring(0, 5), 'foobar')
self.failUnlessEqual(stream.substring(3, 5), 'bar')
def testSeekForward(self):
"""StringStream.seek(): forward"""
stream = antlr3.StringStream('foo\nbar')
stream.seek(4)
self.failUnlessEqual(stream.index(), 4)
self.failUnlessEqual(stream.line, 2)
self.failUnlessEqual(stream.charPositionInLine, 0)
self.failUnlessEqual(stream.LT(1), 'b')
## # not yet implemented
## def testSeekBackward(self):
## """StringStream.seek(): backward"""
## stream = antlr3.StringStream('foo\nbar')
## stream.seek(4)
## stream.seek(1)
## self.failUnlessEqual(stream.index(), 1)
## self.failUnlessEqual(stream.line, 1)
## self.failUnlessEqual(stream.charPositionInLine, 1)
## self.failUnlessEqual(stream.LA(1), 'o')
def testMark(self):
"""StringStream.mark()"""
stream = antlr3.StringStream('foo\nbar')
stream.seek(4)
marker = stream.mark()
self.failUnlessEqual(marker, 1)
self.failUnlessEqual(stream.markDepth, 1)
stream.consume()
marker = stream.mark()
self.failUnlessEqual(marker, 2)
self.failUnlessEqual(stream.markDepth, 2)
def testReleaseLast(self):
"""StringStream.release(): last marker"""
stream = antlr3.StringStream('foo\nbar')
stream.seek(4)
marker1 = stream.mark()
stream.consume()
marker2 = stream.mark()
stream.release()
self.failUnlessEqual(stream.markDepth, 1)
# release same marker again, nothing has changed
stream.release()
self.failUnlessEqual(stream.markDepth, 1)
    def testReleaseNested(self):
        """StringStream.release(): nested"""
        stream = antlr3.StringStream('foo\nbar')
        stream.seek(4)
        marker1 = stream.mark()
        stream.consume()
        marker2 = stream.mark()
        stream.consume()
        marker3 = stream.mark()
        # releasing an inner marker also discards all markers above it
        stream.release(marker2)
        self.failUnlessEqual(stream.markDepth, 1)
    def testRewindLast(self):
        """StringStream.rewind(): last marker"""
        stream = antlr3.StringStream('foo\nbar')
        stream.seek(4)
        marker = stream.mark()
        stream.consume()
        stream.consume()
        # rewind() without argument returns to the last marker and pops it
        stream.rewind()
        self.failUnlessEqual(stream.markDepth, 0)
        self.failUnlessEqual(stream.index(), 4)
        self.failUnlessEqual(stream.line, 2)
        self.failUnlessEqual(stream.charPositionInLine, 0)
        self.failUnlessEqual(stream.LT(1), 'b')
    def testRewindNested(self):
        """StringStream.rewind(): nested"""
        stream = antlr3.StringStream('foo\nbar')
        stream.seek(4)
        marker1 = stream.mark()
        stream.consume()
        marker2 = stream.mark()
        stream.consume()
        marker3 = stream.mark()
        # rewinding to an inner marker pops it and everything above it
        stream.rewind(marker2)
        self.failUnlessEqual(stream.markDepth, 1)
        self.failUnlessEqual(stream.index(), 5)
        self.failUnlessEqual(stream.line, 2)
        self.failUnlessEqual(stream.charPositionInLine, 1)
        self.failUnlessEqual(stream.LT(1), 'a')
class TestFileStream(unittest.TestCase):
    """Test case for the FileStream class."""

    def _checkStream(self, stream, expectedChar):
        """Run the shared seek/mark/rewind sequence and verify state.

        Both tests below perform the identical sequence on streams that
        only differ in encoding; the expected lookahead char is the one
        at index 5 of the respective fixture file.
        """
        stream.seek(4)
        marker1 = stream.mark()
        stream.consume()
        marker2 = stream.mark()
        stream.consume()
        marker3 = stream.mark()
        stream.rewind(marker2)

        self.failUnlessEqual(stream.markDepth, 1)
        self.failUnlessEqual(stream.index(), 5)
        self.failUnlessEqual(stream.line, 2)
        self.failUnlessEqual(stream.charPositionInLine, 1)
        self.failUnlessEqual(stream.LT(1), expectedChar)
        self.failUnlessEqual(stream.LA(1), ord(expectedChar))

    def testNoEncoding(self):
        # Byte-oriented stream: no encoding argument.
        path = os.path.join(os.path.dirname(__file__), 'teststreams.input1')
        self._checkStream(antlr3.FileStream(path), 'a')

    def testEncoded(self):
        # UTF-8 decoded stream: lookahead yields a unicode char.
        path = os.path.join(os.path.dirname(__file__), 'teststreams.input2')
        self._checkStream(antlr3.FileStream(path, 'utf-8'), u'ä')
class TestInputStream(unittest.TestCase):
    """Test case for the InputStream class."""

    def _checkStream(self, stream, expectedChar):
        """Run the shared seek/mark/rewind sequence and verify state.

        Both tests below perform the identical sequence on streams that
        only differ in encoding; the expected lookahead char is the one
        at index 5 of the input 'foo\\nbar' / 'foo\\nbär'.
        """
        stream.seek(4)
        marker1 = stream.mark()
        stream.consume()
        marker2 = stream.mark()
        stream.consume()
        marker3 = stream.mark()
        stream.rewind(marker2)

        self.failUnlessEqual(stream.markDepth, 1)
        self.failUnlessEqual(stream.index(), 5)
        self.failUnlessEqual(stream.line, 2)
        self.failUnlessEqual(stream.charPositionInLine, 1)
        self.failUnlessEqual(stream.LT(1), expectedChar)
        self.failUnlessEqual(stream.LA(1), ord(expectedChar))

    def testNoEncoding(self):
        # Byte-oriented stream wrapped around a file-like object.
        file = StringIO('foo\nbar')
        self._checkStream(antlr3.InputStream(file), 'a')

    def testEncoded(self):
        # UTF-8 decoded stream: lookahead yields a unicode char.
        file = StringIO(u'foo\nbär'.encode('utf-8'))
        self._checkStream(antlr3.InputStream(file, 'utf-8'), u'ä')
class TestCommonTokenStream(unittest.TestCase):
    """Test case for the CommonTokenStream class."""
    # (docstring fixed: it previously claimed to test StringStream)

    def setUp(self):
        """Setup test fixture

        The constructor of CommonTokenStream needs a token source. This
        is a simple mock class providing just the nextToken() method.

        """

        class MockSource(object):
            def __init__(self):
                self.tokens = []

            def nextToken(self):
                # hand out the queued tokens; None once exhausted
                try:
                    return self.tokens.pop(0)
                except IndexError:
                    return None

        self.source = MockSource()

    def testInit(self):
        """CommonTokenStream.__init__()"""
        stream = antlr3.CommonTokenStream(self.source)
        self.failUnlessEqual(stream.index(), -1)

    def testSetTokenSource(self):
        """CommonTokenStream.setTokenSource()"""
        stream = antlr3.CommonTokenStream(None)
        stream.setTokenSource(self.source)
        self.failUnlessEqual(stream.index(), -1)
        self.failUnlessEqual(stream.channel, antlr3.DEFAULT_CHANNEL)

    def testLTEmptySource(self):
        """CommonTokenStream.LT(): EOF (empty source)"""
        stream = antlr3.CommonTokenStream(self.source)
        lt1 = stream.LT(1)
        self.failUnlessEqual(lt1.type, antlr3.EOF)

    def testLT1(self):
        """CommonTokenStream.LT(1)"""
        self.source.tokens.append(
            antlr3.CommonToken(type=12)
            )
        stream = antlr3.CommonTokenStream(self.source)
        lt1 = stream.LT(1)
        self.failUnlessEqual(lt1.type, 12)

    def testLT1WithHidden(self):
        """CommonTokenStream.LT(1): with hidden tokens"""
        # hidden-channel tokens are skipped by LT()
        self.source.tokens.append(
            antlr3.CommonToken(type=12, channel=antlr3.HIDDEN_CHANNEL)
            )
        self.source.tokens.append(
            antlr3.CommonToken(type=13)
            )
        stream = antlr3.CommonTokenStream(self.source)
        lt1 = stream.LT(1)
        self.failUnlessEqual(lt1.type, 13)

    def testLT2BeyondEnd(self):
        """CommonTokenStream.LT(2): beyond end"""
        self.source.tokens.append(
            antlr3.CommonToken(type=12)
            )
        self.source.tokens.append(
            antlr3.CommonToken(type=13, channel=antlr3.HIDDEN_CHANNEL)
            )
        stream = antlr3.CommonTokenStream(self.source)
        lt1 = stream.LT(2)
        self.failUnlessEqual(lt1.type, antlr3.EOF)

    def testLTNegative(self):
        """CommonTokenStream.LT(-1): look back"""
        self.source.tokens.append(
            antlr3.CommonToken(type=12)
            )
        self.source.tokens.append(
            antlr3.CommonToken(type=13)
            )
        stream = antlr3.CommonTokenStream(self.source)
        stream.fillBuffer()
        stream.consume()
        lt1 = stream.LT(-1)
        self.failUnlessEqual(lt1.type, 12)

    def testLB1(self):
        """CommonTokenStream.LB(1)"""
        self.source.tokens.append(
            antlr3.CommonToken(type=12)
            )
        self.source.tokens.append(
            antlr3.CommonToken(type=13)
            )
        stream = antlr3.CommonTokenStream(self.source)
        stream.fillBuffer()
        stream.consume()
        self.failUnlessEqual(stream.LB(1).type, 12)

    def testLTZero(self):
        """CommonTokenStream.LT(0)"""
        self.source.tokens.append(
            antlr3.CommonToken(type=12)
            )
        self.source.tokens.append(
            antlr3.CommonToken(type=13)
            )
        stream = antlr3.CommonTokenStream(self.source)
        lt1 = stream.LT(0)
        self.failUnless(lt1 is None)

    def testLBBeyondBegin(self):
        """CommonTokenStream.LB(): beyond begin"""
        # (docstring fixed: it said LB(-1), but LB(1)/LB(3) are tested)
        self.source.tokens.append(
            antlr3.CommonToken(type=12)
            )
        self.source.tokens.append(
            antlr3.CommonToken(type=12, channel=antlr3.HIDDEN_CHANNEL)
            )
        self.source.tokens.append(
            antlr3.CommonToken(type=12, channel=antlr3.HIDDEN_CHANNEL)
            )
        self.source.tokens.append(
            antlr3.CommonToken(type=13)
            )
        stream = antlr3.CommonTokenStream(self.source)
        self.failUnless(stream.LB(1) is None)

        stream.consume()
        stream.consume()
        self.failUnless(stream.LB(3) is None)

    def testFillBuffer(self):
        """CommonTokenStream.fillBuffer()"""
        self.source.tokens.append(
            antlr3.CommonToken(type=12)
            )
        self.source.tokens.append(
            antlr3.CommonToken(type=13)
            )
        self.source.tokens.append(
            antlr3.CommonToken(type=14)
            )
        self.source.tokens.append(
            antlr3.CommonToken(type=antlr3.EOF)
            )
        stream = antlr3.CommonTokenStream(self.source)
        stream.fillBuffer()

        # the EOF token is not buffered
        self.failUnlessEqual(len(stream.tokens), 3)
        self.failUnlessEqual(stream.tokens[0].type, 12)
        self.failUnlessEqual(stream.tokens[1].type, 13)
        self.failUnlessEqual(stream.tokens[2].type, 14)

    def testConsume(self):
        """CommonTokenStream.consume()"""
        self.source.tokens.append(
            antlr3.CommonToken(type=12)
            )
        self.source.tokens.append(
            antlr3.CommonToken(type=13)
            )
        self.source.tokens.append(
            antlr3.CommonToken(type=antlr3.EOF)
            )
        stream = antlr3.CommonTokenStream(self.source)
        self.failUnlessEqual(stream.LA(1), 12)
        stream.consume()
        self.failUnlessEqual(stream.LA(1), 13)
        stream.consume()
        self.failUnlessEqual(stream.LA(1), antlr3.EOF)
        # consuming at EOF must not move past it
        stream.consume()
        self.failUnlessEqual(stream.LA(1), antlr3.EOF)

    def testSeek(self):
        """CommonTokenStream.seek()"""
        self.source.tokens.append(
            antlr3.CommonToken(type=12)
            )
        self.source.tokens.append(
            antlr3.CommonToken(type=13)
            )
        self.source.tokens.append(
            antlr3.CommonToken(type=antlr3.EOF)
            )
        stream = antlr3.CommonTokenStream(self.source)
        self.failUnlessEqual(stream.LA(1), 12)
        stream.seek(2)
        self.failUnlessEqual(stream.LA(1), antlr3.EOF)
        # seeking backwards is supported for token streams
        stream.seek(0)
        self.failUnlessEqual(stream.LA(1), 12)

    def testMarkRewind(self):
        """CommonTokenStream.mark()/rewind()"""
        self.source.tokens.append(
            antlr3.CommonToken(type=12)
            )
        self.source.tokens.append(
            antlr3.CommonToken(type=13)
            )
        self.source.tokens.append(
            antlr3.CommonToken(type=antlr3.EOF)
            )
        stream = antlr3.CommonTokenStream(self.source)
        stream.fillBuffer()
        stream.consume()
        marker = stream.mark()
        stream.consume()
        stream.rewind(marker)
        self.failUnlessEqual(stream.LA(1), 13)

    def testToString(self):
        """CommonTokenStream.toString()"""
        self.source.tokens.append(
            antlr3.CommonToken(type=12, text="foo")
            )
        self.source.tokens.append(
            antlr3.CommonToken(type=13, text="bar")
            )
        self.source.tokens.append(
            antlr3.CommonToken(type=14, text="gnurz")
            )
        self.source.tokens.append(
            antlr3.CommonToken(type=15, text="blarz")
            )
        stream = antlr3.CommonTokenStream(self.source)
        # use unittest assertions for consistency (were bare asserts,
        # which give no diagnostics and vanish under python -O)
        self.failUnlessEqual(stream.toString(), "foobargnurzblarz")
        # toString() accepts either index or token boundaries (inclusive)
        self.failUnlessEqual(stream.toString(1, 2), "bargnurz")
        self.failUnlessEqual(
            stream.toString(stream.tokens[1], stream.tokens[-2]),
            "bargnurz")
if __name__ == "__main__":
unittest.main(testRunner=unittest.TextTestRunner(verbosity=2))
| Python |
import unittest
import antlr3
class TestDFA(unittest.TestCase):
    """Test case for the DFA class."""

    def setUp(self):
        """Setup test fixture.

        We need a Recognizer in order to instantiate a DFA.

        """

        class TRecognizer(antlr3.BaseRecognizer):
            # minimal concrete recognizer; only the version check matters here
            antlr_version = antlr3.runtime_version

        self.recog = TRecognizer()

    def testInit(self):
        """DFA.__init__()

        Just a smoke test.

        """

        dfa = antlr3.DFA(
            self.recog, 1,
            eot=[],
            eof=[],
            min=[],
            max=[],
            accept=[],
            special=[],
            transition=[]
            )

    def testUnpack(self):
        """DFA.unpack()"""

        # unpack() expands run-length-encoded unicode pairs (count, value)
        # into a flat int list; \uffff encodes -1.
        self.failUnlessEqual(
            antlr3.DFA.unpack(
            u"\1\3\1\4\2\uffff\1\5\22\uffff\1\2\31\uffff\1\6\6\uffff"
            u"\32\6\4\uffff\1\6\1\uffff\32\6"
            ),
            [ 3, 4, -1, -1, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
              -1, -1, -1, -1, -1, -1, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1,
              -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
              6, -1, -1, -1, -1, -1, -1, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
              6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, -1, -1, -1, -1, 6, -1,
              6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
              6, 6, 6, 6, 6
              ]
            )
if __name__ == "__main__":
unittest.main(testRunner=unittest.TextTestRunner(verbosity=2))
| Python |
# -*- coding: utf-8 -*-
import os
import unittest
from StringIO import StringIO
from antlr3.tree import CommonTreeNodeStream, CommonTree, CommonTreeAdaptor
from antlr3 import CommonToken, UP, DOWN, EOF
class TestTreeNodeStream(unittest.TestCase):
    """Test case for the TreeNodeStream class."""

    def setUp(self):
        # fresh adaptor per test; builds the CommonTree nodes below
        self.adaptor = CommonTreeAdaptor()

    def newStream(self, t):
        """Build new stream; lets us override to test other streams."""
        return CommonTreeNodeStream(t)

    def testSingleNode(self):
        t = CommonTree(CommonToken(101))

        stream = self.newStream(t)
        expecting = "101"
        found = self.toNodesOnlyString(stream)
        self.failUnlessEqual(expecting, found)

        # str(stream) also includes DOWN/UP navigation nodes (none here)
        expecting = "101"
        found = str(stream)
        self.failUnlessEqual(expecting, found)

    def testTwoChildrenOfNilRoot(self):
        # custom node class carrying an extra attribute 'x'
        class V(CommonTree):
            def __init__(self, token=None, ttype=None, x=None):
                # NOTE(review): does not call CommonTree.__init__; if both
                # token and ttype are None, self.token is never set -- the
                # test always passes one of them, though.
                if x is not None:
                    self.x = x

                if ttype is not None and token is None:
                    self.token = CommonToken(type=ttype)

                if token is not None:
                    self.token = token

            def __str__(self):
                if self.token is not None:
                    txt = self.token.text
                else:
                    txt = ""

                txt += "<V>"
                return txt

        root_0 = self.adaptor.nil();
        t = V(ttype=101, x=2)
        u = V(token=CommonToken(type=102, text="102"))
        self.adaptor.addChild(root_0, t)
        self.adaptor.addChild(root_0, u)
        self.assert_(root_0.parent is None)
        self.assertEquals(-1, root_0.childIndex)
        self.assertEquals(0, t.childIndex)
        self.assertEquals(1, u.childIndex)

    def test4Nodes(self):
        # ^(101 ^(102 103) 104)
        t = CommonTree(CommonToken(101))
        t.addChild(CommonTree(CommonToken(102)))
        t.getChild(0).addChild(CommonTree(CommonToken(103)))
        t.addChild(CommonTree(CommonToken(104)))

        stream = self.newStream(t)
        expecting = "101 102 103 104"
        found = self.toNodesOnlyString(stream)
        self.failUnlessEqual(expecting, found)

        # with navigation nodes: 2 == DOWN, 3 == UP
        expecting = "101 2 102 2 103 3 104 3"
        found = str(stream)
        self.failUnlessEqual(expecting, found)

    def testList(self):
        root = CommonTree(None)

        t = CommonTree(CommonToken(101))
        t.addChild(CommonTree(CommonToken(102)))
        t.getChild(0).addChild(CommonTree(CommonToken(103)))
        t.addChild(CommonTree(CommonToken(104)))

        u = CommonTree(CommonToken(105))

        root.addChild(t)
        root.addChild(u)

        stream = CommonTreeNodeStream(root)
        expecting = "101 102 103 104 105"
        found = self.toNodesOnlyString(stream)
        self.failUnlessEqual(expecting, found)

        expecting = "101 2 102 2 103 3 104 3 105"
        found = str(stream)
        self.failUnlessEqual(expecting, found)

    def testFlatList(self):
        root = CommonTree(None)

        root.addChild(CommonTree(CommonToken(101)))
        root.addChild(CommonTree(CommonToken(102)))
        root.addChild(CommonTree(CommonToken(103)))

        stream = CommonTreeNodeStream(root)
        expecting = "101 102 103"
        found = self.toNodesOnlyString(stream)
        self.failUnlessEqual(expecting, found)

        expecting = "101 102 103"
        found = str(stream)
        self.failUnlessEqual(expecting, found)

    def testListWithOneNode(self):
        root = CommonTree(None)

        root.addChild(CommonTree(CommonToken(101)))

        stream = CommonTreeNodeStream(root)
        expecting = "101"
        found = self.toNodesOnlyString(stream)
        self.failUnlessEqual(expecting, found)

        expecting = "101"
        found = str(stream)
        self.failUnlessEqual(expecting, found)

    def testAoverB(self):
        t = CommonTree(CommonToken(101))
        t.addChild(CommonTree(CommonToken(102)))

        stream = self.newStream(t)
        expecting = "101 102"
        found = self.toNodesOnlyString(stream)
        self.failUnlessEqual(expecting, found)

        expecting = "101 2 102 3"
        found = str(stream)
        self.failUnlessEqual(expecting, found)

    def testLT(self):
        # ^(101 ^(102 103) 104)
        t = CommonTree(CommonToken(101))
        t.addChild(CommonTree(CommonToken(102)))
        t.getChild(0).addChild(CommonTree(CommonToken(103)))
        t.addChild(CommonTree(CommonToken(104)))

        stream = self.newStream(t)
        self.failUnlessEqual(101, stream.LT(1).getType())
        self.failUnlessEqual(DOWN, stream.LT(2).getType())
        self.failUnlessEqual(102, stream.LT(3).getType())
        self.failUnlessEqual(DOWN, stream.LT(4).getType())
        self.failUnlessEqual(103, stream.LT(5).getType())
        self.failUnlessEqual(UP, stream.LT(6).getType())
        self.failUnlessEqual(104, stream.LT(7).getType())
        self.failUnlessEqual(UP, stream.LT(8).getType())
        self.failUnlessEqual(EOF, stream.LT(9).getType())
        # check way ahead
        self.failUnlessEqual(EOF, stream.LT(100).getType())

    def testMarkRewindEntire(self):
        # ^(101 ^(102 103 ^(106 107) ) 104 105)
        # stream has 7 real + 6 nav nodes
        # Sequence of types: 101 DN 102 DN 103 106 DN 107 UP UP 104 105 UP EOF
        r0 = CommonTree(CommonToken(101))
        r1 = CommonTree(CommonToken(102))
        r0.addChild(r1)
        r1.addChild(CommonTree(CommonToken(103)))
        r2 = CommonTree(CommonToken(106))
        r2.addChild(CommonTree(CommonToken(107)))
        r1.addChild(r2)
        r0.addChild(CommonTree(CommonToken(104)))
        r0.addChild(CommonTree(CommonToken(105)))

        stream = CommonTreeNodeStream(r0)
        m = stream.mark() # MARK
        for _ in range(13): # consume til end
            stream.LT(1)
            stream.consume()

        self.failUnlessEqual(EOF, stream.LT(1).getType())
        self.failUnlessEqual(UP, stream.LT(-1).getType())
        stream.rewind(m) # REWIND

        # consume til end again :)
        for _ in range(13): # consume til end
            stream.LT(1)
            stream.consume()

        self.failUnlessEqual(EOF, stream.LT(1).getType())
        self.failUnlessEqual(UP, stream.LT(-1).getType())

    def testMarkRewindInMiddle(self):
        # ^(101 ^(102 103 ^(106 107) ) 104 105)
        # stream has 7 real + 6 nav nodes
        # Sequence of types: 101 DN 102 DN 103 106 DN 107 UP UP 104 105 UP EOF
        r0 = CommonTree(CommonToken(101))
        r1 = CommonTree(CommonToken(102))
        r0.addChild(r1)
        r1.addChild(CommonTree(CommonToken(103)))
        r2 = CommonTree(CommonToken(106))
        r2.addChild(CommonTree(CommonToken(107)))
        r1.addChild(r2)
        r0.addChild(CommonTree(CommonToken(104)))
        r0.addChild(CommonTree(CommonToken(105)))

        stream = CommonTreeNodeStream(r0)
        for _ in range(7): # consume til middle
            #System.out.println(tream.LT(1).getType())
            stream.consume()

        self.failUnlessEqual(107, stream.LT(1).getType())
        m = stream.mark() # MARK
        stream.consume() # consume 107
        stream.consume() # consume UP
        stream.consume() # consume UP
        stream.consume() # consume 104
        stream.rewind(m) # REWIND

        self.failUnlessEqual(107, stream.LT(1).getType())
        stream.consume()
        self.failUnlessEqual(UP, stream.LT(1).getType())
        stream.consume()
        self.failUnlessEqual(UP, stream.LT(1).getType())
        stream.consume()
        self.failUnlessEqual(104, stream.LT(1).getType())
        stream.consume()
        # now we're past rewind position
        self.failUnlessEqual(105, stream.LT(1).getType())
        stream.consume()
        self.failUnlessEqual(UP, stream.LT(1).getType())
        stream.consume()
        self.failUnlessEqual(EOF, stream.LT(1).getType())
        self.failUnlessEqual(UP, stream.LT(-1).getType())

    def testMarkRewindNested(self):
        # ^(101 ^(102 103 ^(106 107) ) 104 105)
        # stream has 7 real + 6 nav nodes
        # Sequence of types: 101 DN 102 DN 103 106 DN 107 UP UP 104 105 UP EOF
        r0 = CommonTree(CommonToken(101))
        r1 = CommonTree(CommonToken(102))
        r0.addChild(r1)
        r1.addChild(CommonTree(CommonToken(103)))
        r2 = CommonTree(CommonToken(106))
        r2.addChild(CommonTree(CommonToken(107)))
        r1.addChild(r2)
        r0.addChild(CommonTree(CommonToken(104)))
        r0.addChild(CommonTree(CommonToken(105)))

        stream = CommonTreeNodeStream(r0)
        m = stream.mark() # MARK at start
        stream.consume() # consume 101
        stream.consume() # consume DN
        m2 = stream.mark() # MARK on 102
        stream.consume() # consume 102
        stream.consume() # consume DN
        stream.consume() # consume 103
        stream.consume() # consume 106
        stream.rewind(m2) # REWIND to 102
        self.failUnlessEqual(102, stream.LT(1).getType())
        stream.consume()
        self.failUnlessEqual(DOWN, stream.LT(1).getType())
        stream.consume()
        # stop at 103 and rewind to start
        stream.rewind(m) # REWIND to 101
        self.failUnlessEqual(101, stream.LT(1).getType())
        stream.consume()
        self.failUnlessEqual(DOWN, stream.LT(1).getType())
        stream.consume()
        self.failUnlessEqual(102, stream.LT(1).getType())
        stream.consume()
        self.failUnlessEqual(DOWN, stream.LT(1).getType())

    def testSeek(self):
        # ^(101 ^(102 103 ^(106 107) ) 104 105)
        # stream has 7 real + 6 nav nodes
        # Sequence of types: 101 DN 102 DN 103 106 DN 107 UP UP 104 105 UP EOF
        r0 = CommonTree(CommonToken(101))
        r1 = CommonTree(CommonToken(102))
        r0.addChild(r1)
        r1.addChild(CommonTree(CommonToken(103)))
        r2 = CommonTree(CommonToken(106))
        r2.addChild(CommonTree(CommonToken(107)))
        r1.addChild(r2)
        r0.addChild(CommonTree(CommonToken(104)))
        r0.addChild(CommonTree(CommonToken(105)))

        stream = CommonTreeNodeStream(r0)
        stream.consume() # consume 101
        stream.consume() # consume DN
        stream.consume() # consume 102
        stream.seek(7) # seek to 107
        self.failUnlessEqual(107, stream.LT(1).getType())
        stream.consume() # consume 107
        stream.consume() # consume UP
        stream.consume() # consume UP
        self.failUnlessEqual(104, stream.LT(1).getType())

    def testSeekFromStart(self):
        # ^(101 ^(102 103 ^(106 107) ) 104 105)
        # stream has 7 real + 6 nav nodes
        # Sequence of types: 101 DN 102 DN 103 106 DN 107 UP UP 104 105 UP EOF
        r0 = CommonTree(CommonToken(101))
        r1 = CommonTree(CommonToken(102))
        r0.addChild(r1)
        r1.addChild(CommonTree(CommonToken(103)))
        r2 = CommonTree(CommonToken(106))
        r2.addChild(CommonTree(CommonToken(107)))
        r1.addChild(r2)
        r0.addChild(CommonTree(CommonToken(104)))
        r0.addChild(CommonTree(CommonToken(105)))

        stream = CommonTreeNodeStream(r0)
        stream.seek(7) # seek to 107
        self.failUnlessEqual(107, stream.LT(1).getType())
        stream.consume() # consume 107
        stream.consume() # consume UP
        stream.consume() # consume UP
        self.failUnlessEqual(104, stream.LT(1).getType())

    def toNodesOnlyString(self, nodes):
        # render the stream's node types, skipping DOWN/UP navigation nodes
        buf = []
        for i in range(nodes.size()):
            t = nodes.LT(i+1)
            type = nodes.getTreeAdaptor().getType(t)
            if not (type==DOWN or type==UP):
                buf.append(str(type))

        return ' '.join(buf)
class TestCommonTreeNodeStream(unittest.TestCase):
    """Test case for the CommonTreeNodeStream class."""

    def testPushPop(self):
        # ^(101 ^(102 103) ^(104 105) ^(106 107) 108 109)
        # stream has 9 real + 8 nav nodes
        # Sequence of types: 101 DN 102 DN 103 UP 104 DN 105 UP 106 DN 107 UP 108 109 UP
        r0 = CommonTree(CommonToken(101))
        r1 = CommonTree(CommonToken(102))
        r1.addChild(CommonTree(CommonToken(103)))
        r0.addChild(r1)
        r2 = CommonTree(CommonToken(104))
        r2.addChild(CommonTree(CommonToken(105)))
        r0.addChild(r2)
        r3 = CommonTree(CommonToken(106))
        r3.addChild(CommonTree(CommonToken(107)))
        r0.addChild(r3)
        r0.addChild(CommonTree(CommonToken(108)))
        r0.addChild(CommonTree(CommonToken(109)))

        stream = CommonTreeNodeStream(r0)
        expecting = "101 2 102 2 103 3 104 2 105 3 106 2 107 3 108 109 3"
        found = str(stream)
        self.failUnlessEqual(expecting, found)

        # Assume we want to hit node 107 and then "call 102" then return

        indexOf102 = 2
        indexOf107 = 12
        for _ in range(indexOf107):# consume til 107 node
            stream.consume()

        # CALL 102
        self.failUnlessEqual(107, stream.LT(1).getType())
        stream.push(indexOf102)
        self.failUnlessEqual(102, stream.LT(1).getType())
        stream.consume() # consume 102
        self.failUnlessEqual(DOWN, stream.LT(1).getType())
        stream.consume() # consume DN
        self.failUnlessEqual(103, stream.LT(1).getType())
        stream.consume() # consume 103
        self.failUnlessEqual(UP, stream.LT(1).getType())
        # RETURN
        stream.pop()
        self.failUnlessEqual(107, stream.LT(1).getType())

    def testNestedPushPop(self):
        # ^(101 ^(102 103) ^(104 105) ^(106 107) 108 109)
        # stream has 9 real + 8 nav nodes
        # Sequence of types: 101 DN 102 DN 103 UP 104 DN 105 UP 106 DN 107 UP 108 109 UP
        r0 = CommonTree(CommonToken(101))
        r1 = CommonTree(CommonToken(102))
        r1.addChild(CommonTree(CommonToken(103)))
        r0.addChild(r1)
        r2 = CommonTree(CommonToken(104))
        r2.addChild(CommonTree(CommonToken(105)))
        r0.addChild(r2)
        r3 = CommonTree(CommonToken(106))
        r3.addChild(CommonTree(CommonToken(107)))
        r0.addChild(r3)
        r0.addChild(CommonTree(CommonToken(108)))
        r0.addChild(CommonTree(CommonToken(109)))

        stream = CommonTreeNodeStream(r0)

        # Assume we want to hit node 107 and then "call 102", which
        # calls 104, then return

        indexOf102 = 2
        indexOf107 = 12
        for _ in range(indexOf107): # consume til 107 node
            stream.consume()

        self.failUnlessEqual(107, stream.LT(1).getType())
        # CALL 102
        stream.push(indexOf102)
        self.failUnlessEqual(102, stream.LT(1).getType())
        stream.consume() # consume 102
        self.failUnlessEqual(DOWN, stream.LT(1).getType())
        stream.consume() # consume DN
        self.failUnlessEqual(103, stream.LT(1).getType())
        stream.consume() # consume 103

        # CALL 104
        indexOf104 = 6
        stream.push(indexOf104)
        self.failUnlessEqual(104, stream.LT(1).getType())
        stream.consume() # consume 104
        self.failUnlessEqual(DOWN, stream.LT(1).getType())
        stream.consume() # consume DN
        self.failUnlessEqual(105, stream.LT(1).getType())
        stream.consume() # consume 105
        self.failUnlessEqual(UP, stream.LT(1).getType())
        # RETURN (to UP node in 102 subtree)
        stream.pop()
        self.failUnlessEqual(UP, stream.LT(1).getType())
        # RETURN (to empty stack)
        stream.pop()
        self.failUnlessEqual(107, stream.LT(1).getType())

    def testPushPopFromEOF(self):
        # ^(101 ^(102 103) ^(104 105) ^(106 107) 108 109)
        # stream has 9 real + 8 nav nodes
        # Sequence of types: 101 DN 102 DN 103 UP 104 DN 105 UP 106 DN 107 UP 108 109 UP
        r0 = CommonTree(CommonToken(101))
        r1 = CommonTree(CommonToken(102))
        r1.addChild(CommonTree(CommonToken(103)))
        r0.addChild(r1)
        r2 = CommonTree(CommonToken(104))
        r2.addChild(CommonTree(CommonToken(105)))
        r0.addChild(r2)
        r3 = CommonTree(CommonToken(106))
        r3.addChild(CommonTree(CommonToken(107)))
        r0.addChild(r3)
        r0.addChild(CommonTree(CommonToken(108)))
        r0.addChild(CommonTree(CommonToken(109)))

        stream = CommonTreeNodeStream(r0)
        while stream.LA(1) != EOF:
            stream.consume()

        indexOf102 = 2
        indexOf104 = 6
        self.failUnlessEqual(EOF, stream.LT(1).getType())

        # CALL 102
        stream.push(indexOf102)
        self.failUnlessEqual(102, stream.LT(1).getType())
        stream.consume() # consume 102
        self.failUnlessEqual(DOWN, stream.LT(1).getType())
        stream.consume() # consume DN
        self.failUnlessEqual(103, stream.LT(1).getType())
        stream.consume() # consume 103
        self.failUnlessEqual(UP, stream.LT(1).getType())
        # RETURN (to empty stack)
        stream.pop()
        self.failUnlessEqual(EOF, stream.LT(1).getType())

        # CALL 104
        stream.push(indexOf104)
        self.failUnlessEqual(104, stream.LT(1).getType())
        stream.consume() # consume 104
        self.failUnlessEqual(DOWN, stream.LT(1).getType())
        stream.consume() # consume DN
        self.failUnlessEqual(105, stream.LT(1).getType())
        stream.consume() # consume 105
        self.failUnlessEqual(UP, stream.LT(1).getType())
        # RETURN (to empty stack)
        stream.pop()
        self.failUnlessEqual(EOF, stream.LT(1).getType())
class TestCommonTree(unittest.TestCase):
"""Test case for the CommonTree class."""
    def setUp(self):
        """Setup test fixture"""
        # fresh adaptor per test; used for nil/dup/becomeRoot operations
        self.adaptor = CommonTreeAdaptor()
    def testSingleNode(self):
        # a lone node has no parent and no child index
        t = CommonTree(CommonToken(101))
        self.failUnless(t.parent is None)
        self.failUnlessEqual(-1, t.childIndex)
    def test4Nodes(self):
        # ^(101 ^(102 103) 104)
        r0 = CommonTree(CommonToken(101))
        r0.addChild(CommonTree(CommonToken(102)))
        r0.getChild(0).addChild(CommonTree(CommonToken(103)))
        r0.addChild(CommonTree(CommonToken(104)))

        # the root itself remains parent-less
        self.failUnless(r0.parent is None)
        self.failUnlessEqual(-1, r0.childIndex)
    def testList(self):
        # ^(nil 101 102 103)
        r0 = CommonTree(None)
        c0=CommonTree(CommonToken(101))
        r0.addChild(c0)
        c1=CommonTree(CommonToken(102))
        r0.addChild(c1)
        c2=CommonTree(CommonToken(103))
        r0.addChild(c2)

        self.failUnless(r0.parent is None)
        self.failUnlessEqual(-1, r0.childIndex)
        # each child points back at the nil root with its own index
        self.failUnlessEqual(r0, c0.parent)
        self.failUnlessEqual(0, c0.childIndex)
        self.failUnlessEqual(r0, c1.parent)
        self.failUnlessEqual(1, c1.childIndex)
        self.failUnlessEqual(r0, c2.parent)
        self.failUnlessEqual(2, c2.childIndex)
def testList2(self):
# Add child ^(nil 101 102 103) to root 5
# should pull 101 102 103 directly to become 5's child list
root = CommonTree(CommonToken(5))
# child tree
r0 = CommonTree(None)
c0=CommonTree(CommonToken(101))
r0.addChild(c0)
c1=CommonTree(CommonToken(102))
r0.addChild(c1)
c2=CommonTree(CommonToken(103))
r0.addChild(c2)
root.addChild(r0)
self.failUnless(root.parent is None)
self.failUnlessEqual(-1, root.childIndex)
# check children of root all point at root
self.failUnlessEqual(root, c0.parent)
self.failUnlessEqual(0, c0.childIndex)
self.failUnlessEqual(root, c0.parent)
self.failUnlessEqual(1, c1.childIndex)
self.failUnlessEqual(root, c0.parent)
self.failUnlessEqual(2, c2.childIndex)
def testAddListToExistChildren(self):
# Add child ^(nil 101 102 103) to root ^(5 6)
# should add 101 102 103 to end of 5's child list
root = CommonTree(CommonToken(5))
root.addChild(CommonTree(CommonToken(6)))
# child tree
r0 = CommonTree(None)
c0=CommonTree(CommonToken(101))
r0.addChild(c0)
c1=CommonTree(CommonToken(102))
r0.addChild(c1)
c2=CommonTree(CommonToken(103))
r0.addChild(c2)
root.addChild(r0)
self.failUnless(root.parent is None)
self.failUnlessEqual(-1, root.childIndex)
# check children of root all point at root
self.failUnlessEqual(root, c0.parent)
self.failUnlessEqual(1, c0.childIndex)
self.failUnlessEqual(root, c0.parent)
self.failUnlessEqual(2, c1.childIndex)
self.failUnlessEqual(root, c0.parent)
self.failUnlessEqual(3, c2.childIndex)
    def testDupTree(self):
        # ^(101 ^(102 103 ^(106 107) ) 104 105)
        r0 = CommonTree(CommonToken(101))
        r1 = CommonTree(CommonToken(102))
        r0.addChild(r1)
        r1.addChild(CommonTree(CommonToken(103)))
        r2 = CommonTree(CommonToken(106))
        r2.addChild(CommonTree(CommonToken(107)))
        r1.addChild(r2)
        r0.addChild(CommonTree(CommonToken(104)))
        r0.addChild(CommonTree(CommonToken(105)))

        # the duplicate is detached; its internal links must be consistent
        dup = self.adaptor.dupTree(r0)

        self.failUnless(dup.parent is None)
        self.failUnlessEqual(-1, dup.childIndex)
        dup.sanityCheckParentAndChildIndexes()
    def testBecomeRoot(self):
        # 5 becomes root of ^(nil 101 102 103)
        newRoot = CommonTree(CommonToken(5))

        oldRoot = CommonTree(None)
        oldRoot.addChild(CommonTree(CommonToken(101)))
        oldRoot.addChild(CommonTree(CommonToken(102)))
        oldRoot.addChild(CommonTree(CommonToken(103)))

        self.adaptor.becomeRoot(newRoot, oldRoot)
        newRoot.sanityCheckParentAndChildIndexes()
    def testBecomeRoot2(self):
        # 5 becomes root of ^(101 102 103)
        newRoot = CommonTree(CommonToken(5))

        oldRoot = CommonTree(CommonToken(101))
        oldRoot.addChild(CommonTree(CommonToken(102)))
        oldRoot.addChild(CommonTree(CommonToken(103)))

        self.adaptor.becomeRoot(newRoot, oldRoot)
        newRoot.sanityCheckParentAndChildIndexes()
    def testBecomeRoot3(self):
        # ^(nil 5) becomes root of ^(nil 101 102 103)
        newRoot = CommonTree(None)
        newRoot.addChild(CommonTree(CommonToken(5)))

        oldRoot = CommonTree(None)
        oldRoot.addChild(CommonTree(CommonToken(101)))
        oldRoot.addChild(CommonTree(CommonToken(102)))
        oldRoot.addChild(CommonTree(CommonToken(103)))

        self.adaptor.becomeRoot(newRoot, oldRoot)
        newRoot.sanityCheckParentAndChildIndexes()
    def testBecomeRoot5(self):
        # ^(nil 5) becomes root of ^(101 102 103)
        newRoot = CommonTree(None)
        newRoot.addChild(CommonTree(CommonToken(5)))

        oldRoot = CommonTree(CommonToken(101))
        oldRoot.addChild(CommonTree(CommonToken(102)))
        oldRoot.addChild(CommonTree(CommonToken(103)))

        self.adaptor.becomeRoot(newRoot, oldRoot)
        newRoot.sanityCheckParentAndChildIndexes()
    def testBecomeRoot6(self):
        # emulates construction of ^(5 6)
        root_0 = self.adaptor.nil()
        root_1 = self.adaptor.nil()
        root_1 = self.adaptor.becomeRoot(CommonTree(CommonToken(5)), root_1)

        self.adaptor.addChild(root_1, CommonTree(CommonToken(6)))

        self.adaptor.addChild(root_0, root_1)

        root_0.sanityCheckParentAndChildIndexes()
# Test replaceChildren
    def testReplaceWithNoChildren(self):
        # replacing a child range on a childless node must raise IndexError
        t = CommonTree(CommonToken(101))
        newChild = CommonTree(CommonToken(5))
        error = False
        try:
            t.replaceChildren(0, 0, newChild)

        except IndexError:
            error = True

        self.failUnless(error)
    def testReplaceWithOneChildren(self):
        # assume token type 99 and use text
        t = CommonTree(CommonToken(99, text="a"))
        c0 = CommonTree(CommonToken(99, text="b"))
        t.addChild(c0)

        newChild = CommonTree(CommonToken(99, text="c"))
        t.replaceChildren(0, 0, newChild)

        expecting = "(a c)"
        self.failUnlessEqual(expecting, t.toStringTree())
        t.sanityCheckParentAndChildIndexes()
    def testReplaceInMiddle(self):
        # replace the middle child of ^(a b c d) with x
        t = CommonTree(CommonToken(99, text="a"))
        t.addChild(CommonTree(CommonToken(99, text="b")))
        t.addChild(CommonTree(CommonToken(99, text="c"))) # index 1
        t.addChild(CommonTree(CommonToken(99, text="d")))

        newChild = CommonTree(CommonToken(99, text="x"))
        t.replaceChildren(1, 1, newChild)

        expecting = "(a b x d)"
        self.failUnlessEqual(expecting, t.toStringTree())
        t.sanityCheckParentAndChildIndexes()
    def testReplaceAtLeft(self):
        # replace the first child of ^(a b c d) with x
        t = CommonTree(CommonToken(99, text="a"))
        t.addChild(CommonTree(CommonToken(99, text="b"))) # index 0
        t.addChild(CommonTree(CommonToken(99, text="c")))
        t.addChild(CommonTree(CommonToken(99, text="d")))

        newChild = CommonTree(CommonToken(99, text="x"))
        t.replaceChildren(0, 0, newChild)

        expecting = "(a x c d)"
        self.failUnlessEqual(expecting, t.toStringTree())
        t.sanityCheckParentAndChildIndexes()
    def testReplaceAtRight(self):
        # replace the last child of ^(a b c d) with x
        t = CommonTree(CommonToken(99, text="a"))
        t.addChild(CommonTree(CommonToken(99, text="b")))
        t.addChild(CommonTree(CommonToken(99, text="c")))
        t.addChild(CommonTree(CommonToken(99, text="d"))) # index 2
        
        newChild = CommonTree(CommonToken(99, text="x"))
        t.replaceChildren(2, 2, newChild)

        expecting = "(a b c x)"
        self.failUnlessEqual(expecting, t.toStringTree())
        t.sanityCheckParentAndChildIndexes()
    def testReplaceOneWithTwoAtLeft(self):
        # replace first child of ^(a b c d) with the flat list ^(nil x y)
        t = CommonTree(CommonToken(99, text="a"))
        t.addChild(CommonTree(CommonToken(99, text="b")))
        t.addChild(CommonTree(CommonToken(99, text="c")))
        t.addChild(CommonTree(CommonToken(99, text="d")))

        newChildren = self.adaptor.nil()
        newChildren.addChild(CommonTree(CommonToken(99, text="x")))
        newChildren.addChild(CommonTree(CommonToken(99, text="y")))

        t.replaceChildren(0, 0, newChildren)

        expecting = "(a x y c d)"
        self.failUnlessEqual(expecting, t.toStringTree())
        t.sanityCheckParentAndChildIndexes()
def testReplaceOneWithTwoAtRight(self):
    """Replacing the last child with a two-node list grows the tree."""
    root = CommonTree(CommonToken(99, text="a"))
    for text in ("b", "c", "d"):
        root.addChild(CommonTree(CommonToken(99, text=text)))
    # A nil node acts as a flat list of replacement children.
    pair = self.adaptor.nil()
    pair.addChild(CommonTree(CommonToken(99, text="x")))
    pair.addChild(CommonTree(CommonToken(99, text="y")))
    root.replaceChildren(2, 2, pair)
    self.failUnlessEqual("(a b c x y)", root.toStringTree())
    root.sanityCheckParentAndChildIndexes()
def testReplaceOneWithTwoInMiddle(self):
    """Replacing the middle child with a two-node list grows the tree."""
    root = CommonTree(CommonToken(99, text="a"))
    for text in ("b", "c", "d"):
        root.addChild(CommonTree(CommonToken(99, text=text)))
    # A nil node acts as a flat list of replacement children.
    pair = self.adaptor.nil()
    pair.addChild(CommonTree(CommonToken(99, text="x")))
    pair.addChild(CommonTree(CommonToken(99, text="y")))
    root.replaceChildren(1, 1, pair)
    self.failUnlessEqual("(a b x y d)", root.toStringTree())
    root.sanityCheckParentAndChildIndexes()
def testReplaceTwoWithOneAtLeft(self):
    """Replacing the first two children with one node shrinks the tree."""
    root = CommonTree(CommonToken(99, text="a"))
    for text in ("b", "c", "d"):
        root.addChild(CommonTree(CommonToken(99, text=text)))
    replacement = CommonTree(CommonToken(99, text="x"))
    root.replaceChildren(0, 1, replacement)
    self.failUnlessEqual("(a x d)", root.toStringTree())
    root.sanityCheckParentAndChildIndexes()
def testReplaceTwoWithOneAtRight(self):
    """Replacing the last two children with one node shrinks the tree."""
    root = CommonTree(CommonToken(99, text="a"))
    for text in ("b", "c", "d"):
        root.addChild(CommonTree(CommonToken(99, text=text)))
    replacement = CommonTree(CommonToken(99, text="x"))
    root.replaceChildren(1, 2, replacement)
    self.failUnlessEqual("(a b x)", root.toStringTree())
    root.sanityCheckParentAndChildIndexes()
def testReplaceAllWithOne(self):
    """Replacing every child with a single node leaves one child."""
    root = CommonTree(CommonToken(99, text="a"))
    for text in ("b", "c", "d"):
        root.addChild(CommonTree(CommonToken(99, text=text)))
    replacement = CommonTree(CommonToken(99, text="x"))
    root.replaceChildren(0, 2, replacement)
    self.failUnlessEqual("(a x)", root.toStringTree())
    root.sanityCheckParentAndChildIndexes()
def testReplaceAllWithTwo(self):
    """Replacing every child with a two-node list leaves two children."""
    root = CommonTree(CommonToken(99, text="a"))
    for text in ("b", "c", "d"):
        root.addChild(CommonTree(CommonToken(99, text=text)))
    # A nil node acts as a flat list of replacement children.
    pair = self.adaptor.nil()
    pair.addChild(CommonTree(CommonToken(99, text="x")))
    pair.addChild(CommonTree(CommonToken(99, text="y")))
    root.replaceChildren(0, 2, pair)
    self.failUnlessEqual("(a x y)", root.toStringTree())
    root.sanityCheckParentAndChildIndexes()
if __name__ == "__main__":
    # Allow running this test module directly from the command line.
    unittest.main(testRunner=unittest.TextTestRunner(verbosity=2))
| Python |
# -*- coding: utf-8 -*-
import os
import unittest
from StringIO import StringIO
from antlr3.tree import CommonTreeAdaptor, CommonTree, INVALID_TOKEN_TYPE
from antlr3.treewizard import TreeWizard, computeTokenTypes, \
TreePatternLexer, EOF, ID, BEGIN, END, PERCENT, COLON, DOT, ARG, \
TreePatternParser, \
TreePattern, WildcardTreePattern, TreePatternTreeAdaptor
class TestComputeTokenTypes(unittest.TestCase):
    """Test case for the computeTokenTypes function."""

    def testNone(self):
        """computeTokenTypes(None) -> {}"""
        result = computeTokenTypes(None)
        self.failUnless(isinstance(result, dict))
        self.failUnlessEqual(result, {})

    def testList(self):
        """computeTokenTypes(['a', 'b']) -> { 'a': 0, 'b': 1 }"""
        result = computeTokenTypes(['a', 'b'])
        self.failUnless(isinstance(result, dict))
        self.failUnlessEqual(result, {'a': 0, 'b': 1})
class TestTreePatternLexer(unittest.TestCase):
    """Test case for the TreePatternLexer class."""

    def _lexOne(self, pattern, expType, expSval='', expError=False):
        # Lex exactly one token from `pattern` and verify the returned
        # token type plus the lexer's sval and error attributes.
        lexer = TreePatternLexer(pattern)
        self.failUnlessEqual(lexer.nextToken(), expType)
        self.failUnlessEqual(lexer.sval, expSval)
        self.failUnlessEqual(lexer.error, expError)

    def testBegin(self):
        """TreePatternLexer(): '('"""
        self._lexOne('(', BEGIN)

    def testEnd(self):
        """TreePatternLexer(): ')'"""
        self._lexOne(')', END)

    def testPercent(self):
        """TreePatternLexer(): '%'"""
        self._lexOne('%', PERCENT)

    def testDot(self):
        """TreePatternLexer(): '.'"""
        self._lexOne('.', DOT)

    def testColon(self):
        """TreePatternLexer(): ':'"""
        self._lexOne(':', COLON)

    def testEOF(self):
        """TreePatternLexer(): EOF"""
        self._lexOne(' \n \r \t ', EOF)

    def testID(self):
        """TreePatternLexer(): ID"""
        self._lexOne('_foo12_bar', ID, expSval='_foo12_bar')

    def testARG(self):
        """TreePatternLexer(): ARG"""
        self._lexOne('[ \\]bla\\n]', ARG, expSval=' ]bla\\n')

    def testError(self):
        """TreePatternLexer(): error"""
        self._lexOne('1', EOF, expError=True)
class TestTreePatternParser(unittest.TestCase):
    """Test case for the TreePatternParser class."""

    def setUp(self):
        """Setup test fixture

        We need a tree adaptor, use CommonTreeAdaptor.
        And a constant list of token names.
        """
        self.adaptor = CommonTreeAdaptor()
        self.tokens = [
            "", "", "", "", "", "A", "B", "C", "D", "E", "ID", "VAR"
        ]
        self.wizard = TreeWizard(self.adaptor, tokenNames=self.tokens)

    def _parsePattern(self, pattern, adaptor=None):
        # Run the pattern parser over `pattern` and return the result tree
        # (None on a syntax error).  An alternative adaptor may be given.
        if adaptor is None:
            adaptor = self.adaptor
        lexer = TreePatternLexer(pattern)
        return TreePatternParser(lexer, self.wizard, adaptor).pattern()

    def testSingleNode(self):
        """TreePatternParser: 'ID'"""
        tree = self._parsePattern('ID')
        self.failUnless(isinstance(tree, CommonTree))
        self.failUnlessEqual(tree.getType(), 10)
        self.failUnlessEqual(tree.getText(), 'ID')

    def testSingleNodeWithArg(self):
        """TreePatternParser: 'ID[foo]'"""
        tree = self._parsePattern('ID[foo]')
        self.failUnless(isinstance(tree, CommonTree))
        self.failUnlessEqual(tree.getType(), 10)
        self.failUnlessEqual(tree.getText(), 'foo')

    def testSingleLevelTree(self):
        """TreePatternParser: '(A B)'"""
        tree = self._parsePattern('(A B)')
        self.failUnless(isinstance(tree, CommonTree))
        self.failUnlessEqual(tree.getType(), 5)
        self.failUnlessEqual(tree.getText(), 'A')
        self.failUnlessEqual(tree.getChildCount(), 1)
        self.failUnlessEqual(tree.getChild(0).getType(), 6)
        self.failUnlessEqual(tree.getChild(0).getText(), 'B')

    def testNil(self):
        """TreePatternParser: 'nil'"""
        tree = self._parsePattern('nil')
        self.failUnless(isinstance(tree, CommonTree))
        self.failUnlessEqual(tree.getType(), 0)
        self.failUnlessEqual(tree.getText(), None)

    def testWildcard(self):
        """TreePatternParser: '(.)'"""
        tree = self._parsePattern('(.)')
        self.failUnless(isinstance(tree, WildcardTreePattern))

    def testLabel(self):
        """TreePatternParser: '(%a:A)'"""
        # Labels are only tracked by TreePattern nodes, so use the
        # pattern-specific adaptor here.
        tree = self._parsePattern('(%a:A)', adaptor=TreePatternTreeAdaptor())
        self.failUnless(isinstance(tree, TreePattern))
        self.failUnlessEqual(tree.label, 'a')

    def testError1(self):
        """TreePatternParser: ')'"""
        self.failUnless(self._parsePattern(')') is None)

    def testError2(self):
        """TreePatternParser: '()'"""
        self.failUnless(self._parsePattern('()') is None)

    def testError3(self):
        """TreePatternParser: '(A ])'"""
        self.failUnless(self._parsePattern('(A ])') is None)
class TestTreeWizard(unittest.TestCase):
    """Test case for the TreeWizard class.

    Covers node creation from patterns, token-type lookup, indexing,
    visiting (by type and by pattern), parse/match with labels,
    structural equality and subtree search.
    """

    def setUp(self):
        """Setup test fixture

        We need a tree adaptor, use CommonTreeAdaptor.
        And a constant list of token names.
        """
        self.adaptor = CommonTreeAdaptor()
        self.tokens = [
            "", "", "", "", "", "A", "B", "C", "D", "E", "ID", "VAR"
        ]

    def testInit(self):
        """TreeWizard.__init__()"""
        wiz = TreeWizard(
            self.adaptor,
            tokenNames=['a', 'b']
        )
        self.failUnless(wiz.adaptor is self.adaptor)
        self.failUnlessEqual(
            wiz.tokenNameToTypeMap,
            { 'a': 0, 'b': 1 }
        )

    def testGetTokenType(self):
        """TreeWizard.getTokenType()"""
        wiz = TreeWizard(
            self.adaptor,
            tokenNames=self.tokens
        )
        self.failUnlessEqual(
            wiz.getTokenType('A'),
            5
        )
        self.failUnlessEqual(
            wiz.getTokenType('VAR'),
            11
        )
        # Unknown names map to the INVALID_TOKEN_TYPE sentinel.
        self.failUnlessEqual(
            wiz.getTokenType('invalid'),
            INVALID_TOKEN_TYPE
        )

    # -- create() tests ---------------------------------------------------

    def testSingleNode(self):
        wiz = TreeWizard(self.adaptor, self.tokens)
        t = wiz.create("ID")
        found = t.toStringTree()
        expecting = "ID"
        self.failUnlessEqual(expecting, found)

    def testSingleNodeWithArg(self):
        wiz = TreeWizard(self.adaptor, self.tokens)
        t = wiz.create("ID[foo]")
        found = t.toStringTree()
        expecting = "foo"
        self.failUnlessEqual(expecting, found)

    def testSingleNodeTree(self):
        wiz = TreeWizard(self.adaptor, self.tokens)
        t = wiz.create("(A)")
        found = t.toStringTree()
        expecting = "A"
        self.failUnlessEqual(expecting, found)

    def testSingleLevelTree(self):
        wiz = TreeWizard(self.adaptor, self.tokens)
        t = wiz.create("(A B C D)")
        found = t.toStringTree()
        expecting = "(A B C D)"
        self.failUnlessEqual(expecting, found)

    def testListTree(self):
        # "nil" as root yields a flat list of children.
        wiz = TreeWizard(self.adaptor, self.tokens)
        t = wiz.create("(nil A B C)")
        found = t.toStringTree()
        expecting = "A B C"
        self.failUnlessEqual(expecting, found)

    def testInvalidListTree(self):
        # A bare list without a (nil ...) wrapper is not a valid pattern.
        wiz = TreeWizard(self.adaptor, self.tokens)
        t = wiz.create("A B C")
        self.failUnless(t is None)

    def testDoubleLevelTree(self):
        wiz = TreeWizard(self.adaptor, self.tokens)
        t = wiz.create("(A (B C) (B D) E)")
        found = t.toStringTree()
        expecting = "(A (B C) (B D) E)"
        self.failUnlessEqual(expecting, found)

    # -- index() tests ----------------------------------------------------

    def __simplifyIndexMap(self, indexMap):
        # Convert {ttype: [node, ...]} into {ttype: [str(node), ...]} so
        # index maps can be compared against literal expectations.
        return dict( # stringify nodes for easy comparing
            (ttype, [str(node) for node in nodes])
            for ttype, nodes in indexMap.items()
            )

    def testSingleNodeIndex(self):
        wiz = TreeWizard(self.adaptor, self.tokens)
        tree = wiz.create("ID")
        indexMap = wiz.index(tree)
        found = self.__simplifyIndexMap(indexMap)
        expecting = { 10: ["ID"] }
        self.failUnlessEqual(expecting, found)

    def testNoRepeatsIndex(self):
        wiz = TreeWizard(self.adaptor, self.tokens)
        tree = wiz.create("(A B C D)")
        indexMap = wiz.index(tree)
        found = self.__simplifyIndexMap(indexMap)
        expecting = { 8:['D'], 6:['B'], 7:['C'], 5:['A'] }
        self.failUnlessEqual(expecting, found)

    def testRepeatsIndex(self):
        # Repeated token types accumulate all matching nodes.
        wiz = TreeWizard(self.adaptor, self.tokens)
        tree = wiz.create("(A B (A C B) B D D)")
        indexMap = wiz.index(tree)
        found = self.__simplifyIndexMap(indexMap)
        expecting = { 8: ['D', 'D'], 6: ['B', 'B', 'B'], 7: ['C'], 5: ['A', 'A'] }
        self.failUnlessEqual(expecting, found)

    # -- visit() by token type --------------------------------------------

    def testNoRepeatsVisit(self):
        wiz = TreeWizard(self.adaptor, self.tokens)
        tree = wiz.create("(A B C D)")

        elements = []
        def visitor(node, parent, childIndex, labels):
            elements.append(str(node))

        wiz.visit(tree, wiz.getTokenType("B"), visitor)

        expecting = ['B']
        self.failUnlessEqual(expecting, elements)

    def testNoRepeatsVisit2(self):
        wiz = TreeWizard(self.adaptor, self.tokens)
        tree = wiz.create("(A B (A C B) B D D)")

        elements = []
        def visitor(node, parent, childIndex, labels):
            elements.append(str(node))

        wiz.visit(tree, wiz.getTokenType("C"), visitor)

        expecting = ['C']
        self.failUnlessEqual(expecting, elements)

    def testRepeatsVisit(self):
        wiz = TreeWizard(self.adaptor, self.tokens)
        tree = wiz.create("(A B (A C B) B D D)")

        elements = []
        def visitor(node, parent, childIndex, labels):
            elements.append(str(node))

        wiz.visit(tree, wiz.getTokenType("B"), visitor)

        expecting = ['B', 'B', 'B']
        self.failUnlessEqual(expecting, elements)

    def testRepeatsVisit2(self):
        wiz = TreeWizard(self.adaptor, self.tokens)
        tree = wiz.create("(A B (A C B) B D D)")

        elements = []
        def visitor(node, parent, childIndex, labels):
            elements.append(str(node))

        wiz.visit(tree, wiz.getTokenType("A"), visitor)

        expecting = ['A', 'A']
        self.failUnlessEqual(expecting, elements)

    def testRepeatsVisitWithContext(self):
        wiz = TreeWizard(self.adaptor, self.tokens)
        tree = wiz.create("(A B (A C B) B D D)")

        elements = []
        def visitor(node, parent, childIndex, labels):
            # Record node together with its parent and child index.
            elements.append('%s@%s[%d]' % (node, parent, childIndex))

        wiz.visit(tree, wiz.getTokenType("B"), visitor)

        expecting = ['B@A[0]', 'B@A[1]', 'B@A[2]']
        self.failUnlessEqual(expecting, elements)

    def testRepeatsVisitWithNullParentAndContext(self):
        wiz = TreeWizard(self.adaptor, self.tokens)
        tree = wiz.create("(A B (A C B) B D D)")

        elements = []
        def visitor(node, parent, childIndex, labels):
            elements.append(
                '%s@%s[%d]'
                # bool-indexing: 'nil' when the visited node has no parent
                % (node, ['nil', parent][parent is not None], childIndex)
                )

        wiz.visit(tree, wiz.getTokenType("A"), visitor)

        expecting = ['A@nil[0]', 'A@A[1]']
        self.failUnlessEqual(expecting, elements)

    # -- visit() by pattern -----------------------------------------------

    def testVisitPattern(self):
        wiz = TreeWizard(self.adaptor, self.tokens)
        tree = wiz.create("(A B C (A B) D)")

        elements = []
        def visitor(node, parent, childIndex, labels):
            elements.append(
                str(node)
                )

        wiz.visit(tree, '(A B)', visitor)

        expecting = ['A'] # shouldn't match overall root, just (A B)
        self.failUnlessEqual(expecting, elements)

    def testVisitPatternMultiple(self):
        wiz = TreeWizard(self.adaptor, self.tokens)
        tree = wiz.create("(A B C (A B) (D (A B)))")

        elements = []
        def visitor(node, parent, childIndex, labels):
            elements.append(
                '%s@%s[%d]'
                % (node, ['nil', parent][parent is not None], childIndex)
                )

        wiz.visit(tree, '(A B)', visitor)

        expecting = ['A@A[2]', 'A@D[0]']
        self.failUnlessEqual(expecting, elements)

    def testVisitPatternMultipleWithLabels(self):
        wiz = TreeWizard(self.adaptor, self.tokens)
        tree = wiz.create("(A B C (A[foo] B[bar]) (D (A[big] B[dog])))")

        elements = []
        def visitor(node, parent, childIndex, labels):
            # Include the label bindings captured by the pattern match.
            elements.append(
                '%s@%s[%d]%s&%s'
                % (node,
                   ['nil', parent][parent is not None],
                   childIndex,
                   labels['a'],
                   labels['b'],
                   )
                )

        wiz.visit(tree, '(%a:A %b:B)', visitor)

        expecting = ['foo@A[2]foo&bar', 'big@D[0]big&dog']
        self.failUnlessEqual(expecting, elements)

    # -- parse() / match tests --------------------------------------------

    def testParse(self):
        wiz = TreeWizard(self.adaptor, self.tokens)
        t = wiz.create("(A B C)")
        valid = wiz.parse(t, "(A B C)")
        self.failUnless(valid)

    def testParseSingleNode(self):
        wiz = TreeWizard(self.adaptor, self.tokens)
        t = wiz.create("A")
        valid = wiz.parse(t, "A")
        self.failUnless(valid)

    def testParseSingleNodeFails(self):
        wiz = TreeWizard(self.adaptor, self.tokens)
        t = wiz.create("A")
        valid = wiz.parse(t, "B")
        self.failUnless(not valid)

    def testParseFlatTree(self):
        wiz = TreeWizard(self.adaptor, self.tokens)
        t = wiz.create("(nil A B C)")
        valid = wiz.parse(t, "(nil A B C)")
        self.failUnless(valid)

    def testParseFlatTreeFails(self):
        wiz = TreeWizard(self.adaptor, self.tokens)
        t = wiz.create("(nil A B C)")
        valid = wiz.parse(t, "(nil A B)")
        self.failUnless(not valid)

    def testParseFlatTreeFails2(self):
        wiz = TreeWizard(self.adaptor, self.tokens)
        t = wiz.create("(nil A B C)")
        valid = wiz.parse(t, "(nil A B A)")
        self.failUnless(not valid)

    def testWildcard(self):
        wiz = TreeWizard(self.adaptor, self.tokens)
        t = wiz.create("(A B C)")
        valid = wiz.parse(t, "(A . .)")
        self.failUnless(valid)

    def testParseWithText(self):
        wiz = TreeWizard(self.adaptor, self.tokens)
        t = wiz.create("(A B[foo] C[bar])")
        # C pattern has no text arg so despite [bar] in t, no need
        # to match text--check structure only.
        valid = wiz.parse(t, "(A B[foo] C)")
        self.failUnless(valid)

    def testParseWithTextFails(self):
        wiz = TreeWizard(self.adaptor, self.tokens)
        t = wiz.create("(A B C)")
        valid = wiz.parse(t, "(A[foo] B C)")
        self.failUnless(not valid) # fails

    def testParseLabels(self):
        wiz = TreeWizard(self.adaptor, self.tokens)
        t = wiz.create("(A B C)")
        labels = {}
        valid = wiz.parse(t, "(%a:A %b:B %c:C)", labels)
        self.failUnless(valid)
        self.failUnlessEqual("A", str(labels["a"]))
        self.failUnlessEqual("B", str(labels["b"]))
        self.failUnlessEqual("C", str(labels["c"]))

    def testParseWithWildcardLabels(self):
        wiz = TreeWizard(self.adaptor, self.tokens)
        t = wiz.create("(A B C)")
        labels = {}
        valid = wiz.parse(t, "(A %b:. %c:.)", labels)
        self.failUnless(valid)
        self.failUnlessEqual("B", str(labels["b"]))
        self.failUnlessEqual("C", str(labels["c"]))

    def testParseLabelsAndTestText(self):
        wiz = TreeWizard(self.adaptor, self.tokens)
        t = wiz.create("(A B[foo] C)")
        labels = {}
        valid = wiz.parse(t, "(%a:A %b:B[foo] %c:C)", labels)
        self.failUnless(valid)
        self.failUnlessEqual("A", str(labels["a"]))
        self.failUnlessEqual("foo", str(labels["b"]))
        self.failUnlessEqual("C", str(labels["c"]))

    def testParseLabelsInNestedTree(self):
        wiz = TreeWizard(self.adaptor, self.tokens)
        t = wiz.create("(A (B C) (D E))")
        labels = {}
        valid = wiz.parse(t, "(%a:A (%b:B %c:C) (%d:D %e:E) )", labels)
        self.failUnless(valid)
        self.failUnlessEqual("A", str(labels["a"]))
        self.failUnlessEqual("B", str(labels["b"]))
        self.failUnlessEqual("C", str(labels["c"]))
        self.failUnlessEqual("D", str(labels["d"]))
        self.failUnlessEqual("E", str(labels["e"]))

    # -- equals() tests ---------------------------------------------------

    def testEquals(self):
        wiz = TreeWizard(self.adaptor, self.tokens)
        t1 = wiz.create("(A B C)")
        t2 = wiz.create("(A B C)")
        same = wiz.equals(t1, t2)
        self.failUnless(same)

    def testEqualsWithText(self):
        wiz = TreeWizard(self.adaptor, self.tokens)
        t1 = wiz.create("(A B[foo] C)")
        t2 = wiz.create("(A B[foo] C)")
        same = wiz.equals(t1, t2)
        self.failUnless(same)

    def testEqualsWithMismatchedText(self):
        wiz = TreeWizard(self.adaptor, self.tokens)
        t1 = wiz.create("(A B[foo] C)")
        t2 = wiz.create("(A B C)")
        same = wiz.equals(t1, t2)
        self.failUnless(not same)

    def testEqualsWithMismatchedList(self):
        wiz = TreeWizard(self.adaptor, self.tokens)
        t1 = wiz.create("(A B C)")
        t2 = wiz.create("(A B A)")
        same = wiz.equals(t1, t2)
        self.failUnless(not same)

    def testEqualsWithMismatchedListLength(self):
        wiz = TreeWizard(self.adaptor, self.tokens)
        t1 = wiz.create("(A B C)")
        t2 = wiz.create("(A B)")
        same = wiz.equals(t1, t2)
        self.failUnless(not same)

    # -- find() tests -----------------------------------------------------

    def testFindPattern(self):
        wiz = TreeWizard(self.adaptor, self.tokens)
        t = wiz.create("(A B C (A[foo] B[bar]) (D (A[big] B[dog])))")
        subtrees = wiz.find(t, "(A B)")
        found = [str(node) for node in subtrees]
        expecting = ['foo', 'big']
        self.failUnlessEqual(expecting, found)

    def testFindTokenType(self):
        wiz = TreeWizard(self.adaptor, self.tokens)
        t = wiz.create("(A B C (A[foo] B[bar]) (D (A[big] B[dog])))")
        subtrees = wiz.find(t, wiz.getTokenType('A'))
        found = [str(node) for node in subtrees]
        expecting = ['A', 'foo', 'big']
        self.failUnlessEqual(expecting, found)
if __name__ == "__main__":
    # Allow running this test module directly from the command line.
    unittest.main(testRunner=unittest.TextTestRunner(verbosity=2))
| Python |
import sys
import unittest
import antlr3
class TestBaseRecognizer(unittest.TestCase):
    """Tests for BaseRecognizer class"""

    def testGetRuleInvocationStack(self):
        """BaseRecognizer._getRuleInvocationStack()"""
        # Called from within this test, the stack should contain exactly
        # this method's frame for the current module.
        stack = antlr3.BaseRecognizer._getRuleInvocationStack(__name__)
        self.failUnlessEqual(stack, ['testGetRuleInvocationStack'])
class TestTokenSource(unittest.TestCase):
    """Testcase to the antlr3.TokenSource class"""

    def testIteratorInterface(self):
        """TokenSource.next()"""
        class TrivialToken(object):
            # Minimal stand-in token: only carries a type attribute.
            def __init__(self, type):
                self.type = type

        class TestSource(antlr3.TokenSource):
            def __init__(self):
                self.tokens = [
                    TrivialToken(ttype)
                    for ttype in (1, 2, 3, 4, antlr3.EOF)
                    ]

            def nextToken(self):
                return self.tokens.pop(0)

        # Iterating the source must stop at (and exclude) EOF.
        types = [token.type for token in TestSource()]
        self.failUnlessEqual(types, [1, 2, 3, 4])
class TestLexer(unittest.TestCase):
    def testInit(self):
        """Lexer.__init__()"""
        class TLexer(antlr3.Lexer):
            # Pin the version so the runtime's compatibility check passes.
            antlr_version = antlr3.runtime_version

        # Smoke test: constructing a lexer over a string stream must work.
        TLexer(antlr3.StringStream('foo'))
if __name__ == "__main__":
    # Allow running this test module directly from the command line.
    unittest.main(testRunner=unittest.TextTestRunner(verbosity=2))
| Python |
import unittest
import antlr3
import testbase
class TestRecognitionException(unittest.TestCase):
    """Tests for the antlr3.RecognitionException class"""

    def testInitNone(self):
        """RecognitionException.__init__()"""
        # Smoke test: construction with no arguments must not raise.
        antlr3.RecognitionException()
class TestEarlyExitException(unittest.TestCase):
    """Tests for the antlr3.EarlyExitException class"""

    @testbase.broken("FIXME", Exception)
    def testInitNone(self):
        """EarlyExitException.__init__()"""
        # Smoke test: construction with no arguments (known broken).
        antlr3.EarlyExitException()
class TestFailedPredicateException(unittest.TestCase):
    """Tests for the antlr3.FailedPredicateException class"""

    @testbase.broken("FIXME", Exception)
    def testInitNone(self):
        """FailedPredicateException.__init__()"""
        # Smoke test: construction with no arguments (known broken).
        antlr3.FailedPredicateException()
class TestMismatchedNotSetException(unittest.TestCase):
    """Tests for the antlr3.MismatchedNotSetException class"""

    @testbase.broken("FIXME", Exception)
    def testInitNone(self):
        """MismatchedNotSetException.__init__()"""
        # Smoke test: construction with no arguments (known broken).
        antlr3.MismatchedNotSetException()
class TestMismatchedRangeException(unittest.TestCase):
    """Tests for the antlr3.MismatchedRangeException class"""

    @testbase.broken("FIXME", Exception)
    def testInitNone(self):
        """MismatchedRangeException.__init__()"""
        # Smoke test: construction with no arguments (known broken).
        antlr3.MismatchedRangeException()
class TestMismatchedSetException(unittest.TestCase):
    """Tests for the antlr3.MismatchedSetException class"""

    @testbase.broken("FIXME", Exception)
    def testInitNone(self):
        """MismatchedSetException.__init__()"""
        # Smoke test: construction with no arguments (known broken).
        antlr3.MismatchedSetException()
class TestMismatchedTokenException(unittest.TestCase):
    """Tests for the antlr3.MismatchedTokenException class"""

    @testbase.broken("FIXME", Exception)
    def testInitNone(self):
        """MismatchedTokenException.__init__()"""
        # Smoke test: construction with no arguments (known broken).
        antlr3.MismatchedTokenException()
class TestMismatchedTreeNodeException(unittest.TestCase):
    """Tests for the antlr3.MismatchedTreeNodeException class"""

    @testbase.broken("FIXME", Exception)
    def testInitNone(self):
        """MismatchedTreeNodeException.__init__()"""
        # Smoke test: construction with no arguments (known broken).
        antlr3.MismatchedTreeNodeException()
class TestNoViableAltException(unittest.TestCase):
    """Tests for the antlr3.NoViableAltException class"""

    @testbase.broken("FIXME", Exception)
    def testInitNone(self):
        """NoViableAltException.__init__()"""
        # Smoke test: construction with no arguments (known broken).
        antlr3.NoViableAltException()
if __name__ == "__main__":
    # Allow running this test module directly from the command line.
    unittest.main(testRunner=unittest.TextTestRunner(verbosity=2))
| Python |
import unittest
class BrokenTest(unittest.TestCase.failureException):
    """Raised when a test marked @broken unexpectedly succeeds.

    args is the (test name, reason) pair passed at raise time.
    """

    def __repr__(self):
        name, reason = self.args
        return '%s: %s: %s works now' % (self.__class__.__name__, name, reason)
def broken(reason, *exceptions):
    '''Indicates a failing (or erroneous) test case fails that should succeed.
    If the test fails with an exception, list the exception type in args'''
    def wrapper(test_method):
        # What we expect the broken test to raise; default to the normal
        # assertion failure when no exception types were listed.
        expected = exceptions or unittest.TestCase.failureException

        def replacement(*args, **kwargs):
            try:
                test_method(*args, **kwargs)
            except expected:
                # Still broken, as advertised -- swallow the failure.
                pass
            else:
                # The test passed: the @broken marker is now stale.
                raise BrokenTest(test_method.__name__, reason)

        replacement.__doc__ = test_method.__doc__
        replacement.__name__ = 'XXX_' + test_method.__name__
        replacement.todo = reason
        return replacement

    return wrapper
| Python |
import unittest
import antlr3
if __name__ == "__main__":
    # Allow running this (currently empty) test module from the command line.
    unittest.main(testRunner=unittest.TextTestRunner(verbosity=2))
| Python |
"""Compatibility stuff"""
# begin[licence]
#
# [The "BSD licence"]
# Copyright (c) 2005-2008 Terence Parr
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# end[licence]
# Compatibility shim: `set`/`frozenset` are builtins on modern Pythons;
# fall back to the old `sets` module when they are missing (Python < 2.4).
try:
    set = set
    frozenset = frozenset
except NameError:
    from sets import Set as set, ImmutableSet as frozenset
# Compatibility shim: `reversed` is a builtin on modern Pythons; provide a
# simple list-based fallback when it is missing (Python < 2.4).
try:
    reversed = reversed
except NameError:
    def reversed(l):
        # Return a reversed copy of sequence l.  The builtin returns an
        # iterator, but a list is sufficient for callers here.
        l = l[:]
        l.reverse()
        return l
| Python |
""" @package antlr3.tree
@brief ANTLR3 runtime package, treewizard module
A utility module to create ASTs at runtime.
See <http://www.antlr.org/wiki/display/~admin/2007/07/02/Exploring+Concept+of+TreeWizard> for an overview. Note that the API of the Python implementation is slightly different.
"""
# begin[licence]
#
# [The "BSD licence"]
# Copyright (c) 2005-2008 Terence Parr
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# end[licence]
from antlr3.constants import INVALID_TOKEN_TYPE
from antlr3.tokens import CommonToken
from antlr3.tree import CommonTree, CommonTreeAdaptor
def computeTokenTypes(tokenNames):
    """
    Compute a dict that is an inverted index of
    tokenNames (which maps int token types to names).

    Returns an empty dict when tokenNames is None.
    """
    if tokenNames is None:
        return {}

    # enumerate() yields (tokenType, name) pairs; invert them into a
    # name -> tokenType mapping.  `ttype` avoids shadowing the builtin
    # `type`, which the original generator expression did.
    return dict(
        (name, ttype) for ttype, name in enumerate(tokenNames)
        )
## token types for pattern parser
EOF = -1      # end of pattern input
BEGIN = 1     # '(' -- start of a subtree
END = 2       # ')' -- end of a subtree
ID = 3        # identifier / token name (text in lexer.sval)
ARG = 4       # bracketed text argument '[...]' (text in lexer.sval)
PERCENT = 5   # '%' -- introduces a label
COLON = 6     # ':' -- separates label from token name
DOT = 7       # '.' -- wildcard node
class TreePatternLexer(object):
    """Lexer for tree-pattern strings such as "(A B[foo] %lbl:C)".

    nextToken() yields the token-type constants defined above; the text
    of ID and ARG tokens is left in self.sval.
    """

    def __init__(self, pattern):
        ## The tree pattern to lex like "(A B C)"
        self.pattern = pattern

        ## Index into input string
        self.p = -1

        ## Current char
        self.c = None

        ## How long is the pattern in char?
        self.n = len(pattern)

        ## Set when token type is ID or ARG
        self.sval = None

        # True once an unexpected character has been seen.
        self.error = False

        # Prime self.c with the first character of the pattern.
        self.consume()

    # Characters that may start an identifier, and the larger set that
    # may continue one (digits allowed after the first character).
    __idStartChar = frozenset(
        'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ_'
        )
    __idChar = __idStartChar | frozenset('0123456789')

    def nextToken(self):
        """Return the type of the next token, or EOF at end of input.

        Whitespace is skipped.  On an unexpected character, self.error
        is set to True and EOF is returned.
        """
        self.sval = ""
        while self.c != EOF:
            # Skip whitespace between tokens.
            if self.c in (' ', '\n', '\r', '\t'):
                self.consume()
                continue

            # Identifier: start char followed by any run of id chars.
            if self.c in self.__idStartChar:
                self.sval += self.c
                self.consume()
                while self.c in self.__idChar:
                    self.sval += self.c
                    self.consume()

                return ID

            if self.c == '(':
                self.consume()
                return BEGIN

            if self.c == ')':
                self.consume()
                return END

            if self.c == '%':
                self.consume()
                return PERCENT

            if self.c == ':':
                self.consume()
                return COLON

            if self.c == '.':
                self.consume()
                return DOT

            if self.c == '[': # grab [x] as a string, returning x
                self.consume()
                while self.c != ']':
                    if self.c == '\\':
                        # Backslash escapes ']'; any other escaped char is
                        # kept verbatim, backslash included.
                        self.consume()
                        if self.c != ']':
                            self.sval += '\\'

                        self.sval += self.c

                    else:
                        self.sval += self.c

                    self.consume()

                # Consume the closing ']'.
                self.consume()
                return ARG

            # Unexpected character: flag the error and stop lexing.
            self.consume()
            self.error = True
            return EOF

        return EOF

    def consume(self):
        """Advance to the next input character (EOF past the end)."""
        self.p += 1
        if self.p >= self.n:
            self.c = EOF

        else:
            self.c = self.pattern[self.p]
class TreePatternParser(object):
    """Recursive-descent parser that turns a tree-pattern token stream
    into a tree built via the given adaptor.

    All parse methods return None on a syntax error.
    """

    def __init__(self, tokenizer, wizard, adaptor):
        self.tokenizer = tokenizer   # TreePatternLexer supplying tokens
        self.wizard = wizard         # maps token names to token types
        self.adaptor = adaptor       # builds the actual tree nodes
        self.ttype = tokenizer.nextToken() # kickstart

    def pattern(self):
        """Parse a whole pattern: either a '(...)' tree or a single node."""
        if self.ttype == BEGIN:
            return self.parseTree()

        elif self.ttype == ID:
            node = self.parseNode()
            if self.ttype == EOF:
                return node

            return None # extra junk on end

        return None

    def parseTree(self):
        """Parse '(root child ...)' and return the root node."""
        if self.ttype != BEGIN:
            return None

        self.ttype = self.tokenizer.nextToken()
        root = self.parseNode()
        if root is None:
            return None

        # Children may be nested subtrees or plain nodes (possibly
        # labelled or wildcards).
        while self.ttype in (BEGIN, ID, PERCENT, DOT):
            if self.ttype == BEGIN:
                subtree = self.parseTree()
                self.adaptor.addChild(root, subtree)

            else:
                child = self.parseNode()
                if child is None:
                    return None

                self.adaptor.addChild(root, child)

        if self.ttype != END:
            return None

        self.ttype = self.tokenizer.nextToken()
        return root

    def parseNode(self):
        """Parse one node: optional '%label:' prefix, then a wildcard
        '.', 'nil', or 'ID' with an optional '[text]' argument."""
        # "%label:" prefix
        label = None
        if self.ttype == PERCENT:
            self.ttype = self.tokenizer.nextToken()
            if self.ttype != ID:
                return None

            label = self.tokenizer.sval
            self.ttype = self.tokenizer.nextToken()
            if self.ttype != COLON:
                return None

            self.ttype = self.tokenizer.nextToken() # move to ID following colon

        # Wildcard?
        if self.ttype == DOT:
            self.ttype = self.tokenizer.nextToken()
            wildcardPayload = CommonToken(0, ".")
            node = WildcardTreePattern(wildcardPayload)
            if label is not None:
                node.label = label
            return node

        # "ID" or "ID[arg]"
        if self.ttype != ID:
            return None

        tokenName = self.tokenizer.sval
        self.ttype = self.tokenizer.nextToken()

        if tokenName == "nil":
            return self.adaptor.nil()

        text = tokenName
        # check for arg
        arg = None
        if self.ttype == ARG:
            arg = self.tokenizer.sval
            text = arg
            self.ttype = self.tokenizer.nextToken()

        # create node
        treeNodeType = self.wizard.getTokenType(tokenName)
        if treeNodeType == INVALID_TOKEN_TYPE:
            return None

        node = self.adaptor.createFromType(treeNodeType, text)

        # Only TreePattern nodes can carry label / hasTextArg metadata;
        # plain adaptors may create ordinary tree nodes.
        if label is not None and isinstance(node, TreePattern):
            node.label = label

        if arg is not None and isinstance(node, TreePattern):
            node.hasTextArg = True

        return node
class TreePattern(CommonTree):
    """
    When using %label:TOKENNAME in a tree for parse(), we must
    track the label.
    """

    def __init__(self, payload):
        CommonTree.__init__(self, payload)

        # Label bound via '%label:'; None when the node is unlabelled.
        self.label = None
        # Set to True when the pattern carried a '[text]' argument.
        self.hasTextArg = None

    def toString(self):
        base = CommonTree.toString(self)
        if self.label is None:
            return base
        return '%' + self.label + ':' + base
class WildcardTreePattern(TreePattern):
    """Pattern node produced for the '.' wildcard in a tree pattern."""
    pass
class TreePatternTreeAdaptor(CommonTreeAdaptor):
    """This adaptor creates TreePattern objects for use during scan()"""

    def createWithPayload(self, payload):
        # Wrap the payload in a TreePattern so label/hasTextArg metadata
        # can be attached during pattern parsing.
        return TreePattern(payload)
class TreeWizard(object):
    """
    Build and navigate trees with this object.  Must know about the names
    of tokens so you have to pass in a map or array of token names (from
    which this class can build the map).  I.e., Token DECL means nothing
    unless the class can translate it to a token type.

    In order to create nodes and navigate, this class needs a TreeAdaptor.

    This class can build a token type -> node index for repeated use or for
    iterating over the various nodes with a particular type.

    This class works in conjunction with the TreeAdaptor rather than moving
    all this functionality into the adaptor.  An adaptor helps build and
    navigate trees using methods.  This class helps you do it with string
    patterns like "(A B C)".  You can create a tree from that pattern or
    match subtrees against it.
    """

    def __init__(self, adaptor=None, tokenNames=None, typeMap=None):
        """Create a wizard.

        adaptor is the TreeAdaptor used to create and inspect nodes.
        tokenNames (a sequence) and typeMap (a dict name -> type) are
        mutually exclusive ways of supplying the token-name mapping;
        passing both raises ValueError.
        """
        self.adaptor = adaptor
        if typeMap is None:
            self.tokenNameToTypeMap = computeTokenTypes(tokenNames)
        else:
            if tokenNames is not None:
                raise ValueError("Can't have both tokenNames and typeMap")
            self.tokenNameToTypeMap = typeMap

    def getTokenType(self, tokenName):
        """Using the map of token names to token types, return the type.

        Returns INVALID_TOKEN_TYPE for unknown names.
        """
        try:
            return self.tokenNameToTypeMap[tokenName]
        except KeyError:
            return INVALID_TOKEN_TYPE

    def create(self, pattern):
        """
        Create a tree or node from the indicated tree pattern that closely
        follows ANTLR tree grammar tree element syntax:

            (root child1 ... child2).

        You can also just pass in a node: ID

        Any node can have a text argument: ID[foo]
        (notice there are no quotes around foo--it's clear it's a string).

        nil is a special name meaning "give me a nil node".  Useful for
        making lists: (nil A B C) is a list of A B C.
        """
        tokenizer = TreePatternLexer(pattern)
        parser = TreePatternParser(tokenizer, self, self.adaptor)
        return parser.pattern()

    def index(self, tree):
        """Walk the entire tree and make a node name to nodes mapping.

        For now, use recursion but later nonrecursive version may be
        more efficient.  Returns a dict int -> list where the list is
        of your AST node type.  The int is the token type of the node.
        """
        m = {}
        self._index(tree, m)
        return m

    def _index(self, t, m):
        """Do the work for index: recursively append each node of t to
        the list in m keyed by the node's token type."""
        if t is None:
            return
        ttype = self.adaptor.getType(t)
        elements = m.get(ttype)
        if elements is None:
            # first node of this type: create the bucket
            m[ttype] = elements = []
        elements.append(t)
        for i in range(self.adaptor.getChildCount(t)):
            child = self.adaptor.getChild(t, i)
            self._index(child, m)

    def find(self, tree, what):
        """Return a list of matching tokens.

        what may either be an integer specifying the token type to find or
        a string with a pattern that must be matched.  Raises TypeError
        for any other argument type.
        """
        if isinstance(what, (int, long)):
            return self._findTokenType(tree, what)
        elif isinstance(what, basestring):
            return self._findPattern(tree, what)
        else:
            raise TypeError("'what' must be string or integer")

    def _findTokenType(self, t, ttype):
        """Return a List of tree nodes with token type ttype"""
        nodes = []

        def visitor(tree, parent, childIndex, labels):
            # collect every visited node; visit() only calls us for
            # nodes whose type is ttype
            nodes.append(tree)

        self.visit(t, ttype, visitor)
        return nodes

    def _findPattern(self, t, pattern):
        """Return a List of subtrees matching pattern.

        Returns None (not an empty list) when the pattern itself is
        invalid: unparsable, nil-rooted, or wildcard-rooted.
        """
        subtrees = []
        # Create a TreePattern from the pattern
        tokenizer = TreePatternLexer(pattern)
        parser = TreePatternParser(tokenizer, self, TreePatternTreeAdaptor())
        tpattern = parser.pattern()
        # don't allow invalid patterns
        if (tpattern is None or tpattern.isNil()
            or isinstance(tpattern, WildcardTreePattern)):
            return None
        rootTokenType = tpattern.getType()

        def visitor(tree, parent, childIndex, label):
            # only keep nodes whose whole subtree matches the pattern
            if self._parse(tree, tpattern, None):
                subtrees.append(tree)

        self.visit(t, rootTokenType, visitor)
        return subtrees

    def visit(self, tree, what, visitor):
        """Visit every node in tree matching what, invoking the visitor.

        If what is a string, it is parsed as a pattern and only matching
        subtrees will be visited.

        The implementation uses the root node of the pattern in combination
        with visit(t, ttype, visitor) so nil-rooted patterns are not allowed.
        Patterns with wildcard roots are also not allowed.

        If what is an integer, it is used as a token type and visit will
        match all nodes of that type (this is faster than the pattern
        match).  The labels arg of the visitor action method is never set
        (it's None) since using a token type rather than a pattern doesn't
        let us set a label.
        """
        if isinstance(what, (int, long)):
            self._visitType(tree, None, 0, what, visitor)
        elif isinstance(what, basestring):
            self._visitPattern(tree, what, visitor)
        else:
            raise TypeError("'what' must be string or integer")

    def _visitType(self, t, parent, childIndex, ttype, visitor):
        """Do the recursive work for visit"""
        if t is None:
            return
        if self.adaptor.getType(t) == ttype:
            visitor(t, parent, childIndex, None)
        for i in range(self.adaptor.getChildCount(t)):
            child = self.adaptor.getChild(t, i)
            self._visitType(child, t, i, ttype, visitor)

    def _visitPattern(self, tree, pattern, visitor):
        """
        For all subtrees that match the pattern, execute the visit action.
        """
        # Create a TreePattern from the pattern
        tokenizer = TreePatternLexer(pattern)
        parser = TreePatternParser(tokenizer, self, TreePatternTreeAdaptor())
        tpattern = parser.pattern()
        # don't allow invalid patterns
        if (tpattern is None or tpattern.isNil()
            or isinstance(tpattern, WildcardTreePattern)):
            return
        rootTokenType = tpattern.getType()

        def rootvisitor(tree, parent, childIndex, labels):
            # fresh label map per candidate subtree
            labels = {}
            if self._parse(tree, tpattern, labels):
                visitor(tree, parent, childIndex, labels)

        self.visit(tree, rootTokenType, rootvisitor)

    def parse(self, t, pattern, labels=None):
        """
        Given a pattern like (ASSIGN %lhs:ID %rhs:.) with optional labels
        on the various nodes and '.' (dot) as the node/subtree wildcard,
        return true if the pattern matches and fill the labels Map with
        the labels pointing at the appropriate nodes.  Return false if
        the pattern is malformed or the tree does not match.

        If a node specifies a text arg in pattern, then that must match
        for that node in t.
        """
        tokenizer = TreePatternLexer(pattern)
        parser = TreePatternParser(tokenizer, self, TreePatternTreeAdaptor())
        tpattern = parser.pattern()
        return self._parse(t, tpattern, labels)

    def _parse(self, t1, t2, labels):
        """
        Do the work for parse.  Check to see if the t2 pattern fits the
        structure and token types in t1.  Check text if the pattern has
        text arguments on nodes.  Fill labels map with pointers to nodes
        in tree matched against nodes in pattern with labels.
        """
        # make sure both are non-null
        if t1 is None or t2 is None:
            return False
        # check roots (wildcard matches anything)
        if not isinstance(t2, WildcardTreePattern):
            if self.adaptor.getType(t1) != t2.getType():
                return False
            if t2.hasTextArg and self.adaptor.getText(t1) != t2.getText():
                return False
        if t2.label is not None and labels is not None:
            # map label in pattern to node in t1
            labels[t2.label] = t1
        # check children
        n1 = self.adaptor.getChildCount(t1)
        n2 = t2.getChildCount()
        if n1 != n2:
            return False
        for i in range(n1):
            child1 = self.adaptor.getChild(t1, i)
            child2 = t2.getChild(i)
            if not self._parse(child1, child2, labels):
                return False
        return True

    def equals(self, t1, t2, adaptor=None):
        """
        Compare t1 and t2; return true if token types/text, structure match
        exactly.

        The trees are examined in their entirety so that (A B) does not
        match (A B C) nor (A (B C)).
        """
        if adaptor is None:
            adaptor = self.adaptor
        return self._equals(t1, t2, adaptor)

    def _equals(self, t1, t2, adaptor):
        """Recursive worker for equals(): compare type, text and children."""
        # make sure both are non-null
        if t1 is None or t2 is None:
            return False
        # check roots
        if adaptor.getType(t1) != adaptor.getType(t2):
            return False
        if adaptor.getText(t1) != adaptor.getText(t2):
            return False
        # check children
        n1 = adaptor.getChildCount(t1)
        n2 = adaptor.getChildCount(t2)
        if n1 != n2:
            return False
        for i in range(n1):
            child1 = adaptor.getChild(t1, i)
            child2 = adaptor.getChild(t2, i)
            if not self._equals(child1, child2, adaptor):
                return False
        return True
| Python |
"""ANTLR3 runtime package"""
# begin[licence]
#
# [The "BSD licence"]
# Copyright (c) 2005-2008 Terence Parr
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# end[licence]
import sys
import inspect
from antlr3 import runtime_version, runtime_version_str
from antlr3.constants import DEFAULT_CHANNEL, HIDDEN_CHANNEL, EOF, \
EOR_TOKEN_TYPE, INVALID_TOKEN_TYPE
from antlr3.exceptions import RecognitionException, MismatchedTokenException, \
MismatchedRangeException, MismatchedTreeNodeException, \
NoViableAltException, EarlyExitException, MismatchedSetException, \
MismatchedNotSetException, FailedPredicateException, \
BacktrackingFailed, UnwantedTokenException, MissingTokenException
from antlr3.tokens import CommonToken, EOF_TOKEN, SKIP_TOKEN
from antlr3.compat import set, frozenset, reversed
class RecognizerSharedState(object):
    """
    The set of fields needed by an abstract recognizer to recognize input
    and recover from errors etc...  As a separate state object, it can be
    shared among multiple grammars; e.g., when one grammar imports another.

    These fields are publically visible but the actual state pointer per
    parser is protected.
    """

    def __init__(self):
        # --- error recovery --------------------------------------------
        # Stack (grows upward) of the token sets that can follow each
        # rule invocation currently in progress.
        self.following = []
        # True from the moment an error is reported until a token is
        # successfully matched; suppresses duplicate error messages.
        self.errorRecovery = False
        # Input index of the last error.  Guarantees that at least one
        # token/tree node is consumed between two errors, so recovery
        # can never loop forever on the same position.
        self.lastErrorIndex = -1
        # How many syntax errors the recognizer has seen.
        self.syntaxErrors = 0

        # --- backtracking / memoization --------------------------------
        # 0 means no backtracking is going on (safe to run actions);
        # >0 is the current backtracking nesting level.
        self.backtracking = 0
        # Per-rule memoization tables: ruleMemo[ruleIndex] maps a rule
        # start index to its stop index (or MEMO_RULE_FAILED).  None when
        # memoization is not in use.
        self.ruleMemo = None

        # --- lexer fields (kept here to avoid casts in generated code) -
        # The token object being built; nextToken() returns it once one
        # or more lexer rules have matched.  Subclasses that emit several
        # tokens set this to something non-None to disable auto-emit.
        self.token = None
        # Char index in the stream where the current token started.
        self.tokenStartCharIndex = -1
        # Line on which the token's first character resides.
        self.tokenStartLine = None
        # Column of the token's first character.
        self.tokenStartCharPositionInLine = None
        # Channel number for the current token.
        self.channel = None
        # Token type for the current token.
        self.type = None
        # Override for the token text; when None the text comes from the
        # input char buffer.
        self.text = None
class BaseRecognizer(object):
"""
@brief Common recognizer functionality.
A generic recognizer that can handle recognizers generated from
lexer, parser, and tree grammars. This is all the parsing
support code essentially; most of it is error recovery stuff and
backtracking.
"""
MEMO_RULE_FAILED = -2
MEMO_RULE_UNKNOWN = -1
# copies from Token object for convenience in actions
DEFAULT_TOKEN_CHANNEL = DEFAULT_CHANNEL
# for convenience in actions
HIDDEN = HIDDEN_CHANNEL
# overridden by generated subclasses
tokenNames = None
# The antlr_version attribute has been introduced in 3.1. If it is not
# overwritten in the generated recognizer, we assume a default of 3.0.1.
antlr_version = (3, 0, 1, 0)
antlr_version_str = "3.0.1"
    def __init__(self, state=None):
        """Initialize the recognizer, optionally sharing 'state'.

        state -- a RecognizerSharedState to share with other recognizers
        (used for grammar imports); a fresh one is created when None.

        Raises RuntimeError when this runtime is older than the ANTLR
        tool version that generated the subclass, or (for pre-3.1
        recognizers) when the versions do not match exactly.
        """
        # Input stream of the recognizer. Must be initialized by a subclass.
        self.input = None
        ## State of a lexer, parser, or tree parser are collected into a
        # state object so the state can be shared.  This sharing is needed
        # to have one grammar import others and share the same error
        # variables and other state variables.  It's a kind of explicit
        # multiple inheritance via delegation of methods and shared state.
        if state is None:
            state = RecognizerSharedState()
        self._state = state
        if self.antlr_version > runtime_version:
            # recognizer was generated by a newer tool than this runtime
            raise RuntimeError(
                "ANTLR version mismatch: "
                "The recognizer has been generated by V%s, but this runtime "
                "is V%s. Please use the V%s runtime or higher."
                % (self.antlr_version_str,
                   runtime_version_str,
                   self.antlr_version_str))
        elif (self.antlr_version < (3, 1, 0, 0) and
              self.antlr_version != runtime_version):
            # FIXME: make the runtime compatible with 3.0.1 codegen
            # and remove this block.
            raise RuntimeError(
                "ANTLR version mismatch: "
                "The recognizer has been generated by V%s, but this runtime "
                "is V%s. Please use the V%s runtime."
                % (self.antlr_version_str,
                   runtime_version_str,
                   self.antlr_version_str))
# this one only exists to shut up pylint :(
    def setInput(self, input):
        # Attach the input stream (token/char/node stream) to this
        # recognizer; exists mainly so linters see self.input assigned.
        self.input = input
def reset(self):
"""
reset the parser's state; subclasses must rewinds the input stream
"""
# wack everything related to error recovery
if self._state is None:
# no shared state work to do
return
self._state.following = []
self._state.errorRecovery = False
self._state.lastErrorIndex = -1
self._state.syntaxErrors = 0
# wack everything related to backtracking and memoization
self._state.backtracking = 0
if self._state.ruleMemo is not None:
self._state.ruleMemo = {}
    def match(self, input, ttype, follow):
        """
        Match current input symbol against ttype.  Attempt
        single token insertion or deletion error recovery.  If
        that fails, throw MismatchedTokenException.

        To turn off single token insertion or deletion error
        recovery, override mismatchRecover() and have it call
        plain mismatch(), which does not recover.  Then any error
        in a rule will cause an exception and immediate exit from
        rule.  Rule would recover by resynchronizing to the set of
        symbols that can follow rule ref.

        Returns the matched symbol (or the recovery result); raises
        BacktrackingFailed instead of recovering while backtracking.
        """
        matchedSymbol = self.getCurrentInputSymbol(input)
        if self.input.LA(1) == ttype:
            self.input.consume()
            # a successful match ends any ongoing error recovery
            self._state.errorRecovery = False
            return matchedSymbol
        if self._state.backtracking > 0:
            # FIXME: need to return matchedSymbol here as well. damn!!
            raise BacktrackingFailed
        matchedSymbol = self.recoverFromMismatchedToken(input, ttype, follow)
        return matchedSymbol
def matchAny(self, input):
"""Match the wildcard: in a symbol"""
self._state.errorRecovery = False
self.input.consume()
def mismatchIsUnwantedToken(self, input, ttype):
return input.LA(2) == ttype
    def mismatchIsMissingToken(self, input, follow):
        """Return True if the mismatch looks like a missing token.

        follow is the set of token types that may legally come after the
        expected token; None means no follow information is available, in
        which case we answer False and recovery can only consume a token
        and hope for the best.
        """
        if follow is None:
            # we have no information about the follow; we can only consume
            # a single token and hope for the best
            return False
        # compute what can follow this grammar element reference
        if EOR_TOKEN_TYPE in follow:
            if len(self._state.following) > 0:
                # remove EOR if we're not the start symbol
                follow = follow - set([EOR_TOKEN_TYPE])
            # widen with the exact tokens viable after the whole call chain
            viableTokensFollowingThisRule = self.computeContextSensitiveRuleFOLLOW()
            follow = follow | viableTokensFollowingThisRule
        # if current token is consistent with what could come after set
        # then we know we're missing a token; error recovery is free to
        # "insert" the missing token
        if input.LA(1) in follow or EOR_TOKEN_TYPE in follow:
            return True
        return False
    def mismatch(self, input, ttype, follow):
        """
        Factor out what to do upon token mismatch so tree parsers can behave
        differently.  Override and call mismatchRecover(input, ttype, follow)
        to get single token insertion and deletion.  Use this to turn off
        single token insertion and deletion.  Override mismatchRecover
        to call this instead.

        Never returns normally: raises UnwantedTokenException,
        MissingTokenException or MismatchedTokenException depending on
        the diagnosis.
        """
        if self.mismatchIsUnwantedToken(input, ttype):
            raise UnwantedTokenException(ttype, input)
        elif self.mismatchIsMissingToken(input, follow):
            raise MissingTokenException(ttype, input, None)
        raise MismatchedTokenException(ttype, input)
## def mismatchRecover(self, input, ttype, follow):
## if self.mismatchIsUnwantedToken(input, ttype):
## mte = UnwantedTokenException(ttype, input)
## elif self.mismatchIsMissingToken(input, follow):
## mte = MissingTokenException(ttype, input)
## else:
## mte = MismatchedTokenException(ttype, input)
## self.recoverFromMismatchedToken(input, mte, ttype, follow)
def reportError(self, e):
"""Report a recognition problem.
This method sets errorRecovery to indicate the parser is recovering
not parsing. Once in recovery mode, no errors are generated.
To get out of recovery mode, the parser must successfully match
a token (after a resync). So it will go:
1. error occurs
2. enter recovery mode, report error
3. consume until token found in resynch set
4. try to resume parsing
5. next match() will reset errorRecovery mode
If you override, make sure to update syntaxErrors if you care about
that.
"""
# if we've already reported an error and have not matched a token
# yet successfully, don't report any errors.
if self._state.errorRecovery:
return
self._state.syntaxErrors += 1 # don't count spurious
self._state.errorRecovery = True
self.displayRecognitionError(self.tokenNames, e)
def displayRecognitionError(self, tokenNames, e):
hdr = self.getErrorHeader(e)
msg = self.getErrorMessage(e, tokenNames)
self.emitErrorMessage(hdr+" "+msg)
    def getErrorMessage(self, e, tokenNames):
        """
        What error message should be generated for the various
        exception types?

        Not very object-oriented code, but I like having all error message
        generation within one method rather than spread among all of the
        exception classes.  This also makes it much easier for the exception
        handling because the exception classes do not have to have pointers
        back to this object to access utility routines and so on.  Also,
        changing the message for an exception type would be difficult
        because you would have to subclass the exception, but then somehow
        get ANTLR to make those kinds of exception objects instead of the
        default.  This looks weird, but trust me--it makes the most sense
        in terms of flexibility.

        For grammar debugging, you will want to override this to add
        more information such as the stack frame with
        getRuleInvocationStack(e, this.getClass().getName()) and,
        for no viable alts, the decision description and state etc...

        Override this to change the message generated for one or more
        exception types.
        """
        if isinstance(e, UnwantedTokenException):
            # an extra token is in the stream
            tokenName = "<unknown>"
            if e.expecting == EOF:
                tokenName = "EOF"
            else:
                tokenName = self.tokenNames[e.expecting]
            msg = "extraneous input %s expecting %s" % (
                self.getTokenErrorDisplay(e.getUnexpectedToken()),
                tokenName
                )
        elif isinstance(e, MissingTokenException):
            # a token was left out of the stream
            tokenName = "<unknown>"
            if e.expecting == EOF:
                tokenName = "EOF"
            else:
                tokenName = self.tokenNames[e.expecting]
            msg = "missing %s at %s" % (
                tokenName, self.getTokenErrorDisplay(e.token)
                )
        elif isinstance(e, MismatchedTokenException):
            # wrong token where a specific one was expected
            tokenName = "<unknown>"
            if e.expecting == EOF:
                tokenName = "EOF"
            else:
                tokenName = self.tokenNames[e.expecting]
            msg = "mismatched input " \
                  + self.getTokenErrorDisplay(e.token) \
                  + " expecting " \
                  + tokenName
        elif isinstance(e, MismatchedTreeNodeException):
            tokenName = "<unknown>"
            if e.expecting == EOF:
                tokenName = "EOF"
            else:
                tokenName = self.tokenNames[e.expecting]
            msg = "mismatched tree node: %s expecting %s" \
                  % (e.node, tokenName)
        elif isinstance(e, NoViableAltException):
            msg = "no viable alternative at input " \
                  + self.getTokenErrorDisplay(e.token)
        elif isinstance(e, EarlyExitException):
            msg = "required (...)+ loop did not match anything at input " \
                  + self.getTokenErrorDisplay(e.token)
        elif isinstance(e, MismatchedSetException):
            msg = "mismatched input " \
                  + self.getTokenErrorDisplay(e.token) \
                  + " expecting set " \
                  + repr(e.expecting)
        elif isinstance(e, MismatchedNotSetException):
            msg = "mismatched input " \
                  + self.getTokenErrorDisplay(e.token) \
                  + " expecting set " \
                  + repr(e.expecting)
        elif isinstance(e, FailedPredicateException):
            msg = "rule " \
                  + e.ruleName \
                  + " failed predicate: {" \
                  + e.predicateText \
                  + "}?"
        else:
            # unknown exception type: fall back to its str()
            msg = str(e)
        return msg
def getNumberOfSyntaxErrors(self):
"""
Get number of recognition errors (lexer, parser, tree parser). Each
recognizer tracks its own number. So parser and lexer each have
separate count. Does not count the spurious errors found between
an error and next valid token match
See also reportError()
"""
return self._state.syntaxErrors
def getErrorHeader(self, e):
"""
What is the error header, normally line/character position information?
"""
return "line %d:%d" % (e.line, e.charPositionInLine)
def getTokenErrorDisplay(self, t):
"""
How should a token be displayed in an error message? The default
is to display just the text, but during development you might
want to have a lot of information spit out. Override in that case
to use t.toString() (which, for CommonToken, dumps everything about
the token). This is better than forcing you to override a method in
your token objects because you don't have to go modify your lexer
so that it creates a new Java type.
"""
s = t.text
if s is None:
if t.type == EOF:
s = "<EOF>"
else:
s = "<"+t.type+">"
return repr(s)
    def emitErrorMessage(self, msg):
        """Override this method to change where error messages go.

        The default writes msg plus a newline to stderr.
        """
        sys.stderr.write(msg + '\n')
    def recover(self, input, re):
        """
        Recover from an error found on the input stream.  This is
        for NoViableAlt and mismatched symbol exceptions.  If you enable
        single token insertion and deletion, this will usually not
        handle mismatched symbol exceptions but there could be a mismatched
        token that the match() routine could not recover from.

        Consumes tokens until one in the computed recovery set is seen.
        """
        # PROBLEM? what if input stream is not the same as last time
        # perhaps make lastErrorIndex a member of input
        if self._state.lastErrorIndex == input.index():
            # uh oh, another error at same token index; must be a case
            # where LT(1) is in the recovery token set so nothing is
            # consumed; consume a single token so at least to prevent
            # an infinite loop; this is a failsafe.
            input.consume()
        self._state.lastErrorIndex = input.index()
        followSet = self.computeErrorRecoverySet()
        self.beginResync()
        self.consumeUntil(input, followSet)
        self.endResync()
    def beginResync(self):
        """
        A hook to listen in on the token consumption during error recovery.
        The DebugParser subclasses this to fire events to the listener.
        """
        pass
    def endResync(self):
        """
        A hook to listen in on the token consumption during error recovery.
        The DebugParser subclasses this to fire events to the listener.
        """
        pass
def computeErrorRecoverySet(self):
"""
Compute the error recovery set for the current rule. During
rule invocation, the parser pushes the set of tokens that can
follow that rule reference on the stack; this amounts to
computing FIRST of what follows the rule reference in the
enclosing rule. This local follow set only includes tokens
from within the rule; i.e., the FIRST computation done by
ANTLR stops at the end of a rule.
EXAMPLE
When you find a "no viable alt exception", the input is not
consistent with any of the alternatives for rule r. The best
thing to do is to consume tokens until you see something that
can legally follow a call to r *or* any rule that called r.
You don't want the exact set of viable next tokens because the
input might just be missing a token--you might consume the
rest of the input looking for one of the missing tokens.
Consider grammar:
a : '[' b ']'
| '(' b ')'
;
b : c '^' INT ;
c : ID
| INT
;
At each rule invocation, the set of tokens that could follow
that rule is pushed on a stack. Here are the various "local"
follow sets:
FOLLOW(b1_in_a) = FIRST(']') = ']'
FOLLOW(b2_in_a) = FIRST(')') = ')'
FOLLOW(c_in_b) = FIRST('^') = '^'
Upon erroneous input "[]", the call chain is
a -> b -> c
and, hence, the follow context stack is:
depth local follow set after call to rule
0 \<EOF> a (from main())
1 ']' b
3 '^' c
Notice that ')' is not included, because b would have to have
been called from a different context in rule a for ')' to be
included.
For error recovery, we cannot consider FOLLOW(c)
(context-sensitive or otherwise). We need the combined set of
all context-sensitive FOLLOW sets--the set of all tokens that
could follow any reference in the call chain. We need to
resync to one of those tokens. Note that FOLLOW(c)='^' and if
we resync'd to that token, we'd consume until EOF. We need to
sync to context-sensitive FOLLOWs for a, b, and c: {']','^'}.
In this case, for input "[]", LA(1) is in this set so we would
not consume anything and after printing an error rule c would
return normally. It would not find the required '^' though.
At this point, it gets a mismatched token error and throws an
exception (since LA(1) is not in the viable following token
set). The rule exception handler tries to recover, but finds
the same recovery set and doesn't consume anything. Rule b
exits normally returning to rule a. Now it finds the ']' (and
with the successful match exits errorRecovery mode).
So, you cna see that the parser walks up call chain looking
for the token that was a member of the recovery set.
Errors are not generated in errorRecovery mode.
ANTLR's error recovery mechanism is based upon original ideas:
"Algorithms + Data Structures = Programs" by Niklaus Wirth
and
"A note on error recovery in recursive descent parsers":
http://portal.acm.org/citation.cfm?id=947902.947905
Later, Josef Grosch had some good ideas:
"Efficient and Comfortable Error Recovery in Recursive Descent
Parsers":
ftp://www.cocolab.com/products/cocktail/doca4.ps/ell.ps.zip
Like Grosch I implemented local FOLLOW sets that are combined
at run-time upon error to avoid overhead during parsing.
"""
return self.combineFollows(False)
def computeContextSensitiveRuleFOLLOW(self):
"""
Compute the context-sensitive FOLLOW set for current rule.
This is set of token types that can follow a specific rule
reference given a specific call chain. You get the set of
viable tokens that can possibly come next (lookahead depth 1)
given the current call chain. Contrast this with the
definition of plain FOLLOW for rule r:
FOLLOW(r)={x | S=>*alpha r beta in G and x in FIRST(beta)}
where x in T* and alpha, beta in V*; T is set of terminals and
V is the set of terminals and nonterminals. In other words,
FOLLOW(r) is the set of all tokens that can possibly follow
references to r in *any* sentential form (context). At
runtime, however, we know precisely which context applies as
we have the call chain. We may compute the exact (rather
than covering superset) set of following tokens.
For example, consider grammar:
stat : ID '=' expr ';' // FOLLOW(stat)=={EOF}
| "return" expr '.'
;
expr : atom ('+' atom)* ; // FOLLOW(expr)=={';','.',')'}
atom : INT // FOLLOW(atom)=={'+',')',';','.'}
| '(' expr ')'
;
The FOLLOW sets are all inclusive whereas context-sensitive
FOLLOW sets are precisely what could follow a rule reference.
For input input "i=(3);", here is the derivation:
stat => ID '=' expr ';'
=> ID '=' atom ('+' atom)* ';'
=> ID '=' '(' expr ')' ('+' atom)* ';'
=> ID '=' '(' atom ')' ('+' atom)* ';'
=> ID '=' '(' INT ')' ('+' atom)* ';'
=> ID '=' '(' INT ')' ';'
At the "3" token, you'd have a call chain of
stat -> expr -> atom -> expr -> atom
What can follow that specific nested ref to atom? Exactly ')'
as you can see by looking at the derivation of this specific
input. Contrast this with the FOLLOW(atom)={'+',')',';','.'}.
You want the exact viable token set when recovering from a
token mismatch. Upon token mismatch, if LA(1) is member of
the viable next token set, then you know there is most likely
a missing token in the input stream. "Insert" one by just not
throwing an exception.
"""
return self.combineFollows(True)
    def combineFollows(self, exact):
        """Union the local follow sets on the rule-invocation stack.

        Walks the stack from top (innermost rule) down.  When exact is
        True, stop combining at the first local follow set that does not
        contain EOR_TOKEN_TYPE (deeper contexts are unreachable until
        that rule's own following tokens appear), and keep EOR in the
        result only for the start rule, signalling that FOLLOW(start
        rule) -- i.e. EOF -- applies.
        """
        followSet = set()
        for idx, localFollowSet in reversed(list(enumerate(self._state.following))):
            followSet |= localFollowSet
            if exact:
                # can we see end of rule?
                if EOR_TOKEN_TYPE in localFollowSet:
                    # Only leave EOR in set if at top (start rule); this lets
                    # us know if have to include follow(start rule); i.e., EOF
                    if idx > 0:
                        followSet.remove(EOR_TOKEN_TYPE)
                else:
                    # can't see end of rule, quit
                    break
        return followSet
def recoverFromMismatchedToken(self, input, ttype, follow):
"""Attempt to recover from a single missing or extra token.
EXTRA TOKEN
LA(1) is not what we are looking for. If LA(2) has the right token,
however, then assume LA(1) is some extra spurious token. Delete it
and LA(2) as if we were doing a normal match(), which advances the
input.
MISSING TOKEN
If current token is consistent with what could come after
ttype then it is ok to 'insert' the missing token, else throw
exception For example, Input 'i=(3;' is clearly missing the
')'. When the parser returns from the nested call to expr, it
will have call chain:
stat -> expr -> atom
and it will be trying to match the ')' at this point in the
derivation:
=> ID '=' '(' INT ')' ('+' atom)* ';'
^
match() will see that ';' doesn't match ')' and report a
mismatched token error. To recover, it sees that LA(1)==';'
is in the set of tokens that can follow the ')' token
reference in rule atom. It can assume that you forgot the ')'.
"""
e = None
# if next token is what we are looking for then "delete" this token
if self. mismatchIsUnwantedToken(input, ttype):
e = UnwantedTokenException(ttype, input)
self.beginResync()
input.consume() # simply delete extra token
self.endResync()
# report after consuming so AW sees the token in the exception
self.reportError(e)
# we want to return the token we're actually matching
matchedSymbol = self.getCurrentInputSymbol(input)
# move past ttype token as if all were ok
input.consume()
return matchedSymbol
# can't recover with single token deletion, try insertion
if self.mismatchIsMissingToken(input, follow):
inserted = self.getMissingSymbol(input, e, ttype, follow)
e = MissingTokenException(ttype, input, inserted)
# report after inserting so AW sees the token in the exception
self.reportError(e)
return inserted
# even that didn't work; must throw the exception
e = MismatchedTokenException(ttype, input)
raise e
    def recoverFromMismatchedSet(self, input, e, follow):
        """Not currently used.

        Set-mismatch analogue of recoverFromMismatchedToken: only the
        missing-token ("insertion") half is implemented; otherwise the
        original exception e is re-raised.
        """
        if self.mismatchIsMissingToken(input, follow):
            self.reportError(e)
            # we don't know how to conjure up a token for sets yet
            return self.getMissingSymbol(input, e, INVALID_TOKEN_TYPE, follow)
        # TODO do single token deletion like above for Token mismatch
        raise e
def getCurrentInputSymbol(self, input):
"""
Match needs to return the current input symbol, which gets put
into the label for the associated token ref; e.g., x=ID. Token
and tree parsers need to return different objects. Rather than test
for input stream type or change the IntStream interface, I use
a simple method to ask the recognizer to tell me what the current
input symbol is.
This is ignored for lexers.
"""
return None
def getMissingSymbol(self, input, e, expectedTokenType, follow):
    """Conjure up a missing token during error recovery; None by default.

    The recognizer recovers from single missing symbols, but actions may
    refer to the missing token (e.g. x=ID {f($x);} assumes $x exists).
    When the next token in the stream is the one that should follow the
    missing one, a stand-in token is fabricated so parsing can continue.

    Override this to control what object is produced — typically you
    want something special for identifier tokens, while the default
    CommonToken of the appropriate type suffices for literals such as
    '{' and ','.
    """
    return None
## def recoverFromMissingElement(self, input, e, follow):
## """
## This code is factored out from mismatched token and mismatched set
## recovery. It handles "single token insertion" error recovery for
## both. No tokens are consumed to recover from insertions. Return
## true if recovery was possible else return false.
## """
## if self.mismatchIsMissingToken(input, follow):
## self.reportError(e)
## return True
## # nothing to do; throw exception
## return False
def consumeUntil(self, input, tokenTypes):
    """Consume tokens until one matches the given token or token set.

    tokenTypes can be a single token type or a set of token types.
    """
    if not isinstance(tokenTypes, (set, frozenset)):
        tokenTypes = frozenset([tokenTypes])

    # throw away lookahead until we hit EOF or a wanted type
    while True:
        la = input.LA(1)
        if la == EOF or la in tokenTypes:
            break
        input.consume()
def getRuleInvocationStack(self):
    """Return a list of the rule names leading up to this call.

    Useful for error messages and context-sensitive error recovery.
    Override for more detail (e.g. file/line of each rule invocation).

    Beware when subclassing a generated recognizer: the default
    implementation searches only the module of self for rules, and a
    subclass defines no rules of its own.  In that case override as

        def getRuleInvocationStack(self):
            return self._getRuleInvocationStack(<class>.__module__)

    where <class> is the generated recognizer class, i.e. the
    superclass of self.
    """
    module = self.__module__
    return self._getRuleInvocationStack(module)
def _getRuleInvocationStack(cls, module):
    """
    A more general version of getRuleInvocationStack where you can
    pass in, for example, a RecognitionException to get it's rule
    stack trace.  This routine is shared with all recognizers, hence,
    static.

    Walks the live Python call stack and collects the names of code
    objects defined in `module`, oldest frame first.

    TODO: move to a utility class or something; weird having lexer call
    this
    """
    # mmmhhh,... perhaps look at the first argument
    # (f_locals[co_varnames[0]]?) and test if it's a (sub)class of
    # requested recognizer...

    rules = []
    # inspect.stack() is innermost-first; reverse so the outermost
    # (first-invoked) rule appears first in the result.
    for frame in reversed(inspect.stack()):
        code = frame[0].f_code
        codeMod = inspect.getmodule(code)
        if codeMod is None:
            # builtins / frames with no resolvable module
            continue

        # skip frames not in requested module
        if codeMod.__name__ != module:
            continue

        # skip some unwanted names
        if code.co_name in ('nextToken', '<module>'):
            continue

        rules.append(code.co_name)

    return rules

# pre-decorator idiom: turn the function above into a classmethod
_getRuleInvocationStack = classmethod(_getRuleInvocationStack)
def getBacktrackingLevel(self):
    """Return the current backtracking depth (0 = not backtracking)."""
    state = self._state
    return state.backtracking
def getGrammarFileName(self):
    """Return the grammar file name, for debugging and other purposes.

    ANTLR generates the grammarFileName attribute on the recognizer.
    """
    return self.grammarFileName
def getSourceName(self):
    """Abstract: subclasses must report the name of their input source."""
    raise NotImplementedError
def toStrings(self, tokens):
    """Convert a list of tokens to a list of their .text, or None.

    A convenience method for use most often with template rewrites.
    """
    if tokens is None:
        return None

    texts = []
    for tok in tokens:
        texts.append(tok.text)
    return texts
def getRuleMemoization(self, ruleIndex, ruleStartIndex):
    """Look up the memoized stop index for (rule, start position).

    Returns MEMO_RULE_UNKNOWN if this rule has never parsed input
    starting at ruleStartIndex; otherwise the index of the last token
    matched by the rule when it ran from that position.
    """
    # lazily create the per-rule dictionary on first query
    ruleDict = self._state.ruleMemo.setdefault(ruleIndex, {})
    return ruleDict.get(ruleStartIndex, self.MEMO_RULE_UNKNOWN)
def alreadyParsedRule(self, input, ruleIndex):
    """Has this rule already parsed input at the current stream index?

    Returns False when no memoized result exists.  Raises
    BacktrackingFailed when a previous attempt at this position failed.
    Otherwise seeks the input one past the memoized stop token (a
    deliberate side effect) and returns True.
    """
    stopIndex = self.getRuleMemoization(ruleIndex, input.index())

    if stopIndex == self.MEMO_RULE_UNKNOWN:
        return False

    if stopIndex == self.MEMO_RULE_FAILED:
        raise BacktrackingFailed

    # success last time: skip straight past what the rule matched
    input.seek(stopIndex + 1)
    return True
def memoize(self, input, ruleIndex, ruleStartIndex, success):
    """Record whether this rule parsed the input here successfully.

    On success the stop token index is stored; on failure the
    MEMO_RULE_FAILED sentinel.  Rules without a memo dict are ignored.
    """
    stopTokenIndex = input.index() - 1 if success else self.MEMO_RULE_FAILED

    ruleDict = self._state.ruleMemo.get(ruleIndex)
    if ruleDict is not None:
        ruleDict[ruleStartIndex] = stopTokenIndex
def traceIn(self, ruleName, ruleIndex, inputSymbol):
    """Debug hook: print rule entry, plus backtracking depth if active."""
    out = sys.stdout
    out.write("enter %s %s" % (ruleName, inputSymbol))
    if self._state.backtracking > 0:
        out.write(" backtracking=%s" % self._state.backtracking)
    out.write('\n')
def traceOut(self, ruleName, ruleIndex, inputSymbol):
    """Debug hook: print rule exit, plus backtracking depth if active."""
    out = sys.stdout
    out.write("exit %s %s" % (ruleName, inputSymbol))
    if self._state.backtracking > 0:
        out.write(" backtracking=%s" % self._state.backtracking)
    out.write('\n')
class TokenSource(object):
"""
@brief Abstract baseclass for token producers.
A source of tokens must provide a sequence of tokens via nextToken()
and also must reveal it's source of characters; CommonToken's text is
computed from a CharStream; it only store indices into the char stream.
Errors from the lexer are never passed to the parser. Either you want
to keep going or you do not upon token recognition error. If you do not
want to continue lexing then you do not want to continue parsing. Just
throw an exception not under RecognitionException and Java will naturally
toss you all the way out of the recognizers. If you want to continue
lexing then you should not throw an exception to the parser--it has already
requested a token. Keep lexing until you get a valid one. Just report
errors and keep going, looking for a valid token.
"""
def nextToken(self):
"""Return a Token object from your input stream (usually a CharStream).
Do not fail/return upon lexing error; keep chewing on the characters
until you get a good one; errors are not passed through to the parser.
"""
raise NotImplementedError
def __iter__(self):
"""The TokenSource is an interator.
The iteration will not include the final EOF token, see also the note
for the next() method.
"""
return self
def next(self):
"""Return next token or raise StopIteration.
Note that this will raise StopIteration when hitting the EOF token,
so EOF will not be part of the iteration.
"""
token = self.nextToken()
if token is None or token.type == EOF:
raise StopIteration
return token
class Lexer(BaseRecognizer, TokenSource):
    """
    @brief Baseclass for generated lexer classes.

    A lexer is recognizer that draws input symbols from a character stream.
    lexer grammars result in a subclass of this object. A Lexer object
    uses simplified match() and error recovery mechanisms in the interest
    of speed.
    """

    def __init__(self, input, state=None):
        BaseRecognizer.__init__(self, state)
        TokenSource.__init__(self)

        # Where is the lexer drawing characters from?
        self.input = input

    def reset(self):
        """Reset recognizer state and rewind the char stream."""
        BaseRecognizer.reset(self) # reset all recognizer state variables

        if self.input is not None:
            # rewind the input
            self.input.seek(0)

        if self._state is None:
            # no shared state work to do
            return

        # wack Lexer state variables back to pristine values
        self._state.token = None
        self._state.type = INVALID_TOKEN_TYPE
        self._state.channel = DEFAULT_CHANNEL
        self._state.tokenStartCharIndex = -1
        self._state.tokenStartLine = -1
        self._state.tokenStartCharPositionInLine = -1
        self._state.text = None

    def nextToken(self):
        """
        Return a token from this source; i.e., match a token on the char
        stream.

        Loops until a non-skipped token is matched or EOF is reached;
        recognition errors are reported and recovered from in place.
        """
        while 1:
            # start a fresh token: remember where it begins in the stream
            self._state.token = None
            self._state.channel = DEFAULT_CHANNEL
            self._state.tokenStartCharIndex = self.input.index()
            self._state.tokenStartCharPositionInLine = self.input.charPositionInLine
            self._state.tokenStartLine = self.input.line
            self._state.text = None
            if self.input.LA(1) == EOF:
                return EOF_TOKEN

            try:
                self.mTokens()

                if self._state.token is None:
                    # rule matched but did not emit: create the token now
                    self.emit()

                elif self._state.token == SKIP_TOKEN:
                    # rule called skip(): look for the next token instead
                    continue

                return self._state.token

            except NoViableAltException, re:
                self.reportError(re)
                self.recover(re) # throw out current char and try again

            except RecognitionException, re:
                self.reportError(re)
                # match() routine has already called recover()

    def skip(self):
        """
        Instruct the lexer to skip creating a token for current lexer rule
        and look for another token.  nextToken() knows to keep looking when
        a lexer rule finishes with token set to SKIP_TOKEN.  Recall that
        if token==null at end of any token rule, it creates one for you
        and emits it.
        """
        self._state.token = SKIP_TOKEN

    def mTokens(self):
        """This is the lexer entry point that sets instance var 'token'"""

        # abstract method
        raise NotImplementedError

    def setCharStream(self, input):
        """Set the char stream and reset the lexer"""
        # clear input first so reset() does not rewind the old stream twice
        self.input = None
        self.reset()
        self.input = input

    def getSourceName(self):
        return self.input.getSourceName()

    def emit(self, token=None):
        """
        The standard method called to automatically emit a token at the
        outermost lexical rule.  The token object should point into the
        char buffer start..stop.  If there is a text override in 'text',
        use that to set the token's text.  Override this method to emit
        custom Token objects.

        If you are building trees, then you should also override
        Parser or TreeParser.getMissingSymbol().

        Returns the token that was stored into self._state.token.
        """
        if token is None:
            token = CommonToken(
                input=self.input,
                type=self._state.type,
                channel=self._state.channel,
                start=self._state.tokenStartCharIndex,
                stop=self.getCharIndex()-1
                )
            token.line = self._state.tokenStartLine
            token.text = self._state.text
            token.charPositionInLine = self._state.tokenStartCharPositionInLine

        self._state.token = token

        return token

    def match(self, s):
        """Match a character code or every char of a string against input.

        While backtracking a mismatch raises BacktrackingFailed; otherwise
        a MismatchedTokenException is raised after recover() is invoked.
        """
        if isinstance(s, basestring):
            for c in s:
                if self.input.LA(1) != ord(c):
                    if self._state.backtracking > 0:
                        raise BacktrackingFailed

                    mte = MismatchedTokenException(c, self.input)
                    self.recover(mte)
                    raise mte

                self.input.consume()

        else:
            if self.input.LA(1) != s:
                if self._state.backtracking > 0:
                    raise BacktrackingFailed

                mte = MismatchedTokenException(unichr(s), self.input)
                self.recover(mte) # don't really recover; just consume in lexer
                raise mte

            self.input.consume()

    def matchAny(self):
        # wildcard '.' in a lexer rule: accept whatever is next
        self.input.consume()

    def matchRange(self, a, b):
        """Match one char in the inclusive range a..b (char codes)."""
        if self.input.LA(1) < a or self.input.LA(1) > b:
            if self._state.backtracking > 0:
                raise BacktrackingFailed

            mre = MismatchedRangeException(unichr(a), unichr(b), self.input)
            self.recover(mre)
            raise mre

        self.input.consume()

    def getLine(self):
        """Current line number in the char stream (1-based in ANTLR)."""
        return self.input.line

    def getCharPositionInLine(self):
        """Current character column within the line."""
        return self.input.charPositionInLine

    def getCharIndex(self):
        """What is the index of the current character of lookahead?"""
        return self.input.index()

    def getText(self):
        """
        Return the text matched so far for the current token or any
        text override.
        """
        if self._state.text is not None:
            return self._state.text

        return self.input.substring(
            self._state.tokenStartCharIndex,
            self.getCharIndex()-1
            )

    def setText(self, text):
        """
        Set the complete text of this token; it wipes any previous
        changes to the text.
        """
        self._state.text = text

    # expose getText/setText as a 'text' attribute
    text = property(getText, setText)

    def reportError(self, e):
        """Report a recognition error; no error-recovery state kept yet."""
        ## TODO: not thought about recovery in lexer yet.

        ## # if we've already reported an error and have not matched a token
        ## # yet successfully, don't report any errors.
        ## if self.errorRecovery:
        ##     #System.err.print("[SPURIOUS] ");
        ##     return;
        ##
        ## self.errorRecovery = True

        self.displayRecognitionError(self.tokenNames, e)

    def getErrorMessage(self, e, tokenNames):
        """Build a char-oriented error message for each exception kind."""
        msg = None

        if isinstance(e, MismatchedTokenException):
            msg = "mismatched character " \
                  + self.getCharErrorDisplay(e.c) \
                  + " expecting " \
                  + self.getCharErrorDisplay(e.expecting)

        elif isinstance(e, NoViableAltException):
            msg = "no viable alternative at character " \
                  + self.getCharErrorDisplay(e.c)

        elif isinstance(e, EarlyExitException):
            msg = "required (...)+ loop did not match anything at character " \
                  + self.getCharErrorDisplay(e.c)

        elif isinstance(e, MismatchedNotSetException):
            msg = "mismatched character " \
                  + self.getCharErrorDisplay(e.c) \
                  + " expecting set " \
                  + repr(e.expecting)

        elif isinstance(e, MismatchedSetException):
            msg = "mismatched character " \
                  + self.getCharErrorDisplay(e.c) \
                  + " expecting set " \
                  + repr(e.expecting)

        elif isinstance(e, MismatchedRangeException):
            msg = "mismatched character " \
                  + self.getCharErrorDisplay(e.c) \
                  + " expecting set " \
                  + self.getCharErrorDisplay(e.a) \
                  + ".." \
                  + self.getCharErrorDisplay(e.b)

        else:
            msg = BaseRecognizer.getErrorMessage(self, e, tokenNames)

        return msg

    def getCharErrorDisplay(self, c):
        """Printable form of a character for error messages."""
        if c == EOF:
            c = '<EOF>'
        return repr(c)

    def recover(self, re):
        """
        Lexers can normally match any char in it's vocabulary after matching
        a token, so do the easy thing and just kill a character and hope
        it all works out.  You can instead use the rule invocation stack
        to do sophisticated error recovery if you are in a fragment rule.
        """
        self.input.consume()

    def traceIn(self, ruleName, ruleIndex):
        # describe the current lookahead char and position, then delegate
        inputSymbol = "%s line=%d:%s" % (self.input.LT(1),
                                         self.getLine(),
                                         self.getCharPositionInLine()
                                         )

        BaseRecognizer.traceIn(self, ruleName, ruleIndex, inputSymbol)

    def traceOut(self, ruleName, ruleIndex):
        # describe the current lookahead char and position, then delegate
        inputSymbol = "%s line=%d:%s" % (self.input.LT(1),
                                         self.getLine(),
                                         self.getCharPositionInLine()
                                         )

        BaseRecognizer.traceOut(self, ruleName, ruleIndex, inputSymbol)
class Parser(BaseRecognizer):
    """
    @brief Baseclass for generated parser classes.

    Draws tokens from a TokenStream produced by a lexer.
    """

    def __init__(self, lexer, state=None):
        BaseRecognizer.__init__(self, state)
        self.setTokenStream(lexer)

    def reset(self):
        """Reset shared recognizer state, then rewind the token stream."""
        BaseRecognizer.reset(self) # reset all recognizer state variables
        if self.input is not None:
            self.input.seek(0) # rewind the input

    def getCurrentInputSymbol(self, input):
        """The current symbol for a token parser is the lookahead token."""
        return input.LT(1)

    def getMissingSymbol(self, input, e, expectedTokenType, follow):
        """Fabricate a placeholder token for single-token insertion."""
        if expectedTokenType == EOF:
            tokenText = "<missing EOF>"
        else:
            tokenText = "<missing " + self.tokenNames[expectedTokenType] + ">"

        tok = CommonToken(type=expectedTokenType, text=tokenText)

        # anchor the fabricated token at the current (or previous) token
        anchor = input.LT(1)
        if anchor.type == EOF:
            anchor = input.LT(-1)

        if anchor is not None:
            tok.line = anchor.line
            tok.charPositionInLine = anchor.charPositionInLine
        tok.channel = DEFAULT_CHANNEL
        return tok

    def setTokenStream(self, input):
        """Set the token stream and reset the parser."""
        # clear first so reset() does not rewind the stream being replaced
        self.input = None
        self.reset()
        self.input = input

    def getTokenStream(self):
        return self.input

    def getSourceName(self):
        return self.input.getSourceName()

    def traceIn(self, ruleName, ruleIndex):
        BaseRecognizer.traceIn(self, ruleName, ruleIndex, self.input.LT(1))

    def traceOut(self, ruleName, ruleIndex):
        BaseRecognizer.traceOut(self, ruleName, ruleIndex, self.input.LT(1))
class RuleReturnScope(object):
    """
    Rules can return start/stop info as well as possible trees and
    templates.  Every accessor defaults to None.
    """

    def getStart(self):
        """Return the start token or tree."""
        return None

    def getStop(self):
        """Return the stop token or tree."""
        return None

    def getTree(self):
        """Has a value potentially if output=AST."""
        return None

    def getTemplate(self):
        """Has a value potentially if output=template."""
        return None
class ParserRuleReturnScope(RuleReturnScope):
    """
    Aggregate return value for rules that yield more than one value.

    Besides the properties defined in
    RuleLabelScope.predefinedRulePropertiesScope there may be
    user-defined return values; this class supplies only the minimum
    (start/stop tokens) that are always present.

    Note that .text is not an actual property of the return value: it is
    computed from start and stop via the input stream's toString().
    Storing the input stream here was considered and rejected — reading
    .text would be undefined anyway if the rule matched tokens from
    multiple input streams.

    Fields are public rather than wrapped in getters; the getters exist
    only to satisfy the RuleReturnScope interface.
    """

    def __init__(self):
        self.start = None
        self.stop = None

    def getStart(self):
        return self.start

    def getStop(self):
        return self.stop
| Python |
"""ANTLR3 runtime package"""
# begin[licence]
#
# [The "BSD licence"]
# Copyright (c) 2005-2008 Terence Parr
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# end[licence]
import sys
import optparse
import antlr3
class _Main(object):
    """Common scaffolding for generated command-line driver classes.

    Parses common options and feeds input — from a literal string, a
    file, stdin or an interactive prompt — into parseStream(), optionally
    under a profiler.
    """

    def __init__(self):
        # streams are attributes so tests/subclasses can substitute fakes
        self.stdin = sys.stdin
        self.stdout = sys.stdout
        self.stderr = sys.stderr

    def parseOptions(self, argv):
        """Build the common option set, let subclasses extend it, parse."""
        optParser = optparse.OptionParser()
        optParser.add_option(
            "--encoding",
            action="store",
            type="string",
            dest="encoding"
            )
        optParser.add_option(
            "--input",
            action="store",
            type="string",
            dest="input"
            )
        optParser.add_option(
            "--interactive", "-i",
            action="store_true",
            dest="interactive"
            )
        optParser.add_option(
            "--no-output",
            action="store_true",
            dest="no_output"
            )
        optParser.add_option(
            "--profile",
            action="store_true",
            dest="profile"
            )
        optParser.add_option(
            "--hotshot",
            action="store_true",
            dest="hotshot"
            )

        # subclass hook for extra options
        self.setupOptions(optParser)

        return optParser.parse_args(argv[1:])

    def setupOptions(self, optParser):
        """Hook: subclasses add their own options here."""
        pass

    def execute(self, argv):
        """Entry point: parse options, choose an input source, run."""
        options, args = self.parseOptions(argv)

        self.setUp(options)

        if options.interactive:
            # read-eval loop: one parse per entered line
            while True:
                try:
                    input = raw_input(">>> ")
                except (EOFError, KeyboardInterrupt):
                    self.stdout.write("\nBye.\n")
                    break

                inStream = antlr3.ANTLRStringStream(input)
                self.parseStream(options, inStream)

        else:
            # precedence: --input literal, then a file argument, then stdin
            if options.input is not None:
                inStream = antlr3.ANTLRStringStream(options.input)

            elif len(args) == 1 and args[0] != '-':
                inStream = antlr3.ANTLRFileStream(
                    args[0], encoding=options.encoding
                    )

            else:
                inStream = antlr3.ANTLRInputStream(
                    self.stdin, encoding=options.encoding
                    )

            if options.profile:
                # cProfile when available, pure-Python profile otherwise
                try:
                    import cProfile as profile
                except ImportError:
                    import profile

                profile.runctx(
                    'self.parseStream(options, inStream)',
                    globals(),
                    locals(),
                    'profile.dat'
                    )

                import pstats
                stats = pstats.Stats('profile.dat')
                stats.strip_dirs()
                stats.sort_stats('time')
                stats.print_stats(100)

            elif options.hotshot:
                import hotshot

                profiler = hotshot.Profile('hotshot.dat')
                profiler.runctx(
                    'self.parseStream(options, inStream)',
                    globals(),
                    locals()
                    )

            else:
                self.parseStream(options, inStream)

    def setUp(self, options):
        """Hook: subclasses resolve classes etc. from options here."""
        pass

    def parseStream(self, options, inStream):
        """Abstract: subclasses drive the lexer/parser/walker here."""
        raise NotImplementedError

    def write(self, options, text):
        # honor --no-output
        if not options.no_output:
            self.stdout.write(text)

    def writeln(self, options, text):
        self.write(options, text + '\n')
class LexerMain(_Main):
    """Driver that tokenizes the input and prints each token."""

    def __init__(self, lexerClass):
        _Main.__init__(self)
        self.lexerClass = lexerClass

    def parseStream(self, options, inStream):
        # iterating a lexer yields tokens until EOF
        lexer = self.lexerClass(inStream)
        for tok in lexer:
            self.writeln(options, str(tok))
class ParserMain(_Main):
    """Driver that runs a parser rule and prints the result (or its AST)."""

    def __init__(self, lexerClassName, parserClass):
        _Main.__init__(self)
        self.lexerClassName = lexerClassName
        self.lexerClass = None
        self.parserClass = parserClass

    def setupOptions(self, optParser):
        optParser.add_option(
            "--lexer",
            action="store",
            type="string",
            dest="lexerClass",
            default=self.lexerClassName
            )
        optParser.add_option(
            "--rule",
            action="store",
            type="string",
            dest="parserRule"
            )

    def setUp(self, options):
        # resolve the lexer class from the module of the same name
        lexerMod = __import__(options.lexerClass)
        self.lexerClass = getattr(lexerMod, options.lexerClass)

    def parseStream(self, options, inStream):
        lexer = self.lexerClass(inStream)
        tokenStream = antlr3.CommonTokenStream(lexer)
        parser = self.parserClass(tokenStream)
        result = getattr(parser, options.parserRule)()

        if result is None:
            return

        if hasattr(result, 'tree'):
            if result.tree is not None:
                self.writeln(options, result.tree.toStringTree())
        else:
            self.writeln(options, repr(result))
class WalkerMain(_Main):
    """Driver that parses input, then runs a tree-walker rule on the AST."""

    def __init__(self, walkerClass):
        _Main.__init__(self)
        self.lexerClass = None
        self.parserClass = None
        self.walkerClass = walkerClass

    def setupOptions(self, optParser):
        optParser.add_option(
            "--lexer",
            action="store",
            type="string",
            dest="lexerClass",
            default=None
            )
        optParser.add_option(
            "--parser",
            action="store",
            type="string",
            dest="parserClass",
            default=None
            )
        optParser.add_option(
            "--parser-rule",
            action="store",
            type="string",
            dest="parserRule",
            default=None
            )
        optParser.add_option(
            "--rule",
            action="store",
            type="string",
            dest="walkerRule"
            )

    def setUp(self, options):
        # resolve lexer and parser classes from modules of the same names
        lexerMod = __import__(options.lexerClass)
        self.lexerClass = getattr(lexerMod, options.lexerClass)
        parserMod = __import__(options.parserClass)
        self.parserClass = getattr(parserMod, options.parserClass)

    def parseStream(self, options, inStream):
        lexer = self.lexerClass(inStream)
        tokenStream = antlr3.CommonTokenStream(lexer)
        parser = self.parserClass(tokenStream)
        result = getattr(parser, options.parserRule)()

        if result is None:
            return

        assert hasattr(result, 'tree'), "Parser did not return an AST"
        nodeStream = antlr3.tree.CommonTreeNodeStream(result.tree)
        nodeStream.setTokenStream(tokenStream)
        walker = self.walkerClass(nodeStream)
        walked = getattr(walker, options.walkerRule)()

        if walked is None:
            return

        if hasattr(walked, 'tree'):
            self.writeln(options, walked.tree.toStringTree())
        else:
            self.writeln(options, repr(walked))
| Python |
"""ANTLR3 runtime package"""
# begin[licence]
#
# [The "BSD licence"]
# Copyright (c) 2005-2008 Terence Parr
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# end[licence]
# Sentinel char/token value meaning "end of input".
EOF = -1

## All tokens go to the parser (unless skip() is called in that rule)
# on a particular "channel".  The parser tunes to a particular channel
# so that whitespace etc... can go to the parser on a "hidden" channel.
DEFAULT_CHANNEL = 0

## Anything on different channel than DEFAULT_CHANNEL is not parsed
# by parser.
HIDDEN_CHANNEL = 99

# Predefined token types

# "end of rule" imaginary token type
EOR_TOKEN_TYPE = 1

##
# imaginary tree navigation type; traverse "get child" link
DOWN = 2

##
# imaginary tree navigation type; finish with a child list
UP = 3

# First token type available for real grammar tokens (types below this
# are reserved for the imaginary types above).
MIN_TOKEN_TYPE = UP+1

# Token type for "no token type assigned yet".
INVALID_TOKEN_TYPE = 0
| Python |
""" @package antlr3.tree
@brief ANTLR3 runtime package, tree module
This module contains all support classes for AST construction and tree parsers.
"""
# begin[licence]
#
# [The "BSD licence"]
# Copyright (c) 2005-2008 Terence Parr
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# end[licence]
# lots of docstrings are missing, don't complain for now...
# pylint: disable-msg=C0111
from antlr3.constants import UP, DOWN, EOF, INVALID_TOKEN_TYPE
from antlr3.recognizers import BaseRecognizer, RuleReturnScope
from antlr3.streams import IntStream
from antlr3.tokens import CommonToken, Token, INVALID_TOKEN
from antlr3.exceptions import MismatchedTreeNodeException, \
MissingTokenException, UnwantedTokenException, MismatchedTokenException, \
NoViableAltException
############################################################################
#
# tree related exceptions
#
############################################################################
class RewriteCardinalityException(RuntimeError):
    """
    @brief Base class for all exceptions thrown during AST rewrite
    construction.

    Signals that two or more elements in a subrule have different
    cardinalities: e.g. (ID INT)+ where |ID| != |INT|.
    """

    def __init__(self, elementDescription):
        RuntimeError.__init__(self, elementDescription)
        # kept so getMessage() can report which element mismatched
        self.elementDescription = elementDescription

    def getMessage(self):
        return self.elementDescription
class RewriteEarlyExitException(RewriteCardinalityException):
    """@brief No elements within a (...)+ in a rewrite rule"""

    def __init__(self, elementDescription=None):
        super(RewriteEarlyExitException, self).__init__(elementDescription)
class RewriteEmptyStreamException(RewriteCardinalityException):
    """
    @brief A reference to ID or expr was made, but the ID token stream is
    empty / the expr stream holds no subtrees.
    """

    pass
############################################################################
#
# basic Tree and TreeAdaptor interfaces
#
############################################################################
class Tree(object):
    """
    @brief Abstract baseclass for tree nodes.

    Defines what a tree looks like to ANTLR's support classes (e.g.
    CommonTreeNodeStream).  Your trees need not implement this interface,
    but doing so unlocks more of the support code.  ANTLR can build any
    node kind — even Token objects, if you give them a child list.

    This node carries no payload: just navigation and factory hooks.
    Every method here is abstract and raises NotImplementedError.
    """

    def getChild(self, i):
        raise NotImplementedError

    def getChildCount(self):
        raise NotImplementedError

    def getParent(self):
        """Tree tracks parent and child index now > 3.0"""
        raise NotImplementedError

    def setParent(self, t):
        """Tree tracks parent and child index now > 3.0"""
        raise NotImplementedError

    def getChildIndex(self):
        """Which child (0..n-1) of its parent is this node?"""
        raise NotImplementedError

    def setChildIndex(self, index):
        """Record which child (0..n-1) of its parent this node is."""
        raise NotImplementedError

    def freshenParentAndChildIndexes(self):
        """Set the parent and child index values for all children."""
        raise NotImplementedError

    def addChild(self, t):
        """
        Add t as a child of this node.  A null t is ignored; a nil t
        contributes all of its children instead of itself.
        """
        raise NotImplementedError

    def setChild(self, i, t):
        """Set ith child (0..n-1) to t; t must be non-null and non-nil."""
        raise NotImplementedError

    def deleteChild(self, i):
        raise NotImplementedError

    def replaceChildren(self, startChildIndex, stopChildIndex, t):
        """
        Delete children start..stop and splice in t, even when t is a
        list (nil-rooted tree); the child count may grow or shrink.
        For huge child lists this can be slow, since the remaining
        children's child indexes must be rewritten.
        """
        raise NotImplementedError

    def isNil(self):
        """
        True for a nil node: it may still have children, making the
        tree a flat list.
        """
        raise NotImplementedError

    def getTokenStartIndex(self):
        """
        Smallest token index (from 0) covered by this node and its
        children.
        """
        raise NotImplementedError

    def setTokenStartIndex(self, index):
        raise NotImplementedError

    def getTokenStopIndex(self):
        """
        Largest token index (from 0) covered by this node and its
        children.
        """
        raise NotImplementedError

    def setTokenStopIndex(self, index):
        raise NotImplementedError

    def dupNode(self):
        raise NotImplementedError

    def getType(self):
        """Return a token type; needed for tree parsing."""
        raise NotImplementedError

    def getText(self):
        raise NotImplementedError

    def getLine(self):
        """Line to report in errors when there is no token payload."""
        raise NotImplementedError

    def getCharPositionInLine(self):
        raise NotImplementedError

    def toStringTree(self):
        raise NotImplementedError

    def toString(self):
        raise NotImplementedError
class TreeAdaptor(object):
    """
    @brief Abstract baseclass for tree adaptors.

    How to create and navigate trees.  Rather than have a separate factory
    and adaptor, I've merged them.  Makes sense to encapsulate.

    This takes the place of the tree construction code generated in the
    generated code in 2.x and the ASTFactory.

    I do not need to know the type of a tree at all so they are all
    generic Objects.  This may increase the amount of typecasting needed. :(

    All methods here are abstract (raise NotImplementedError); see
    BaseTreeAdaptor / CommonTreeAdaptor for concrete implementations.
    """

    # C o n s t r u c t i o n

    def createWithPayload(self, payload):
        """
        Create a tree node from Token object; for CommonTree type trees,
        then the token just becomes the payload.  This is the most
        common create call.

        Override if you want another kind of node to be built.
        """
        raise NotImplementedError

    def dupNode(self, treeNode):
        """Duplicate a single tree node.

        Override if you want another kind of node to be built."""
        raise NotImplementedError

    def dupTree(self, tree):
        """Duplicate tree recursively, using dupNode() for each node"""
        raise NotImplementedError

    def nil(self):
        """
        Return a nil node (an empty but non-null node) that can hold
        a list of element as the children.  If you want a flat tree (a list)
        use "t=adaptor.nil(); t.addChild(x); t.addChild(y);"
        """
        raise NotImplementedError

    def errorNode(self, input, start, stop, exc):
        """
        Return a tree node representing an error.  This node records the
        tokens consumed during error recovery.  The start token indicates the
        input symbol at which the error was detected.  The stop token indicates
        the last symbol consumed during recovery.

        You must specify the input stream so that the erroneous text can
        be packaged up in the error node.  The exception could be useful
        to some applications; default implementation stores ptr to it in
        the CommonErrorNode.

        This only makes sense during token parsing, not tree parsing.
        Tree parsing should happen only when parsing and tree construction
        succeed.
        """
        raise NotImplementedError

    def isNil(self, tree):
        """Is tree considered a nil node used to make lists of child nodes?"""
        raise NotImplementedError

    def addChild(self, t, child):
        """
        Add a child to the tree t.  If child is a flat tree (a list), make all
        in list children of t.  Warning: if t has no children, but child does
        and child isNil then you can decide it is ok to move children to t via
        t.children = child.children; i.e., without copying the array.  Just
        make sure that this is consistent with have the user will build
        ASTs.  Do nothing if t or child is null.
        """
        raise NotImplementedError

    def becomeRoot(self, newRoot, oldRoot):
        """
        If oldRoot is a nil root, just copy or move the children to newRoot.
        If not a nil root, make oldRoot a child of newRoot.

          old=^(nil a b c), new=r yields ^(r a b c)
          old=^(a b c), new=r yields ^(r ^(a b c))

        If newRoot is a nil-rooted single child tree, use the single
        child as the new root node.

          old=^(nil a b c), new=^(nil r) yields ^(r a b c)
          old=^(a b c), new=^(nil r) yields ^(r ^(a b c))

        If oldRoot was null, it's ok, just return newRoot (even if isNil).

          old=null, new=r yields r
          old=null, new=^(nil r) yields ^(nil r)

        Return newRoot.  Throw an exception if newRoot is not a
        simple node or nil root with a single child node--it must be a root
        node.  If newRoot is ^(nil x) return x as newRoot.

        Be advised that it's ok for newRoot to point at oldRoot's
        children; i.e., you don't have to copy the list.  We are
        constructing these nodes so we should have this control for
        efficiency.
        """
        raise NotImplementedError

    def rulePostProcessing(self, root):
        """
        Given the root of the subtree created for this rule, post process
        it to do any simplifications or whatever you want.  A required
        behavior is to convert ^(nil singleSubtree) to singleSubtree
        as the setting of start/stop indexes relies on a single non-nil root
        for non-flat trees.

        Flat trees such as for lists like "idlist : ID+ ;" are left alone
        unless there is only one ID.  For a list, the start/stop indexes
        are set in the nil node.

        This method is executed after all rule tree construction and right
        before setTokenBoundaries().
        """
        raise NotImplementedError

    def getUniqueID(self, node):
        """For identifying trees.

        How to identify nodes so we can say "add node to a prior node"?
        Even becomeRoot is an issue.  Use System.identityHashCode(node)
        usually.
        """
        raise NotImplementedError

    # R e w r i t e  R u l e s

    def createFromToken(self, tokenType, fromToken, text=None):
        """
        Create a new node derived from a token, with a new token type and
        (optionally) new text.

        This is invoked from an imaginary node ref on right side of a
        rewrite rule as IMAG[$tokenLabel] or IMAG[$tokenLabel "IMAG"].

        This should invoke createToken(Token).
        """
        raise NotImplementedError

    def createFromType(self, tokenType, text):
        """Create a new node derived from a token, with a new token type.

        This is invoked from an imaginary node ref on right side of a
        rewrite rule as IMAG["IMAG"].

        This should invoke createToken(int,String).
        """
        raise NotImplementedError

    # C o n t e n t

    def getType(self, t):
        """For tree parsing, I need to know the token type of a node"""
        raise NotImplementedError

    def setType(self, t, type):
        """Node constructors can set the type of a node"""
        raise NotImplementedError

    def getText(self, t):
        """Return the text of node t."""
        raise NotImplementedError

    def setText(self, t, text):
        """Node constructors can set the text of a node"""
        raise NotImplementedError

    def getToken(self, t):
        """Return the token object from which this node was created.

        Currently used only for printing an error message.
        The error display routine in BaseRecognizer needs to
        display where the input the error occurred. If your
        tree of limitation does not store information that can
        lead you to the token, you can create a token filled with
        the appropriate information and pass that back.  See
        BaseRecognizer.getErrorMessage().
        """
        raise NotImplementedError

    def setTokenBoundaries(self, t, startToken, stopToken):
        """
        Where are the bounds in the input token stream for this node and
        all children?  Each rule that creates AST nodes will call this
        method right before returning.  Flat trees (i.e., lists) will
        still usually have a nil root node just to hold the children list.
        That node would contain the start/stop indexes then.
        """
        raise NotImplementedError

    def getTokenStartIndex(self, t):
        """
        Get the token start index for this subtree; return -1 if no such index
        """
        raise NotImplementedError

    def getTokenStopIndex(self, t):
        """
        Get the token stop index for this subtree; return -1 if no such index
        """
        raise NotImplementedError

    # N a v i g a t i o n  /  T r e e  P a r s i n g

    def getChild(self, t, i):
        """Get a child 0..n-1 node"""
        raise NotImplementedError

    def setChild(self, t, i, child):
        """Set ith child (0..n-1) to t; t must be non-null and non-nil node"""
        raise NotImplementedError

    def deleteChild(self, t, i):
        """Remove ith child and shift children down from right."""
        raise NotImplementedError

    def getChildCount(self, t):
        """How many children?  If 0, then this is a leaf node"""
        raise NotImplementedError

    def getParent(self, t):
        """
        Who is the parent node of this node; if null, implies node is root.
        If your node type doesn't handle this, it's ok but the tree rewrites
        in tree parsers need this functionality.
        """
        raise NotImplementedError

    def setParent(self, t, parent):
        """
        Who is the parent node of this node; if null, implies node is root.
        If your node type doesn't handle this, it's ok but the tree rewrites
        in tree parsers need this functionality.
        """
        raise NotImplementedError

    def getChildIndex(self, t):
        """
        What index is this node in the child list? Range: 0..n-1
        If your node type doesn't handle this, it's ok but the tree rewrites
        in tree parsers need this functionality.
        """
        raise NotImplementedError

    def setChildIndex(self, t, index):
        """
        What index is this node in the child list? Range: 0..n-1
        If your node type doesn't handle this, it's ok but the tree rewrites
        in tree parsers need this functionality.
        """
        raise NotImplementedError

    def replaceChildren(self, parent, startChildIndex, stopChildIndex, t):
        """
        Replace from start to stop child index of parent with t, which might
        be a list.  Number of children may be different
        after this call.

        If parent is null, don't do anything; must be at root of overall tree.
        Can't replace whatever points to the parent externally.  Do nothing.
        """
        raise NotImplementedError

    # Misc

    def create(self, *args):
        """
        Deprecated, use createWithPayload, createFromToken or createFromType.
        This method only exists to mimic the Java interface of TreeAdaptor.

        Dispatches on the runtime types of *args to emulate Java's
        overloaded create() signatures.
        """

        if len(args) == 1 and isinstance(args[0], Token):
            # Object create(Token payload);
##             warnings.warn(
##                 "Using create() is deprecated, use createWithPayload()",
##                 DeprecationWarning,
##                 stacklevel=2
##                 )
            return self.createWithPayload(args[0])

        if (len(args) == 2
            and isinstance(args[0], (int, long))
            and isinstance(args[1], Token)
            ):
            # Object create(int tokenType, Token fromToken);
##             warnings.warn(
##                 "Using create() is deprecated, use createFromToken()",
##                 DeprecationWarning,
##                 stacklevel=2
##                 )
            return self.createFromToken(args[0], args[1])

        if (len(args) == 3
            and isinstance(args[0], (int, long))
            and isinstance(args[1], Token)
            and isinstance(args[2], basestring)
            ):
            # Object create(int tokenType, Token fromToken, String text);
##             warnings.warn(
##                 "Using create() is deprecated, use createFromToken()",
##                 DeprecationWarning,
##                 stacklevel=2
##                 )
            return self.createFromToken(args[0], args[1], args[2])

        if (len(args) == 2
            and isinstance(args[0], (int, long))
            and isinstance(args[1], basestring)
            ):
            # Object create(int tokenType, String text);
##             warnings.warn(
##                 "Using create() is deprecated, use createFromType()",
##                 DeprecationWarning,
##                 stacklevel=2
##                 )
            return self.createFromType(args[0], args[1])

        raise TypeError(
            "No create method with this signature found: %s"
            % (', '.join(type(v).__name__ for v in args))
            )
############################################################################
#
# base implementation of Tree and TreeAdaptor
#
# Tree
# \- BaseTree
#
# TreeAdaptor
# \- BaseTreeAdaptor
#
############################################################################
class BaseTree(Tree):
    """
    @brief A generic tree implementation with no payload.

    You must subclass to
    actually have any user data.  ANTLR v3 uses a list of children approach
    instead of the child-sibling approach in v2.  A flat tree (a list) is
    an empty node whose children represent the list.  An empty, but
    non-null node is called "nil".
    """

    # BaseTree is abstract, no need to complain about not implemented abstract
    # methods
    # pylint: disable-msg=W0223

    def __init__(self, node=None):
        """
        Create a new node from an existing node does nothing for BaseTree
        as there are no fields other than the children list, which cannot
        be copied as the children are not considered part of this node.
        """

        Tree.__init__(self)
        self.children = []
        self.parent = None
        self.childIndex = 0

    def getChild(self, i):
        """Return the ith child or None if the index is out of range."""
        try:
            return self.children[i]
        except IndexError:
            return None

    def getChildren(self):
        """@brief Get the children internal List

        Note that if you directly mess with
        the list, do so at your own risk.
        """

        # FIXME: mark as deprecated
        return self.children

    def getFirstChildWithType(self, treeType):
        """Return the first child whose token type equals treeType, or None."""
        for child in self.children:
            if child.getType() == treeType:
                return child

        return None

    def getChildCount(self):
        """Number of direct children of this node."""
        return len(self.children)

    def addChild(self, childTree):
        """Add t as child of this node.

        Warning: if t has no children, but child does
        and child isNil then this routine moves children to t via
        t.children = child.children; i.e., without copying the array.
        """

        # this implementation is much simpler and probably less efficient
        # than the mumbo-jumbo that Ter did for the Java runtime.

        if childTree is None:
            return

        if childTree.isNil():
            # t is an empty node possibly with children

            if self.children is childTree.children:
                raise ValueError("attempt to add child list to itself")

            # fix parent pointer and childIndex for new children
            for idx, child in enumerate(childTree.children):
                child.parent = self
                child.childIndex = len(self.children) + idx

            self.children += childTree.children

        else:
            # child is not nil (don't care about children)
            self.children.append(childTree)
            childTree.parent = self
            childTree.childIndex = len(self.children) - 1

    def addChildren(self, children):
        """Add all elements of kids list as children of this node"""
        # NOTE(review): unlike addChild(), this does not update the
        # children's parent/childIndex fields -- matches the upstream
        # runtime, so left unchanged.
        self.children += children

    def setChild(self, i, t):
        """Set the ith child to t; t must be a non-nil node."""
        if t is None:
            return

        if t.isNil():
            raise ValueError("Can't set single child to a list")

        self.children[i] = t
        t.parent = self
        t.childIndex = i

    def deleteChild(self, i):
        """Remove and return the ith child, re-indexing the rest."""
        killed = self.children[i]

        del self.children[i]

        # walk rest and decrement their child indexes
        for idx, child in enumerate(self.children[i:]):
            child.childIndex = i + idx

        return killed

    def replaceChildren(self, startChildIndex, stopChildIndex, newTree):
        """
        Delete children from start to stop and replace with t even if t is
        a list (nil-root tree).  num of children can increase or decrease.
        For huge child lists, inserting children can force walking rest of
        children to set their childindex; could be slow.
        """

        if (startChildIndex >= len(self.children)
            or stopChildIndex >= len(self.children)
            ):
            raise IndexError("indexes invalid")

        replacingHowMany = stopChildIndex - startChildIndex + 1

        # normalize to a list of children to add: newChildren
        if newTree.isNil():
            newChildren = newTree.children

        else:
            newChildren = [newTree]

        replacingWithHowMany = len(newChildren)
        delta = replacingHowMany - replacingWithHowMany

        if delta == 0:
            # if same number of nodes, do direct replace
            for idx, child in enumerate(newChildren):
                self.children[idx + startChildIndex] = child
                child.parent = self
                child.childIndex = idx + startChildIndex

        else:
            # length of children changes...

            # ...delete replaced segment...
            del self.children[startChildIndex:stopChildIndex+1]

            # ...insert new segment...
            self.children[startChildIndex:startChildIndex] = newChildren

            # ...and fix indeces
            self.freshenParentAndChildIndexes(startChildIndex)

    def isNil(self):
        """A plain BaseTree is never nil; subclasses override."""
        return False

    def freshenParentAndChildIndexes(self, offset=0):
        """Recompute parent pointer and childIndex for children[offset:]."""
        for idx, child in enumerate(self.children[offset:]):
            child.childIndex = idx + offset
            child.parent = self

    def sanityCheckParentAndChildIndexes(self, parent=None, i=-1):
        """Recursively verify parent/childIndex bookkeeping; raises ValueError."""
        if parent != self.parent:
            raise ValueError(
                "parents don't match; expected %r found %r"
                % (parent, self.parent)
                )

        if i != self.childIndex:
            raise ValueError(
                "child indexes don't match; expected %d found %d"
                % (i, self.childIndex)
                )

        for idx, child in enumerate(self.children):
            child.sanityCheckParentAndChildIndexes(self, idx)

    def getChildIndex(self):
        """BaseTree doesn't track child indexes."""
        return 0

    def setChildIndex(self, index):
        """BaseTree doesn't track child indexes."""
        pass

    def getParent(self):
        """BaseTree doesn't track parent pointers."""
        return None

    def setParent(self, t):
        """BaseTree doesn't track parent pointers."""
        pass

    def toStringTree(self):
        """Print out a whole tree not just a node"""

        if len(self.children) == 0:
            return self.toString()

        buf = []
        if not self.isNil():
            buf.append('(')
            buf.append(self.toString())
            buf.append(' ')

        for i, child in enumerate(self.children):
            if i > 0:
                buf.append(' ')
            buf.append(child.toStringTree())

        if not self.isNil():
            buf.append(')')

        return ''.join(buf)

    def getLine(self):
        """No token payload at this level; report line 0."""
        return 0

    def getCharPositionInLine(self):
        """No token payload at this level; report column 0."""
        return 0

    def toString(self):
        """Override to say how a node (not a tree) should look as text"""
        raise NotImplementedError
class BaseTreeAdaptor(TreeAdaptor):
    """
    @brief A TreeAdaptor that works with any Tree implementation.
    """

    # BaseTreeAdaptor is abstract, no need to complain about not implemented
    # abstract methods
    # pylint: disable-msg=W0223

    def nil(self):
        """Return a nil node: a node wrapping a None payload."""
        return self.createWithPayload(None)

    def errorNode(self, input, start, stop, exc):
        """
        create tree node that holds the start and stop tokens associated
        with an error.

        If you specify your own kind of tree nodes, you will likely have to
        override this method. CommonTree returns Token.INVALID_TOKEN_TYPE
        if no token payload but you might have to set token type for diff
        node type.
        """

        return CommonErrorNode(input, start, stop, exc)

    def isNil(self, tree):
        """Delegate the nil test to the node itself."""
        return tree.isNil()

    def dupTree(self, t, parent=None):
        """
        This is generic in the sense that it will work with any kind of
        tree (not just Tree interface).  It invokes the adaptor routines
        not the tree node routines to do the construction.
        """

        if t is None:
            return None

        newTree = self.dupNode(t)

        # ensure new subtree root has parent/child index set

        # same index in new tree
        self.setChildIndex(newTree, self.getChildIndex(t))

        self.setParent(newTree, parent)
        for i in range(self.getChildCount(t)):
            child = self.getChild(t, i)
            newSubTree = self.dupTree(child, t)
            self.addChild(newTree, newSubTree)

        return newTree

    def addChild(self, tree, child):
        """
        Add a child to the tree t.  If child is a flat tree (a list), make all
        in list children of t.  Warning: if t has no children, but child does
        and child isNil then you can decide it is ok to move children to t via
        t.children = child.children; i.e., without copying the array.  Just
        make sure that this is consistent with have the user will build
        ASTs.
        """

        #if isinstance(child, Token):
        #    child = self.createWithPayload(child)

        if tree is not None and child is not None:
            tree.addChild(child)

    def becomeRoot(self, newRoot, oldRoot):
        """
        If oldRoot is a nil root, just copy or move the children to newRoot.
        If not a nil root, make oldRoot a child of newRoot.

          old=^(nil a b c), new=r yields ^(r a b c)
          old=^(a b c), new=r yields ^(r ^(a b c))

        If newRoot is a nil-rooted single child tree, use the single
        child as the new root node.

          old=^(nil a b c), new=^(nil r) yields ^(r a b c)
          old=^(a b c), new=^(nil r) yields ^(r ^(a b c))

        If oldRoot was null, it's ok, just return newRoot (even if isNil).

          old=null, new=r yields r
          old=null, new=^(nil r) yields ^(nil r)

        Return newRoot.  Throw an exception if newRoot is not a
        simple node or nil root with a single child node--it must be a root
        node.  If newRoot is ^(nil x) return x as newRoot.

        Be advised that it's ok for newRoot to point at oldRoot's
        children; i.e., you don't have to copy the list.  We are
        constructing these nodes so we should have this control for
        efficiency.
        """

        if isinstance(newRoot, Token):
            newRoot = self.create(newRoot)

        if oldRoot is None:
            return newRoot

        if not isinstance(newRoot, CommonTree):
            newRoot = self.createWithPayload(newRoot)

        # handle ^(nil real-node)
        if newRoot.isNil():
            nc = newRoot.getChildCount()
            if nc == 1:
                newRoot = newRoot.getChild(0)

            elif nc > 1:
                # TODO: make tree run time exceptions hierarchy
                raise RuntimeError("more than one node as root")

        # add oldRoot to newRoot; addChild takes care of case where oldRoot
        # is a flat list (i.e., nil-rooted tree).  All children of oldRoot
        # are added to newRoot.
        newRoot.addChild(oldRoot)
        return newRoot

    def rulePostProcessing(self, root):
        """Transform ^(nil x) to x and nil to null"""

        if root is not None and root.isNil():
            if root.getChildCount() == 0:
                root = None

            elif root.getChildCount() == 1:
                root = root.getChild(0)
                # whoever invokes rule will set parent and child index
                root.setParent(None)
                root.setChildIndex(-1)

        return root

    def createFromToken(self, tokenType, fromToken, text=None):
        """Clone fromToken, overriding its type (and optionally text),
        then wrap the clone in a new node."""
        assert isinstance(tokenType, (int, long)), type(tokenType).__name__
        assert isinstance(fromToken, Token), type(fromToken).__name__
        assert text is None or isinstance(text, basestring), type(text).__name__

        fromToken = self.createToken(fromToken)
        fromToken.type = tokenType
        if text is not None:
            fromToken.text = text
        t = self.createWithPayload(fromToken)
        return t

    def createFromType(self, tokenType, text):
        """Create an imaginary token of the given type/text and wrap it
        in a new node."""
        assert isinstance(tokenType, (int, long)), type(tokenType).__name__
        assert isinstance(text, basestring), type(text).__name__

        fromToken = self.createToken(tokenType=tokenType, text=text)
        t = self.createWithPayload(fromToken)
        return t

    def getType(self, t):
        """Delegate to the node."""
        return t.getType()

    def setType(self, t, type):
        """Not supported for generic trees; the node type is opaque here."""
        raise RuntimeError("don't know enough about Tree node")

    def getText(self, t):
        """Delegate to the node."""
        return t.getText()

    def setText(self, t, text):
        """Not supported for generic trees; the node type is opaque here."""
        raise RuntimeError("don't know enough about Tree node")

    def getChild(self, t, i):
        """Delegate to the node."""
        return t.getChild(i)

    def setChild(self, t, i, child):
        """Delegate to the node."""
        t.setChild(i, child)

    def deleteChild(self, t, i):
        """Delegate to the node; returns the removed child."""
        return t.deleteChild(i)

    def getChildCount(self, t):
        """Delegate to the node."""
        return t.getChildCount()

    def getUniqueID(self, node):
        """Identify a node by its hash (Python analogue of
        System.identityHashCode)."""
        return hash(node)

    def createToken(self, fromToken=None, tokenType=None, text=None):
        """
        Tell me how to create a token for use with imaginary token nodes.
        For example, there is probably no input symbol associated with imaginary
        token DECL, but you need to create it as a payload or whatever for
        the DECL node as in ^(DECL type ID).

        If you care what the token payload objects' type is, you should
        override this method and any other createToken variant.
        """

        raise NotImplementedError
############################################################################
#
# common tree implementation
#
# Tree
# \- BaseTree
# \- CommonTree
# \- CommonErrorNode
#
# TreeAdaptor
# \- BaseTreeAdaptor
# \- CommonTreeAdaptor
#
############################################################################
class CommonTree(BaseTree):
    """@brief A tree node that is wrapper for a Token object.

    After 3.0 release
    while building tree rewrite stuff, it became clear that computing
    parent and child index is very difficult and cumbersome.  Better to
    spend the space in every tree node.  If you don't want these extra
    fields, it's easy to cut them out in your own BaseTree subclass.
    """

    def __init__(self, payload):
        """Wrap a payload.

        payload may be None (creates a nil node), a Token, or another
        CommonTree (acts as a copy constructor: the token is shared and
        the token bounds are copied; children are NOT copied).

        Raises TypeError for any other payload type.
        """
        BaseTree.__init__(self)

        # What token indexes bracket all tokens associated with this node
        # and below?
        self.startIndex = -1
        self.stopIndex = -1

        # Who is the parent node of this node; if null, implies node is root
        self.parent = None

        # What index is this node in the child list? Range: 0..n-1
        self.childIndex = -1

        # A single token is the payload
        if payload is None:
            self.token = None

        elif isinstance(payload, CommonTree):
            self.token = payload.token
            self.startIndex = payload.startIndex
            self.stopIndex = payload.stopIndex

        elif isinstance(payload, Token):
            # Fix: the original branch re-tested ``payload is None`` here,
            # which was dead code (None is handled by the first branch).
            self.token = payload

        else:
            raise TypeError(type(payload).__name__)

    def getToken(self):
        """Return the Token payload (None for a nil node)."""
        return self.token

    def dupNode(self):
        """Duplicate this node only (via the copy-constructor path)."""
        return CommonTree(self)

    def isNil(self):
        """A node without a token payload is nil."""
        return self.token is None

    def getType(self):
        """Token type of the payload, or INVALID_TOKEN_TYPE for nil nodes."""
        if self.token is None:
            return INVALID_TOKEN_TYPE

        return self.token.getType()

    type = property(getType)

    def getText(self):
        """Text of the payload token, or None for nil nodes."""
        if self.token is None:
            return None

        return self.token.text

    text = property(getText)

    def getLine(self):
        """Line of the payload token; falls back to the first child when
        the token carries no line info."""
        if self.token is None or self.token.getLine() == 0:
            if self.getChildCount():
                return self.getChild(0).getLine()
            else:
                return 0

        return self.token.getLine()

    line = property(getLine)

    def getCharPositionInLine(self):
        """Column of the payload token; falls back to the first child when
        the token carries no column info."""
        if self.token is None or self.token.getCharPositionInLine() == -1:
            if self.getChildCount():
                return self.getChild(0).getCharPositionInLine()
            else:
                return 0

        else:
            return self.token.getCharPositionInLine()

    charPositionInLine = property(getCharPositionInLine)

    def getTokenStartIndex(self):
        """Smallest token index for this subtree; defaults to the payload
        token's own index when bounds were never set."""
        if self.startIndex == -1 and self.token is not None:
            return self.token.getTokenIndex()

        return self.startIndex

    def setTokenStartIndex(self, index):
        self.startIndex = index

    tokenStartIndex = property(getTokenStartIndex, setTokenStartIndex)

    def getTokenStopIndex(self):
        """Largest token index for this subtree; defaults to the payload
        token's own index when bounds were never set."""
        if self.stopIndex == -1 and self.token is not None:
            return self.token.getTokenIndex()

        return self.stopIndex

    def setTokenStopIndex(self, index):
        self.stopIndex = index

    tokenStopIndex = property(getTokenStopIndex, setTokenStopIndex)

    def getChildIndex(self):
        #FIXME: mark as deprecated
        return self.childIndex

    def setChildIndex(self, idx):
        #FIXME: mark as deprecated
        self.childIndex = idx

    def getParent(self):
        #FIXME: mark as deprecated
        return self.parent

    def setParent(self, t):
        #FIXME: mark as deprecated
        self.parent = t

    def toString(self):
        """Single-node text: "nil" for nil nodes, "<errornode>" for invalid
        tokens, otherwise the token text."""
        if self.isNil():
            return "nil"

        if self.getType() == INVALID_TOKEN_TYPE:
            return "<errornode>"

        return self.token.text

    __str__ = toString

    def toStringTree(self):
        """Lisp-style rendering of the whole subtree."""
        if not self.children:
            return self.toString()

        ret = ''
        if not self.isNil():
            ret += '(%s ' % (self.toString())

        ret += ' '.join([child.toStringTree() for child in self.children])

        if not self.isNil():
            ret += ')'

        return ret
# Module-level shared sentinel node wrapping INVALID_TOKEN.
INVALID_NODE = CommonTree(INVALID_TOKEN)
class CommonErrorNode(CommonTree):
    """A node representing erroneous token range in token stream"""

    def __init__(self, input, start, stop, exc):
        """Record the input stream, the start/stop tokens bracketing the
        error, and the trapped exception."""
        CommonTree.__init__(self, None)

        if (stop is None or
            (stop.getTokenIndex() < start.getTokenIndex() and
             stop.getType() != EOF
             )
            ):
            # sometimes resync does not consume a token (when LT(1) is
            # in follow set.  So, stop will be 1 to left to start. adjust.
            # Also handle case where start is the first token and no token
            # is consumed during recovery; LT(-1) will return null.
            stop = start

        self.input = input
        self.start = start
        self.stop = stop
        self.trappedException = exc

    def isNil(self):
        """Error nodes are never nil even though they have no token payload."""
        return False

    def getType(self):
        """Error nodes always report the invalid token type."""
        return INVALID_TOKEN_TYPE

    def getText(self):
        """Return the erroneous input text between start and stop, pulled
        from the token stream or tree node stream as appropriate."""
        if isinstance(self.start, Token):
            i = self.start.getTokenIndex()
            j = self.stop.getTokenIndex()
            if self.stop.getType() == EOF:
                j = self.input.size()

            badText = self.input.toString(i, j)

        elif isinstance(self.start, Tree):
            badText = self.input.toString(self.start, self.stop)

        else:
            # people should subclass if they alter the tree type so this
            # next one is for sure correct.
            badText = "<unknown>"

        return badText

    def toString(self):
        """Describe the trapped exception kind plus the resync'd text."""
        if isinstance(self.trappedException, MissingTokenException):
            return ("<missing type: "
                    + str(self.trappedException.getMissingType())
                    + ">")

        elif isinstance(self.trappedException, UnwantedTokenException):
            return ("<extraneous: "
                    + str(self.trappedException.getUnexpectedToken())
                    + ", resync=" + self.getText() + ">")

        elif isinstance(self.trappedException, MismatchedTokenException):
            return ("<mismatched token: "
                    + str(self.trappedException.token)
                    + ", resync=" + self.getText() + ">")

        elif isinstance(self.trappedException, NoViableAltException):
            return ("<unexpected: "
                    + str(self.trappedException.token)
                    + ", resync=" + self.getText() + ">")

        return "<error: "+self.getText()+">"
class CommonTreeAdaptor(BaseTreeAdaptor):
    """
    @brief A TreeAdaptor that works with any Tree implementation.

    It provides
    really just factory methods; all the work is done by BaseTreeAdaptor.
    If you would like to have different tokens created than ClassicToken
    objects, you need to override this and then set the parser tree adaptor to
    use your subclass.

    To get your parser to build nodes of a different type, override
    create(Token).
    """

    def dupNode(self, treeNode):
        """
        Duplicate a node.  This is part of the factory;
        override if you want another kind of node to be built.

        I could use reflection to prevent having to override this
        but reflection is slow.
        """

        if treeNode is None:
            return None

        return treeNode.dupNode()

    def createWithPayload(self, payload):
        """Build a CommonTree around the payload (Token or None)."""
        return CommonTree(payload)

    def createToken(self, fromToken=None, tokenType=None, text=None):
        """
        Tell me how to create a token for use with imaginary token nodes.
        For example, there is probably no input symbol associated with imaginary
        token DECL, but you need to create it as a payload or whatever for
        the DECL node as in ^(DECL type ID).

        If you care what the token payload objects' type is, you should
        override this method and any other createToken variant.
        """

        if fromToken is not None:
            return CommonToken(oldToken=fromToken)

        return CommonToken(type=tokenType, text=text)

    def setTokenBoundaries(self, t, startToken, stopToken):
        """
        Track start/stop token for subtree root created for a rule.
        Only works with Tree nodes.  For rules that match nothing,
        seems like this will yield start=i and stop=i-1 in a nil node.
        Might be useful info so I'll not force to be i..i.
        """

        if t is None:
            return

        start = 0
        stop = 0

        if startToken is not None:
            start = startToken.index

        if stopToken is not None:
            stop = stopToken.index

        t.setTokenStartIndex(start)
        t.setTokenStopIndex(stop)

    def getTokenStartIndex(self, t):
        """Subtree start token index, or -1 for a null node."""
        if t is None:
            return -1
        return t.getTokenStartIndex()

    def getTokenStopIndex(self, t):
        """Subtree stop token index, or -1 for a null node."""
        if t is None:
            return -1
        return t.getTokenStopIndex()

    def getText(self, t):
        """Node text, or None for a null node."""
        if t is None:
            return None
        return t.getText()

    def getType(self, t):
        """Node token type, or INVALID_TOKEN_TYPE for a null node."""
        if t is None:
            return INVALID_TOKEN_TYPE

        return t.getType()

    def getToken(self, t):
        """
        What is the Token associated with this node?  If
        you are not using CommonTree, then you must
        override this in your own adaptor.
        """

        if isinstance(t, CommonTree):
            return t.getToken()

        return None # no idea what to do

    def getChild(self, t, i):
        """ith child of t, or None for a null node."""
        if t is None:
            return None
        return t.getChild(i)

    def getChildCount(self, t):
        """Child count of t, or 0 for a null node."""
        if t is None:
            return 0
        return t.getChildCount()

    def getParent(self, t):
        """Delegate to the node."""
        return t.getParent()

    def setParent(self, t, parent):
        """Delegate to the node."""
        t.setParent(parent)

    def getChildIndex(self, t):
        """Delegate to the node."""
        return t.getChildIndex()

    def setChildIndex(self, t, index):
        """Delegate to the node."""
        t.setChildIndex(index)

    def replaceChildren(self, parent, startChildIndex, stopChildIndex, t):
        """Delegate to the parent node; no-op when parent is None
        (i.e. at the root of the overall tree)."""
        if parent is not None:
            parent.replaceChildren(startChildIndex, stopChildIndex, t)
############################################################################
#
# streams
#
# TreeNodeStream
#  \- CommonTreeNodeStream
#
# TreeAdaptor
# \- BaseTreeAdaptor
# \- CommonTreeAdaptor
#
############################################################################
class TreeNodeStream(IntStream):
    """@brief A stream of tree nodes

    It accessing nodes from a tree of some kind.

    All methods here are abstract (raise NotImplementedError); see
    CommonTreeNodeStream for a concrete implementation.
    """

    # TreeNodeStream is abstract, no need to complain about not implemented
    # abstract methods
    # pylint: disable-msg=W0223

    def get(self, i):
        """Get a tree node at an absolute index i; 0..n-1.

        If you don't want to buffer up nodes, then this method makes no
        sense for you.
        """

        raise NotImplementedError

    def LT(self, k):
        """
        Get tree node at current input pointer + i ahead where i=1 is next node.
        i<0 indicates nodes in the past.  So LT(-1) is previous node, but
        implementations are not required to provide results for k < -1.
        LT(0) is undefined.  For i>=n, return null.
        Return null for LT(0) and any index that results in an absolute address
        that is negative.

        This is analogus to the LT() method of the TokenStream, but this
        returns a tree node instead of a token.  Makes code gen identical
        for both parser and tree grammars. :)
        """

        raise NotImplementedError

    def getTreeSource(self):
        """
        Where is this stream pulling nodes from?  This is not the name, but
        the object that provides node objects.
        """

        raise NotImplementedError

    def getTokenStream(self):
        """
        If the tree associated with this stream was created from a TokenStream,
        you can specify it here.  Used to do rule $text attribute in tree
        parser.  Optional unless you use tree parser rule text attribute
        or output=template and rewrite=true options.
        """

        raise NotImplementedError

    def getTreeAdaptor(self):
        """
        What adaptor can tell me how to interpret/navigate nodes and
        trees.  E.g., get text of a node.
        """

        raise NotImplementedError

    def setUniqueNavigationNodes(self, uniqueNavigationNodes):
        """
        As we flatten the tree, we use UP, DOWN nodes to represent
        the tree structure.  When debugging we need unique nodes
        so we have to instantiate new ones.  When doing normal tree
        parsing, it's slow and a waste of memory to create unique
        navigation nodes.  Default should be false;
        """

        raise NotImplementedError

    def toString(self, start, stop):
        """
        Return the text of all nodes from start to stop, inclusive.
        If the stream does not buffer all the nodes then it can still
        walk recursively from start until stop.  You can always return
        null or "" too, but users should not access $ruleLabel.text in
        an action of course in that case.
        """

        raise NotImplementedError

    # REWRITING TREES (used by tree parser)

    def replaceChildren(self, parent, startChildIndex, stopChildIndex, t):
        """
        Replace from start to stop child index of parent with t, which might
        be a list.  Number of children may be different
        after this call.  The stream is notified because it is walking the
        tree and might need to know you are monkeying with the underlying
        tree.  Also, it might be able to modify the node stream to avoid
        restreaming for future phases.

        If parent is null, don't do anything; must be at root of overall tree.
        Can't replace whatever points to the parent externally.  Do nothing.
        """

        raise NotImplementedError
class CommonTreeNodeStream(TreeNodeStream):
    """@brief A buffered stream of tree nodes.

    Nodes can be from a tree of ANY kind.

    This node stream sucks all nodes out of the tree specified in
    the constructor during construction and makes pointers into
    the tree using an array of Object pointers. The stream necessarily
    includes pointers to DOWN and UP and EOF nodes.

    This stream knows how to mark/release for backtracking.

    This stream is most suitable for tree interpreters that need to
    jump around a lot or for tree parsers requiring speed (at cost of
    memory). There is some duplicated functionality here with
    UnBufferedTreeNodeStream but just in bookkeeping, not tree walking etc...

    @see UnBufferedTreeNodeStream
    """

    def __init__(self, *args):
        """Construct from either (tree) or (adaptor, tree)."""
        TreeNodeStream.__init__(self)

        if len(args) == 1:
            adaptor = CommonTreeAdaptor()
            tree = args[0]
        elif len(args) == 2:
            adaptor = args[0]
            tree = args[1]
        else:
            raise TypeError("Invalid arguments")

        # all these navigation nodes are shared and hence they
        # cannot contain any line/column info
        self.down = adaptor.createFromType(DOWN, "DOWN")
        self.up = adaptor.createFromType(UP, "UP")
        self.eof = adaptor.createFromType(EOF, "EOF")

        # The complete mapping from stream index to tree node.
        # This buffer includes pointers to DOWN, UP, and EOF nodes.
        # Filled lazily on first use (see the p == -1 tests everywhere).
        self.nodes = []

        # Pull nodes from which tree?
        self.root = tree

        # IF this tree (root) was created from a token stream, track it.
        self.tokens = None

        # What tree adaptor was used to build these trees
        self.adaptor = adaptor

        # Reuse same DOWN, UP navigation nodes unless this is true
        self.uniqueNavigationNodes = False

        # The index into the nodes list of the current node (next node
        # to consume). If -1, nodes array not filled yet.
        self.p = -1

        # Track the last mark() call result value for use in rewind().
        self.lastMarker = None

        # Stack of indexes used for push/pop calls
        self.calls = []

    def fillBuffer(self):
        """Walk tree with depth-first-search and fill nodes buffer.

        Don't do DOWN, UP nodes if its a list (t is isNil).
        """
        self._fillBuffer(self.root)
        self.p = 0  # buffer of nodes intialized now

    def _fillBuffer(self, t):
        """Recursive helper for fillBuffer(); appends t and its subtree."""
        nil = self.adaptor.isNil(t)

        if not nil:
            self.nodes.append(t)  # add this node

        # add DOWN node if t has children
        n = self.adaptor.getChildCount(t)
        if not nil and n > 0:
            self.addNavigationNode(DOWN)

        # and now add all its children
        for c in range(n):
            self._fillBuffer(self.adaptor.getChild(t, c))

        # add UP node if t has children
        if not nil and n > 0:
            self.addNavigationNode(UP)

    def getNodeIndex(self, node):
        """What is the stream index for node? 0..n-1
        Return -1 if node not found.
        """
        if self.p == -1:
            self.fillBuffer()

        for i, t in enumerate(self.nodes):
            if t == node:
                return i

        return -1

    def addNavigationNode(self, ttype):
        """
        As we flatten the tree, we use UP, DOWN nodes to represent
        the tree structure. When debugging we need unique nodes
        so instantiate new ones when uniqueNavigationNodes is true.
        """
        navNode = None

        if ttype == DOWN:
            if self.hasUniqueNavigationNodes():
                navNode = self.adaptor.createFromType(DOWN, "DOWN")
            else:
                navNode = self.down
        else:
            if self.hasUniqueNavigationNodes():
                navNode = self.adaptor.createFromType(UP, "UP")
            else:
                navNode = self.up

        self.nodes.append(navNode)

    def get(self, i):
        """Return the node at absolute stream index i."""
        if self.p == -1:
            self.fillBuffer()

        return self.nodes[i]

    def LT(self, k):
        """Lookahead: node k steps ahead (k=1 is next); negative k delegates
        to LB(). Returns the shared EOF node past the end."""
        if self.p == -1:
            self.fillBuffer()

        if k == 0:
            return None

        if k < 0:
            return self.LB(-k)

        if self.p + k - 1 >= len(self.nodes):
            return self.eof

        return self.nodes[self.p + k - 1]

    def getCurrentSymbol(self):
        return self.LT(1)

    def LB(self, k):
        """Look backwards k nodes"""
        if k == 0:
            return None

        if self.p - k < 0:
            return None

        return self.nodes[self.p - k]

    def getTreeSource(self):
        return self.root

    def getSourceName(self):
        return self.getTokenStream().getSourceName()

    def getTokenStream(self):
        return self.tokens

    def setTokenStream(self, tokens):
        self.tokens = tokens

    def getTreeAdaptor(self):
        return self.adaptor

    def hasUniqueNavigationNodes(self):
        return self.uniqueNavigationNodes

    def setUniqueNavigationNodes(self, uniqueNavigationNodes):
        self.uniqueNavigationNodes = uniqueNavigationNodes

    def consume(self):
        if self.p == -1:
            self.fillBuffer()

        self.p += 1

    def LA(self, i):
        return self.adaptor.getType(self.LT(i))

    def mark(self):
        if self.p == -1:
            self.fillBuffer()

        self.lastMarker = self.index()
        return self.lastMarker

    def release(self, marker=None):
        # no resources to release
        pass

    def index(self):
        return self.p

    def rewind(self, marker=None):
        if marker is None:
            marker = self.lastMarker

        self.seek(marker)

    def seek(self, index):
        if self.p == -1:
            self.fillBuffer()

        self.p = index

    def push(self, index):
        """
        Make stream jump to a new location, saving old location.
        Switch back with pop().
        """
        self.calls.append(self.p)  # save current index
        self.seek(index)

    def pop(self):
        """
        Seek back to previous index saved during last push() call.
        Return top of stack (return index).
        """
        ret = self.calls.pop(-1)
        self.seek(ret)
        return ret

    def reset(self):
        self.p = 0
        self.lastMarker = 0
        self.calls = []

    def size(self):
        if self.p == -1:
            self.fillBuffer()

        return len(self.nodes)

    # TREE REWRITE INTERFACE

    def replaceChildren(self, parent, startChildIndex, stopChildIndex, t):
        if parent is not None:
            self.adaptor.replaceChildren(
                parent, startChildIndex, stopChildIndex, t
                )

    def __str__(self):
        """Used for testing, just return the token type stream"""
        if self.p == -1:
            self.fillBuffer()

        return ' '.join([str(self.adaptor.getType(node))
                         for node in self.nodes
                         ])

    def toString(self, start, stop):
        """Return the concatenated text of nodes from start to stop,
        preferring the original token stream when one is attached."""
        if start is None or stop is None:
            return None

        if self.p == -1:
            self.fillBuffer()

        # if we have the token stream, use that to dump text in order
        if self.tokens is not None:
            beginTokenIndex = self.adaptor.getTokenStartIndex(start)
            endTokenIndex = self.adaptor.getTokenStopIndex(stop)

            # if it's a tree, use start/stop index from start node
            # else use token range from start/stop nodes
            if self.adaptor.getType(stop) == UP:
                endTokenIndex = self.adaptor.getTokenStopIndex(start)
            elif self.adaptor.getType(stop) == EOF:
                endTokenIndex = self.size() - 2  # don't use EOF

            return self.tokens.toString(beginTokenIndex, endTokenIndex)

        # walk nodes looking for start
        i, t = 0, None
        for i, t in enumerate(self.nodes):
            if t == start:
                break

        # now walk until we see stop, filling string buffer with text
        buf = []
        t = self.nodes[i]
        while t != stop:
            text = self.adaptor.getText(t)
            if text is None:
                # BUGFIX: getType() returns an int; without str() this
                # str+int concatenation raised TypeError at runtime.
                text = " " + str(self.adaptor.getType(t))

            buf.append(text)
            i += 1
            t = self.nodes[i]

        # include stop node too
        text = self.adaptor.getText(stop)
        if text is None:
            # BUGFIX: same str() fix as above for the stop node.
            text = " " + str(self.adaptor.getType(stop))

        buf.append(text)

        return ''.join(buf)

    ## iterator interface
    def __iter__(self):
        if self.p == -1:
            self.fillBuffer()

        for node in self.nodes:
            yield node
#############################################################################
#
# tree parser
#
#############################################################################
class TreeParser(BaseRecognizer):
    """@brief Baseclass for generated tree parsers.

    A parser for a stream of tree nodes. "tree grammars" result in a subclass
    of this. All the error reporting and recovery is shared with Parser via
    the BaseRecognizer superclass.
    """

    def __init__(self, input, state=None):
        BaseRecognizer.__init__(self, state)

        self.input = None
        self.setTreeNodeStream(input)

    def reset(self):
        BaseRecognizer.reset(self)  # reset all recognizer state variables
        if self.input is not None:
            self.input.seek(0)  # rewind the input

    def setTreeNodeStream(self, input):
        """Set the input stream"""
        self.input = input

    def getTreeNodeStream(self):
        return self.input

    def getSourceName(self):
        return self.input.getSourceName()

    def getCurrentInputSymbol(self, input):
        return input.LT(1)

    def getMissingSymbol(self, input, e, expectedTokenType, follow):
        """Conjure up a placeholder node for a missing token."""
        tokenText = "<missing " + self.tokenNames[expectedTokenType] + ">"
        return CommonTree(CommonToken(type=expectedTokenType, text=tokenText))

    def matchAny(self, ignore):  # ignore stream, copy of this.input
        """
        Match '.' in tree parser has special meaning. Skip node or
        entire tree if node has children. If children, scan until
        corresponding UP node.
        """
        self._state.errorRecovery = False

        look = self.input.LT(1)
        if self.input.getTreeAdaptor().getChildCount(look) == 0:
            self.input.consume()  # not subtree, consume 1 node and return
            return

        # current node is a subtree, skip to corresponding UP.
        # must count nesting level to get right UP
        level = 0
        tokenType = self.input.getTreeAdaptor().getType(look)
        while tokenType != EOF and not (tokenType == UP and level == 0):
            self.input.consume()
            look = self.input.LT(1)
            tokenType = self.input.getTreeAdaptor().getType(look)
            if tokenType == DOWN:
                level += 1
            elif tokenType == UP:
                level -= 1

        self.input.consume()  # consume UP

    def mismatch(self, input, ttype, follow):
        """
        We have DOWN/UP nodes in the stream that have no line info; override.
        plus we want to alter the exception type. Don't try to recover
        from tree parser errors inline...
        """
        raise MismatchedTreeNodeException(ttype, input)

    def getErrorHeader(self, e):
        """
        Prefix error message with the grammar name because message is
        always intended for the programmer because the parser built
        the input tree not the user.
        """
        return (self.getGrammarFileName() +
                ": node from %sline %s:%s"
                % (['', "after "][e.approximateLineInfo],
                   e.line,
                   e.charPositionInLine
                   )
                )

    def getErrorMessage(self, e, tokenNames):
        """
        Tree parsers parse nodes they usually have a token object as
        payload. Set the exception token and do the default behavior.
        """
        if isinstance(self, TreeParser):
            adaptor = e.input.getTreeAdaptor()
            e.token = adaptor.getToken(e.node)

            # BUGFIX: the test was inverted ("is not None"), which
            # overwrote real token payloads with synthesized ones and
            # left payload-less nodes with token = None. Only synthesize
            # a token when the node carries no payload (e.g. UP/DOWN).
            if e.token is None:  # could be an UP/DOWN node
                e.token = CommonToken(
                    type=adaptor.getType(e.node),
                    text=adaptor.getText(e.node)
                    )

        return BaseRecognizer.getErrorMessage(self, e, tokenNames)

    def traceIn(self, ruleName, ruleIndex):
        BaseRecognizer.traceIn(self, ruleName, ruleIndex, self.input.LT(1))

    def traceOut(self, ruleName, ruleIndex):
        BaseRecognizer.traceOut(self, ruleName, ruleIndex, self.input.LT(1))
#############################################################################
#
# streams for rule rewriting
#
#############################################################################
class RewriteRuleElementStream(object):
    """@brief Internal helper class.

    A generic list of elements tracked in an alternative to be used in
    a -> rewrite rule. Subclasses fill in the next() machinery, returning
    either an AST node wrapped around a token payload or an existing
    subtree.

    Once you start next()ing, do not add further elements: that would
    break the cursor tracking.

    @see org.antlr.runtime.tree.RewriteRuleSubtreeStream
    @see org.antlr.runtime.tree.RewriteRuleTokenStream

    TODO: add mechanism to detect/puke on modification after reading from
    stream
    """

    def __init__(self, adaptor, elementDescription, elements=None):
        # Cursor 0..n-1. With a single tracked element the cursor sits at 0
        # until next() bumps it to 1, meaning "exhausted".
        self.cursor = 0

        # Fast path: a lone element is kept here, no list allocated.
        self.singleElement = None

        # The list of tokens or subtrees we are tracking (lazy).
        self.elements = None

        # Once a node/subtree has been emitted it must be dup'd from then
        # on. Streams are reset after subrules so they can be reused; reset
        # therefore sets this dirty bit, and a dirty stream always dups.
        self.dirty = False

        # Human-readable description (token/rule name) for error messages.
        self.elementDescription = elementDescription
        self.adaptor = adaptor

        if isinstance(elements, (list, tuple)):
            # feed directly off an existing list
            self.singleElement = None
            self.elements = elements
        else:
            # zero or one element; add() handles None
            self.add(elements)

    def reset(self):
        """
        Make the stream look unconsumed; the elements themselves are left
        untouched. Any future reads must dup, so set the dirty bit.
        """
        self.cursor = 0
        self.dirty = True

    def add(self, el):
        """Track another element (no-op for None)."""
        if el is None:
            return

        if self.elements is not None:
            # already spilled into a list
            self.elements.append(el)
        elif self.singleElement is None:
            # first element: track without allocating a list
            self.singleElement = el
        else:
            # second element arrives: promote to a list
            self.elements = [self.singleElement, el]
            self.singleElement = None

    def nextTree(self):
        """
        Return the next element in the stream. Running off the end is only
        legal for a size-1 stream, which then hands out duplicates of its
        lone element; a dirty stream always hands out duplicates.
        """
        if self.dirty or (len(self) == 1 and self.cursor >= len(self)):
            # out of elements with size 1 (or dirty): dup
            return self.dup(self._next())

        # size is checked inside _next(); just fetch
        return self._next()

    def _next(self):
        """
        Fetch the next element as a tree node/subtree, handling the
        single-element optimization. Raises if the stream is empty, or if
        it is exhausted and holds more than one element.
        protected so you can override in a subclass if necessary.
        """
        n = len(self)
        if n == 0:
            raise RewriteEmptyStreamException(self.elementDescription)

        if self.cursor >= n:  # out of elements?
            if n != 1:
                # exhausted and size was not 1, so we can't dup
                raise RewriteCardinalityException(self.elementDescription)

            # size 1 is ok; caller will dup
            return self.toTree(self.singleElement)

        # still have elements
        if self.singleElement is not None:
            self.cursor += 1  # move cursor even for single element list
            return self.toTree(self.singleElement)

        # more than one tracked element: pull from the list
        result = self.toTree(self.elements[self.cursor])
        self.cursor += 1
        return result

    def dup(self, el):
        """
        Duplicate a token or AST subtree. For a token that means wrapping
        a fresh AST node around it; for trees it means adaptor.dupTree(),
        unless the element will become a root (then dup just the node).
        Subclass responsibility.
        """
        raise NotImplementedError

    def toTree(self, el):
        """
        Ensure stream emits trees; tokens must be converted to AST nodes.
        AST nodes can be passed through unmolested.
        """
        return el

    def hasNext(self):
        """True while unconsumed elements remain."""
        if self.singleElement is not None and self.cursor < 1:
            return True

        return (self.elements is not None
                and self.cursor < len(self.elements))

    def size(self):
        """Number of tracked elements (0, 1, or len(list))."""
        if self.singleElement is not None:
            return 1

        if self.elements is not None:
            return len(self.elements)

        return 0

    __len__ = size

    def getDescription(self):
        """Deprecated. Directly access elementDescription attribute"""
        return self.elementDescription
class RewriteRuleTokenStream(RewriteRuleElementStream):
    """@brief Internal helper class."""

    def toTree(self, el):
        # Tokens pass through untouched here; only nextNode() wraps a
        # token in a tree node. This way hetero tree nodes work in
        # rewrites.
        return el

    def nextNode(self):
        """Fetch the next token and wrap it in a freshly created AST node."""
        return self.adaptor.createWithPayload(self._next())

    def nextToken(self):
        """Fetch the next raw token."""
        return self._next()

    def dup(self, el):
        raise TypeError("dup can't be called for a token stream.")
class RewriteRuleSubtreeStream(RewriteRuleElementStream):
    """@brief Internal helper class."""

    def nextNode(self):
        """
        Treat the next element as a single node even if it is a subtree.

        Used instead of next() when the result must become a tree *root*,
        so only the node itself may be duplicated - duplicating the whole
        subtree would also clone recently-added children (e.g. ^(type ID)+
        adds ID to type; the 2nd iteration must dup only the type node).
        Referencing a rule result twice is ok; such uses dup the entire
        tree since trees cannot be added as roots (e.g. expr expr).

        Mirrors the base-class _next() gating logic, but calls dupNode
        instead of dup - the base next() cannot know which one to use.
        """
        if self.dirty or (len(self) == 1 and self.cursor >= len(self)):
            # exhausted with size 1 (or dirty): dup just the node,
            # since this path only ever produces root nodes
            return self.adaptor.dupNode(self._next())

        # size is validated inside _next(); just fetch
        return self._next()

    def dup(self, el):
        return self.adaptor.dupTree(el)
class RewriteRuleNodeStream(RewriteRuleElementStream):
    """
    Queues up nodes matched on left side of -> in a tree parser. This is
    the analog of RewriteRuleTokenStream for normal parsers.
    """

    def nextNode(self):
        """Fetch the next queued node."""
        return self._next()

    def toTree(self, el):
        # every emitted node is already a duplicate...
        return self.adaptor.dupNode(el)

    def dup(self, el):
        # ...so dup must never be reached; toTree short-circuits it.
        raise TypeError("dup can't be called for a node stream.")
class TreeRuleReturnScope(RuleReturnScope):
    """
    This is identical to the ParserRuleReturnScope except that
    the start property is a tree nodes not Token object
    when you are parsing trees. To be generic the tree node types
    have to be Object.
    """

    def __init__(self):
        # first tree node matched by this rule
        self.start = None
        # AST computed for this rule
        self.tree = None

    def getStart(self):
        """Accessor for the first matched node."""
        return self.start

    def getTree(self):
        """Accessor for the rule's result tree."""
        return self.tree
| Python |
""" @package antlr3.dottreegenerator
@brief ANTLR3 runtime package, tree module
This module contains all support classes for AST construction and tree parsers.
"""
# begin[licence]
#
# [The "BSD licence"]
# Copyright (c) 2005-2008 Terence Parr
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# end[licence]
# lot's of docstrings are missing, don't complain for now...
# pylint: disable-msg=C0111
from treewizard import TreeWizard
# The DOT generator needs the optional stringtemplate3 package. If the
# import fails, expose a stub that defers the ImportError until toDOT()
# is actually called, so importing this module always succeeds.
try:
    from antlr3.dottreegen import toDOT
except ImportError, exc:
    def toDOT(*args, **kwargs):
        # re-raise the original ImportError captured above
        raise exc
| Python |
""" @package antlr3.dottreegenerator
@brief ANTLR3 runtime package, tree module
This module contains all support classes for AST construction and tree parsers.
"""
# begin[licence]
#
# [The "BSD licence"]
# Copyright (c) 2005-2008 Terence Parr
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# end[licence]
# lot's of docstrings are missing, don't complain for now...
# pylint: disable-msg=C0111
from antlr3.tree import CommonTreeAdaptor
import stringtemplate3
class DOTTreeGenerator(object):
    """
    A utility class to generate DOT diagrams (graphviz) from
    arbitrary trees. You can pass in your own templates and
    can pass in any kind of tree or use Tree interface method.
    """

    # Template for the whole digraph; $nodes$ and $edges$ are filled in
    # by toDOT().
    _treeST = stringtemplate3.StringTemplate(
        template=(
        "digraph {\n" +
        "  ordering=out;\n" +
        "  ranksep=.4;\n" +
        "  node [shape=plaintext, fixedsize=true, fontsize=11, fontname=\"Courier\",\n" +
        "        width=.25, height=.25];\n" +
        "  edge [arrowsize=.5]\n" +
        "  $nodes$\n" +
        "  $edges$\n" +
        "}\n")
        )

    # One node definition per tree node.
    _nodeST = stringtemplate3.StringTemplate(
        template="$name$ [label=\"$text$\"];\n"
        )

    # One edge per parent->child relation (with a comment for readability).
    _edgeST = stringtemplate3.StringTemplate(
        template="$parent$ -> $child$ // \"$parentText\" -> \"$childText$\"\n"
        )

    def __init__(self):
        ## Track node to number mapping so we can get proper node name back
        self.nodeToNumberMap = {}

        ## Track node number so we can get unique node names
        self.nodeNumber = 0

    def toDOT(self, tree, adaptor=None, treeST=_treeST, edgeST=_edgeST):
        """Render tree as a DOT StringTemplate.

        Two passes: first define all nodes, then all edges.  nodeNumber is
        reset between passes so both walks see the same numbering;
        nodeToNumberMap persists, so reuse a fresh generator per tree.
        """
        if adaptor is None:
            adaptor = CommonTreeAdaptor()

        treeST = treeST.getInstanceOf()

        self.nodeNumber = 0
        self.toDOTDefineNodes(tree, adaptor, treeST)

        self.nodeNumber = 0
        self.toDOTDefineEdges(tree, adaptor, treeST, edgeST)
        return treeST

    def toDOTDefineNodes(self, tree, adaptor, treeST, knownNodes=None):
        """Emit a node definition for tree and (recursively) its children,
        skipping nodes already defined (tracked via knownNodes)."""
        if knownNodes is None:
            knownNodes = set()

        if tree is None:
            return

        n = adaptor.getChildCount(tree)
        if n == 0:
            # must have already dumped as child from previous
            # invocation; do nothing
            return

        # define parent node
        number = self.getNodeNumber(tree)
        if number not in knownNodes:
            parentNodeST = self.getNodeST(adaptor, tree)
            treeST.setAttribute("nodes", parentNodeST)
            knownNodes.add(number)

        # for each child, do a "<unique-name> [label=text]" node def
        for i in range(n):
            child = adaptor.getChild(tree, i)

            number = self.getNodeNumber(child)
            if number not in knownNodes:
                nodeST = self.getNodeST(adaptor, child)
                treeST.setAttribute("nodes", nodeST)
                knownNodes.add(number)

            self.toDOTDefineNodes(child, adaptor, treeST, knownNodes)

    def toDOTDefineEdges(self, tree, adaptor, treeST, edgeST):
        """Emit parent -> child edges for tree, depth-first."""
        if tree is None:
            return

        n = adaptor.getChildCount(tree)
        if n == 0:
            # must have already dumped as child from previous
            # invocation; do nothing
            return

        parentName = "n%d" % self.getNodeNumber(tree)

        # for each child, do a parent -> child edge using unique node names
        parentText = adaptor.getText(tree)
        for i in range(n):
            child = adaptor.getChild(tree, i)
            childText = adaptor.getText(child)
            childName = "n%d" % self.getNodeNumber(child)
            # instantiate a fresh edge template per edge (rebinding the
            # parameter; getInstanceOf on an instance yields a new copy)
            edgeST = edgeST.getInstanceOf()
            edgeST.setAttribute("parent", parentName)
            edgeST.setAttribute("child", childName)
            edgeST.setAttribute("parentText", parentText)
            edgeST.setAttribute("childText", childText)
            treeST.setAttribute("edges", edgeST)
            self.toDOTDefineEdges(child, adaptor, treeST, edgeST)

    def getNodeST(self, adaptor, t):
        """Build the node-definition template for t."""
        text = adaptor.getText(t)
        nodeST = self._nodeST.getInstanceOf()
        uniqueName = "n%d" % self.getNodeNumber(t)
        nodeST.setAttribute("name", uniqueName)

        if text is not None:
            # NOTE(review): r'\\"' yields backslash-backslash-quote in the
            # label; DOT quoted-string escaping normally wants r'\"' --
            # verify rendered output for texts containing double quotes.
            text = text.replace('"', r'\\"')

        nodeST.setAttribute("text", text)

        return nodeST

    def getNodeNumber(self, t):
        """Return a stable unique number for node t, assigning on first use."""
        try:
            return self.nodeToNumberMap[t]
        except KeyError:
            self.nodeToNumberMap[t] = self.nodeNumber
            self.nodeNumber += 1
            return self.nodeNumber - 1
def toDOT(tree, adaptor=None, treeST=DOTTreeGenerator._treeST, edgeST=DOTTreeGenerator._edgeST):
    """
    Generate DOT (graphviz) for a whole tree not just a node.
    For example, 3+4*5 should generate:

    digraph {
        node [shape=plaintext, fixedsize=true, fontsize=11, fontname="Courier",
              width=.4, height=.2];
        edge [arrowsize=.7]
        "+"->3
        "+"->"*"
        "*"->4
        "*"->5
    }

    Return the ST not a string in case people want to alter.

    Takes a Tree interface object.  adaptor defaults to a
    CommonTreeAdaptor; treeST/edgeST allow custom templates.

    Example of invocation:

        import antlr3
        import antlr3.extras

        input = antlr3.ANTLRInputStream(sys.stdin)
        lex = TLexer(input)
        tokens = antlr3.CommonTokenStream(lex)
        parser = TParser(tokens)
        tree = parser.e().tree
        print tree.toStringTree()
        st = antlr3.extras.toDOT(tree)
        print st
    """

    gen = DOTTreeGenerator()
    return gen.toDOT(tree, adaptor, treeST, edgeST)
| Python |
"""ANTLR3 exception hierarchy"""
# begin[licence]
#
# [The "BSD licence"]
# Copyright (c) 2005-2008 Terence Parr
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# end[licence]
from antlr3.constants import INVALID_TOKEN_TYPE
class BacktrackingFailed(Exception):
    """@brief Raised to signal that a backtracking attempt failed."""
class RecognitionException(Exception):
    """@brief The root of the ANTLR exception hierarchy.

    To avoid English-only error messages and to generally make things
    as flexible as possible, these exceptions are not created with strings,
    but rather the information necessary to generate an error. Then
    the various reporting methods in Parser and Lexer can be overridden
    to generate a localized error message. For example, MismatchedToken
    exceptions are built with the expected token type.
    So, don't expect getMessage() to return anything.

    Note that as of Java 1.4, you can access the stack trace, which means
    that you can compute the complete trace of rules from the start symbol.
    This gives you considerable context information with which to generate
    useful error messages.

    ANTLR generates code that throws exceptions upon recognition error and
    also generates code to catch these exceptions in each rule. If you
    want to quit upon first error, you can turn off the automatic error
    handling mechanism using rulecatch action, but you still need to
    override methods mismatch and recoverFromMismatchSet.

    In general, the recognition exceptions can track where in a grammar a
    problem occurred and/or what was the expected input. While the parser
    knows its state (such as current input symbol and line info) that
    state can change before the exception is reported so current token index
    is computed and stored at exception time. From this info, you can
    perhaps print an entire line of input not just a single token, for
    example. Better to just say the recognizer had a problem and then let
    the parser figure out a fancy report.
    """

    def __init__(self, input=None):
        Exception.__init__(self)

        # What input stream did the error occur in?
        self.input = None

        # What is index of token/char were we looking at when the error
        # occurred?
        self.index = None

        # The current Token when an error occurred. Since not all streams
        # can retrieve the ith Token, we have to track the Token object.
        # For parsers. Even when it's a tree parser, token might be set.
        self.token = None

        # If this is a tree parser exception, node is set to the node with
        # the problem.
        self.node = None

        # The current char when an error occurred. For lexers.
        self.c = None

        # Track the line at which the error occurred in case this is
        # generated from a lexer. We need to track this since the
        # unexpected char doesn't carry the line info.
        self.line = None

        self.charPositionInLine = None

        # If you are parsing a tree node stream, you will encounter som
        # imaginary nodes w/o line/col info. We now search backwards looking
        # for most recent token with line/col info, but notify
        # getErrorHeader() that info is approximate.
        self.approximateLineInfo = False

        if input is not None:
            self.input = input
            self.index = input.index()

            # late import to avoid cyclic dependencies
            from antlr3.streams import TokenStream, CharStream
            from antlr3.tree import TreeNodeStream

            if isinstance(self.input, TokenStream):
                self.token = self.input.LT(1)
                self.line = self.token.line
                self.charPositionInLine = self.token.charPositionInLine

            # NOTE: this if/else deliberately pairs with the TreeNodeStream
            # test (not the TokenStream one), mirroring the Java runtime: a
            # plain TokenStream also falls into the else and records LA(1).
            if isinstance(self.input, TreeNodeStream):
                self.extractInformationFromTreeNodeStream(self.input)

            else:
                if isinstance(self.input, CharStream):
                    self.c = self.input.LT(1)
                    self.line = self.input.line
                    self.charPositionInLine = self.input.charPositionInLine

                else:
                    self.c = self.input.LA(1)

    def extractInformationFromTreeNodeStream(self, nodes):
        """Fill in token/line/column from a tree node stream, scanning
        backwards through imaginary nodes for approximate position info."""
        from antlr3.tree import Tree, CommonTree
        from antlr3.tokens import CommonToken

        self.node = nodes.LT(1)
        adaptor = nodes.adaptor
        payload = adaptor.getToken(self.node)
        if payload is not None:
            self.token = payload
            if payload.line <= 0:
                # imaginary node; no line/pos info; scan backwards
                i = -1
                priorNode = nodes.LT(i)
                while priorNode is not None:
                    priorPayload = adaptor.getToken(priorNode)
                    if priorPayload is not None and priorPayload.line > 0:
                        # we found the most recent real line / pos info
                        self.line = priorPayload.line
                        self.charPositionInLine = priorPayload.charPositionInLine
                        self.approximateLineInfo = True
                        break

                    i -= 1
                    priorNode = nodes.LT(i)

            else:  # node created from real token
                self.line = payload.line
                self.charPositionInLine = payload.charPositionInLine

        elif isinstance(self.node, Tree):
            self.line = self.node.line
            self.charPositionInLine = self.node.charPositionInLine
            if isinstance(self.node, CommonTree):
                self.token = self.node.token

        else:
            # payload-less, non-Tree node: synthesize a token from the
            # adaptor's view of it
            type = adaptor.getType(self.node)
            text = adaptor.getText(self.node)
            self.token = CommonToken(type=type, text=text)

    def getUnexpectedType(self):
        """Return the token type or char of the unexpected input element"""
        from antlr3.streams import TokenStream
        from antlr3.tree import TreeNodeStream

        if isinstance(self.input, TokenStream):
            return self.token.type

        elif isinstance(self.input, TreeNodeStream):
            adaptor = self.input.treeAdaptor
            return adaptor.getType(self.node)

        else:
            return self.c

    unexpectedType = property(getUnexpectedType)
class MismatchedTokenException(RecognitionException):
    """@brief A mismatched char or Token or tree node."""

    def __init__(self, expecting, input):
        RecognitionException.__init__(self, input)
        # token type (or char) the recognizer was looking for
        self.expecting = expecting

    def __str__(self):
        return "MismatchedTokenException(%r!=%r)" % (
            self.getUnexpectedType(), self.expecting)
    __repr__ = __str__
class UnwantedTokenException(MismatchedTokenException):
    """An extra token while parsing a TokenStream"""

    def getUnexpectedToken(self):
        return self.token

    def __str__(self):
        # only mention the expectation if it is a real token type
        if self.expecting == INVALID_TOKEN_TYPE:
            exp = ""
        else:
            exp = ", expected %s" % self.expecting

        if self.token is None:
            found = None
        else:
            found = self.token.text
        return "UnwantedTokenException(found=%s%s)" % (found, exp)

    __repr__ = __str__
class MissingTokenException(MismatchedTokenException):
    """
    We were expecting a token but it's not found.  The current token
    is actually what we wanted next.
    """

    def __init__(self, expecting, input, inserted):
        MismatchedTokenException.__init__(self, expecting, input)
        # the token the error-recovery machinery conjured up, if any
        self.inserted = inserted

    def getMissingType(self):
        return self.expecting

    def __str__(self):
        if self.token is None:
            return "MissingTokenException"
        if self.inserted is None:
            return "MissingTokenException(at %r)" % self.token.text
        return "MissingTokenException(inserted %r at %r)" % (
            self.inserted, self.token.text)

    __repr__ = __str__
class MismatchedRangeException(RecognitionException):
    """@brief The next token does not match a range of expected types."""

    def __init__(self, a, b, input):
        RecognitionException.__init__(self, input)
        # inclusive lower / upper bounds of the expected range
        self.a = a
        self.b = b

    def __str__(self):
        return "MismatchedRangeException(%r not in [%r..%r])" % (
            self.getUnexpectedType(), self.a, self.b)

    __repr__ = __str__
class MismatchedSetException(RecognitionException):
    """@brief The next token does not match a set of expected types."""

    def __init__(self, expecting, input):
        RecognitionException.__init__(self, input)
        # the set of token types that would have been acceptable
        self.expecting = expecting

    def __str__(self):
        return "MismatchedSetException(%r not in %r)" % (
            self.getUnexpectedType(), self.expecting)

    __repr__ = __str__
class MismatchedNotSetException(MismatchedSetException):
    """@brief Used for remote debugger deserialization"""

    def __str__(self):
        return "MismatchedNotSetException(%r!=%r)" % (
            self.getUnexpectedType(), self.expecting)

    __repr__ = __str__
class NoViableAltException(RecognitionException):
    """@brief Unable to decide which alternative to choose."""

    def __init__(self, grammarDecisionDescription, decisionNumber,
                 stateNumber, input):
        RecognitionException.__init__(self, input)
        # textual description of the grammar decision that failed
        self.grammarDecisionDescription = grammarDecisionDescription
        # DFA decision number and the state where prediction gave up
        self.decisionNumber = decisionNumber
        self.stateNumber = stateNumber

    def __str__(self):
        return "NoViableAltException(%r!=[%r])" % (
            self.unexpectedType, self.grammarDecisionDescription)

    __repr__ = __str__
class EarlyExitException(RecognitionException):
    """@brief The recognizer did not match anything for a (..)+ loop."""

    def __init__(self, decisionNumber, input):
        RecognitionException.__init__(self, input)
        # number of the (..)+ decision that matched zero times
        self.decisionNumber = decisionNumber
class FailedPredicateException(RecognitionException):
    """@brief A semantic predicate failed during validation.

    Validation of predicates
    occurs when normally parsing the alternative just like matching a token.
    Disambiguating predicate evaluation occurs when we hoist a predicate into
    a prediction decision.
    """

    def __init__(self, input, ruleName, predicateText):
        RecognitionException.__init__(self, input)
        # rule in which the predicate lives and its source text
        self.ruleName = ruleName
        self.predicateText = predicateText

    def __str__(self):
        return "FailedPredicateException(%s,{%s}?)" % (
            self.ruleName, self.predicateText)

    __repr__ = __str__
class MismatchedTreeNodeException(RecognitionException):
    """@brief The next tree mode does not match the expected type."""

    def __init__(self, expecting, input):
        RecognitionException.__init__(self, input)
        # node type the tree parser was looking for
        self.expecting = expecting

    def __str__(self):
        return "MismatchedTreeNodeException(%r!=%r)" % (
            self.getUnexpectedType(), self.expecting)

    __repr__ = __str__
| Python |
"""ANTLR3 runtime package"""
# begin[licence]
#
# [The "BSD licence"]
# Copyright (c) 2005-2008 Terence Parr
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# end[licence]
from antlr3.constants import EOF, DEFAULT_CHANNEL, INVALID_TOKEN_TYPE
############################################################################
#
# basic token interface
#
############################################################################
class Token(object):
    """@brief Abstract token baseclass.

    Declares the interface every token implementation must provide.  All
    of the get*/set* accessors are deprecated in favour of plain attribute
    access (o.text, o.type, o.line, ...) on the concrete classes; they are
    kept so that old code keeps working.
    """

    def getText(self):
        """Return the text of the token (deprecated, use o.text)."""
        raise NotImplementedError

    def setText(self, text):
        """Set the text of the token (deprecated, use o.text)."""
        raise NotImplementedError

    def getType(self):
        """Return the type of the token (deprecated, use o.type)."""
        raise NotImplementedError

    def setType(self, ttype):
        """Set the type of the token (deprecated, use o.type)."""
        raise NotImplementedError

    def getLine(self):
        """Return the 1-based line this token was matched on
        (deprecated, use o.line)."""
        raise NotImplementedError

    def setLine(self, line):
        """Set the line this token was matched on
        (deprecated, use o.line)."""
        raise NotImplementedError

    def getCharPositionInLine(self):
        """Return the 0-based column of the token's first character
        (deprecated, use o.charPositionInLine)."""
        raise NotImplementedError

    def setCharPositionInLine(self, pos):
        """Set the column of the token's first character
        (deprecated, use o.charPositionInLine)."""
        raise NotImplementedError

    def getChannel(self):
        """Return the channel of the token (deprecated, use o.channel)."""
        raise NotImplementedError

    def setChannel(self, channel):
        """Set the channel of the token (deprecated, use o.channel)."""
        raise NotImplementedError

    def getTokenIndex(self):
        """Return the 0-based index of the token in the input stream;
        must be valid to use the ANTLRWorks debugger
        (deprecated, use o.index)."""
        raise NotImplementedError

    def setTokenIndex(self, index):
        """Set the index in the input stream (deprecated, use o.index)."""
        raise NotImplementedError

    def getInputStream(self):
        """Return the character stream this token was created from.

        Optional, but nice to know where a Token comes from if you have
        include files etc. on the input."""
        raise NotImplementedError

    def setInputStream(self, input):
        """Set the character stream this token was created from; optional,
        see getInputStream."""
        raise NotImplementedError
############################################################################
#
# token implementations
#
# Token
# +- CommonToken
# \- ClassicToken
#
############################################################################
class CommonToken(Token):
    """@brief Basic token implementation.

    Instead of copying the text out of the input stream at creation time,
    this implementation records start/stop indexes into the stream and
    extracts the text lazily (unless an explicit text override was set).
    """

    def __init__(self, type=None, channel=DEFAULT_CHANNEL, text=None,
                 input=None, start=None, stop=None, oldToken=None):
        Token.__init__(self)

        if oldToken is not None:
            # copy constructor: clone every attribute of oldToken
            self.type = oldToken.type
            self.line = oldToken.line
            self.charPositionInLine = oldToken.charPositionInLine
            self.channel = oldToken.channel
            self.index = oldToken.index
            self._text = oldToken._text
            if isinstance(oldToken, CommonToken):
                # only CommonTokens carry stream/start/stop info
                self.input = oldToken.input
                self.start = oldToken.start
                self.stop = oldToken.stop

        else:
            self.type = type
            self.input = input
            self.charPositionInLine = -1 # set to invalid position
            self.line = 0
            self.channel = channel
            # index of this token in the stream, 0..n-1; < 0 means invalid
            self.index = -1
            # explicit text override; when not None, getText returns this
            # instead of slicing start..stop out of the input stream
            self._text = text
            # index of the token's first char in the input buffer
            self.start = start
            # index of the token's *last* char in the input buffer
            # (inclusive, *not* one past the end!)
            self.stop = stop

    def getText(self):
        if self._text is not None:
            # explicit override wins
            return self._text

        if self.input is None:
            return None

        # lazily slice the text out of the underlying char stream
        return self.input.substring(self.start, self.stop)

    def setText(self, text):
        """
        Override the text returned by getText().  The start/stop indexes
        stay valid; only the lazily computed text is replaced.
        """
        self._text = text

    text = property(getText, setText)

    def getType(self):
        return self.type

    def setType(self, ttype):
        self.type = ttype

    def getLine(self):
        return self.line

    def setLine(self, line):
        self.line = line

    def getCharPositionInLine(self):
        return self.charPositionInLine

    def setCharPositionInLine(self, pos):
        self.charPositionInLine = pos

    def getChannel(self):
        return self.channel

    def setChannel(self, channel):
        self.channel = channel

    def getTokenIndex(self):
        return self.index

    def setTokenIndex(self, index):
        self.index = index

    def getInputStream(self):
        return self.input

    def setInputStream(self, input):
        self.input = input

    def __str__(self):
        if self.type == EOF:
            return "<EOF>"

        channelStr = ""
        if self.channel > 0:
            channelStr = ",channel=" + str(self.channel)

        txt = self.text
        if txt is None:
            txt = "<no text>"
        else:
            # escape whitespace so the token dump stays on one line
            txt = txt.replace("\n", "\\\\n")
            txt = txt.replace("\r", "\\\\r")
            txt = txt.replace("\t", "\\\\t")

        return "[@%d,%d:%d=%r,<%d>%s,%d:%d]" % (
            self.index, self.start, self.stop, txt,
            self.type, channelStr, self.line, self.charPositionInLine)
class ClassicToken(Token):
    """@brief Alternative token implementation.

    A Token object like we'd use in ANTLR 2.x; has an actual string created
    and associated with this object.  These objects are needed for imaginary
    tree nodes that have payload objects.  We need to create a Token object
    that has a string; the tree node will point at this token.  CommonToken
    has indexes into a char stream and hence cannot be used to introduce
    new strings.
    """

    def __init__(self, type=None, text=None, channel=DEFAULT_CHANNEL,
                 oldToken=None
                 ):
        """
        @param type the token type
        @param text the token's text (owned by the token itself)
        @param channel the token channel
        @param oldToken copy constructor source; when given, its attributes
            are cloned and the other arguments are ignored
        """
        Token.__init__(self)

        if oldToken is not None:
            # copy constructor: clone the attributes of oldToken.
            # Bugfix: these copies used to be unconditionally overwritten
            # by the default type/text/... assignments below (missing
            # 'else'), so ClassicToken(oldToken=t) lost all of t's values.
            # Now mirrors the if/else structure of CommonToken.__init__.
            self.text = oldToken.text
            self.type = oldToken.type
            self.line = oldToken.line
            self.charPositionInLine = oldToken.charPositionInLine
            self.channel = oldToken.channel
        else:
            self.text = text
            self.type = type
            self.line = None
            self.charPositionInLine = None
            self.channel = channel

        # index in the token stream; never tracked for classic tokens
        self.index = None

    def getText(self):
        return self.text

    def setText(self, text):
        self.text = text

    def getType(self):
        return self.type

    def setType(self, ttype):
        self.type = ttype

    def getLine(self):
        return self.line

    def setLine(self, line):
        self.line = line

    def getCharPositionInLine(self):
        return self.charPositionInLine

    def setCharPositionInLine(self, pos):
        self.charPositionInLine = pos

    def getChannel(self):
        return self.channel

    def setChannel(self, channel):
        self.channel = channel

    def getTokenIndex(self):
        return self.index

    def setTokenIndex(self, index):
        self.index = index

    def getInputStream(self):
        # classic tokens own their text; there is no backing char stream
        return None

    def setInputStream(self, input):
        pass

    def toString(self):
        channelStr = ""
        if self.channel > 0:
            channelStr = ",channel=" + str(self.channel)

        txt = self.text
        if txt is None:
            txt = "<no text>"

        return "[@%r,%r,<%r>%s,%r:%r]" % (self.index,
                                          txt,
                                          self.type,
                                          channelStr,
                                          self.line,
                                          self.charPositionInLine
                                          )

    __str__ = toString
    __repr__ = toString
# Shared singleton returned by buffered token streams at end of input.
EOF_TOKEN = CommonToken(type=EOF)
# Placeholder token with no valid type.
INVALID_TOKEN = CommonToken(type=INVALID_TOKEN_TYPE)
# In an action, a lexer rule can set token to this SKIP_TOKEN and ANTLR
# will avoid creating a token for this symbol and try to fetch another.
SKIP_TOKEN = CommonToken(type=INVALID_TOKEN_TYPE)
| Python |
"""ANTLR3 runtime package"""
# begin[licence]
#
# [The "BSD licence"]
# Copyright (c) 2005-2008 Terence Parr
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# end[licence]
import codecs
from StringIO import StringIO
from antlr3.constants import DEFAULT_CHANNEL, EOF
from antlr3.tokens import Token, EOF_TOKEN
############################################################################
#
# basic interfaces
# IntStream
# +- CharStream
# \- TokenStream
#
# subclasses must implemented all methods
#
############################################################################
class IntStream(object):
    """
    @brief Base interface for streams of integer values.

    A simple stream of integers used when all I care about is the char
    or token type sequence (such as interpretation).
    """

    def consume(self):
        """Advance the stream by one symbol."""
        raise NotImplementedError

    def LA(self, i):
        """
        Get int at current input pointer + i ahead, where i=1 is the next
        int.  Negative indexes are allowed: LA(-1) is the previous symbol
        (just matched); LA(-i) reaching before the first symbol should
        yield -1 (invalid char / EOF).
        """
        raise NotImplementedError

    def mark(self):
        """
        Tell the stream to start buffering if it hasn't already, and
        return a marker (the current position, index(), or similar) that
        rewind() can later restore.  rewind(mark()) must not move the
        input cursor.  Lexer markers also capture line/col state, so they
        are not necessarily pure input indexes; same for tree node
        streams.
        """
        raise NotImplementedError

    def index(self):
        """
        Return the current input symbol index 0..n (n meaning everything
        has been read).  This is the index of the symbol about to be
        read, not of the most recently read one.
        """
        raise NotImplementedError

    def rewind(self, marker=None):
        """
        Reset the stream so that the next index() would return marker.
        The marker is usually, but not necessarily, a value previously
        returned by mark(); any markers created after it must be unrolled
        like a stack, restoring the state the stream had when the marker
        was created.

        When marker is None, rewind to the most recent marker *without*
        popping it off the mark stack -- mark(i)/rewind(i) must still
        balance.  This form is used after a cyclic DFA and just before a
        sem/syn predicate, to return to the start of the decision; it is
        effectively seek(last marker's input position).
        """
        raise NotImplementedError

    def release(self, marker=None):
        """
        Commit to a backtrack without the backward seek: behaves like
        rewind() but only frees the bookkeeping resources.  All markers
        nested inside marker are released too -- release(2) while nested
        five mark()s deep frees depths 2..5.
        """
        raise NotImplementedError

    def seek(self, index):
        """
        Set the input cursor to the 0-based position index, so that LA(1)
        returns the symbol at that position (seek(0) -> first element).
        Unlike rewind, this is multi-directional and takes a plain input
        cursor.  Seeking forward must update stream state such as the
        line number; seeking backwards is presumed to happen under
        mark/rewind, which restores state itself.  Currently used for
        memoized backtracking; may serve incremental parsing later.
        """
        raise NotImplementedError

    def size(self):
        """
        Return the number of symbols in the stream, including a single
        EOF.  Mostly meaningful for fully buffering streams, display, and
        testing.
        """
        raise NotImplementedError

    def getSourceName(self):
        """
        Return where the symbols come from; implementations normally pass
        the buck down to the lexer's input stream for a file name or
        whatever.
        """
        raise NotImplementedError
class CharStream(IntStream):
    """
    @brief A source of characters for an ANTLR lexer.

    This is an abstract class that must be implemented by a subclass.
    """

    # pylint does not realize that this is an interface, too
    #pylint: disable-msg=W0223

    # sentinel value returned at end of input
    EOF = -1

    def substring(self, start, stop):
        """
        Return the text between two character indexes.  Infinite streams
        don't need this; it primarily serves action code, so only call it
        on streams that support it.
        """
        raise NotImplementedError

    def LT(self, i):
        """
        Get the i'th character of lookahead -- usually the same as LA(i).
        Used for labels in generated lexer code.  Returning an int (rather
        than a char) keeps this 32-bit clean and consistent with LA.
        """
        raise NotImplementedError

    def getLine(self):
        """ANTLR tracks the line information automatically."""
        raise NotImplementedError

    def setLine(self, line):
        """
        Because this stream can rewind, we need to be able to reset the
        line.
        """
        raise NotImplementedError

    def getCharPositionInLine(self):
        """
        The 0-based index of the character relative to the beginning of
        its line.
        """
        raise NotImplementedError

    def setCharPositionInLine(self, pos):
        """Reset the column; needed because the stream can rewind."""
        raise NotImplementedError
class TokenStream(IntStream):
    """
    @brief A stream of tokens accessing tokens from a TokenSource

    This is an abstract class that must be implemented by a subclass.
    """

    # pylint does not realize that this is an interface, too
    #pylint: disable-msg=W0223

    def LT(self, k):
        """
        Get the Token at current input pointer + k ahead, where k=1 is
        the next Token.  k<0 looks into the past: LT(-1) is the previous
        token, LT(-2) the one before that.  LT(0) is undefined.  For
        k>=n return the EOF token; return None for LT(0) and any index
        that resolves to a negative absolute address.
        """
        raise NotImplementedError

    def get(self, i):
        """
        Get the token at absolute index i, 0..n-1.  Really only needed
        for profiling, debugging and token stream rewriting; unbuffered
        streams need not support it (which also rules out the rewrite
        stream feature for them).  I believe DebugTokenStream can easily
        be altered to not use this method, removing the dependency.
        """
        raise NotImplementedError

    def getTokenSource(self):
        """
        Return the object (not the name) this stream pulls its Token
        objects from.
        """
        raise NotImplementedError

    def toString(self, start=None, stop=None):
        """
        Return the text of all tokens from start to stop, inclusive.
        Streams that don't buffer every token may just return "" or None
        (users must then not touch $ruleLabel.text in actions).  Since
        tokens aren't required to carry an index, start/stop may also be
        the two token objects themselves, usually delegating to the
        index-based form; this parallels
        TreeNodeStream.toString(Object,Object).
        """
        raise NotImplementedError
############################################################################
#
# character streams for use in lexers
# CharStream
# \- ANTLRStringStream
#
############################################################################
class ANTLRStringStream(CharStream):
    """
    @brief CharStream that pull data from a unicode string.

    A pretty quick CharStream that pulls all data from an array
    directly. Every method call counts in the lexer.
    """

    def __init__(self, data):
        """
        @param data This should be a unicode string holding the data you want
           to parse. If you pass in a byte string, the Lexer will choke on
           non-ascii data.
        """
        CharStream.__init__(self)

        # The data being scanned
        self.strdata = unicode(data)
        # parallel list of character codes; LA() works on ints
        self.data = [ord(c) for c in self.strdata]

        # How many characters are actually in the buffer
        self.n = len(data)

        # 0..n-1 index into string of next char
        self.p = 0

        # line number 1..n within the input
        self.line = 1

        # The index of the character relative to the beginning of the
        # line 0..n-1
        self.charPositionInLine = 0

        # A list of CharStreamState objects that tracks the stream state
        # values line, charPositionInLine, and p that can change as you
        # move through the input stream. Indexed from 0..markDepth-1.
        self._markers = [ ]
        self.lastMarker = None
        self.markDepth = 0

        # What is name or source of this char stream?
        self.name = None

    def reset(self):
        """
        Reset the stream so that it's in the same state it was
        when the object was created *except* the data array is not
        touched.

        NOTE(review): lastMarker and markDepth are left untouched here --
        confirm before relying on reset() while marks are outstanding.
        """
        self.p = 0
        self.line = 1
        self.charPositionInLine = 0
        self._markers = [ ]

    def consume(self):
        """Advance one character, maintaining line/column bookkeeping."""
        try:
            if self.data[self.p] == 10: # \n
                self.line += 1
                self.charPositionInLine = 0
            else:
                self.charPositionInLine += 1

            self.p += 1

        except IndexError:
            # happens when we reached EOF and self.data[self.p] fails;
            # just do nothing
            pass

    def LA(self, i):
        """Return the character code i symbols ahead (1-based), or EOF."""
        if i == 0:
            return 0 # undefined

        if i < 0:
            i += 1 # e.g., translate LA(-1) to use offset i=0; then data[p+0-1]

        try:
            return self.data[self.p+i-1]
        except IndexError:
            return EOF

    def LT(self, i):
        """Like LA(), but returns the character itself rather than its code."""
        if i == 0:
            return 0 # undefined

        if i < 0:
            i += 1 # e.g., translate LA(-1) to use offset i=0; then data[p+0-1]

        try:
            return self.strdata[self.p+i-1]
        except IndexError:
            return EOF

    def index(self):
        """
        Return the current input symbol index 0..n where n indicates the
        last symbol has been read. The index is the index of char to
        be returned from LA(1).
        """
        return self.p

    def size(self):
        """Return the total number of characters in the buffer."""
        return self.n

    def mark(self):
        """Record the current stream state; returns a 1-based marker."""
        state = (self.p, self.line, self.charPositionInLine)

        # reuse an existing slot if possible, else grow the marker stack
        try:
            self._markers[self.markDepth] = state
        except IndexError:
            self._markers.append(state)
        self.markDepth += 1

        self.lastMarker = self.markDepth

        return self.lastMarker

    def rewind(self, marker=None):
        """Restore the state captured by marker (default: last mark) and
        release it."""
        if marker is None:
            marker = self.lastMarker

        p, line, charPositionInLine = self._markers[marker-1]

        self.seek(p)
        self.line = line
        self.charPositionInLine = charPositionInLine
        self.release(marker)

    def release(self, marker=None):
        """Pop marker (default: last mark) and every marker nested inside
        it, without moving the cursor."""
        if marker is None:
            marker = self.lastMarker

        self.markDepth = marker-1

    def seek(self, index):
        """
        consume() ahead until p==index; can't just set p=index as we must
        update line and charPositionInLine.
        """
        if index <= self.p:
            self.p = index # just jump; don't update stream state (line, ...)
            return

        # seek forward, consume until p hits index
        while self.p < index:
            self.consume()

    def substring(self, start, stop):
        """Return the text from character start to stop, both inclusive."""
        return self.strdata[start:stop+1]

    def getLine(self):
        """Using setter/getter methods is deprecated. Use o.line instead."""
        return self.line

    def getCharPositionInLine(self):
        """
        Using setter/getter methods is deprecated. Use o.charPositionInLine
        instead.
        """
        return self.charPositionInLine

    def setLine(self, line):
        """Using setter/getter methods is deprecated. Use o.line instead."""
        self.line = line

    def setCharPositionInLine(self, pos):
        """
        Using setter/getter methods is deprecated. Use o.charPositionInLine
        instead.
        """
        self.charPositionInLine = pos

    def getSourceName(self):
        """Return the stream name (None unless set by the creator)."""
        return self.name
class ANTLRFileStream(ANTLRStringStream):
    """
    @brief CharStream that opens a file to read the data.

    The whole file is read into memory when the object is constructed.
    """

    def __init__(self, fileName, encoding=None):
        """
        @param fileName Path of the file to load; opened with mode 'rb'.
        @param encoding Optional codec name used to decode the raw bytes
            on the fly.
        """
        self.fileName = fileName

        # try/finally rather than 'with' to stay friendly to old Pythons
        fp = codecs.open(fileName, 'rb', encoding)
        try:
            data = fp.read()
        finally:
            fp.close()

        ANTLRStringStream.__init__(self, data)

    def getSourceName(self):
        """Deprecated, access o.fileName directly."""
        return self.fileName
class ANTLRInputStream(ANTLRStringStream):
    """
    @brief CharStream that reads data from a file-like object.

    The entire content is consumed from the file object at construction
    time; the file itself is left open.
    """

    def __init__(self, file, encoding=None):
        """
        @param file A file-like object holding your input; only read()
            is required.
        @param encoding Optional codec name used to decode the data on
            the fly.
        """
        if encoding is not None:
            # wrap the file object in a decoding StreamReader
            decoder = codecs.lookup(encoding)[2]
            file = decoder(file)

        ANTLRStringStream.__init__(self, file.read())
# I guess the ANTLR prefix exists only to avoid a name clash with some Java
# mumbojumbo. A plain "StringStream" looks better to me, which should be
# the preferred name in Python.  These aliases are the public names.
StringStream = ANTLRStringStream
FileStream = ANTLRFileStream
InputStream = ANTLRInputStream
############################################################################
#
# Token streams
# TokenStream
# +- CommonTokenStream
# \- TokenRewriteStream
#
############################################################################
class CommonTokenStream(TokenStream):
"""
@brief The most common stream of tokens
The most common stream of tokens is one where every token is buffered up
and tokens are prefiltered for a certain channel (the parser will only
see these tokens and cannot change the filter channel number during the
parse).
"""
    def __init__(self, tokenSource=None, channel=DEFAULT_CHANNEL):
        """
        @param tokenSource A TokenSource instance (usually a Lexer) to pull
            the tokens from.

        @param channel Skip tokens on any channel but this one; this is how we
            skip whitespace...
        """
        TokenStream.__init__(self)
        self.tokenSource = tokenSource
        # Record every single token pulled from the source so we can reproduce
        # chunks of it later.
        self.tokens = []
        # Map<tokentype, channel> to override some Tokens' channel numbers
        self.channelOverrideMap = {}
        # Set<tokentype>; discard any tokens with this type
        self.discardSet = set()
        # Skip tokens on any channel but this one; this is how we skip whitespace...
        self.channel = channel
        # By default, track all incoming tokens
        self.discardOffChannelTokens = False
        # The index into the tokens list of the current token (next token
        # to consume).  p==-1 indicates that the tokens list is empty
        self.p = -1
        # Remember last marked position
        self.lastMarker = None

    def setTokenSource(self, tokenSource):
        """Reset this token stream by setting its token source.

        Empties the buffer; note that the channel filter is also reset
        back to the default channel.
        """
        self.tokenSource = tokenSource
        self.tokens = []
        self.p = -1
        self.channel = DEFAULT_CHANNEL

    def reset(self):
        """Rewind to the first buffered token and forget the last mark;
        the buffered tokens themselves are kept."""
        self.p = 0
        self.lastMarker = None
    def fillBuffer(self):
        """
        Load all tokens from the token source and put in tokens.
        This is done upon first LT request because you might want to
        set some token type / channel overrides before filling buffer.

        Tokens in discardSet -- and off-channel tokens when
        discardOffChannelTokens is set -- are dropped entirely; the
        surviving tokens are re-indexed consecutively.
        """
        index = 0
        t = self.tokenSource.nextToken()
        while t is not None and t.type != EOF:
            discard = False
            if self.discardSet is not None and t.type in self.discardSet:
                discard = True
            elif self.discardOffChannelTokens and t.channel != self.channel:
                discard = True
            # is there a channel override for token type?
            try:
                overrideChannel = self.channelOverrideMap[t.type]
            except KeyError:
                # no override for this type
                pass
            else:
                # an override to another channel discards the token
                if overrideChannel == self.channel:
                    t.channel = overrideChannel
                else:
                    discard = True
            if not discard:
                t.index = index
                self.tokens.append(t)
                index += 1
            t = self.tokenSource.nextToken()
        # leave p pointing at first token on channel
        self.p = 0
        self.p = self.skipOffTokenChannels(self.p)
def consume(self):
"""
Move the input pointer to the next incoming token. The stream
must become active with LT(1) available. consume() simply
moves the input pointer so that LT(1) points at the next
input symbol. Consume at least one token.
Walk past any token not on the channel the parser is listening to.
"""
if self.p < len(self.tokens):
self.p += 1
self.p = self.skipOffTokenChannels(self.p) # leave p on valid token
def skipOffTokenChannels(self, i):
"""
Given a starting index, return the index of the first on-channel
token.
"""
try:
while self.tokens[i].channel != self.channel:
i += 1
except IndexError:
# hit the end of token stream
pass
return i
def skipOffTokenChannelsReverse(self, i):
while i >= 0 and self.tokens[i].channel != self.channel:
i -= 1
return i
    def setTokenTypeChannel(self, ttype, channel):
        """
        A simple filter mechanism whereby you can tell this token stream
        to force all tokens of type ttype to be on channel.  For example,
        when interpreting, we cannot exec actions so we need to tell
        the stream to force all WS and NEWLINE to be a different, ignored
        channel.  Takes effect when the buffer is filled.
        """
        self.channelOverrideMap[ttype] = channel

    def discardTokenType(self, ttype):
        """Drop every token of type ttype when the buffer is filled."""
        self.discardSet.add(ttype)
def getTokens(self, start=None, stop=None, types=None):
"""
Given a start and stop index, return a list of all tokens in
the token type set. Return None if no tokens were found. This
method looks at both on and off channel tokens.
"""
if self.p == -1:
self.fillBuffer()
if stop is None or stop >= len(self.tokens):
stop = len(self.tokens) - 1
if start is None or stop < 0:
start = 0
if start > stop:
return None
if isinstance(types, (int, long)):
# called with a single type, wrap into set
types = set([types])
filteredTokens = [
token for token in self.tokens[start:stop]
if types is None or token.type in types
]
if len(filteredTokens) == 0:
return None
return filteredTokens
def LT(self, k):
"""
Get the ith token from the current position 1..n where k=1 is the
first symbol of lookahead.
"""
if self.p == -1:
self.fillBuffer()
if k == 0:
return None
if k < 0:
return self.LB(-k)
i = self.p
n = 1
# find k good tokens
while n < k:
# skip off-channel tokens
i = self.skipOffTokenChannels(i+1) # leave p on valid token
n += 1
try:
return self.tokens[i]
except IndexError:
return EOF_TOKEN
def LB(self, k):
"""Look backwards k tokens on-channel tokens"""
if self.p == -1:
self.fillBuffer()
if k == 0:
return None
if self.p - k < 0:
return None
i = self.p
n = 1
# find k good tokens looking backwards
while n <= k:
# skip off-channel tokens
i = self.skipOffTokenChannelsReverse(i-1) # leave p on valid token
n += 1
if i < 0:
return None
return self.tokens[i]
def get(self, i):
"""
Return absolute token i; ignore which channel the tokens are on;
that is, count all tokens not just on-channel tokens.
"""
return self.tokens[i]
def LA(self, i):
return self.LT(i).type
def mark(self):
self.lastMarker = self.index()
return self.lastMarker
def release(self, marker=None):
# no resources to release
pass
def size(self):
return len(self.tokens)
def index(self):
return self.p
def rewind(self, marker=None):
if marker is None:
marker = self.lastMarker
self.seek(marker)
def seek(self, index):
self.p = index
def getTokenSource(self):
return self.tokenSource
def getSourceName(self):
return self.tokenSource.getSourceName()
def toString(self, start=None, stop=None):
if self.p == -1:
self.fillBuffer()
if start is None:
start = 0
elif not isinstance(start, int):
start = start.index
if stop is None:
stop = len(self.tokens) - 1
elif not isinstance(stop, int):
stop = stop.index
if stop >= len(self.tokens):
stop = len(self.tokens) - 1
return ''.join([t.text for t in self.tokens[start:stop+1]])
class RewriteOperation(object):
    """@brief Internal helper class.

    Base class for the lazy rewrite instructions queued against a
    token stream; subclasses override execute().
    """

    def __init__(self, stream, index, text):
        self.stream = stream
        self.index = index
        self.text = text

    def execute(self, buf):
        """Execute the rewrite operation by possibly adding to the buffer.
        Return the index of the next token to operate on.
        """
        return self.index

    def toString(self):
        return '<%s@%d:"%s">' % (
            self.__class__.__name__, self.index, self.text)

    __str__ = toString
    __repr__ = toString
class InsertBeforeOp(RewriteOperation):
    """@brief Internal helper class.

    Writes its insertion text, then the original token at self.index.
    """

    def execute(self, buf):
        token = self.stream.tokens[self.index]
        buf.write(self.text)
        buf.write(token.text)
        return self.index + 1
class ReplaceOp(RewriteOperation):
    """
    @brief Internal helper class.

    Replaces the token range index..lastIndex with the given text
    (text may be None, in which case the range is simply skipped).
    """

    def __init__(self, stream, first, last, text):
        RewriteOperation.__init__(self, stream, first, text)
        self.lastIndex = last

    def execute(self, buf):
        if self.text is not None:
            buf.write(self.text)
        return self.lastIndex + 1

    def toString(self):
        return '<ReplaceOp@%d..%d:"%s">' % (
            self.index, self.lastIndex, self.text)

    __str__ = toString
    __repr__ = toString
class DeleteOp(ReplaceOp):
    """
    @brief Internal helper class.

    A delete is simply a replace with no replacement text.
    """

    def __init__(self, stream, first, last):
        ReplaceOp.__init__(self, stream, first, last, None)

    def toString(self):
        return '<DeleteOp@%d..%d>' % (self.index, self.lastIndex)

    __str__ = toString
    __repr__ = toString
class TokenRewriteStream(CommonTokenStream):
    """@brief CommonTokenStream that can be modified.

    Useful for dumping out the input stream after doing some
    augmentation or other manipulations.

    You can insert stuff, replace, and delete chunks. Note that the
    operations are done lazily--only if you convert the buffer to a
    String. This is very efficient because you are not moving data around
    all the time. As the buffer of tokens is converted to strings, the
    toString() method(s) check to see if there is an operation at the
    current index. If so, the operation is done and then normal String
    rendering continues on the buffer. This is like having multiple Turing
    machine instruction streams (programs) operating on a single input tape. :)

    Since the operations are done lazily at toString-time, operations do not
    screw up the token index values. That is, an insert operation at token
    index i does not change the index values for tokens i+1..n-1.

    Because operations never actually alter the buffer, you may always get
    the original token stream back without undoing anything. Since
    the instructions are queued up, you can easily simulate transactions and
    roll back any changes if there is an error just by removing instructions.

    For example,

     CharStream input = new ANTLRFileStream("input");
     TLexer lex = new TLexer(input);
     TokenRewriteStream tokens = new TokenRewriteStream(lex);
     T parser = new T(tokens);
     parser.startRule();

    Then in the rules, you can execute

      Token t,u;
      ...
      input.insertAfter(t, "text to put after t");}
      input.insertAfter(u, "text after u");}
      System.out.println(tokens.toString());

    Actually, you have to cast the 'input' to a TokenRewriteStream. :(

    You can also have multiple "instruction streams" and get multiple
    rewrites from a single pass over the input. Just name the instruction
    streams and use that name again when printing the buffer. This could be
    useful for generating a C file and also its header file--all from the
    same buffer:

      tokens.insertAfter("pass1", t, "text to put after t");}
      tokens.insertAfter("pass2", u, "text after u");}
      System.out.println(tokens.toString("pass1"));
      System.out.println(tokens.toString("pass2"));

    If you don't use named rewrite streams, a "default" stream is used as
    the first example shows.
    """

    DEFAULT_PROGRAM_NAME = "default"
    MIN_TOKEN_INDEX = 0

    def __init__(self, tokenSource=None, channel=DEFAULT_CHANNEL):
        CommonTokenStream.__init__(self, tokenSource, channel)

        # You may have multiple, named streams of rewrite operations.
        # I'm calling these things "programs."
        # Maps String (name) -> rewrite (List)
        self.programs = {}
        self.programs[self.DEFAULT_PROGRAM_NAME] = []

        # Map String (program name) -> Integer index
        self.lastRewriteTokenIndexes = {}

    def rollback(self, *args):
        """
        Rollback the instruction stream for a program so that
        the indicated instruction (via instructionIndex) is no
        longer in the stream. UNTESTED!

        Accepts (instructionIndex) or (programName, instructionIndex).
        """
        if len(args) == 2:
            programName = args[0]
            instructionIndex = args[1]
        elif len(args) == 1:
            programName = self.DEFAULT_PROGRAM_NAME
            instructionIndex = args[0]
        else:
            raise TypeError("Invalid arguments")

        p = self.programs.get(programName, None)
        if p is not None:
            self.programs[programName] = (
                p[self.MIN_TOKEN_INDEX:instructionIndex])

    def deleteProgram(self, programName=DEFAULT_PROGRAM_NAME):
        """Reset the program so that no instructions exist"""
        self.rollback(programName, self.MIN_TOKEN_INDEX)

    def insertAfter(self, *args):
        """Queue an insertion after a token or token index.

        Accepts (index, text) or (programName, index, text); index may
        be a Token, in which case its stream index is used.
        """
        if len(args) == 2:
            programName = self.DEFAULT_PROGRAM_NAME
            index = args[0]
            text = args[1]

        elif len(args) == 3:
            programName = args[0]
            index = args[1]
            text = args[2]

        else:
            raise TypeError("Invalid arguments")

        if isinstance(index, Token):
            # index is a Token, grab the stream index from it
            index = index.index

        # to insert after, just insert before next index (even if past end)
        self.insertBefore(programName, index+1, text)

    def insertBefore(self, *args):
        """Queue an insertion before a token or token index.

        Accepts (index, text) or (programName, index, text); index may
        be a Token, in which case its stream index is used.
        """
        if len(args) == 2:
            programName = self.DEFAULT_PROGRAM_NAME
            index = args[0]
            text = args[1]

        elif len(args) == 3:
            programName = args[0]
            index = args[1]
            text = args[2]

        else:
            raise TypeError("Invalid arguments")

        if isinstance(index, Token):
            # index is a Token, grab the stream index from it
            index = index.index

        op = InsertBeforeOp(self, index, text)
        rewrites = self.getProgram(programName)
        rewrites.append(op)

    def replace(self, *args):
        """Queue replacement of the token range first..last with text.

        Accepts (index, text), (first, last, text) or
        (programName, first, last, text); first/last may be Tokens.

        @raise ValueError if the resolved range is out of bounds.
        """
        if len(args) == 2:
            programName = self.DEFAULT_PROGRAM_NAME
            first = args[0]
            last = args[0]
            text = args[1]

        elif len(args) == 3:
            programName = self.DEFAULT_PROGRAM_NAME
            first = args[0]
            last = args[1]
            text = args[2]

        elif len(args) == 4:
            programName = args[0]
            first = args[1]
            last = args[2]
            text = args[3]

        else:
            raise TypeError("Invalid arguments")

        if isinstance(first, Token):
            # first is a Token, grab the stream index from it
            first = first.index

        if isinstance(last, Token):
            # last is a Token, grab the stream index from it
            last = last.index

        if first > last or first < 0 or last < 0 or last >= len(self.tokens):
            # bug fix: the old message concatenated str with int operands,
            # which raised TypeError instead of the intended ValueError
            raise ValueError(
                "replace: range invalid: %d..%d (size=%d)"
                % (first, last, len(self.tokens)))

        op = ReplaceOp(self, first, last, text)
        rewrites = self.getProgram(programName)
        rewrites.append(op)

    def delete(self, *args):
        """Queue deletion of a token range (a replace with no text)."""
        self.replace(*(list(args) + [None]))

    def getLastRewriteTokenIndex(self, programName=DEFAULT_PROGRAM_NAME):
        return self.lastRewriteTokenIndexes.get(programName, -1)

    def setLastRewriteTokenIndex(self, programName, i):
        self.lastRewriteTokenIndexes[programName] = i

    def getProgram(self, name):
        """Return the instruction list for name, creating it if needed."""
        p = self.programs.get(name, None)
        if p is None:
            p = self.initializeProgram(name)

        return p

    def initializeProgram(self, name):
        p = []
        self.programs[name] = p

        return p

    def toOriginalString(self, start=None, end=None):
        """Concatenate token text start..end, ignoring all rewrites."""
        if start is None:
            start = self.MIN_TOKEN_INDEX
        if end is None:
            end = self.size() - 1

        buf = StringIO()
        i = start
        while i >= self.MIN_TOKEN_INDEX and i <= end and i < len(self.tokens):
            buf.write(self.get(i).text)
            i += 1

        return buf.getvalue()

    def toString(self, *args):
        """Render the (possibly rewritten) token buffer as a string.

        Accepts (), (programName), (start, end) or
        (programName, start, end); start/end may be ints, Tokens or None.
        """
        if len(args) == 0:
            programName = self.DEFAULT_PROGRAM_NAME
            start = self.MIN_TOKEN_INDEX
            end = self.size() - 1

        elif len(args) == 1:
            programName = args[0]
            start = self.MIN_TOKEN_INDEX
            end = self.size() - 1

        elif len(args) == 2:
            programName = self.DEFAULT_PROGRAM_NAME
            start = args[0]
            end = args[1]

        elif len(args) == 3:
            # named program plus explicit range, matching the Java API
            programName = args[0]
            start = args[1]
            end = args[2]

        else:
            # bug fix: extra arguments previously fell through to a
            # NameError on the unbound locals below
            raise TypeError("Invalid arguments")

        if start is None:
            start = self.MIN_TOKEN_INDEX
        elif not isinstance(start, int):
            start = start.index

        if end is None:
            end = len(self.tokens) - 1
        elif not isinstance(end, int):
            end = end.index

        # ensure start/end are in range
        if end >= len(self.tokens):
            end = len(self.tokens) - 1

        if start < 0:
            start = 0

        rewrites = self.programs.get(programName)
        if rewrites is None or len(rewrites) == 0:
            # no instructions to execute
            return self.toOriginalString(start, end)

        buf = StringIO()

        # First, optimize instruction stream
        indexToOp = self.reduceToSingleOperationPerIndex(rewrites)

        # Walk buffer, executing instructions and emitting tokens
        i = start
        while i <= end and i < len(self.tokens):
            op = indexToOp.get(i)
            # remove so any left have index size-1
            try:
                del indexToOp[i]
            except KeyError:
                pass

            t = self.tokens[i]
            if op is None:
                # no operation at that index, just dump token
                buf.write(t.text)
                i += 1  # move to next token

            else:
                i = op.execute(buf)  # execute operation and skip

        # include stuff after end if it's last index in buffer
        # So, if they did an insertAfter(lastValidIndex, "foo"), include
        # foo if end==lastValidIndex.
        if end == len(self.tokens) - 1:
            # Scan any remaining operations after last token
            # should be included (they will be inserts).
            for i in sorted(indexToOp.keys()):
                op = indexToOp[i]
                if op.index >= len(self.tokens)-1:
                    buf.write(op.text)

        return buf.getvalue()

    __str__ = toString

    def reduceToSingleOperationPerIndex(self, rewrites):
        """
        We need to combine operations and report invalid operations (like
        overlapping replaces that are not completed nested). Inserts to
        same index need to be combined etc... Here are the cases:

        I.i.u I.j.v                onverlapping, nonoverlapping
        I.i.u I.i.v                combine: Iivu

        R.i-j.u R.x-y.v | i-j in x-y            delete first R
        R.i-j.u R.i-j.v                         delete first R
        R.i-j.u R.x-y.v | x-y in i-j            ERROR
        R.i-j.u R.x-y.v | boundaries overlap    ERROR

        I.i.u R.x-y.v   | i in x-y              delete I
        I.i.u R.x-y.v   | i not in x-y          leave alone, nonoverlapping
        R.x-y.v I.i.u   | i in x-y              ERROR
        R.x-y.v I.x.u                           R.x-y.uv (combine, delete I)
        R.x-y.v I.i.u   | i not in x-y          leave alone, nonoverlapping

        I.i.u = insert u before op @ index i
        R.x-y.u = replace x-y indexed tokens with u

        First we need to examine replaces. For any replace op:

          1. wipe out any insertions before op within that range.
          2. Drop any replace op before that is contained completely within
             that range.
          3. Throw exception upon boundary overlap with any previous replace.

        Then we can deal with inserts:

          1. for any inserts to same index, combine even if not adjacent.
          2. for any prior replace with same left boundary, combine this
             insert with replace and delete this replace.
          3. throw exception if index in same range as previous replace

        Don't actually delete; make op null in list. Easier to walk list.
        Later we can throw as we add to index -> op map.

        Note that I.2 R.2-2 will wipe out I.2 even though, technically, the
        inserted stuff would be before the replace range. But, if you
        add tokens in front of a method body '{' and then delete the method
        body, I think the stuff before the '{' you added should disappear too.

        Return a map from token index to operation.
        """
        # WALK REPLACES
        for i, rop in enumerate(rewrites):
            if rop is None:
                continue

            if not isinstance(rop, ReplaceOp):
                continue

            # Wipe prior inserts within range
            for j, iop in self.getKindOfOps(rewrites, InsertBeforeOp, i):
                if iop.index >= rop.index and iop.index <= rop.lastIndex:
                    rewrites[j] = None  # delete insert as it's a no-op.

            # Drop any prior replaces contained within
            for j, prevRop in self.getKindOfOps(rewrites, ReplaceOp, i):
                if (prevRop.index >= rop.index
                    and prevRop.lastIndex <= rop.lastIndex):
                    rewrites[j] = None  # delete replace as it's a no-op.
                    continue

                # throw exception unless disjoint or identical
                disjoint = (prevRop.lastIndex < rop.index
                            or prevRop.index > rop.lastIndex)
                same = (prevRop.index == rop.index
                        and prevRop.lastIndex == rop.lastIndex)
                if not disjoint and not same:
                    raise ValueError(
                        "replace op boundaries of %s overlap with previous %s"
                        % (rop, prevRop))

        # WALK INSERTS
        for i, iop in enumerate(rewrites):
            if iop is None:
                continue

            if not isinstance(iop, InsertBeforeOp):
                continue

            # combine current insert with prior if any at same index
            for j, prevIop in self.getKindOfOps(rewrites, InsertBeforeOp, i):
                if prevIop.index == iop.index:  # combine objects
                    # convert to strings...we're in process of toString'ing
                    # whole token buffer so no lazy eval issue with any
                    # templates
                    iop.text = self.catOpText(iop.text, prevIop.text)
                    rewrites[j] = None  # delete redundant prior insert

            # look for replaces where iop.index is in range; error
            for j, rop in self.getKindOfOps(rewrites, ReplaceOp, i):
                if iop.index == rop.index:
                    rop.text = self.catOpText(iop.text, rop.text)
                    rewrites[i] = None  # delete current insert
                    # NOTE(review): this 'continue' resumes the replace
                    # scan even though this insert was just deleted;
                    # preserved as-is from the original logic
                    continue

                if iop.index >= rop.index and iop.index <= rop.lastIndex:
                    raise ValueError(
                        "insert op %s within boundaries of previous %s"
                        % (iop, rop))

        m = {}
        for i, op in enumerate(rewrites):
            if op is None:
                continue  # ignore deleted ops

            assert op.index not in m, "should only be one op per index"
            m[op.index] = op

        return m

    def catOpText(self, a, b):
        """Concatenate two (possibly None) text fragments."""
        x = ""
        y = ""
        if a is not None:
            x = a
        if b is not None:
            y = b
        return x + y

    def getKindOfOps(self, rewrites, kind, before=None):
        """Yield (index, op) for ops of exactly class kind before index
        'before', skipping deleted (None) entries."""
        if before is None:
            before = len(rewrites)
        elif before > len(rewrites):
            before = len(rewrites)

        for i, op in enumerate(rewrites[:before]):
            if op is None:
                # ignore deleted
                continue
            if op.__class__ == kind:
                yield i, op

    def toDebugString(self, start=None, end=None):
        if start is None:
            start = self.MIN_TOKEN_INDEX
        if end is None:
            end = self.size() - 1

        buf = StringIO()
        i = start
        while i >= self.MIN_TOKEN_INDEX and i <= end and i < len(self.tokens):
            buf.write(self.get(i))
            i += 1

        return buf.getvalue()
| Python |
"""ANTLR3 runtime package"""
# begin[licence]
#
# [The "BSD licence"]
# Copyright (c) 2005-2008 Terence Parr
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# end[licence]
from antlr3.constants import EOF
from antlr3.exceptions import NoViableAltException, BacktrackingFailed
class DFA(object):
    """@brief A DFA implemented as a set of transition tables.

    Any state that has a semantic predicate edge is special; those states
    are generated with if-then-else structures in a specialStateTransition()
    which is generated by cyclicDFA template.
    """

    def __init__(
        self,
        recognizer, decisionNumber,
        eot, eof, min, max, accept, special, transition
        ):
        ## Which recognizer encloses this DFA? Needed to check backtracking
        self.recognizer = recognizer

        self.decisionNumber = decisionNumber
        # Per-state tables driving predict():
        self.eot = eot                # eot[s] >= 0: EOT transition target
        self.eof = eof                # eof[s] >= 0: EOF transition target
        self.min = min                # smallest symbol with a transition from s
        self.max = max                # largest symbol with a transition from s
        self.accept = accept          # accept[s] >= 1: predicted alternative
        self.special = special        # special[s] >= 0: predicated state id
        self.transition = transition  # transition[s][c - min[s]]: next state

    def predict(self, input):
        """
        From the input stream, predict what alternative will succeed
        using this DFA (representing the covering regular approximation
        to the underlying CFL). Return an alternative number 1..n. Throw
        an exception upon error.
        """
        mark = input.mark()
        s = 0 # we always start at s0
        try:
            # bounded iteration: a well-formed DFA must accept or report
            # failure long before this; the else clause below trips if not
            for _ in xrange(50000):
                #print "***Current state = %d" % s

                specialState = self.special[s]
                if specialState >= 0:
                    #print "is special"
                    s = self.specialStateTransition(specialState, input)
                    if s == -1:
                        self.noViableAlt(s, input)
                        return 0
                    input.consume()
                    continue

                if self.accept[s] >= 1:
                    #print "accept state for alt %d" % self.accept[s]
                    return self.accept[s]

                # look for a normal char transition
                c = input.LA(1)
                #print "LA = %d (%r)" % (c, unichr(c) if c >= 0 else 'EOF')
                #print "range = %d..%d" % (self.min[s], self.max[s])

                if c >= self.min[s] and c <= self.max[s]:
                    # move to next state
                    snext = self.transition[s][c-self.min[s]]
                    #print "in range, next state = %d" % snext

                    if snext < 0:
                        #print "not a normal transition"
                        # was in range but not a normal transition
                        # must check EOT, which is like the else clause.
                        # eot[s]>=0 indicates that an EOT edge goes to another
                        # state.
                        if self.eot[s] >= 0: # EOT Transition to accept state?
                            #print "EOT trans to accept state %d" % self.eot[s]
                            s = self.eot[s]
                            input.consume()
                            # TODO: I had this as return accept[eot[s]]
                            # which assumed here that the EOT edge always
                            # went to an accept...faster to do this, but
                            # what about predicated edges coming from EOT
                            # target?
                            continue

                        #print "no viable alt"
                        self.noViableAlt(s, input)
                        return 0

                    s = snext
                    input.consume()
                    continue

                if self.eot[s] >= 0:
                    #print "EOT to %d" % self.eot[s]
                    s = self.eot[s]
                    input.consume()
                    continue

                # EOF Transition to accept state?
                if c == EOF and self.eof[s] >= 0:
                    #print "EOF Transition to accept state %d" \
                    #  % self.accept[self.eof[s]]
                    return self.accept[self.eof[s]]

                # not in range and not EOF/EOT, must be invalid symbol
                self.noViableAlt(s, input)
                return 0

            else:
                raise RuntimeError("DFA bang!")

        finally:
            input.rewind(mark)

    def noViableAlt(self, s, input):
        """Report that no alternative is viable from DFA state s.

        When backtracking, fail cheaply via BacktrackingFailed; otherwise
        build a NoViableAltException, hand it to the error() hook, and
        raise it.
        """
        if self.recognizer._state.backtracking > 0:
            raise BacktrackingFailed

        nvae = NoViableAltException(
            self.getDescription(),
            self.decisionNumber,
            s,
            input
            )

        self.error(nvae)
        raise nvae

    def error(self, nvae):
        """A hook for debugging interface"""
        pass

    def specialStateTransition(self, s, input):
        # Overridden by generated code for DFAs with predicated states;
        # -1 means "no viable transition".
        return -1

    def getDescription(self):
        return "n/a"

##    def specialTransition(self, state, symbol):
##        return 0

    def unpack(cls, string):
        """@brief Unpack the runlength encoded table data.

        Terence implemented packed table initializers, because Java has a
        size restriction on .class files and the lookup tables can grow
        pretty large. The generated JavaLexer.java of the Java.g example
        would be about 15MB with uncompressed array initializers.

        Python does not have any size restrictions, but the compilation of
        such large source files seems to be pretty memory hungry. The memory
        consumption of the python process grew to >1.5GB when importing a
        15MB lexer, eating all my swap space and I was too impatient to see,
        if it could finish at all. With packed initializers that are unpacked
        at import time of the lexer module, everything works like a charm.
        """
        ret = []
        for i in range(len(string) / 2):
            # each (count, value) char pair expands to 'count' copies of 'value'
            (n, v) = ord(string[i*2]), ord(string[i*2+1])

            # Is there a bitwise operation to do this?
            if v == 0xFFFF:
                v = -1

            ret += [v] * n

        return ret

    unpack = classmethod(unpack)
| Python |
#!/usr/bin/env python
""" @namespace doxypy
doxypy is an input filter for Doxygen. It preprocesses python
files so that docstrings of classes and functions are extracted
as Doxygens special python documentation blocks. It can be found
at <http://code.foosel.net/doxypy>.
Copyright (C) 2006
Gina Haeussge (gina at foosel dot net),
Philippe Neumann (phil at foosel dot net)
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
""" @mainpage
@author Gina Haeussge
@author Philippe Neumann
"""
import sys
import re
from optparse import OptionParser, OptionGroup
def makeCommentBlock(commentLines, indent=""):
""" Converts the given $commentLines into a comment block.
@param commentLines The lines of the block comment
@param indent The indentation of the block
@return The indented doxygen comment block containing the
given comment lines.
"""
joinStr = "\n%s# " % indent
if options.strip:
commentLines = map(lambda x: x.strip(), commentLines)
lines = joinStr.join(commentLines)
return "%s##%s%s" % (indent, joinStr, lines)
def parse(input):
    """ Searches for def and class definitions in the source, then moves
        existing docstrings as special doxygen block comments in front of
        the definitions.

        @param   input  The input source to process
        @return  The processed input.
    """
    output = []

    # Comment delimiter of the docstring
    commentDelim = '"""'

    # Some regexes
    triggerRe = re.compile("^(\s*)(def .+:|class .+:)")
    commentStartRe = re.compile('^\s*(%s)' % commentDelim)
    commentEndRe = re.compile('(%s)\s*$' % commentDelim)
    emptyRe = re.compile("^\s*$")
    hashLineRe = re.compile("^\s*#.*$")
    importLineRe = re.compile("^\s*(import |from .+ import)")

    # split input into lines
    lines = input.split("\n")

    # flags, buffers, ...
    fileHeadFlag = True         # still scanning the file header
    triggerWordFlag = False     # saw a def/class, waiting for its docstring
    commentFlag = False         # inside a multiline docstring
    comment = []                # collected docstring lines
    triggerWs = ""              # indentation of the pending def/class
    triggerLines = None         # pending def/class lines, not yet emitted

    # process each line
    # NOTE: line is an (index, text) pair from enumerate; only line[1]
    # (the text) is ever used
    for line in enumerate(lines):
        match = re.search(triggerRe, line[1])
        if match:
            # new def/class found: flush any previous pending definition
            if triggerWordFlag and triggerLines:
                output.append("\n".join(triggerLines))
            triggerWordFlag = True
            triggerWs = match.group(1)
            fileHeadFlag = False
            triggerLines = [line[1]]
            continue

        # file header or active keyword trigger?
        if fileHeadFlag or triggerWordFlag:
            # comment end of multiline comment found
            if re.search(commentEndRe, line[1]) and commentFlag:
                comment.append( line[1][ : line[1].rfind(commentDelim) ] )
                # emit the doxygen block BEFORE the definition lines
                output.append(makeCommentBlock(comment, triggerWs))
                if triggerLines:
                    output.append("\n".join(triggerLines))

                # reset state for the next definition
                comment = []
                commentFlag = False
                triggerWs = ""
                triggerLines = None
                triggerWordFlag = False
            # comment start found
            elif re.search(commentStartRe, line[1]):
                if re.search(commentEndRe, line[1][line[1].find(commentDelim)+len(commentDelim) :]):
                    # singleline comment
                    comment.append(line[1][line[1].find(commentDelim)+len(commentDelim) : line[1].rfind(commentDelim)])

                    output.append(makeCommentBlock(comment, triggerWs))

                    if triggerLines:
                        output.append("\n".join(triggerLines))

                    comment = []
                    commentFlag = False
                    triggerWs = ""
                    triggerLines = None
                    triggerWordFlag = False
                else:
                    # multiline comment begin
                    commentFlag = True
                    comment.append(
                        line[1][line[1].find(commentDelim)+len(commentDelim):]
                    )

            # active multiline comment -> append comment
            elif commentFlag:
                comment.append(line[1])

            # still searching for comment
            elif re.search(emptyRe, line[1]):
                # blank line: keep it attached to the pending definition
                if triggerLines:
                    triggerLines.append(line[1])
                else:
                    output.append(line[1])

            # searching for file header
            elif fileHeadFlag:
                if not (re.search(hashLineRe, line[1]) or re.search(emptyRe, line[1]) or re.search(importLineRe, line[1])):
                    # fileheader over -> disable search
                    fileHeadFlag = False
                output.append(line[1])

            # no comment, disable comment search mode
            else:
                triggerWordFlag = False
                if triggerLines:
                    output.append("\n".join(triggerLines))
                triggerLines = None
                output.append(line[1])

        # just append the line
        else:
            output.append(line[1])

    # return output
    return "\n".join(output)
def loadFile(filename):
    """ Loads file $filename and returns the content.

        @param   filename  The name of the file to load
        @return  The content of the file.
    """
    handle = open(filename, 'r')
    try:
        # read everything; the finally clause guarantees the handle closes
        return handle.read()
    finally:
        handle.close()
def optParse():
    """parses commandline options

    Stores the parsed options in the module-level global 'options' and
    exits the process with status -1 when no filename argument is given.

    @return  The name of the file to process (first positional argument).
    """
    parser = OptionParser(prog="doxypy", version="%prog 0.2.1")

    parser.set_usage("%prog [options] filename")
    parser.add_option("--trim", "--strip",
        action="store_true", dest="strip",
        help="enables trimming of docstrings, might be useful if you get oddly spaced output"
    )

    ## parse options
    # parse_args returns (options, args); 'filename' here is the
    # positional-argument list, not a single string
    global options
    (options, filename) = parser.parse_args()

    if not filename:
        # Python 2 print-to-stream syntax
        print >>sys.stderr, "No filename given."
        sys.exit(-1)

    return filename[0]
def main():
    """ Opens the file given as first commandline argument and processes it,
        then prints out the processed file contents.

        Exits with status -1 if the file cannot be read.
    """
    filename = optParse()

    try:
        input = loadFile(filename)
    # Python 2 except syntax: unpacks the IOError args tuple
    except IOError, (errno, msg):
        print >>sys.stderr, msg
        sys.exit(-1)

    output = parse(input)
    print output
# Script entry point: filter the file named on the command line to stdout.
if __name__ == "__main__":
    main()
Subsets and Splits
SQL Console for ajibawa-2023/Python-Code-Large
Provides a useful breakdown of language distribution in the training data, showing which languages have the most samples and helping identify potential imbalances across different language groups.