code stringlengths 1 1.72M | language stringclasses 1 value |
|---|---|
# coding=utf-8
from django import forms
from django.db import models
from django.contrib.auth.models import User
from registration.forms import RegistrationForm
from datetime import date
# Letter-grade choices (FF = fail) and the range of selectable evaluation years.
conceitos = (('A', 'A'), ('B', 'B'), ('C', 'C'), ('D', 'D'), ('FF', 'FF'))
anos = range(1985, date.today().year + 1)
class Avaliacao(models.Model):
    """One student's evaluation of a professor for a course offering.

    The student is identified only by an irreversible tripcode hash
    (see forca.tripcode), never by username.
    """
    professor = models.ForeignKey("Professor", related_name="avaliacoes")
    aluno = models.CharField(max_length=28)  # tripcode hash of the student
    disciplina = models.ForeignKey("Disciplina", related_name="avaliacoes")
    # BUGFIX: choices was a generator expression, which is exhausted after its
    # first iteration, so validation/rendering would see an empty choice list
    # on every use after the first.  A list is reusable.  `max_length` is not
    # a valid option for integer fields and was dropped.
    ano = models.IntegerField(choices=[(ano, ano) for ano in anos])
    semestre = models.SmallIntegerField(choices=((1, 1), (2, 2)))
    # Grade + free-text comment for each evaluated aspect.
    nota_geral = models.CharField(max_length=2, choices=conceitos)
    cmnt_geral = models.TextField()
    nota_didatica = models.CharField(max_length=2, choices=conceitos)
    cmnt_didatica = models.TextField()
    nota_dinamica = models.CharField(max_length=2, choices=conceitos)
    cmnt_dinamica = models.TextField()
    nota_aval = models.CharField(max_length=2, choices=conceitos)
    cmnt_aval = models.TextField()
    nota_disp = models.CharField(max_length=2, choices=conceitos)
    cmnt_disp = models.TextField()
    # Optional reply from the professor, and a community score.
    resposta = models.TextField(blank=True, default="")
    karma = models.IntegerField(default=0)
    def __unicode__(self):
        return "%s - %s" % (self.disciplina.codigo, self.aluno)
class Professor(models.Model):
    """A professor that can be evaluated; optionally linked to a site account."""
    usuario = models.ForeignKey(User, blank=True, null=True, default="", related_name="professor")
    username = models.CharField(max_length=20)  # URL slug, see urls.py
    nome = models.CharField(max_length=256)  # you never know how long a name can get...
    def __unicode__(self):
        return self.nome
class Disciplina(models.Model):
    """A course, identified by its unique institutional code (e.g. INF01234)."""
    codigo = models.CharField(max_length=8, unique=True)
    nome = models.CharField(max_length=128)  # are there courses with longer names?
    def __unicode__(self):
        return self.nome
class PostAvaliacao(forms.ModelForm):
    """Form for posting an evaluation; hidden/derived fields are excluded
    because the view fills `aluno` itself (hashed username)."""
    class Meta:
        model = Avaliacao
        exclude = ('aluno', 'resposta', 'karma')
class RegistroII(RegistrationForm):
    """Registration form restricted to institutional e-mail addresses."""
    def clean_email(self):
        # EmailField validation has already run here, so the value contains '@'.
        if self.cleaned_data['email'].split('@')[1] != "inf.ufrgs.br":
            raise forms.ValidationError("Apenas e-mails @inf.ufrgs.br são válidos.")
        return self.cleaned_data['email']
#!/usr/bin/env python
# Standard Django 1.x manage.py bootstrap: import the project settings and
# dispatch the command line to Django's management machinery.
from django.core.management import execute_manager
try:
    import settings # Assumed to be in the same directory.
except ImportError:
    import sys
    sys.stderr.write("Error: Can't find the file 'settings.py' in the directory containing %r. It appears you've customized things.\nYou'll have to run django-admin.py, passing it your settings module.\n(If the file settings.py does indeed exist, it's causing an ImportError somehow.)\n" % __file__)
    sys.exit(1)
if __name__ == "__main__":
    execute_manager(settings)
| Python |
from django.conf.urls.defaults import *
from django.conf import settings
from django.contrib import admin
from forca.models import Professor, Disciplina, RegistroII
admin.autodiscover()
# Context dictionaries for the generic list views below.
prof = {'queryset': Professor.objects.all(),
        'template_name': "listaprof.html",
        'template_object_name': "lista_prof"}
disc = {'queryset': Disciplina.objects.all(),
        'template_name': "listadisc.html",
        'template_object_name': "lista_disc"}
registro = {'form_class': RegistroII, 'template_name': "registrar.html", 'success_url': "/registro/ok"}
# Admin site and its documentation.
urlpatterns = patterns('',
    (r'^admin/doc/', include('django.contrib.admindocs.urls')),
    (r'^admin/(.*)', admin.site.root),
)
# Generic views: landing page, registration confirmation, and the two lists.
urlpatterns += patterns('django.views.generic',
    (r'^$', 'simple.direct_to_template', {'template': 'index.html'}),
    (r'^registro/ok/$', 'simple.direct_to_template', {'template': 'registrar_ok.html'}),
    (r'^disciplina/$', 'list_detail.object_list', disc),
    (r'^professor/$', 'list_detail.object_list', prof),
)
# Application views; course codes are three letters followed by five digits.
urlpatterns += patterns('forca.views',
    (r'^avaliar/$', 'avaliacao'),
    (r'^disciplina/(?P<coddisc>(?:[A-Z][A-Z][A-Z]|[a-z][a-z][a-z])[0-9][0-9][0-9][0-9][0-9])/$', 'disciplina'),
    (r'^professor/(?P<nomeprof>[a-z]+)/$', 'professor'),
    #(r'^usuario/$', 'userpage', {'user': None}),
    #(r'^usuario/(?P<user>\w+)/$', 'userpage'),
)
urlpatterns += patterns('django.contrib.auth.views',
    (r'^login/$', 'login', {'template_name': 'login.html'}),
    (r'^logout/$', 'logout', {'template_name': 'index.html'}),
)
urlpatterns += patterns('registration.views',
    (r'^registro/$', 'register', registro),
    (r'^registro/ativar/$', 'activate', {'template_name': 'ativacao.html'}),
)
# Serve static files straight from Django in development only.
if settings.DEBUG:
    urlpatterns += patterns('', (r'^static/(?P<path>.*)$', 'django.views.static.serve', {'document_root': 'static/'}))
from hashlib import sha1
from base64 import b64encode
def tripcode(nome):
    """Return an anonymizing tripcode for *nome*.

    The code is the base64 encoding of SHA-1(nome + fixed salt); it is stable
    for a given username but not reversible.
    """
    # BUGFIX: hashlib digests require bytes; encoding the salted name makes
    # this work on Python 3 as well (and is a no-op change on Python 2).
    return b64encode(sha1((nome + ". NOT!").encode("utf-8")).digest())
# coding = utf-8
from django.shortcuts import render_to_response, get_object_or_404
from django.http import HttpResponseRedirect
from django.template import RequestContext as RC
from django.contrib.auth.decorators import login_required
from forca.models import *
from forca.tripcode import tripcode
def disciplina(request, coddisc):
    """List every evaluation recorded for the course whose code is `coddisc`.

    A generic view could do this, but the upper-casing makes it simpler here.
    """
    course = get_object_or_404(Disciplina, codigo=coddisc.upper())
    evaluations = Avaliacao.objects.filter(disciplina=course)
    return render_to_response(
        'aval_disc.html',
        {'disc': course, 'lista_aval': evaluations},
        context_instance=RC(request))
def professor(request, nomeprof):
    """List every evaluation recorded for the professor slugged `nomeprof`."""
    teacher = get_object_or_404(Professor, username=nomeprof)
    evaluations = Avaliacao.objects.filter(professor=teacher)
    return render_to_response(
        'aval_prof.html',
        {'prof': teacher, 'lista_aval': evaluations},
        context_instance=RC(request))
@login_required
def avaliacao(request):
    """Create a new evaluation; the logged-in student is stored as a tripcode."""
    if request.method == 'POST':
        form = PostAvaliacao(request.POST)
        try:
            aval = form.save(commit=False) # don't hit the database yet, one field is missing
            aval.aluno = tripcode(request.user.username) # now it's complete
            aval.save()
            return HttpResponseRedirect('/') # is there a better place to redirect to?
        except ValueError:
            # form.save() raises ValueError when the form is invalid; fall
            # through and re-render the bound form so its errors are shown.
            pass
    else:
        form = PostAvaliacao()
    return render_to_response('postar.html', {'form': form}, context_instance=RC(request))
from forca.models import *
from django.contrib import admin
# Expose the application's models in the Django admin site.
admin.site.register(Disciplina)
admin.site.register(Professor)
admin.site.register(Avaliacao)
#!/usr/bin/env python
# Standard Django 1.x manage.py bootstrap: import the project settings and
# dispatch the command line to Django's management machinery.
from django.core.management import execute_manager
try:
    import settings # Assumed to be in the same directory.
except ImportError:
    import sys
    sys.stderr.write("Error: Can't find the file 'settings.py' in the directory containing %r. It appears you've customized things.\nYou'll have to run django-admin.py, passing it your settings module.\n(If the file settings.py does indeed exist, it's causing an ImportError somehow.)\n" % __file__)
    sys.exit(1)
if __name__ == "__main__":
    execute_manager(settings)
| Python |
# find_missing_files_and_cleanup.py
#
# author: xtraeme
# date: 2014/09/18
#
# In addition to finding missing and misnamed files (typically "Page 1.jpg," often misnamed
# "age1.jpg", because of how the system code navigates to a new directory --
# https://code.google.com/p/footnotereap/issues/detail?id=6#c8 ). The script also makes it
# easier to locate out of sequence files and files that are missing between a set of two
# pages (e.g. "Page 2.jpg" and "Page 4.jpg" exist, but "Page 3.jpg" doesn't.)
#
# To use the script set the directory to where you store the fold3 content and fill in the
# path to explorer.exe, dopus, total commander, or whatever you use.
#
# For more serious cleanup jobs, you'll want to set:
# cleaningup = True
#
# This instructs the script to open a file explorer window to the problem directory and
# after that spawns a webbrowser window to the case id. This makes it easier to check
# whether the content is valid or if the content needs to be redownloaded.
#
import os
import ntpath
import re
import bisect
import webbrowser
from subprocess import call
# Local configuration: data root, file-manager binary, and case URL prefix.
path_to_fold3 = 'G:/F/Media/__By Subject/Speculative/UFOs/Media/Websites/footnote.com/'
path_to_fileexplorer = "C:\\app\\system\\Directory Opus\\dopus.exe"
base_fold3_uri = 'http://www.fold3.com/image/1/'
#Set this to true if you want to actively find missing and misnamed files
#IMPORTANT: Set breakpoints on line 57 and 75
cleaningup = False
def launch_explorer_browser(fold3id, dospath):
    """In cleanup mode, open the Fold3 case page in a browser and a file
    manager window at the problem directory; otherwise do nothing."""
    if not cleaningup:
        return
    webbrowser.open(base_fold3_uri + fold3id)
    call([path_to_fileexplorer, dospath])
rootdir = path_to_fold3
# "page N.jpg" -> captures the page number N.
token_file_re = re.compile(r'page (\d+)\.jpg', re.IGNORECASE)
# Case directory name, e.g. "/XXXX.YY - 1234" -> captures the case id.
token_dir_re = re.compile(r'\/.{4}\..{2} \- (\d+)', re.IGNORECASE)
rootdir_len = len(rootdir)
# NOTE(review): indentation below is reconstructed from a whitespace-mangled
# source; the "last file" block is placed at loop-body level, matching the
# structure of the companion CSLPZ script -- confirm against the original.
for subdir, dirs, files in os.walk(rootdir):
    d = token_dir_re.search(subdir)
    if d is not None:
        dospath = os.path.normpath(subdir) #ntpath.splitdrive(subdir)
        l = []  # sorted list of page numbers seen so far
        files_maxindex = len(files)-1
        for file in files:
            f = token_file_re.search(file)
            if f is None:
                # Misnamed file (e.g. "age1.jpg") -- report it.
                print os.path.join(subdir[rootdir_len:], file)
                launch_explorer_browser(d.groups()[0], dospath)
                pass # IMPORTANT: ADD BREAKPOINT HERE IF YOU ENABLE: cleaningup = True
            else:
                pagenum = f.groups()[0]
                # NOTE(review): `position` is never used; bisect() here
                # compares a str against ints (py2-only behavior).
                position = bisect.bisect(l, pagenum)
                bisect.insort(l, int(pagenum))
            # After the directory's last file, check for gaps in the sequence.
            if files[files_maxindex] == file:
                l_maxindex = len(l)-1
                for x in range(0, files_maxindex):
                    if x < l_maxindex:
                        # page_error mixes an int sentinel (-1) with str
                        # values; works, but fragile.
                        page_error = -1
                        if x == 0 and l[x] != 1:
                            page_error = str(1)
                        if int(l[x])+1 != int(l[x+1]):
                            page_error = str(l[x]+1)
                        if page_error != -1:
                            print subdir[rootdir_len:] + ": PAGE " + page_error + " IS MISSING!"
                            launch_explorer_browser(d.groups()[0], dospath)
                            pass # IMPORTANT: ADD BREAKPOINT HERE IF YOU ENABLE: cleaningup = True
| Python |
# create_sym_links_with_padded_zeros.py (CSLPZ)
#
# author: xtraeme
# date: 2014/09/20
#
# The Blue Book NARA pages adhere to the following naming convention and increment like so:
# Page 1, Page 2, ... Page 10, ..., Page 20, etc.
#
# Since the filenames don't have padded zeroes, navigating through the files using a normal
# image viewing tool like Irfanview results in Page 1 being shown first, Page 10 second,
# Page 11 third, -after seven more pages- Page 19, Page 2, and then Page 20.
#
# Ideally we would like to read the pages in their intended proper order. A naive solution
# is to just rename the files. However this breaks footnotereaper and requires resyncing all
# of the files in BitTorrent Sync.
#
# The best workaround I could think of was to create a symbolic link structure with the padded
# filenames all placed in an alternate, but identical directory tree parallel to the actual
# documents (by default the program uses the current directory name plus "- browse").
#
# So, in practice, if the base directory is "footnote.com" the sym-linked folder will be named
# "footnote.com - browse."
#
# To initialize the program, provide the path to your Fold3 working directory or to the
# specific case that you want see symlinked and padded (see line 78). For example,
#
# create_sym_links_with_padded_zeros.py [fold3 base directory]
#
# Note: The script only has to be run once to generate the symbolic link structure.
#
# Platform Notes:
# ---------------
# CSLPZ should work on Windows, Mac, and Linux. However I have only tested the script in
# Windows. So if something isn't working, check line 160 and debug the "ln -s" section.
import os
import sys
from sys import platform as _platform
import argparse
import ntpath
import re
import bisect
import subprocess
from subprocess import call
import math
class CSLPZParser(argparse.ArgumentParser):
def usage(self, msg):
self.print_help()
sys.exit(0)
def error(self, error, msg):
self.print_help()
print "\nError: " + msg
# print >>sys.stderr, globals()['__doc__']
# print >>sys.stderr, error
sys.exit(error)
#Static Path to Fold3
path_to_fold3 = '' #'G:/F/Media/__By Subject/Speculative/UFOs/Media/Websites/foonote.com'
text_desc = ("Creates a new parent directory with the base name plus '- browse' (ex. 'footnote.com' becomes 'footnote.com - browse') and "
             "sym-links the newly named files to the originals files. The largest page number (ex. Page 203) is used to pad smaller page "
             "values with the appropriate number of zeros (ex. 'Page 1' becomes 'Page 001').")
parser = CSLPZParser(description = text_desc)
parser.add_argument('srcpath', metavar = 'src-path', type=str, nargs='?', help='path to fold3 data directory (default: footnote.com)')
parser.add_argument('-d', '--dest', dest='dstpath', action='store', help='path to output link structure (default: ..)')
parser.add_argument('-q', '--quiet', action='store_true', help="quiet (no output)")
parser.add_argument('-v', '--verbose', action='count', default=0, help="increase output verbosity")
args = parser.parse_args()
# "Page NNN.xxx" -> captures the page number.
token_file_re = re.compile(r'page (\d+)\.\w{3}', re.IGNORECASE)
# Case directory, e.g. "XXXX.YY - 1234 ...".
# NOTE(review): the class [\/|\\] also matches a literal '|' -- likely meant [\/\\].
token_dir_re = re.compile(r'[\/|\\](.{4}\..{2} \- (\d+).*$)', re.IGNORECASE)
# Resolve the source directory: explicit argument, static default, or search
# the script's own directory for a case folder.
if(args.srcpath):
    if(os.path.isdir(args.srcpath)):
        path_to_fold3 = os.path.normpath(args.srcpath)
    else:
        parser.error(1, "Fold3 Path is invalid: " + args.srcpath)
else:
    if(not os.path.isdir(path_to_fold3)):
        cwddir = os.path.dirname(os.path.realpath(__file__))
        found_fold3_dir = False
        for subdir, dirs, files in os.walk(cwddir):
            d = token_dir_re.search(subdir)
            if d is not None:
                found_fold3_dir = True
                break
        if(found_fold3_dir):
            path_to_fold3 = cwddir
        else:
            parser.error(1, "No Fold3 Path found")
# Destination defaults to a sibling "<name> - browse" directory.
if(not args.dstpath):
    path_to_target = os.path.join(os.path.abspath(os.path.join(path_to_fold3, os.pardir)), os.path.basename(os.path.normpath(path_to_fold3)) + " - browse") #'G:/F/Media/__By Subject/Speculative/UFOs/Media/Websites/footnote.com - browse'
else:
    if(os.path.isdir(args.dstpath)):
        path_to_target = os.path.join(args.dstpath, os.path.basename(os.path.normpath(path_to_fold3)) + " - browse")
    else:
        parser.error(1, "Destination path (" + args.dstpath + ") is invalid")
rootdir = path_to_fold3
rootdir_len = len(rootdir)
# NOTE(review): indentation reconstructed from a whitespace-mangled source.
for subdir, dirs, files in os.walk(rootdir):
    d = token_dir_re.search(subdir)
    if d is not None:
        dospath = os.path.normpath(subdir) #ntpath.splitdrive(subdir)
        if(not args.quiet):
            print "Working on: " + dospath
        numlist = []  # sorted page numbers seen in this directory
        files_maxindex = len(files)-1
        targetpath = os.path.join(path_to_target, d.groups()[0])
        if not os.path.isdir(targetpath):
            os.makedirs(targetpath)
        # First pass: collect all page numbers so the padding width is known.
        for file in files:
            f = token_file_re.search(file)
            if f is not None:
                pagenum = f.groups()[0]
                # NOTE(review): `position` is unused; bisect() compares a str
                # against ints (py2-only behavior).
                position = bisect.bisect(numlist, pagenum)
                bisect.insort(numlist, int(pagenum))
            # After the last file, do the second pass: create padded links.
            if files[files_maxindex] == file:
                if f is not None:
                    l_maxindex = len(numlist)-1
                    #The ceiling of Log_10 (any number) will return the length of the number except for 10
                    # NOTE(review): this under-counts digits for exact powers
                    # of ten other than 10 (e.g. 100 -> 2, needs 3).
                    maxdigits = math.ceil(math.log10(numlist[l_maxindex]) if numlist[l_maxindex] != 10 else 2)
                    for file in files:
                        f = token_file_re.search(file)
                        if f is not None:
                            #Create padding
                            filenum = f.groups()[0] #filenum = (file[5:])[:-4]
                            newnum = '{s:{c}>{n}}'.format(s=filenum,n=int(maxdigits),c='0')
                            newfile = "Page "+ newnum + ".jpg"
                        else:
                            #We want to include all files even if they don't follow our search pattern
                            newfile = file
                        targetfile = os.path.normpath(os.path.join(targetpath, newfile)) #.replace(r"\\", r"\")
                        if(not os.path.exists(targetfile)):
                            origfile = os.path.normpath(os.path.join(subdir, file))
                            if _platform == "win32":
                                run = subprocess.Popen([r"mklink",
                                                        targetfile,
                                                        origfile],
                                                       shell = True,
                                                       stdout = subprocess.PIPE,
                                                       stderr = subprocess.PIPE)
                            else:
                                #NOTE: THIS IS UNTESTED!!!
                                run = subprocess.Popen([r"ln",
                                                        "-s",
                                                        origfile,
                                                        targetfile],
                                                       shell = True,
                                                       stdout = subprocess.PIPE,
                                                       stderr = subprocess.PIPE)
                            out,err = [e.splitlines() for e in run.communicate() ]
                            if(args.verbose):
                                for line in out:
                                    print line
                            if(not args.quiet):
                                for line in err:
                                    # NOTE(review): prints the whole err list
                                    # each iteration; probably meant `print line`.
                                    print err
| Python |
#!/usr/bin/python
import sys
import os
import string
import subprocess
import time
"""
Usage: rpcexec -n n_to_start -f [hostsfile] [program] [options]
To start local only: rpcexec [program] [options]
"""
def escape(s):
    """Backslash-escape double and single quotes so `s` can be embedded
    inside a shell command string."""
    # BUGFIX/portability: string.replace() was removed from the string module
    # in Python 3; the str method is equivalent and works on both 2 and 3.
    s = s.replace('"', '\\"')
    s = s.replace("'", "\\'")
    return s
#enddef
# gui: if xterm should run
# machines: a vector of all the machines
# port: a vector of the port number for ssh to connect to. must be same length as machines
# machineid: The machineid to generate
# prog: program to run
# opts: options for the program
def get_ssh_cmd(gui, machines, port, machineid, prog, opts):
    """Build the shell command that launches `prog opts` on machines[machineid].

    gui: wrap the remote command in an xterm and forward X over ssh.
    machines/port: parallel lists of hostnames and ssh ports (same length).
    Returns a single shell command string with SPAWNNODES/SPAWNID exported.
    """
    allmachines = '"' + ','.join(machines) + '"'
    # construct the command line
    cwd = os.getcwd()
    if (gui):
        sshcmd = 'ssh -X -Y -n -q '
    else:
        sshcmd = 'ssh -n -q '
    #endif
    guicmd = ''
    if (gui):
        guicmd = 'xterm -geometry 120x60 -e '
    #endif
    # BUGFIX: this function indexed machines/port with the module-level loop
    # variable `i` instead of the `machineid` parameter, so every call built
    # the command for whatever host the caller's loop happened to be on.
    if (machines[machineid] == "localhost" or machines[machineid].startswith("127.")):
        cmd = 'env SPAWNNODES=%s SPAWNID=%d %s %s' % (allmachines, machineid, prog, opts)
    elif (port[machineid] == 22):
        cmd = sshcmd + '%s "cd %s ; env SPAWNNODES=%s SPAWNID=%d %s %s %s"' % \
            (machines[machineid], escape(cwd), escape(allmachines), machineid, \
            guicmd, escape(prog), escape(opts))
    else:
        cmd = sshcmd + '-oPort=%d %s "cd %s ; env SPAWNNODES=%s SPAWNID=%d %s %s %s"' % \
            (port[machineid], machines[machineid], escape(cwd), escape(allmachines), \
            machineid, guicmd, escape(prog), escape(opts))
    #endif
    return cmd
#enddef
def get_screen_cmd(gui, machines, port, machineid, prog, opts):
    """Like get_ssh_cmd, but returns a one-element list with a command meant
    to be typed into a `screen` window: it exports the spawn environment,
    runs the program, and keeps an interactive shell alive afterwards."""
    allmachines = '"' + ','.join(machines) + '"'
    # construct the command line
    cwd = os.getcwd()
    sshcmd = 'ssh -t '
    guicmd = ''
    # BUGFIX: indexed machines/port with the module-level loop variable `i`
    # instead of the `machineid` parameter (same defect as get_ssh_cmd had).
    if (machines[machineid] == "localhost" or machines[machineid].startswith("127.")):
        cmd = ['export SPAWNNODES=%s SPAWNID=%d ; %s %s' % (allmachines, machineid, prog, opts)]
    elif (port[machineid] == 22):
        cmd = [sshcmd + '%s "cd %s ; export SPAWNNODES=%s SPAWNID=%d; %s %s %s ; bash -il"' % \
            (machines[machineid], escape(cwd), escape(allmachines), machineid, \
            guicmd, escape(prog), escape(opts))]
    else:
        cmd = [sshcmd + '-oPort=%d %s "cd %s ; export SPAWNNODES=%s SPAWNID=%d; %s %s %s ; bash -il"' % \
            (port[machineid], machines[machineid], escape(cwd), escape(allmachines), \
            machineid, guicmd, escape(prog), escape(opts))]
    #endif
    return cmd
#enddef
def shell_popen(cmd):
    """Echo `cmd` and spawn it through the shell; returns the Popen handle."""
    # Portability: print as a function call is valid (and identical for a
    # single argument) on both Python 2 and 3.
    print(cmd)
    return subprocess.Popen(cmd, shell=True)
#enddef
def shell_wait_native(cmd):
    """Echo `cmd`, run it through the shell, and block until it exits."""
    # Portability: print() works identically on Python 2 and 3 for one arg.
    print(cmd)
    pid = subprocess.Popen(cmd, shell=True)
    os.waitpid(pid.pid, 0)
    #time.sleep(0.5)
#enddef
# ---- hand-rolled command-line parsing (see usage string at file top) ----
nmachines = 0
hostsfile = ''
prog = ''
opts = ''
gui = 0
inscreen = 0
screenname = ''
printhelp = 0
i = 1
while(i < len(sys.argv)):
    if sys.argv[i] == '-h' or sys.argv[i] == '--help':
        printhelp = 1
        break
    elif sys.argv[i] == '-n':
        nmachines = int(sys.argv[i+1])
        i = i + 2
    elif sys.argv[i] == '-f':
        hostsfile = sys.argv[i+1]
        i = i + 2
    elif sys.argv[i] == '-g':
        gui = 1
        i = i + 1
    elif sys.argv[i] == '-s':
        inscreen = 1
        screenname = sys.argv[i+1]
        i = i + 2
    else:
        # First unrecognized token is the program; everything after is opts.
        prog = sys.argv[i]
        if (len(sys.argv) > i+1):
            opts = string.join(sys.argv[(i+1):])
        #endif
        break
    #endif
#endwhile
if inscreen and gui:
    print ("-s and -g are mutually exclusive")
    exit(0)
#endif
if (printhelp):
    print
    print("Usage: rpcexec -n [n_to_start] -f [hostsfile] [program] [options]")
    print("To start local only: rpcexec [program] [options]")
    print("Optional Arguments:")
    print("-g: Launch the command within Xterm on all machines. ")
    print("-s [screenname] : Launch a screen session and launch the")
    print("    commands in each window in each window. Any ssh connections")
    print("    are preserved on termination of the program with environment")
    print("    properly set up for subsequent executions")
    print("")
    print("Note: -s [screenname] and -g are mutually exclusive")
    exit(0)
#endif
# No -n/-f given: run locally only, as machine 0 of 1.
if (nmachines == 0 and hostsfile == ''):
    cmd = 'env SPAWNNODES=localhost SPAWNID=0 %s %s' % (prog, opts)
    p = shell_popen(cmd)
    os.waitpid(p.pid, 0)
    exit(0)
#endif
print('Starting ' + str(nmachines) + ' machines')
print('Hosts file: ' + hostsfile)
print('Command Line to run: ' + prog + ' ' + opts)
# open the hosts file and read the machines
try:
    f = open(hostsfile, 'r')
except:
    print
    print("Unable to open hosts file")
    print
    exit(0)
#endtry
# One host per line; "host:port" overrides the default ssh port 22.
machines = [''] * nmachines
port = [22] * nmachines
for i in range(nmachines):
    try:
        machines[i] = string.strip(f.readline())
        colonsplit = string.split(machines[i], ':')
        if (len(colonsplit) == 2):
            machines[i] = string.strip(colonsplit[0])
            port[i] = int(colonsplit[1])
        #endif
    except:
        print
        print("Unable to read line " + str(i+1) + " of hosts file")
        print
        exit(0)
#endfor
f.close()
# the commands to run to start for each node
cmd = [None] * nmachines
for i in range(nmachines):
    if (inscreen == 0):
        cmd[i] = get_ssh_cmd(gui, machines, port, i, prog, opts)
    else:
        cmd[i] = get_screen_cmd(gui, machines, port, i, prog, opts)
        print cmd[i]
    #endif
#endfor
if (inscreen == 0):
    # now issue the ssh commands
    procs = [None] * nmachines
    for i in range(nmachines):
        procs[i] = shell_popen(cmd[i])
    #endfor
    for i in range(nmachines):
        os.waitpid(procs[i].pid, 0)
    #endfor
else:
    # create a new empty screen with the screen name
    shell_wait_native("screen -h 10000 -d -m -S " + screenname)
    shell_wait_native("screen -h 10000 -x %s -p 0 -X title %s" % (screenname, machines[0][0:8]))
    # start a bunch of empty screens
    for i in range(nmachines - 1):
        shell_wait_native("screen -x %s -X screen -t %s" % (screenname, machines[i+1][0:8]))
    #endfor
    # set the titles in each one and run the program
    # we stripe it across windows so if there are ssh commands they will
    # have time to finish running first
    for j in range(2):
        for i in range(nmachines):
            if (len(cmd[i]) > j and cmd[i][j] != None):
                shell_wait_native("screen -x %s -p %d -X stuff %s" % (screenname, i, "'"+cmd[i][j]+"\n'"))
            #endif
        #endfor
    #endfor
#endif
| Python |
#!/usr/bin/python
import os
import sys
def update_source(filename, oldcopyright, copyright):
    """Prepend `copyright` to `filename`, first stripping `oldcopyright` if
    the file currently starts with it.  The file is rewritten only when the
    new header is not already present."""
    # BUGFIX/portability: the file() builtin was removed in Python 3 and the
    # handles were never closed; open() + `with` works on both 2 and 3.
    with open(filename, "r") as f:
        fdata = f.read()
    # If there was a previous copyright remove it
    if oldcopyright is not None and fdata.startswith(oldcopyright):
        fdata = fdata[len(oldcopyright):]
    # If the file does not start with the new copyright
    if not fdata.startswith(copyright):
        print(" updating: " + filename)
        fdata = copyright + fdata
        with open(filename, "w") as f:
            f.write(fdata)

def recursive_traversal(dir, oldcopyright, copyright):
    """Walk `dir` recursively, applying update_source to every C++ source
    file (.cpp/.hpp/.cxx)."""
    fns = os.listdir(dir)
    print("Processing directory: " + dir)
    for fn in fns:
        fullfn = os.path.join(dir, fn)
        if os.path.isdir(fullfn):
            recursive_traversal(fullfn, oldcopyright, copyright)
        elif (fullfn.endswith(".cpp") or fullfn.endswith(".hpp") or
                fullfn.endswith(".cxx")):
            update_source(fullfn, oldcopyright, copyright)
# CLI: <script> <old_copyright_file> <new_copyright_file> <source_tree>
# NOTE(review): file() is Python 2 only; open() is the portable spelling.
oldcright = file(sys.argv[1],"r+").read()
cright = file(sys.argv[2],"r+").read()
recursive_traversal(sys.argv[3], oldcright, cright)
exit()
| Python |
#!/usr/bin/python
import sys
import string
import subprocess
def left_child(index):
    """Index of the left child of `index` in an implicit binary heap."""
    return index * 2 + 1
def right_child(index):
    """Index of the right child of `index` in an implicit binary heap."""
    return index * 2 + 2
def escape_str(text):
    """Escape backslashes and double quotes one nesting level deeper, so
    `text` can be embedded inside a double-quoted shell string."""
    step = text.replace('\\', '\\\\')
    return step.replace('"', '\\"')
def do_send(src, dest, path):
    """Return a parenthesized rsync command copying path/src to dest:path."""
    return ' ( rsync -avz %s/%s %s:%s ) ' % (path, src, dest, path)
def make_str(names, a, b, index) :
    """Build a nested ssh command that fans data `a` out over the host tree
    rooted at names[index], treating `names` as an implicit binary heap.

    Each host rsyncs to its (up to) two children in parallel and waits, then
    recursively instructs the children to do the same; each recursion level
    is escaped once more so it survives the extra layer of shell quoting.
    """
    assert(left_child(index) < len(names));
    text = '( ssh ' + names[index]
    text = text + ' " hostname; ';
    text = text + \
        do_send(a, names[left_child(index)], b)
    if right_child(index) < len(names) :
        text = text + ' & ' + \
            do_send(a, names[right_child(index)], b) + \
            ' ; wait ; '
    if left_child(left_child(index)) < len(names) :
        text = text + \
            escape_str(make_str(names, a, b, left_child(index)))
    if left_child(right_child(index)) < len(names) :
        text = text + ' & ' + \
            escape_str(make_str(names, a, b, right_child(index))) + \
            ' ; wait ; '
    text = text + ' " ) '
    return text;
# CLI: <script> <hosts_file> <source_path> <dest_path>
host_filename = sys.argv[1]
source_path = sys.argv[2]
dest_path = sys.argv[3]
textfile = open(host_filename, "r")
lines = textfile.readlines()
machines = [ x.replace('\n', '') for x in lines]
print machines
# Seed the root machine directly, then fan out down the binary tree.
command = 'rsync -avz ' + source_path + ' ' + \
    machines[0] + ':' + dest_path;
print command;
subprocess.call(command, shell=True)
command = make_str(machines, source_path, dest_path, 0);
print command
subprocess.call(command, shell=True)
| Python |
# GraphLab host snippet: `graph` is supplied by the embedding engine.
# Print the final PageRank of every vertex.
for v in graph.getVertices():
    print(v.value.rank)
import math
# Python implementation of pagerank
damping = 0.85  # standard PageRank damping factor
def update(scope, scheduler):
    """GraphLab update function: recompute one vertex's rank from its self
    edge and inbound neighbors, rescheduling itself until converged.

    NOTE(review): `e.from` is a reserved word in plain Python -- this snippet
    only runs under the host's embedded interpreter/DSL; confirm before
    reusing it standalone.
    """
    pvertex = scope.getVertex().value
    sumval = pvertex.rank * pvertex.selfedge + sum([e.value * scope.getNeighbor(e.from).value.rank for e in scope.getInboundEdges()])
    newval = (1-damping)/scope.getNumOfVertices() + damping*sumval
    # Keep iterating while the change is above the convergence tolerance.
    if (abs(newval-pvertex.rank)>0.00001):
        scheduler.addTaskToOutbound(scope)
    pvertex.rank = newval
# Invoked by the GraphLab host, which supplies scope and scheduler.
update(scope, scheduler)
class pagerank_vertex:
    """Vertex payload for the PageRank demo: current rank plus the weight of
    the vertex's self-loop edge."""
    def __init__(self, value, selfedge):
        self.selfedge = selfedge
        self.rank = value
# Loader snippet: `filename` and `graph` are provided by the GraphLab host.
f = open(filename, "r")
lines = f.readlines()
# First line is header
header = lines[0]
lines = lines[1:]
nvertices = int(header.split(",")[1])
for i in range(0,nvertices):
    # format: first value is value, second is self edge weight
    graph.addVertex(pagerank_vertex(1.0/nvertices, 0.0))
# Remaining lines are edges: "src,dst,weight" (1-based indices).
for l in lines:
    t = l.split(",")
    i = int(t[0])-1
    j = int(t[1])-1
    # Each line ends (annoyingly) to \n
    w = float(t[2][:-1])
    if (i != j):
        graph.addEdge(j, i, w)
    else:
        # Self-loops are stored on the vertex, not as edges.
        graph.getVertex(i).value.selfedge = w
import math
lamb = 0.5  # L1 regularization weight (lambda)
# Python implementation of Lasso Shooting algorithm.
# min ||Ax-y||_2^2 + lambda ||x||_1
def update(scope, scheduler):
    """GraphLab update: one coordinate-descent ("shooting") step for a
    variable vertex; estimate vertices (vtype 1) are passive."""
    # Of class lasso_variable_vertex or lasso_estimate_vertex
    lassov = scope.getVertex().value
    if (lassov.vtype == 0):
        if lassov.initialized == False:
            # Initialize covariance
            lassov.covar = 2.0*sum([e.value*e.value for e in scope.getOutboundEdges()])
            # Initialize (Ay)_i
            lassov.Ay = 2.0*sum([e.value * scope.getNeighbor(e.to).value.observed for e in scope.getOutboundEdges()])
            lassov.initialized = True
        # Compute (Ax)_i
        curest = sum([e.value * scope.getNeighbor(e.to).value.curval for e in scope.getOutboundEdges()])
        newval = soft_threshold(lamb, curest*2 - lassov.covar*lassov.value - lassov.Ay)/lassov.covar
        # if (newval == 0.0):
        #     print("zero!")
        #if (scope.getVertex().getId() % 100 == 0):
        #    print(scope.getVertex().getId(), lassov.value, newval, curest*2 - lassov.covar*lassov.value - lassov.Ay, lassov.covar, lassov.Ay)
        if newval != lassov.value:
            # Propagate the change of x_i into every estimate it touches.
            delta = newval-lassov.value
            lassov.value = newval
            for e in scope.getOutboundEdges():
                scope.getNeighbor(e.to).value.curval += delta * e.value
def soft_threshold(lamb, x):
    """(Sign-flipped) soft-thresholding operator used by the shooting step.

    Returns 0 inside the dead zone [-lamb, lamb]; otherwise shrinks |x| by
    lamb, with the sign flipped to match how the caller applies it.
    """
    if (x > lamb):
        return (lamb-x)
    elif (x < -lamb):
        # BUGFIX: the lower bound compared against lamb instead of -lamb,
        # which made the zero (dead-zone) branch unreachable for any
        # -lamb <= x < lamb -- defeating the sparsity that Lasso exists for.
        return (-lamb-x)
    else:
        return 0.0
# Invoked by the GraphLab host, which supplies scope and scheduler.
update(scope, scheduler)
# GraphLab engine configuration for the demo runs (host supplies `graphlab`).
graphlab.setScheduler("round_robin")
graphlab.setIterations(100)
graphlab.setScopeType("vertex")
import math
# Python implementation of pagerank
def update(scope, scheduler):
    """GraphLab update: recompute this vertex's rank from its self-edge
    weight and its weighted inbound edge values, rescheduling until the
    change falls below tolerance."""
    vertex = scope.getVertex()
    oldval = vertex.value
    newval = vertex.value * vertex.selfEdgeWeight
    newval = newval + sum([e.weight*e.value for e in scope.getInboundEdges()])
    vertex.setValue(newval)
    if (abs(newval-oldval)>0.00001):
        scheduler.addTaskToOutbound(scope)
# Invoked by the GraphLab host, which supplies scope and scheduler.
update(scope, scheduler)
# Report the Lasso objective: least-squares error plus the L1 penalty.
leastsqr_err = 0.0
penalty = 0.0
lamb = 0.5
for v in graph.getVertices():
    lassov = v.value
    if lassov.vtype == 0:
        # Variable vertex: contributes lambda * |x_i|.
        penalty += lamb * abs(lassov.value)
    else:
        # Estimate vertex: contributes the squared prediction error.
        leastsqr_err += pow(lassov.observed - lassov.curval,2)
print("Objective:", penalty + leastsqr_err)
#
# Solve Lasso: min ||Ax-y||_2^2 + \lambda ||x||_1
#
# We present Lasso as a bipartite graph. On the left side, we have
# variables x_i (predictors) and on the right side the current estimates
# for y_i = (Ax)_i. Sides are connected by edges weighted by A_ij
#
#
# Rights side of the graph. Estimate for y_i. We store
# the actual y_i as well (observed), in order to quickly
# compute prediction error.
#
class lasso_estimate_vertex:
    """Right-hand-side vertex: the running estimate of (Ax)_i together with
    the observed y_i, kept locally so prediction error is cheap to compute."""
    def __init__(self, value, observed):
        self.vtype = 1
        self.observed = observed
        self.curval = value
        self.lastval = value
class lasso_variable_vertex:
    """Left-hand-side vertex: one predictor x_i plus lazily-initialized
    cached statistics (column covariance and (Ay)_i)."""
    def __init__(self, value):
        self.vtype = 0
        self.value = value
        self.initialized = False
        self.covar = 0
        self.Ay = 0
# Loader snippet: `filename` and `graph` are provided by the GraphLab host.
# Estimate vertices are added first (ids 0..ny-1), then variable vertices.
f = open(filename, "r")
lines = f.readlines()
header = lines[0].split(",")
assert(header[0] == "y")
# First read y-values
ny = int(header[1])
for i in range(1,ny+1):
    val = float(lines[i])
    graph.addVertex(lasso_estimate_vertex(0.0, val))
# Remove the first part
lines = lines[ny+1:]
# Read edges
header = lines[0].split(",")
print(header)
assert(header[0] == "A")
n = int(header[1])
nx = int(header[2])
# Create variables
for i in range(0,nx):
    graph.addVertex(lasso_variable_vertex(0.0))
for i in range(1,n+1):
    ln = lines[i].split(",")
    # A is stored as (flat column-major index, value); recover (row, col).
    idx = int(ln[0])-1
    val = float(ln[1])
    row = idx%ny
    col = idx/ny
    # Create edge between variable x_col and y_row
    graph.addEdge(col+ny, row, val)
    assert(col+ny>row)
    #print(idx, col, row)
print("Data loaded")
# module pyparsing.py
#
# Copyright (c) 2003-2010 Paul T. McGuire
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
#from __future__ import generators
__doc__ = \
"""
pyparsing module - Classes and methods to define and execute parsing grammars
The pyparsing module is an alternative approach to creating and executing simple grammars,
vs. the traditional lex/yacc approach, or the use of regular expressions. With pyparsing, you
don't need to learn a new syntax for defining grammars or matching expressions - the parsing module
provides a library of classes that you use to construct the grammar directly in Python.
Here is a program to parse "Hello, World!" (or any greeting of the form C{"<salutation>, <addressee>!"})::
from pyparsing import Word, alphas
# define grammar of a greeting
greet = Word( alphas ) + "," + Word( alphas ) + "!"
hello = "Hello, World!"
print hello, "->", greet.parseString( hello )
The program outputs the following::
Hello, World! -> ['Hello', ',', 'World', '!']
The Python representation of the grammar is quite readable, owing to the self-explanatory
class names, and the use of '+', '|' and '^' operators.
The parsed results returned from C{parseString()} can be accessed as a nested list, a dictionary, or an
object with named attributes.
The pyparsing module handles some of the problems that are typically vexing when writing text parsers:
- extra or missing whitespace (the above program will also handle "Hello,World!", "Hello , World !", etc.)
- quoted strings
- embedded comments
"""
__version__ = "1.5.5"
__versionTime__ = "12 Aug 2010 03:56"
__author__ = "Paul McGuire <ptmcg@users.sourceforge.net>"
import string
from weakref import ref as wkref
import copy
import sys
import warnings
import re
import sre_constants
import collections
#~ sys.stderr.write( "testing pyparsing module, version %s, %s\n" % (__version__,__versionTime__ ) )
# Explicit public API of the module (names exported by "from pyparsing import *").
__all__ = [
'And', 'CaselessKeyword', 'CaselessLiteral', 'CharsNotIn', 'Combine', 'Dict', 'Each', 'Empty',
'FollowedBy', 'Forward', 'GoToColumn', 'Group', 'Keyword', 'LineEnd', 'LineStart', 'Literal',
'MatchFirst', 'NoMatch', 'NotAny', 'OneOrMore', 'OnlyOnce', 'Optional', 'Or',
'ParseBaseException', 'ParseElementEnhance', 'ParseException', 'ParseExpression', 'ParseFatalException',
'ParseResults', 'ParseSyntaxException', 'ParserElement', 'QuotedString', 'RecursiveGrammarException',
'Regex', 'SkipTo', 'StringEnd', 'StringStart', 'Suppress', 'Token', 'TokenConverter', 'Upcase',
'White', 'Word', 'WordEnd', 'WordStart', 'ZeroOrMore',
'alphanums', 'alphas', 'alphas8bit', 'anyCloseTag', 'anyOpenTag', 'cStyleComment', 'col',
'commaSeparatedList', 'commonHTMLEntity', 'countedArray', 'cppStyleComment', 'dblQuotedString',
'dblSlashComment', 'delimitedList', 'dictOf', 'downcaseTokens', 'empty', 'getTokensEndLoc', 'hexnums',
'htmlComment', 'javaStyleComment', 'keepOriginalText', 'line', 'lineEnd', 'lineStart', 'lineno',
'makeHTMLTags', 'makeXMLTags', 'matchOnlyAtCol', 'matchPreviousExpr', 'matchPreviousLiteral',
'nestedExpr', 'nullDebugAction', 'nums', 'oneOf', 'opAssoc', 'operatorPrecedence', 'printables',
'punc8bit', 'pythonStyleComment', 'quotedString', 'removeQuotes', 'replaceHTMLEntity',
'replaceWith', 'restOfLine', 'sglQuotedString', 'srange', 'stringEnd',
'stringStart', 'traceParseAction', 'unicodeString', 'upcaseTokens', 'withAttribute',
'indentedBlock', 'originalTextFor',
]
"""
Detect if we are running version 3.X and make appropriate changes
Robert A. Clark
"""
_PY3K = sys.version_info[0] > 2
if _PY3K:
_MAX_INT = sys.maxsize
basestring = str
unichr = chr
_ustr = str
alphas = string.ascii_lowercase + string.ascii_uppercase
else:
_MAX_INT = sys.maxint
range = xrange
set = lambda s : dict( [(c,0) for c in s] )
alphas = string.lowercase + string.uppercase
def _ustr(obj):
"""Drop-in replacement for str(obj) that tries to be Unicode friendly. It first tries
str(obj). If that fails with a UnicodeEncodeError, then it tries unicode(obj). It
then < returns the unicode object | encodes it with the default encoding | ... >.
"""
if isinstance(obj,unicode):
return obj
try:
# If this works, then _ustr(obj) has the same behaviour as str(obj), so
# it won't break any existing code.
return str(obj)
except UnicodeEncodeError:
# The Python docs (http://docs.python.org/ref/customization.html#l2h-182)
# state that "The return value must be a string object". However, does a
# unicode object (being a subclass of basestring) count as a "string
# object"?
# If so, then return a unicode object:
return unicode(obj)
# Else encode it... but how? There are many choices... :)
# Replace unprintables with escape codes?
#return unicode(obj).encode(sys.getdefaultencoding(), 'backslashreplace_errors')
# Replace unprintables with question marks?
#return unicode(obj).encode(sys.getdefaultencoding(), 'replace')
# ...
# build list of single arg builtins, tolerant of Python version, that can be used as parse actions
singleArgBuiltins = []
try:
    import builtins                      # Python 3
except ImportError:
    import __builtin__ as builtins       # Python 2 (module was renamed in Py3)
for fname in "sum len enumerate sorted reversed list tuple set any all".split():
    try:
        singleArgBuiltins.append(getattr(builtins, fname))
    except AttributeError:
        # builtin not present in this interpreter version - just skip it
        continue
def _xml_escape(data):
"""Escape &, <, >, ", ', etc. in a string of data."""
# ampersand must be replaced first
for from_,to_ in zip('&><"\'', "amp gt lt quot apos".split()):
data = data.replace(from_, '&'+to_+';')
return data
class _Constants(object):
    """Empty class used as a simple namespace holder for named constant values."""
    pass
# Character-class constants used when building Word expressions.
nums = string.digits
hexnums = nums + "ABCDEFabcdef"
alphanums = alphas + nums
_bslash = chr(92)  # a literal backslash, spelled chr(92) to sidestep escaping confusion
printables = "".join( [ c for c in string.printable if c not in string.whitespace ] )
class ParseBaseException(Exception):
    """Base exception class for all parsing runtime exceptions.

    The lineno/col/line attributes are computed lazily via __getattr__ so
    that constructing the exception stays cheap.
    """
    # Performance tuning: we construct a *lot* of these, so keep this
    # constructor as small and fast as possible
    def __init__( self, pstr, loc=0, msg=None, elem=None ):
        self.loc = loc
        if msg is None:
            # single-argument form: the message was passed in the pstr slot
            self.msg = pstr
            self.pstr = ""
        else:
            self.msg = msg
            self.pstr = pstr
        self.parserElement = elem

    def __getattr__( self, aname ):
        """supported attributes by name are:
            - lineno - returns the line number of the exception text
            - col - returns the column number of the exception text
            - line - returns the line containing the exception text
        """
        if( aname == "lineno" ):
            return lineno( self.loc, self.pstr )
        elif( aname in ("col", "column") ):
            return col( self.loc, self.pstr )
        elif( aname == "line" ):
            return line( self.loc, self.pstr )
        else:
            raise AttributeError(aname)

    def __str__( self ):
        return "%s (at char %d), (line:%d, col:%d)" % \
                ( self.msg, self.loc, self.lineno, self.column )

    def __repr__( self ):
        return _ustr(self)

    def markInputline( self, markerString = ">!<" ):
        """Extracts the exception line from the input string, and marks
           the location of the exception with a special symbol.
        """
        line_str = self.line
        line_column = self.column - 1
        if markerString:
            line_str = "".join( [line_str[:line_column],
                                markerString, line_str[line_column:]])
        return line_str.strip()

    def __dir__(self):
        # fixed: was "markInputLine", which does not match the method name
        # markInputline defined above
        return "loc msg pstr parserElement lineno col line " \
               "markInputline __str__ __repr__".split()
class ParseException(ParseBaseException):
    """exception thrown when parse expressions don't match class;
       supported attributes by name are:
        - lineno - returns the line number of the exception text
        - col - returns the column number of the exception text
        - line - returns the line containing the exception text
    """
    # ordinary, recoverable match failure - parsers catch this to backtrack
    pass
class ParseFatalException(ParseBaseException):
    """user-throwable exception thrown when inconsistent parse content
       is found; stops all parsing immediately"""
    # unlike ParseException, this is not caught for backtracking
    pass
class ParseSyntaxException(ParseFatalException):
    """Identical in effect to C{ParseFatalException}; raised internally when an
       C{ErrorStop} (the '-' operator) signals that parsing must stop at once
       because an unbacktrackable syntax error was found."""
    def __init__(self, pe):
        # adopt the location/message state of the originating exception
        details = (pe.pstr, pe.loc, pe.msg, pe.parserElement)
        super(ParseSyntaxException, self).__init__(*details)
#~ class ReparseException(ParseBaseException):
#~ """Experimental class - parse actions can raise this exception to cause
#~ pyparsing to reparse the input string:
#~ - with a modified input string, and/or
#~ - with a modified start location
#~ Set the values of the ReparseException in the constructor, and raise the
#~ exception in a parse action to cause pyparsing to use the new string/location.
#~ Setting the values as None causes no change to be made.
#~ """
#~ def __init_( self, newstring, restartLoc ):
#~ self.newParseText = newstring
#~ self.reparseLoc = restartLoc
class RecursiveGrammarException(Exception):
    """Raised by C{validate()} when the grammar appears to contain an
       improperly recursive definition."""
    def __init__( self, parseElementList ):
        # remember the chain of elements that formed the recursive loop
        self.parseElementTrace = parseElementList

    def __str__( self ):
        return "RecursiveGrammarException: {0}".format(self.parseElementTrace)
class _ParseResultsWithOffset(object):
def __init__(self,p1,p2):
self.tup = (p1,p2)
def __getitem__(self,i):
return self.tup[i]
def __repr__(self):
return repr(self.tup)
def setOffset(self,i):
self.tup = (self.tup[0],i)
class ParseResults(object):
    """Structured parse results, to provide multiple means of access to the parsed data:
       - as a list (C{len(results)})
       - by list index (C{results[0], results[1]}, etc.)
       - by attribute (C{results.<resultsName>})

       Internally keeps a parallel list (__toklist) and dict of named results
       (__tokdict mapping name -> list of _ParseResultsWithOffset entries).
    """
    #~ __slots__ = ( "__toklist", "__tokdict", "__doinit", "__name", "__parent", "__accumNames", "__weakref__" )
    def __new__(cls, toklist, name=None, asList=True, modal=True ):
        # passing an existing ParseResults returns it unchanged; otherwise
        # flag the fresh instance so __init__ runs its setup exactly once
        if isinstance(toklist, cls):
            return toklist
        retobj = object.__new__(cls)
        retobj.__doinit = True
        return retobj

    # Performance tuning: we construct a *lot* of these, so keep this
    # constructor as small and fast as possible
    def __init__( self, toklist, name=None, asList=True, modal=True ):
        if self.__doinit:
            self.__doinit = False
            self.__name = None
            self.__parent = None
            self.__accumNames = {}
            if isinstance(toklist, list):
                self.__toklist = toklist[:]
            else:
                self.__toklist = [toklist]
            self.__tokdict = dict()

        # if a results name was supplied, also file the tokens under that name
        if name is not None and name:
            if not modal:
                # cumulative (listAllMatches) names are tracked in __accumNames
                self.__accumNames[name] = 0
            if isinstance(name,int):
                name = _ustr(name) # will always return a str, but use _ustr for consistency
            self.__name = name
            if not toklist in (None,'',[]):
                if isinstance(toklist,basestring):
                    toklist = [ toklist ]
                if asList:
                    if isinstance(toklist,ParseResults):
                        self[name] = _ParseResultsWithOffset(toklist.copy(),0)
                    else:
                        self[name] = _ParseResultsWithOffset(ParseResults(toklist[0]),0)
                    self[name].__name = name
                else:
                    try:
                        self[name] = toklist[0]
                    except (KeyError,TypeError,IndexError):
                        self[name] = toklist

    def __getitem__( self, i ):
        # int/slice -> positional access; anything else -> named-result access
        if isinstance( i, (int,slice) ):
            return self.__toklist[i]
        else:
            if i not in self.__accumNames:
                # modal name: return only the most recent value
                return self.__tokdict[i][-1][0]
            else:
                # cumulative name: return all values collected under it
                return ParseResults([ v[0] for v in self.__tokdict[i] ])

    def __setitem__( self, k, v ):
        if isinstance(v,_ParseResultsWithOffset):
            self.__tokdict[k] = self.__tokdict.get(k,list()) + [v]
            sub = v[0]
        elif isinstance(k,int):
            self.__toklist[k] = v
            sub = v
        else:
            self.__tokdict[k] = self.__tokdict.get(k,list()) + [_ParseResultsWithOffset(v,0)]
            sub = v
        if isinstance(sub,ParseResults):
            # weakref back-link lets nested results find their container
            sub.__parent = wkref(self)

    def __delitem__( self, i ):
        if isinstance(i,(int,slice)):
            mylen = len( self.__toklist )
            del self.__toklist[i]

            # convert int to slice
            if isinstance(i, int):
                if i < 0:
                    i += mylen
                i = slice(i, i+1)
            # get removed indices
            removed = list(range(*i.indices(mylen)))
            removed.reverse()
            # fixup indices in token dictionary
            for name in self.__tokdict:
                occurrences = self.__tokdict[name]
                for j in removed:
                    for k, (value, position) in enumerate(occurrences):
                        occurrences[k] = _ParseResultsWithOffset(value, position - (position > j))
        else:
            del self.__tokdict[i]

    def __contains__( self, k ):
        # membership is tested against the *named* results, not the token list
        return k in self.__tokdict

    def __len__( self ): return len( self.__toklist )
    def __bool__(self): return len( self.__toklist ) > 0
    __nonzero__ = __bool__  # Py2 truth-testing alias
    def __iter__( self ): return iter( self.__toklist )
    def __reversed__( self ): return iter( reversed(self.__toklist) )

    def keys( self ):
        """Returns all named result keys."""
        return self.__tokdict.keys()

    def pop( self, index=-1 ):
        """Removes and returns item at specified index (default=last).
           Will work with either numeric indices or dict-key indicies."""
        ret = self[index]
        del self[index]
        return ret

    def get(self, key, defaultValue=None):
        """Returns named result matching the given key, or if there is no
           such name, then returns the given C{defaultValue} or C{None} if no
           C{defaultValue} is specified."""
        if key in self:
            return self[key]
        else:
            return defaultValue

    def insert( self, index, insStr ):
        """Inserts new element at location index in the list of parsed tokens."""
        self.__toklist.insert(index, insStr)
        # fixup indices in token dictionary
        for name in self.__tokdict:
            occurrences = self.__tokdict[name]
            for k, (value, position) in enumerate(occurrences):
                occurrences[k] = _ParseResultsWithOffset(value, position + (position > index))

    def items( self ):
        """Returns all named result keys and values as a list of tuples."""
        return [(k,self[k]) for k in self.__tokdict]

    def values( self ):
        """Returns all named result values."""
        return [ v[-1][0] for v in self.__tokdict.values() ]

    def __getattr__( self, name ):
        # attribute access falls back to named results; unknown names yield ""
        if True: #name not in self.__slots__:
            if name in self.__tokdict:
                if name not in self.__accumNames:
                    return self.__tokdict[name][-1][0]
                else:
                    return ParseResults([ v[0] for v in self.__tokdict[name] ])
            else:
                return ""
        return None

    def __add__( self, other ):
        ret = self.copy()
        ret += other
        return ret

    def __iadd__( self, other ):
        # merge other's named results, shifting their offsets past our tokens
        if other.__tokdict:
            offset = len(self.__toklist)
            addoffset = ( lambda a: (a<0 and offset) or (a+offset) )
            otheritems = other.__tokdict.items()
            otherdictitems = [(k, _ParseResultsWithOffset(v[0],addoffset(v[1])) )
                                for (k,vlist) in otheritems for v in vlist]
            for k,v in otherdictitems:
                self[k] = v
                if isinstance(v[0],ParseResults):
                    v[0].__parent = wkref(self)
        self.__toklist += other.__toklist
        self.__accumNames.update( other.__accumNames )
        return self

    def __radd__(self, other):
        # NOTE(review): only supports int 0 (so sum() works with its default
        # start value); any other left operand silently yields None
        if isinstance(other,int) and other == 0:
            return self.copy()

    def __repr__( self ):
        return "(%s, %s)" % ( repr( self.__toklist ), repr( self.__tokdict ) )

    def __str__( self ):
        out = "["
        sep = ""
        for i in self.__toklist:
            if isinstance(i, ParseResults):
                out += sep + _ustr(i)
            else:
                out += sep + repr(i)
            sep = ", "
        out += "]"
        return out

    def _asStringList( self, sep='' ):
        # flatten all tokens (recursing into nested results) into a list of strings
        out = []
        for item in self.__toklist:
            if out and sep:
                out.append(sep)
            if isinstance( item, ParseResults ):
                out += item._asStringList()
            else:
                out.append( _ustr(item) )
        return out

    def asList( self ):
        """Returns the parse results as a nested list of matching tokens, all converted to strings."""
        out = []
        for res in self.__toklist:
            if isinstance(res,ParseResults):
                out.append( res.asList() )
            else:
                out.append( res )
        return out

    def asDict( self ):
        """Returns the named parse results as dictionary."""
        return dict( self.items() )

    def copy( self ):
        """Returns a new copy of a C{ParseResults} object."""
        ret = ParseResults( self.__toklist )
        ret.__tokdict = self.__tokdict.copy()
        ret.__parent = self.__parent
        ret.__accumNames.update( self.__accumNames )
        ret.__name = self.__name
        return ret

    def asXML( self, doctag=None, namedItemsOnly=False, indent="", formatted=True ):
        """Returns the parse results as XML. Tags are created for tokens and lists that have defined results names."""
        nl = "\n"
        out = []
        # map token offset -> results name, so positional tokens can be tagged
        namedItems = dict( [ (v[1],k) for (k,vlist) in self.__tokdict.items()
                                                            for v in vlist ] )
        nextLevelIndent = indent + "  "

        # collapse out indents if formatting is not desired
        if not formatted:
            indent = ""
            nextLevelIndent = ""
            nl = ""

        selfTag = None
        if doctag is not None:
            selfTag = doctag
        else:
            if self.__name:
                selfTag = self.__name

        if not selfTag:
            if namedItemsOnly:
                return ""
            else:
                selfTag = "ITEM"

        out += [ nl, indent, "<", selfTag, ">" ]

        worklist = self.__toklist
        for i,res in enumerate(worklist):
            if isinstance(res,ParseResults):
                if i in namedItems:
                    out += [ res.asXML(namedItems[i],
                                        namedItemsOnly and doctag is None,
                                        nextLevelIndent,
                                        formatted)]
                else:
                    out += [ res.asXML(None,
                                        namedItemsOnly and doctag is None,
                                        nextLevelIndent,
                                        formatted)]
            else:
                # individual token, see if there is a name for it
                resTag = None
                if i in namedItems:
                    resTag = namedItems[i]
                if not resTag:
                    if namedItemsOnly:
                        continue
                    else:
                        resTag = "ITEM"
                xmlBodyText = _xml_escape(_ustr(res))
                out += [ nl, nextLevelIndent, "<", resTag, ">",
                                                xmlBodyText,
                                                "</", resTag, ">" ]

        out += [ nl, indent, "</", selfTag, ">" ]
        return "".join(out)

    def __lookup(self,sub):
        # reverse lookup: find the results name under which sub is filed
        for k,vlist in self.__tokdict.items():
            for v,loc in vlist:
                if sub is v:
                    return k
        return None

    def getName(self):
        """Returns the results name for this token expression."""
        if self.__name:
            return self.__name
        elif self.__parent:
            par = self.__parent()
            if par:
                return par.__lookup(self)
            else:
                return None
        elif (len(self) == 1 and
               len(self.__tokdict) == 1 and
               # NOTE(review): indexing values()/keys() relies on Py2 dicts
               # returning lists; on Py3 these are views and this raises -
               # verify if Py3 support is required here
               self.__tokdict.values()[0][0][1] in (0,-1)):
            return self.__tokdict.keys()[0]
        else:
            return None

    def dump(self,indent='',depth=0):
        """Diagnostic method for listing out the contents of a C{ParseResults}.
           Accepts an optional C{indent} argument so that this string can be embedded
           in a nested display of other data."""
        out = []
        out.append( indent+_ustr(self.asList()) )
        keys = self.items()
        keys.sort()
        for k,v in keys:
            if out:
                out.append('\n')
            out.append( "%s%s- %s: " % (indent,('  '*depth), k) )
            if isinstance(v,ParseResults):
                if v.keys():
                    out.append( v.dump(indent,depth+1) )
                else:
                    out.append(_ustr(v))
            else:
                out.append(_ustr(v))
        return "".join(out)

    # add support for pickle protocol
    def __getstate__(self):
        # the weakref parent cannot be pickled; dereference it here and
        # rebuild the weakref in __setstate__
        return ( self.__toklist,
                 ( self.__tokdict.copy(),
                   self.__parent is not None and self.__parent() or None,
                   self.__accumNames,
                   self.__name ) )

    def __setstate__(self,state):
        self.__toklist = state[0]
        self.__tokdict, \
        par, \
        inAccumNames, \
        self.__name = state[1]
        self.__accumNames = {}
        self.__accumNames.update(inAccumNames)
        if par is not None:
            self.__parent = wkref(par)
        else:
            self.__parent = None

    def __dir__(self):
        # NOTE(review): on Py3, keys() is a view, and list + view raises
        # TypeError; this method is Py2-era code - verify before relying on it
        return dir(super(ParseResults,self)) + self.keys()
# Register ParseResults as a virtual subclass of MutableMapping so that
# isinstance checks against the mapping ABC succeed.
try:
    # Python 3.3+: ABCs live in collections.abc (the aliases kept directly on
    # collections were removed entirely in Python 3.10)
    import collections.abc
    collections.abc.MutableMapping.register(ParseResults)
except ImportError:
    # Python 2: ABCs are exposed directly on the collections module
    collections.MutableMapping.register(ParseResults)
def col (loc,strg):
    """Returns current column within a string, counting newlines as line separators.
   The first column is number 1.

   Note: the default parsing behavior is to expand tabs in the input string
   before starting the parsing process.  See L{I{ParserElement.parseString}<ParserElement.parseString>} for more information
   on parsing strings containing <TAB>s, and suggested methods to maintain a
   consistent view of the parsed string, the parse location, and line and column
   positions within the parsed string.
   """
    # a location sitting on a newline is treated as column 1 of the next line
    if loc < len(strg) and strg[loc] == '\n':
        return 1
    return loc - strg.rfind("\n", 0, loc)
def lineno(loc,strg):
    """Returns current line number within a string, counting newlines as line separators.
   The first line is number 1.

   Note: the default parsing behavior is to expand tabs in the input string
   before starting the parsing process.  See L{I{ParserElement.parseString}<ParserElement.parseString>} for more information
   on parsing strings containing <TAB>s, and suggested methods to maintain a
   consistent view of the parsed string, the parse location, and line and column
   positions within the parsed string.
   """
    # one more than the number of newlines preceding loc
    return 1 + strg.count("\n", 0, loc)
def line( loc, strg ):
    """Returns the line of text containing loc within a string, counting newlines as line separators.
       """
    lineStart = strg.rfind("\n", 0, loc) + 1
    lineEnd = strg.find("\n", loc)
    # no trailing newline means the line runs to the end of the string
    return strg[lineStart:lineEnd] if lineEnd >= 0 else strg[lineStart:]
def _defaultStartDebugAction( instring, loc, expr ):
    # default debug hook fired just before an expression attempts a match
    print ("Match %s at loc %s(%d,%d)" % (_ustr(expr), _ustr(loc), lineno(loc, instring), col(loc, instring)))
def _defaultSuccessDebugAction( instring, startloc, endloc, expr, toks ):
    # default debug hook fired after a successful match
    print ("Matched %s -> %s" % (_ustr(expr), str(toks.asList())))
def _defaultExceptionDebugAction( instring, loc, expr, exc ):
    # default debug hook fired when an expression raises during matching
    print ("Exception raised:%s" % _ustr(exc))
def nullDebugAction(*args):
    """'Do-nothing' debug action, to suppress debugging output during parsing."""
    pass
class ParserElement(object):
    """Abstract base level parser element class."""
    DEFAULT_WHITE_CHARS = " \n\t\r"  # whitespace skipped between tokens by default
    verbose_stacktrace = False  # when True, parse errors keep the full internal stack trace
def setDefaultWhitespaceChars( chars ):
    """Overrides the default whitespace chars
    """
    # affects elements created after this call (whiteChars is read in __init__)
    ParserElement.DEFAULT_WHITE_CHARS = chars
setDefaultWhitespaceChars = staticmethod(setDefaultWhitespaceChars)
def __init__( self, savelist=False ):
    """Initialize shared parser-element state; C{savelist} controls whether
       results are grouped as a list (stored in saveAsList)."""
    self.parseAction = list()
    self.failAction = None
    #~ self.name = "<unknown>"  # don't define self.name, let subclasses try/except upcall
    self.strRepr = None
    self.resultsName = None
    self.saveAsList = savelist
    self.skipWhitespace = True
    self.whiteChars = ParserElement.DEFAULT_WHITE_CHARS
    self.copyDefaultWhiteChars = True
    self.mayReturnEmpty = False # used when checking for left-recursion
    self.keepTabs = False
    self.ignoreExprs = list()
    self.debug = False
    self.streamlined = False
    self.mayIndexError = True # used to optimize exception handling for subclasses that don't advance parse index
    self.errmsg = ""
    self.modalResults = True # used to mark results names as modal (report only last) or cumulative (list all)
    self.debugActions = ( None, None, None ) #custom debug actions
    self.re = None
    self.callPreparse = True # used to avoid redundant calls to preParse
    self.callDuringTry = False
def copy( self ):
"""Make a copy of this C{ParserElement}. Useful for defining different parse actions
for the same parsing pattern, using copies of the original parse element."""
cpy = copy.copy( self )
cpy.parseAction = self.parseAction[:]
cpy.ignoreExprs = self.ignoreExprs[:]
if self.copyDefaultWhiteChars:
cpy.whiteChars = ParserElement.DEFAULT_WHITE_CHARS
return cpy
def setName( self, name ):
"""Define name for this expression, for use in debugging."""
self.name = name
self.errmsg = "Expected " + self.name
if hasattr(self,"exception"):
self.exception.msg = self.errmsg
return self
def setResultsName( self, name, listAllMatches=False ):
"""Define name for referencing matching tokens as a nested attribute
of the returned parse results.
NOTE: this returns a *copy* of the original C{ParserElement} object;
this is so that the client can define a basic element, such as an
integer, and reference it in multiple places with different names.
You can also set results names using the abbreviated syntax,
C{expr("name")} in place of C{expr.setResultsName("name")} -
see L{I{__call__}<__call__>}.
"""
newself = self.copy()
newself.resultsName = name
newself.modalResults = not listAllMatches
return newself
def setBreak(self,breakFlag = True):
    """Method to invoke the Python pdb debugger when this element is
       about to be parsed. Set C{breakFlag} to True to enable, False to
       disable.
    """
    if breakFlag:
        # wrap the current _parse so pdb is entered just before matching
        _parseMethod = self._parse
        def breaker(instring, loc, doActions=True, callPreParse=True):
            import pdb
            pdb.set_trace()
            return _parseMethod( instring, loc, doActions, callPreParse )
        breaker._originalParseMethod = _parseMethod
        self._parse = breaker
    else:
        # unwrap: restore the original method if a breaker is installed
        if hasattr(self._parse,"_originalParseMethod"):
            self._parse = self._parse._originalParseMethod
    return self
def _normalizeParseActionArgs( f ):
    """Internal method used to decorate parse actions that take fewer than 3 arguments,
       so that all parse actions can be called as C{f(s,l,t)}.

       Counts the positional arguments C{f} accepts (discounting a bound
       'self'/'cls' slot) and, unless the count is exactly 3, wraps C{f}
       in an adapter exposing the full C{(s,l,t)} signature.
    """
    STAR_ARGS = 4  # CO_VARARGS bit of co_flags: *args actions are passed through

    # special handling for single-argument builtins (no introspectable code object)
    if (f in singleArgBuiltins):
        numargs = 1
    else:
        try:
            restore = None
            if isinstance(f,type):
                # for classes, inspect the constructor instead
                restore = f
                f = f.__init__
            if not _PY3K:
                codeObj = f.func_code
            else:
                # NOTE(review): presumably this should be f.__code__ on Py3;
                # the AttributeError raised here falls through to the
                # __call__-based introspection below - verify
                codeObj = f.code
            if codeObj.co_flags & STAR_ARGS:
                return f
            numargs = codeObj.co_argcount
            if not _PY3K:
                if hasattr(f,"im_self"):
                    numargs -= 1
            else:
                if hasattr(f,"__self__"):
                    numargs -= 1
            if restore:
                f = restore
        except AttributeError:
            try:
                if not _PY3K:
                    call_im_func_code = f.__call__.im_func.func_code
                else:
                    call_im_func_code = f.__code__

                # not a function, must be a callable object, get info from the
                # im_func binding of its bound __call__ method
                if call_im_func_code.co_flags & STAR_ARGS:
                    return f
                numargs = call_im_func_code.co_argcount
                if not _PY3K:
                    if hasattr(f.__call__,"im_self"):
                        numargs -= 1
                else:
                    if hasattr(f.__call__,"__self__"):
                        # fix: was "numargs -= 0", which failed to discount the
                        # bound 'self' argument (all parallel branches use -= 1)
                        numargs -= 1
            except AttributeError:
                if not _PY3K:
                    call_func_code = f.__call__.func_code
                else:
                    call_func_code = f.__call__.__code__
                # not a bound method, get info directly from __call__ method
                if call_func_code.co_flags & STAR_ARGS:
                    return f
                numargs = call_func_code.co_argcount
                if not _PY3K:
                    if hasattr(f.__call__,"im_self"):
                        numargs -= 1
                else:
                    if hasattr(f.__call__,"__self__"):
                        numargs -= 1

    #~ print ("adding function %s with %d args" % (f.func_name,numargs))
    if numargs == 3:
        return f
    else:
        if numargs > 3:
            def tmp(s,l,t):
                return f(f.__call__.__self__, s,l,t)
        # fix: the following branches were "if"/"elif"/"else", which let the
        # trailing zero-arg "else" clobber the numargs > 3 adapter above
        elif numargs == 2:
            def tmp(s,l,t):
                return f(l,t)
        elif numargs == 1:
            def tmp(s,l,t):
                return f(t)
        else: #~ numargs == 0:
            def tmp(s,l,t):
                return f()

        # make the adapter impersonate the wrapped action for debugging
        try:
            tmp.__name__ = f.__name__
        except (AttributeError,TypeError):
            # no need for special handling if attribute doesnt exist
            pass
        try:
            tmp.__doc__ = f.__doc__
        except (AttributeError,TypeError):
            # no need for special handling if attribute doesnt exist
            pass
        try:
            tmp.__dict__.update(f.__dict__)
        except (AttributeError,TypeError):
            # no need for special handling if attribute doesnt exist
            pass
        return tmp
_normalizeParseActionArgs = staticmethod(_normalizeParseActionArgs)
def setParseAction( self, *fns, **kwargs ):
"""Define action to perform when successfully matching parse element definition.
Parse action fn is a callable method with 0-3 arguments, called as C{fn(s,loc,toks)},
C{fn(loc,toks)}, C{fn(toks)}, or just C{fn()}, where:
- s = the original string being parsed (see note below)
- loc = the location of the matching substring
- toks = a list of the matched tokens, packaged as a ParseResults object
If the functions in fns modify the tokens, they can return them as the return
value from fn, and the modified list of tokens will replace the original.
Otherwise, fn does not need to return any value.
Note: the default parsing behavior is to expand tabs in the input string
before starting the parsing process. See L{I{parseString}<parseString>} for more information
on parsing strings containing <TAB>s, and suggested methods to maintain a
consistent view of the parsed string, the parse location, and line and column
positions within the parsed string.
"""
self.parseAction = list(map(self._normalizeParseActionArgs, list(fns)))
self.callDuringTry = ("callDuringTry" in kwargs and kwargs["callDuringTry"])
return self
def addParseAction( self, *fns, **kwargs ):
"""Add parse action to expression's list of parse actions. See L{I{setParseAction}<setParseAction>}."""
self.parseAction += list(map(self._normalizeParseActionArgs, list(fns)))
self.callDuringTry = self.callDuringTry or ("callDuringTry" in kwargs and kwargs["callDuringTry"])
return self
def setFailAction( self, fn ):
"""Define action to perform if parsing fails at this expression.
Fail acton fn is a callable function that takes the arguments
C{fn(s,loc,expr,err)} where:
- s = string being parsed
- loc = location where expression match was attempted and failed
- expr = the parse expression that failed
- err = the exception thrown
The function returns no value. It may throw C{ParseFatalException}
if it is desired to stop parsing immediately."""
self.failAction = fn
return self
def _skipIgnorables( self, instring, loc ):
    # Advance loc past any ignorable expressions (comments, etc.); keeps
    # looping until a full pass matches none of them.
    exprsFound = True
    while exprsFound:
        exprsFound = False
        for e in self.ignoreExprs:
            try:
                while 1:
                    loc,dummy = e._parse( instring, loc )
                    exprsFound = True
            except ParseException:
                # this ignore expression no longer matches - try the next one
                pass
    return loc
def preParse( self, instring, loc ):
if self.ignoreExprs:
loc = self._skipIgnorables( instring, loc )
if self.skipWhitespace:
wt = self.whiteChars
instrlen = len(instring)
while loc < instrlen and instring[loc] in wt:
loc += 1
return loc
def parseImpl( self, instring, loc, doActions=True ):
    # default implementation: match nothing, consume nothing; subclasses
    # override this with their actual matching logic
    return loc, []
def postParse( self, instring, loc, tokenlist ):
    # hook for subclasses to massage matched tokens; default is a no-op
    return tokenlist
#~ @profile
def _parseNoCache( self, instring, loc, doActions=True, callPreParse=True ):
    # Core match driver: pre-parse (skip ignorables/whitespace), run
    # parseImpl, post-parse, then apply parse actions.  Debug hooks and the
    # fail action are only consulted when enabled, keeping the common path fast.
    debugging = ( self.debug ) #and doActions )

    if debugging or self.failAction:
        #~ print ("Match",self,"at loc",loc,"(%d,%d)" % ( lineno(loc,instring), col(loc,instring) ))
        if (self.debugActions[0] ):
            self.debugActions[0]( instring, loc, self )
        if callPreParse and self.callPreparse:
            preloc = self.preParse( instring, loc )
        else:
            preloc = loc
        tokensStart = preloc
        try:
            try:
                loc,tokens = self.parseImpl( instring, preloc, doActions )
            except IndexError:
                # subclasses may index past end-of-string to signal no-match
                raise ParseException( instring, len(instring), self.errmsg, self )
        except ParseBaseException:
            #~ print ("Exception raised:", err)
            err = None
            if self.debugActions[2]:
                err = sys.exc_info()[1]
                self.debugActions[2]( instring, tokensStart, self, err )
            if self.failAction:
                if err is None:
                    err = sys.exc_info()[1]
                self.failAction( instring, tokensStart, self, err )
            raise
    else:
        if callPreParse and self.callPreparse:
            preloc = self.preParse( instring, loc )
        else:
            preloc = loc
        tokensStart = preloc
        if self.mayIndexError or loc >= len(instring):
            try:
                loc,tokens = self.parseImpl( instring, preloc, doActions )
            except IndexError:
                raise ParseException( instring, len(instring), self.errmsg, self )
        else:
            # subclass guarantees no IndexError - skip the try/except overhead
            loc,tokens = self.parseImpl( instring, preloc, doActions )

    tokens = self.postParse( instring, loc, tokens )

    retTokens = ParseResults( tokens, self.resultsName, asList=self.saveAsList, modal=self.modalResults )
    if self.parseAction and (doActions or self.callDuringTry):
        if debugging:
            try:
                for fn in self.parseAction:
                    tokens = fn( instring, tokensStart, retTokens )
                    if tokens is not None:
                        # action returned replacement tokens - repackage them
                        retTokens = ParseResults( tokens,
                                                  self.resultsName,
                                                  asList=self.saveAsList and isinstance(tokens,(ParseResults,list)),
                                                  modal=self.modalResults )
            except ParseBaseException:
                #~ print "Exception raised in user parse action:", err
                if (self.debugActions[2] ):
                    err = sys.exc_info()[1]
                    self.debugActions[2]( instring, tokensStart, self, err )
                raise
        else:
            for fn in self.parseAction:
                tokens = fn( instring, tokensStart, retTokens )
                if tokens is not None:
                    retTokens = ParseResults( tokens,
                                              self.resultsName,
                                              asList=self.saveAsList and isinstance(tokens,(ParseResults,list)),
                                              modal=self.modalResults )

    if debugging:
        #~ print ("Matched",self,"->",retTokens.asList())
        if (self.debugActions[1] ):
            self.debugActions[1]( instring, tokensStart, loc, self, retTokens )

    return loc, retTokens
def tryParse( self, instring, loc ):
    # Probe for a match without running parse actions; downgrade fatal
    # errors to ordinary ParseExceptions so callers can backtrack.
    try:
        return self._parse( instring, loc, doActions=False )[0]
    except ParseFatalException:
        raise ParseException( instring, loc, self.errmsg, self)
# this method gets repeatedly called during backtracking with the same arguments -
# we can cache these arguments and save ourselves the trouble of re-parsing the contained expression
def _parseCache( self, instring, loc, doActions=True, callPreParse=True ):
    lookup = (self,instring,loc,callPreParse,doActions)
    if lookup in ParserElement._exprArgCache:
        value = ParserElement._exprArgCache[ lookup ]
        if isinstance(value,Exception):
            # cached failure: re-raise the same exception
            raise value
        return value
    else:
        try:
            value = self._parseNoCache( instring, loc, doActions, callPreParse )
            # cache a copy of the results so later mutation cannot poison the cache
            ParserElement._exprArgCache[ lookup ] = (value[0],value[1].copy())
            return value
        except ParseBaseException:
            pe = sys.exc_info()[1]
            ParserElement._exprArgCache[ lookup ] = pe
            raise
# by default, parsing runs without memoization (see enablePackrat)
_parse = _parseNoCache

# argument cache for optimizing repeated calls when backtracking through recursive expressions
_exprArgCache = {}
def resetCache():
    # clear the shared packrat cache (called at the start of each top-level parse)
    ParserElement._exprArgCache.clear()
resetCache = staticmethod(resetCache)
_packratEnabled = False  # module-wide flag: True once packrat parsing is turned on
def enablePackrat():
    """Enables "packrat" parsing, which adds memoizing to the parsing logic.
       Repeated parse attempts at the same string location (which happens
       often in many complex grammars) can immediately return a cached value,
       instead of re-executing parsing/validating code.  Memoizing is done of
       both valid results and parsing exceptions.

       This speedup may break existing programs that use parse actions that
       have side-effects.  For this reason, packrat parsing is disabled when
       you first import pyparsing.  To activate the packrat feature, your
       program must call the class method C{ParserElement.enablePackrat()}.  If
       your program uses C{psyco} to "compile as you go", you must call
       C{enablePackrat} before calling C{psyco.full()}.  If you do not do this,
       Python will crash.  For best results, call C{enablePackrat()} immediately
       after importing pyparsing.
    """
    if not ParserElement._packratEnabled:
        ParserElement._packratEnabled = True
        # swap in the caching variant of the parse driver for all elements
        ParserElement._parse = ParserElement._parseCache
enablePackrat = staticmethod(enablePackrat)
def parseString( self, instring, parseAll=False ):
    """Execute the parse expression with the given string.
       This is the main interface to the client code, once the complete
       expression has been built.

       If you want the grammar to require that the entire input string be
       successfully parsed, then set C{parseAll} to True (equivalent to ending
       the grammar with C{StringEnd()}).

       Note: C{parseString} implicitly calls C{expandtabs()} on the input string,
       in order to report proper column numbers in parse actions.
       If the input string contains tabs and
       the grammar uses parse actions that use the C{loc} argument to index into the
       string being parsed, you can ensure you have a consistent view of the input
       string by:
        - calling C{parseWithTabs} on your grammar before calling C{parseString}
          (see L{I{parseWithTabs}<parseWithTabs>})
        - define your parse action using the full C{(s,loc,toks)} signature, and
          reference the input string using the parse action's C{s} argument
        - explictly expand the tabs in your input string before calling
          C{parseString}
    """
    ParserElement.resetCache()
    if not self.streamlined:
        self.streamline()
        #~ self.saveAsList = True
    for e in self.ignoreExprs:
        e.streamline()
    if not self.keepTabs:
        instring = instring.expandtabs()
    try:
        loc, tokens = self._parse( instring, 0 )
        if parseAll:
            #loc = self.preParse( instring, loc )
            # require that the remainder of the input is exhausted
            se = StringEnd()
            se._parse( instring, loc )
    except ParseBaseException:
        if ParserElement.verbose_stacktrace:
            raise
        else:
            # catch and re-raise exception from here, clears out pyparsing internal stack trace
            exc = sys.exc_info()[1]
            raise exc
    else:
        return tokens
def scanString( self, instring, maxMatches=_MAX_INT ):
    """Scan the input string for expression matches. Each match will return the
    matching tokens, start location, and end location. May be called with optional
    C{maxMatches} argument, to clip scanning after 'n' matches are found.

    This is a generator yielding ``(tokens, startLoc, endLoc)`` triples.

    Note that the start and end locations are reported relative to the string
    being parsed. See L{I{parseString}<parseString>} for more information on parsing
    strings with embedded tabs."""
    if not self.streamlined:
        self.streamline()
    for e in self.ignoreExprs:
        e.streamline()
    if not self.keepTabs:
        instring = _ustr(instring).expandtabs()
    instrlen = len(instring)
    loc = 0
    # Hoist bound methods out of the scan loop (called once per position).
    preparseFn = self.preParse
    parseFn = self._parse
    ParserElement.resetCache()
    matches = 0
    try:
        while loc <= instrlen and matches < maxMatches:
            try:
                preloc = preparseFn( instring, loc )
                nextLoc,tokens = parseFn( instring, preloc, callPreParse=False )
            except ParseException:
                # No match here - advance one character past the preparse point.
                # NOTE(review): if preparseFn itself raised, preloc would be
                # unbound here (NameError) - presumably preParse never raises
                # ParseException; confirm before relying on that.
                loc = preloc+1
            else:
                if nextLoc > loc:
                    matches += 1
                    yield tokens, preloc, nextLoc
                    loc = nextLoc
                else:
                    # Zero-width match: step forward to guarantee progress.
                    loc = preloc+1
    except ParseBaseException:
        if ParserElement.verbose_stacktrace:
            raise
        else:
            # catch and re-raise exception from here, clears out pyparsing internal stack trace
            exc = sys.exc_info()[1]
            raise exc
def transformString( self, instring ):
    """Extension to C{scanString}: rewrite the matched portions of C{instring}
    using the (possibly parse-action-modified) tokens, and return the
    resulting transformed string. Define a grammar, attach a parse action
    that edits the token list, and invoke C{transformString()} on the target
    text."""
    fragments = []
    last_end = 0
    # Tabs must be preserved so scanString's reported locations stay valid
    # as indices into the original (untransformed) string.
    self.keepTabs = True
    try:
        for tokens, start, end in self.scanString( instring ):
            # Copy the unmatched text preceding this match verbatim.
            fragments.append( instring[last_end:start] )
            if tokens:
                if isinstance(tokens, ParseResults):
                    fragments.extend( tokens.asList() )
                elif isinstance(tokens, list):
                    fragments.extend( tokens )
                else:
                    fragments.append( tokens )
            last_end = end
        fragments.append( instring[last_end:] )
        return "".join(map(_ustr, fragments))
    except ParseBaseException:
        if ParserElement.verbose_stacktrace:
            raise
        else:
            # catch and re-raise exception from here, clears out pyparsing internal stack trace
            exc = sys.exc_info()[1]
            raise exc
def searchString( self, instring, maxMatches=_MAX_INT ):
    """Another extension to C{scanString}: return just the token groups found
    for the given expression, as a C{ParseResults}. The optional
    C{maxMatches} argument clips searching after 'n' matches are found.
    """
    try:
        hits = [tokens for tokens, _, _ in self.scanString( instring, maxMatches )]
        return ParseResults(hits)
    except ParseBaseException:
        if ParserElement.verbose_stacktrace:
            raise
        else:
            # catch and re-raise exception from here, clears out pyparsing internal stack trace
            exc = sys.exc_info()[1]
            raise exc
def __add__(self, other ):
    """Binding for the C{+} operator: sequence this element with C{other} (returns C{And})."""
    if isinstance(other, basestring):
        other = Literal(other)
    if isinstance(other, ParserElement):
        return And([self, other])
    warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
                  SyntaxWarning, stacklevel=2)
    return None
def __radd__(self, other ):
    """Binding for the C{+} operator when the left operand is not a C{ParserElement}."""
    if isinstance(other, basestring):
        other = Literal(other)
    if isinstance(other, ParserElement):
        # Delegate to the coerced operand's __add__ so ordering is preserved.
        return other + self
    warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
                  SyntaxWarning, stacklevel=2)
    return None
def __sub__(self, other):
    """Binding for the C{-} operator: like C{+}, but inserts an error-stop
    marker so backtracking past this point reports an error (returns C{And})."""
    if isinstance(other, basestring):
        other = Literal(other)
    if isinstance(other, ParserElement):
        return And([self, And._ErrorStop(), other])
    warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
                  SyntaxWarning, stacklevel=2)
    return None
def __rsub__(self, other ):
    """Binding for the C{-} operator when the left operand is not a C{ParserElement}."""
    if isinstance(other, basestring):
        other = Literal(other)
    if isinstance(other, ParserElement):
        return other - self
    warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
                  SyntaxWarning, stacklevel=2)
    return None
def __mul__(self,other):
    """Implementation of * operator, allows use of C{expr * 3} in place of
    C{expr + expr + expr}. Expressions may also be multiplied by a 2-integer
    tuple, similar to C{{min,max}} multipliers in regular expressions. Tuples
    may also include C{None} as in:
     - C{expr*(n,None)} or C{expr*(n,)} is equivalent
       to C{expr*n + ZeroOrMore(expr)}
       (read as "at least n instances of C{expr}")
     - C{expr*(None,n)} is equivalent to C{expr*(0,n)}
       (read as "0 to n instances of C{expr}")
     - C{expr*(None,None)} is equivalent to C{ZeroOrMore(expr)}
     - C{expr*(1,None)} is equivalent to C{OneOrMore(expr)}

    Note that C{expr*(None,n)} does not raise an exception if
    more than n exprs exist in the input stream; that is,
    C{expr*(None,n)} does not enforce a maximum number of expr
    occurrences.  If this behavior is desired, then write
    C{expr*(None,n) + ~expr}

    @raise TypeError: if C{other} is not an int or a tuple of ints/None
    @raise ValueError: if the multiplier is negative, the tuple values are
        out of order, or the product would match nothing at all
    """
    if isinstance(other,int):
        minElements, optElements = other,0
    elif isinstance(other,tuple):
        # Normalize to exactly two entries, padding with None.
        other = (other + (None, None))[:2]
        if other[0] is None:
            other = (0, other[1])
        if isinstance(other[0],int) and other[1] is None:
            # Open-ended repetition: (n, None)
            if other[0] == 0:
                return ZeroOrMore(self)
            if other[0] == 1:
                return OneOrMore(self)
            else:
                return self*other[0] + ZeroOrMore(self)
        elif isinstance(other[0],int) and isinstance(other[1],int):
            minElements, optElements = other
            optElements -= minElements
        else:
            # BUGFIX: format args were passed as extra exception args and the
            # message was never interpolated; use % formatting instead.
            raise TypeError("cannot multiply 'ParserElement' and ('%s','%s') objects" %
                            (type(other[0]), type(other[1])))
    else:
        raise TypeError("cannot multiply 'ParserElement' and '%s' objects" % type(other))

    if minElements < 0:
        raise ValueError("cannot multiply ParserElement by negative value")
    if optElements < 0:
        raise ValueError("second tuple value must be greater or equal to first tuple value")
    if minElements == optElements == 0:
        raise ValueError("cannot multiply ParserElement by 0 or (0,0)")

    if (optElements):
        # Build nested Optionals for the "up to optElements more" tail.
        def makeOptionalList(n):
            if n>1:
                return Optional(self + makeOptionalList(n-1))
            else:
                return Optional(self)
        if minElements:
            if minElements == 1:
                ret = self + makeOptionalList(optElements)
            else:
                ret = And([self]*minElements) + makeOptionalList(optElements)
        else:
            ret = makeOptionalList(optElements)
    else:
        if minElements == 1:
            ret = self
        else:
            ret = And([self]*minElements)
    return ret
def __rmul__(self, other):
    """Binding for the C{*} operator when the left operand is the multiplier (C{3 * expr})."""
    return self * other
def __or__(self, other ):
    """Binding for the C{|} operator: first-match alternation (returns C{MatchFirst})."""
    if isinstance(other, basestring):
        other = Literal(other)
    if isinstance(other, ParserElement):
        return MatchFirst([self, other])
    warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
                  SyntaxWarning, stacklevel=2)
    return None
def __ror__(self, other ):
    """Binding for the C{|} operator when the left operand is not a C{ParserElement}."""
    if isinstance(other, basestring):
        other = Literal(other)
    if isinstance(other, ParserElement):
        return other | self
    warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
                  SyntaxWarning, stacklevel=2)
    return None
def __xor__(self, other ):
    """Binding for the C{^} operator: longest-match alternation (returns C{Or})."""
    if isinstance(other, basestring):
        other = Literal(other)
    if isinstance(other, ParserElement):
        return Or([self, other])
    warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
                  SyntaxWarning, stacklevel=2)
    return None
def __rxor__(self, other ):
    """Binding for the C{^} operator when the left operand is not a C{ParserElement}."""
    if isinstance(other, basestring):
        other = Literal(other)
    if isinstance(other, ParserElement):
        return other ^ self
    warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
                  SyntaxWarning, stacklevel=2)
    return None
def __and__(self, other ):
    """Binding for the C{&} operator: all-in-any-order matching (returns C{Each})."""
    if isinstance(other, basestring):
        other = Literal(other)
    if isinstance(other, ParserElement):
        return Each([self, other])
    warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
                  SyntaxWarning, stacklevel=2)
    return None
def __rand__(self, other ):
    """Binding for the C{&} operator when the left operand is not a C{ParserElement}."""
    if isinstance(other, basestring):
        other = Literal(other)
    if isinstance(other, ParserElement):
        return other & self
    warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
                  SyntaxWarning, stacklevel=2)
    return None
def __invert__( self ):
    """Binding for the C{~} operator: negative lookahead (returns C{NotAny})."""
    return NotAny( self )
def __call__(self, name):
    """Shortcut for C{setResultsName}, with C{listAllMatches=default}; lets::
        userdata = Word(alphas).setResultsName("name") + Word(nums+"-").setResultsName("socsecno")
    be written as::
        userdata = Word(alphas)("name") + Word(nums+"-")("socsecno")
    """
    return self.setResultsName(name)
def suppress( self ):
    """Wrap this element in a C{Suppress}, discarding its matched text from
    the results - handy for keeping punctuation out of returned output.
    """
    return Suppress( self )
def leaveWhitespace( self ):
    """Disable skipping of leading whitespace before matching this element's
    pattern. Normally only used internally by pyparsing, but may be needed
    in whitespace-sensitive grammars. Returns self for chaining.
    """
    self.skipWhitespace = False
    return self
def setWhitespaceChars( self, chars ):
    """Override the set of characters treated as skippable whitespace for
    this element. Returns self for chaining.
    """
    self.skipWhitespace = True
    self.whiteChars = chars
    # This element now owns its whitespace set; don't inherit future
    # changes to the class-level default.
    self.copyDefaultWhiteChars = False
    return self
def parseWithTabs( self ):
    """Suppress the default expansion of <TAB>s to spaces before parsing.
    Must be called before C{parseString} when the grammar contains elements
    that match <TAB> characters. Returns self for chaining."""
    self.keepTabs = True
    return self
def ignore( self, other ):
    """Define an expression (e.g. comments) to be skipped while pattern
    matching; may be called repeatedly to register multiple ignorable
    patterns. Returns self for chaining.
    """
    # A Suppress already in the ignore list is not added twice; anything
    # else is wrapped in Suppress so ignored text never reaches results.
    duplicate = isinstance(other, Suppress) and other in self.ignoreExprs
    if not duplicate:
        if isinstance(other, Suppress):
            entry = other.copy()
        else:
            entry = Suppress(other.copy())
        self.ignoreExprs.append(entry)
    return self
def setDebugActions( self, startAction, successAction, exceptionAction ):
    """Install callbacks invoked on match attempt, match success, and match
    failure; any C{None} argument falls back to the module default action.
    Enables debug mode. Returns self for chaining."""
    self.debugActions = (
        startAction or _defaultStartDebugAction,
        successAction or _defaultSuccessDebugAction,
        exceptionAction or _defaultExceptionDebugAction,
    )
    self.debug = True
    return self
def setDebug( self, flag=True ):
    """Turn debug tracing of pattern matching on (C{flag=True}, the default)
    or off (C{flag=False}). Returns self for chaining."""
    if not flag:
        self.debug = False
    else:
        self.setDebugActions( _defaultStartDebugAction, _defaultSuccessDebugAction, _defaultExceptionDebugAction )
    return self
def __str__( self ):
    """String form of an element is its (possibly user-assigned) name."""
    return self.name
def __repr__( self ):
    """repr() delegates to the unicode-safe string form."""
    return _ustr(self)
def streamline( self ):
    """Mark this element as streamlined and invalidate its cached string
    representation; subclasses override to flatten nested expressions.
    Returns self for chaining."""
    self.streamlined = True
    self.strRepr = None
    return self
def checkRecursion( self, parseElementList ):
    """Hook for recursion checking; a leaf element has nothing to check.
    Compound subclasses override this to detect left-recursive grammars."""
    pass
def validate( self, validateTrace=[] ):
    """Check defined expressions for valid structure, check for infinite recursive definitions.

    NOTE(review): ``validateTrace`` is a mutable default argument, but this
    implementation never reads or mutates it, so the usual shared-default
    pitfall does not apply here.
    """
    self.checkRecursion( [] )
def parseFile( self, file_or_filename, parseAll=False ):
    """Execute the parse expression on the given file or filename.

    If a filename is specified (instead of a file object), the entire file
    is opened, read, and closed before parsing. Set C{parseAll} to True to
    require the whole file contents to match.
    """
    try:
        # Duck-typed: anything with a read() method is used as-is.
        file_contents = file_or_filename.read()
    except AttributeError:
        # Treat as a path. Use a context manager so the handle is closed
        # even if read() raises (the original leaked it in that case).
        with open(file_or_filename, "rb") as f:
            file_contents = f.read()
    try:
        return self.parseString(file_contents, parseAll)
    except ParseBaseException:
        # catch and re-raise exception from here, clears out pyparsing internal stack trace
        exc = sys.exc_info()[1]
        raise exc
def getException(self):
    """Build a fresh C{ParseException} preloaded with this element's error message."""
    return ParseException("",0,self.errmsg,self)
def __getattr__(self,aname):
    """Lazily create and cache ``myException`` on first access; any other
    missing attribute raises C{AttributeError} as usual."""
    if aname != "myException":
        raise AttributeError("no such attribute " + aname)
    ret = self.getException()
    # Cache on the instance so __getattr__ is not hit again for this name.
    self.myException = ret
    return ret
def __eq__(self,other):
    """Equality: identity or identical attribute dicts for two elements;
    a string compares equal if this element fully parses it."""
    if isinstance(other, ParserElement):
        return self is other or self.__dict__ == other.__dict__
    elif isinstance(other, basestring):
        # A string is "equal" to the expression if it parses completely.
        try:
            self.parseString(_ustr(other), parseAll=True)
            return True
        except ParseBaseException:
            return False
    else:
        # NOTE(review): this compares the super() proxy object itself with
        # ==, which presumably always yields False for unrelated types -
        # confirm before relying on this fallback.
        return super(ParserElement,self)==other
def __ne__(self,other):
    """Inequality is defined as the negation of __eq__."""
    return not (self == other)
def __hash__(self):
    """Hash on object identity, since __eq__ can be expensive and fuzzy."""
    return hash(id(self))
def __req__(self,other):
    """Reflected equality helper - delegates to __eq__."""
    return self == other
def __rne__(self,other):
    """Reflected inequality helper - negation of __eq__."""
    return not (self == other)
class Token(ParserElement):
    """Abstract base for C{ParserElement} subclasses that match atomic
    (non-compound) patterns."""
    def __init__( self ):
        # Tokens never need their results wrapped in a list.
        super(Token,self).__init__( savelist=False )

    def setName(self, name):
        """Set the element name and keep the error message in sync with it."""
        ret = super(Token,self).setName(name)
        self.errmsg = "Expected " + self.name
        return ret
class Empty(Token):
    """A zero-width token that consumes nothing and always succeeds."""
    def __init__( self ):
        super(Empty,self).__init__()
        self.name = "Empty"
        self.mayReturnEmpty = True
        self.mayIndexError = False
class NoMatch(Token):
    """A token that always fails, regardless of input."""
    def __init__( self ):
        super(NoMatch,self).__init__()
        self.name = "NoMatch"
        self.mayReturnEmpty = True
        self.mayIndexError = False
        self.errmsg = "Unmatchable token"

    def parseImpl( self, instring, loc, doActions=True ):
        # Unconditionally raise the cached exception, retargeted at loc.
        exc = self.myException
        exc.loc = loc
        exc.pstr = instring
        raise exc
class Literal(Token):
    """Token to exactly match a specified string (case-sensitive)."""
    def __init__( self, matchString ):
        super(Literal,self).__init__()
        self.match = matchString
        self.matchLen = len(matchString)
        try:
            # Cache the first character for the fast path in parseImpl.
            self.firstMatchChar = matchString[0]
        except IndexError:
            # Empty match string: degrade this instance into an Empty token
            # (note the in-place __class__ swap) rather than fail later.
            warnings.warn("null string passed to Literal; use Empty() instead",
                            SyntaxWarning, stacklevel=2)
            self.__class__ = Empty
        self.name = '"%s"' % _ustr(self.match)
        self.errmsg = "Expected " + self.name
        self.mayReturnEmpty = False
        #self.myException.msg = self.errmsg
        self.mayIndexError = False

    # Performance tuning: this routine gets called a *lot*
    # if this is a single character match string  and the first character matches,
    # short-circuit as quickly as possible, and avoid calling startswith
    #~ @profile
    def parseImpl( self, instring, loc, doActions=True ):
        if (instring[loc] == self.firstMatchChar and
            (self.matchLen==1 or instring.startswith(self.match,loc)) ):
            return loc+self.matchLen, self.match
        #~ raise ParseException( instring, loc, self.errmsg )
        # Reuse the cached exception object (avoids per-failure allocation).
        exc = self.myException
        exc.loc = loc
        exc.pstr = instring
        raise exc
_L = Literal  # short internal alias for Literal, used when building helper expressions
class Keyword(Token):
    """Token to exactly match a specified string as a keyword, that is, it must be
    immediately followed by a non-keyword character.  Compare with C{Literal}::
        Literal("if") will match the leading 'if' in 'ifAndOnlyIf'.
        Keyword("if") will not; it will only match the leading 'if in 'if x=1', or 'if(y==2)'
    Accepts two optional constructor arguments in addition to the keyword string:
    C{identChars} is a string of characters that would be valid identifier characters,
    defaulting to all alphanumerics + "_" and "$"; C{caseless} allows case-insensitive
    matching, default is False.
    """
    DEFAULT_KEYWORD_CHARS = alphanums+"_$"

    def __init__( self, matchString, identChars=DEFAULT_KEYWORD_CHARS, caseless=False ):
        super(Keyword,self).__init__()
        self.match = matchString
        self.matchLen = len(matchString)
        try:
            self.firstMatchChar = matchString[0]
        except IndexError:
            warnings.warn("null string passed to Keyword; use Empty() instead",
                            SyntaxWarning, stacklevel=2)
        self.name = '"%s"' % self.match
        self.errmsg = "Expected " + self.name
        self.mayReturnEmpty = False
        #self.myException.msg = self.errmsg
        self.mayIndexError = False
        self.caseless = caseless
        if caseless:
            # Pre-uppercase both the keyword and the identifier charset so
            # parseImpl can compare case-insensitively without re-folding.
            self.caselessmatch = matchString.upper()
            identChars = identChars.upper()
        self.identChars = set(identChars)

    def parseImpl( self, instring, loc, doActions=True ):
        # A keyword matches only when the text matches AND both neighbors
        # (the char before loc and the char after the match) are not
        # identifier characters - i.e. the match is on a word boundary.
        if self.caseless:
            if ( (instring[ loc:loc+self.matchLen ].upper() == self.caselessmatch) and
                 (loc >= len(instring)-self.matchLen or instring[loc+self.matchLen].upper() not in self.identChars) and
                 (loc == 0 or instring[loc-1].upper() not in self.identChars) ):
                return loc+self.matchLen, self.match
        else:
            if (instring[loc] == self.firstMatchChar and
                (self.matchLen==1 or instring.startswith(self.match,loc)) and
                (loc >= len(instring)-self.matchLen or instring[loc+self.matchLen] not in self.identChars) and
                (loc == 0 or instring[loc-1] not in self.identChars) ):
                return loc+self.matchLen, self.match
        #~ raise ParseException( instring, loc, self.errmsg )
        exc = self.myException
        exc.loc = loc
        exc.pstr = instring
        raise exc

    def copy(self):
        # Copies revert to the (possibly updated) class-level keyword chars.
        c = super(Keyword,self).copy()
        c.identChars = Keyword.DEFAULT_KEYWORD_CHARS
        return c

    def setDefaultKeywordChars( chars ):
        """Overrides the default Keyword chars
        """
        Keyword.DEFAULT_KEYWORD_CHARS = chars
    setDefaultKeywordChars = staticmethod(setDefaultKeywordChars)
class CaselessLiteral(Literal):
    """Token to match a specified string while ignoring the case of letters.

    Note: matched results always carry the case of the defining match
    string, NOT the case of the input text.
    """
    def __init__( self, matchString ):
        # Store the uppercase form as the comparison key...
        super(CaselessLiteral,self).__init__( matchString.upper() )
        # ...but preserve the original spelling for the returned tokens.
        self.returnString = matchString
        self.name = "'%s'" % self.returnString
        self.errmsg = "Expected " + self.name

    def parseImpl( self, instring, loc, doActions=True ):
        candidate = instring[ loc:loc+self.matchLen ].upper()
        if candidate == self.match:
            return loc+self.matchLen, self.returnString
        exc = self.myException
        exc.loc = loc
        exc.pstr = instring
        raise exc
class CaselessKeyword(Keyword):
    """Caseless version of C{Keyword}: matches on word boundaries like
    C{Keyword}, but ignores the case of letters."""
    def __init__( self, matchString, identChars=Keyword.DEFAULT_KEYWORD_CHARS ):
        super(CaselessKeyword,self).__init__( matchString, identChars, caseless=True )

    def parseImpl( self, instring, loc, doActions=True ):
        # Same boundary tests as the caseless branch of Keyword.parseImpl.
        # BUGFIX: the original omitted the leading-boundary test
        # (loc == 0 or previous char not an identifier char), so e.g. the
        # trailing "if" in "xif" could match; Keyword itself rejects that,
        # and upstream pyparsing later unified the two code paths.
        if ( (instring[ loc:loc+self.matchLen ].upper() == self.caselessmatch) and
             (loc >= len(instring)-self.matchLen or instring[loc+self.matchLen].upper() not in self.identChars) and
             (loc == 0 or instring[loc-1].upper() not in self.identChars) ):
            return loc+self.matchLen, self.match
        exc = self.myException
        exc.loc = loc
        exc.pstr = instring
        raise exc
class Word(Token):
    """Token for matching words composed of allowed character sets.
    Defined with string containing all allowed initial characters,
    an optional string containing allowed body characters (if omitted,
    defaults to the initial character set), and an optional minimum,
    maximum, and/or exact length.  The default value for C{min} is 1 (a
    minimum value < 1 is not valid); the default values for C{max} and C{exact}
    are 0, meaning no maximum or exact length restriction.
    """
    def __init__( self, initChars, bodyChars=None, min=1, max=0, exact=0, asKeyword=False ):
        super(Word,self).__init__()
        self.initCharsOrig = initChars
        self.initChars = set(initChars)
        if bodyChars :
            self.bodyCharsOrig = bodyChars
            self.bodyChars = set(bodyChars)
        else:
            # No separate body set: body characters default to init set.
            self.bodyCharsOrig = initChars
            self.bodyChars = set(initChars)

        self.maxSpecified = max > 0

        if min < 1:
            raise ValueError("cannot specify a minimum length < 1; use Optional(Word()) if zero-length word is permitted")

        self.minLen = min

        if max > 0:
            self.maxLen = max
        else:
            self.maxLen = _MAX_INT

        if exact > 0:
            # exact overrides both min and max.
            self.maxLen = exact
            self.minLen = exact

        self.name = _ustr(self)
        self.errmsg = "Expected " + self.name
        #self.myException.msg = self.errmsg
        self.mayIndexError = False
        self.asKeyword = asKeyword

        # Fast path: when there are no length constraints and no space chars,
        # precompile an equivalent regular expression and let the re module
        # do the scan in C.
        if ' ' not in self.initCharsOrig+self.bodyCharsOrig and (min==1 and max==0 and exact==0):
            if self.bodyCharsOrig == self.initCharsOrig:
                self.reString = "[%s]+" % _escapeRegexRangeChars(self.initCharsOrig)
            elif len(self.bodyCharsOrig) == 1:
                self.reString = "%s[%s]*" % \
                                      (re.escape(self.initCharsOrig),
                                      _escapeRegexRangeChars(self.bodyCharsOrig),)
            else:
                self.reString = "[%s][%s]*" % \
                                      (_escapeRegexRangeChars(self.initCharsOrig),
                                      _escapeRegexRangeChars(self.bodyCharsOrig),)
            if self.asKeyword:
                self.reString = r"\b"+self.reString+r"\b"
            try:
                self.re = re.compile( self.reString )
            except:
                # Fall back to the character-by-character scan below.
                self.re = None

    def parseImpl( self, instring, loc, doActions=True ):
        # Regex fast path (only set up when unconstrained - see __init__).
        if self.re:
            result = self.re.match(instring,loc)
            if not result:
                exc = self.myException
                exc.loc = loc
                exc.pstr = instring
                raise exc

            loc = result.end()
            return loc,result.group()

        # Slow path: manual scan honoring min/max/exact and asKeyword.
        if not(instring[ loc ] in self.initChars):
            #~ raise ParseException( instring, loc, self.errmsg )
            exc = self.myException
            exc.loc = loc
            exc.pstr = instring
            raise exc
        start = loc
        loc += 1
        instrlen = len(instring)
        bodychars = self.bodyChars
        maxloc = start + self.maxLen
        maxloc = min( maxloc, instrlen )
        while loc < maxloc and instring[loc] in bodychars:
            loc += 1

        throwException = False
        if loc - start < self.minLen:
            throwException = True
        if self.maxSpecified and loc < instrlen and instring[loc] in bodychars:
            # More body chars follow a max-limited match: not a whole word.
            throwException = True
        if self.asKeyword:
            # Reject matches not on word boundaries.
            if (start>0 and instring[start-1] in bodychars) or (loc<instrlen and instring[loc] in bodychars):
                throwException = True

        if throwException:
            #~ raise ParseException( instring, loc, self.errmsg )
            exc = self.myException
            exc.loc = loc
            exc.pstr = instring
            raise exc

        return loc, instring[start:loc]

    def __str__( self ):
        try:
            return super(Word,self).__str__()
        except:
            pass

        if self.strRepr is None:

            def charsAsStr(s):
                # Abbreviate long charsets in the repr.
                if len(s)>4:
                    return s[:4]+"..."
                else:
                    return s

            if ( self.initCharsOrig != self.bodyCharsOrig ):
                self.strRepr = "W:(%s,%s)" % ( charsAsStr(self.initCharsOrig), charsAsStr(self.bodyCharsOrig) )
            else:
                self.strRepr = "W:(%s)" % charsAsStr(self.initCharsOrig)

        return self.strRepr
class Regex(Token):
    """Token for matching strings that match a given regular expression.
    Defined with string specifying the regular expression in a form recognized by the inbuilt Python re module.
    """
    # Type of a compiled regex object, for isinstance checks in __init__.
    compiledREtype = type(re.compile("[A-Z]"))

    def __init__( self, pattern, flags=0):
        """The parameters pattern and flags are passed to the re.compile() function as-is. See the Python re module for an explanation of the acceptable patterns and flags."""
        super(Regex,self).__init__()

        if isinstance(pattern, basestring):
            if len(pattern) == 0:
                warnings.warn("null string passed to Regex; use Empty() instead",
                        SyntaxWarning, stacklevel=2)

            self.pattern = pattern
            self.flags = flags

            try:
                self.re = re.compile(self.pattern, self.flags)
                self.reString = self.pattern
            except sre_constants.error:
                # Warn with the offending pattern, then propagate the error.
                warnings.warn("invalid pattern (%s) passed to Regex" % pattern,
                    SyntaxWarning, stacklevel=2)
                raise

        elif isinstance(pattern, Regex.compiledREtype):
            # Accept a pre-compiled regex; flags are recorded but the
            # compiled object's own flags govern matching.
            self.re = pattern
            self.pattern = \
            self.reString = str(pattern)
            self.flags = flags

        else:
            raise ValueError("Regex may only be constructed with a string or a compiled RE object")

        self.name = _ustr(self)
        self.errmsg = "Expected " + self.name
        #self.myException.msg = self.errmsg
        self.mayIndexError = False
        self.mayReturnEmpty = True

    def parseImpl( self, instring, loc, doActions=True ):
        result = self.re.match(instring,loc)
        if not result:
            exc = self.myException
            exc.loc = loc
            exc.pstr = instring
            raise exc

        loc = result.end()
        d = result.groupdict()
        ret = ParseResults(result.group())
        if d:
            # Expose named groups as named results.
            for k in d:
                ret[k] = d[k]
        return loc,ret

    def __str__( self ):
        try:
            return super(Regex,self).__str__()
        except:
            pass

        if self.strRepr is None:
            self.strRepr = "Re:(%s)" % repr(self.pattern)

        return self.strRepr
class QuotedString(Token):
    """Token for matching strings that are delimited by quoting characters.
    """
    def __init__( self, quoteChar, escChar=None, escQuote=None, multiline=False, unquoteResults=True, endQuoteChar=None):
        """
        Defined with the following parameters:
            - quoteChar - string of one or more characters defining the quote delimiting string
            - escChar - character to escape quotes, typically backslash (default=None)
            - escQuote - special quote sequence to escape an embedded quote string (such as SQL's "" to escape an embedded ") (default=None)
            - multiline - boolean indicating whether quotes can span multiple lines (default=False)
            - unquoteResults - boolean indicating whether the matched text should be unquoted (default=True)
            - endQuoteChar - string of one or more characters defining the end of the quote delimited string (default=None => same as quoteChar)
        """
        super(QuotedString,self).__init__()

        # remove white space from quote chars - wont work anyway
        quoteChar = quoteChar.strip()
        if len(quoteChar) == 0:
            warnings.warn("quoteChar cannot be the empty string",SyntaxWarning,stacklevel=2)
            raise SyntaxError()

        if endQuoteChar is None:
            endQuoteChar = quoteChar
        else:
            endQuoteChar = endQuoteChar.strip()
            if len(endQuoteChar) == 0:
                warnings.warn("endQuoteChar cannot be the empty string",SyntaxWarning,stacklevel=2)
                raise SyntaxError()

        self.quoteChar = quoteChar
        self.quoteCharLen = len(quoteChar)
        self.firstQuoteChar = quoteChar[0]
        self.endQuoteChar = endQuoteChar
        self.endQuoteCharLen = len(endQuoteChar)
        self.escChar = escChar
        self.escQuote = escQuote
        self.unquoteResults = unquoteResults

        # Build a single regex that matches the whole quoted string:
        # open quote, then a repetition of "anything that is not the start
        # of the end-quote (or an escape)", then the closing quote.
        if multiline:
            self.flags = re.MULTILINE | re.DOTALL
            self.pattern = r'%s(?:[^%s%s]' % \
                ( re.escape(self.quoteChar),
                  _escapeRegexRangeChars(self.endQuoteChar[0]),
                  (escChar is not None and _escapeRegexRangeChars(escChar) or '') )
        else:
            self.flags = 0
            self.pattern = r'%s(?:[^%s\n\r%s]' % \
                ( re.escape(self.quoteChar),
                  _escapeRegexRangeChars(self.endQuoteChar[0]),
                  (escChar is not None and _escapeRegexRangeChars(escChar) or '') )
        if len(self.endQuoteChar) > 1:
            # Multi-char end quote: allow any proper prefix of it followed
            # by a non-continuation character.
            self.pattern += (
                '|(?:' + ')|(?:'.join(["%s[^%s]" % (re.escape(self.endQuoteChar[:i]),
                                               _escapeRegexRangeChars(self.endQuoteChar[i]))
                                    for i in range(len(self.endQuoteChar)-1,0,-1)]) + ')'
                )
        if escQuote:
            self.pattern += (r'|(?:%s)' % re.escape(escQuote))
        if escChar:
            self.pattern += (r'|(?:%s.)' % re.escape(escChar))
            # BUGFIX: replacement template must be a raw string - "\g" is an
            # invalid string escape and only worked by accident.
            self.escCharReplacePattern = re.escape(self.escChar)+"(.)"
        self.pattern += (r')*%s' % re.escape(self.endQuoteChar))

        try:
            self.re = re.compile(self.pattern, self.flags)
            self.reString = self.pattern
        except sre_constants.error:
            warnings.warn("invalid pattern (%s) passed to Regex" % self.pattern,
                SyntaxWarning, stacklevel=2)
            raise

        self.name = _ustr(self)
        self.errmsg = "Expected " + self.name
        self.mayIndexError = False
        self.mayReturnEmpty = True

    def parseImpl( self, instring, loc, doActions=True ):
        # Cheap first-character check before invoking the regex engine.
        result = instring[loc] == self.firstQuoteChar and self.re.match(instring,loc) or None
        if not result:
            exc = self.myException
            exc.loc = loc
            exc.pstr = instring
            raise exc

        loc = result.end()
        ret = result.group()

        if self.unquoteResults:

            # strip off quotes
            ret = ret[self.quoteCharLen:-self.endQuoteCharLen]

            if isinstance(ret,basestring):
                # replace escaped characters
                if self.escChar:
                    # BUGFIX: raw string for the \g<1> group reference.
                    ret = re.sub(self.escCharReplacePattern, r"\g<1>", ret)

                # replace escaped quotes
                if self.escQuote:
                    ret = ret.replace(self.escQuote, self.endQuoteChar)

        return loc, ret

    def __str__( self ):
        try:
            return super(QuotedString,self).__str__()
        except Exception:
            # No name set yet - fall through to the constructed repr.
            pass

        if self.strRepr is None:
            self.strRepr = "quoted string, starting with %s ending with %s" % (self.quoteChar, self.endQuoteChar)

        return self.strRepr
class CharsNotIn(Token):
    """Token for matching words composed of characters *not* in a given set.
    Defined with string containing all disallowed characters, and an optional
    minimum, maximum, and/or exact length.  The default value for C{min} is 1 (a
    minimum value < 1 is not valid); the default values for C{max} and C{exact}
    are 0, meaning no maximum or exact length restriction.
    """
    def __init__( self, notChars, min=1, max=0, exact=0 ):
        super(CharsNotIn,self).__init__()
        # Whitespace is significant here - it may itself be a matched char.
        self.skipWhitespace = False
        self.notChars = notChars

        if min < 1:
            raise ValueError("cannot specify a minimum length < 1; use Optional(CharsNotIn()) if zero-length char group is permitted")

        self.minLen = min

        if max > 0:
            self.maxLen = max
        else:
            self.maxLen = _MAX_INT

        if exact > 0:
            # exact overrides both min and max.
            self.maxLen = exact
            self.minLen = exact

        self.name = _ustr(self)
        self.errmsg = "Expected " + self.name
        self.mayReturnEmpty = ( self.minLen == 0 )
        #self.myException.msg = self.errmsg
        self.mayIndexError = False

    def parseImpl( self, instring, loc, doActions=True ):
        if instring[loc] in self.notChars:
            #~ raise ParseException( instring, loc, self.errmsg )
            exc = self.myException
            exc.loc = loc
            exc.pstr = instring
            raise exc

        start = loc
        loc += 1
        notchars = self.notChars
        # Scan forward until a disallowed char, the max length, or EOS.
        maxlen = min( start+self.maxLen, len(instring) )
        while loc < maxlen and \
              (instring[loc] not in notchars):
            loc += 1

        if loc - start < self.minLen:
            #~ raise ParseException( instring, loc, self.errmsg )
            exc = self.myException
            exc.loc = loc
            exc.pstr = instring
            raise exc

        return loc, instring[start:loc]

    def __str__( self ):
        try:
            return super(CharsNotIn, self).__str__()
        except:
            pass

        if self.strRepr is None:
            if len(self.notChars) > 4:
                self.strRepr = "!W:(%s...)" % self.notChars[:4]
            else:
                self.strRepr = "!W:(%s)" % self.notChars

        return self.strRepr
class White(Token):
    """Special matching class for matching whitespace.  Normally, whitespace is ignored
    by pyparsing grammars.  This class is included when some whitespace structures
    are significant.  Define with a string containing the whitespace characters to be
    matched; default is C{" \\t\\r\\n"}.  Also takes optional C{min}, C{max}, and C{exact} arguments,
    as defined for the C{Word} class."""
    # Printable tags used when building this element's display name.
    whiteStrs = {
        " " : "<SPC>",
        "\t": "<TAB>",
        "\n": "<LF>",
        "\r": "<CR>",
        "\f": "<FF>",
        }
    def __init__(self, ws=" \t\r\n", min=1, max=0, exact=0):
        super(White,self).__init__()
        self.matchWhite = ws
        # Whatever we are matching must not also be skipped as whitespace:
        # remove the matched chars from this element's skip set.
        self.setWhitespaceChars( "".join([c for c in self.whiteChars if c not in self.matchWhite]) )
        #~ self.leaveWhitespace()
        self.name = ("".join([White.whiteStrs[c] for c in self.matchWhite]))
        self.mayReturnEmpty = True
        self.errmsg = "Expected " + self.name
        #self.myException.msg = self.errmsg

        self.minLen = min

        if max > 0:
            self.maxLen = max
        else:
            self.maxLen = _MAX_INT

        if exact > 0:
            # exact overrides both min and max.
            self.maxLen = exact
            self.minLen = exact

    def parseImpl( self, instring, loc, doActions=True ):
        if not(instring[ loc ] in self.matchWhite):
            #~ raise ParseException( instring, loc, self.errmsg )
            exc = self.myException
            exc.loc = loc
            exc.pstr = instring
            raise exc
        start = loc
        loc += 1
        maxloc = start + self.maxLen
        maxloc = min( maxloc, len(instring) )
        while loc < maxloc and instring[loc] in self.matchWhite:
            loc += 1

        if loc - start < self.minLen:
            #~ raise ParseException( instring, loc, self.errmsg )
            exc = self.myException
            exc.loc = loc
            exc.pstr = instring
            raise exc

        return loc, instring[start:loc]
class _PositionToken(Token):
    """Common base for zero-width tokens that assert a position in the input
    (line/column/string boundaries) rather than consuming text."""
    def __init__( self ):
        super(_PositionToken,self).__init__()
        # Name each subclass after itself; all are zero-width and safe at EOS.
        self.name = self.__class__.__name__
        self.mayReturnEmpty = True
        self.mayIndexError = False
class GoToColumn(_PositionToken):
    """Token to advance to a specific column of input text; useful for tabular report scraping."""
    def __init__( self, colno ):
        super(GoToColumn,self).__init__()
        # Target (1-based) column number to advance to.
        self.col = colno

    def preParse( self, instring, loc ):
        # Skip ignorables and whitespace until the target column (or EOS).
        if col(loc,instring) != self.col:
            instrlen = len(instring)
            if self.ignoreExprs:
                loc = self._skipIgnorables( instring, loc )
            while loc < instrlen and instring[loc].isspace() and col( loc, instring ) != self.col :
                loc += 1
        return loc

    def parseImpl( self, instring, loc, doActions=True ):
        thiscol = col( loc, instring )
        if thiscol > self.col:
            # Already past the target column - cannot back up.
            raise ParseException( instring, loc, "Text not in expected column", self )
        newloc = loc + self.col - thiscol
        # Return the skipped-over text as the matched token.
        ret = instring[ loc: newloc ]
        return newloc, ret
class LineStart(_PositionToken):
    """Matches if current position is at the beginning of a line within the parse string"""
    def __init__( self ):
        super(LineStart,self).__init__()
        # Newlines are significant to this token, so exclude them from the
        # whitespace-skip set.
        self.setWhitespaceChars( ParserElement.DEFAULT_WHITE_CHARS.replace("\n","") )
        self.errmsg = "Expected start of line"
        #self.myException.msg = self.errmsg

    def preParse( self, instring, loc ):
        preloc = super(LineStart,self).preParse(instring,loc)
        # NOTE(review): this inspects instring[preloc] but advances the
        # original loc rather than preloc - presumably intentional for the
        # preloc == loc case, but confirm before changing.
        if instring[preloc] == "\n":
            loc += 1
        return loc

    def parseImpl( self, instring, loc, doActions=True ):
        # Accept at string start, after skippable leading whitespace, or
        # immediately following a newline.
        if not( loc==0 or
            (loc == self.preParse( instring, 0 )) or
            (instring[loc-1] == "\n") ): #col(loc, instring) != 1:
            #~ raise ParseException( instring, loc, "Expected start of line" )
            exc = self.myException
            exc.loc = loc
            exc.pstr = instring
            raise exc
        return loc, []
class LineEnd(_PositionToken):
    """Matches if current position is at the end of a line within the parse string"""
    def __init__( self ):
        super(LineEnd,self).__init__()
        # keep "\n" out of the skippable whitespace so it can be matched here
        self.setWhitespaceChars( ParserElement.DEFAULT_WHITE_CHARS.replace("\n","") )
        self.errmsg = "Expected end of line"
        #self.myException.msg = self.errmsg
    def parseImpl( self, instring, loc, doActions=True ):
        if loc<len(instring):
            if instring[loc] == "\n":
                # consume the newline and return it as the matched token
                return loc+1, "\n"
            else:
                #~ raise ParseException( instring, loc, "Expected end of line" )
                exc = self.myException
                exc.loc = loc
                exc.pstr = instring
                raise exc
        elif loc == len(instring):
            # end of string counts as end of line; advance past the end
            return loc+1, []
        else:
            # loc already beyond the end of the string
            exc = self.myException
            exc.loc = loc
            exc.pstr = instring
            raise exc
class StringStart(_PositionToken):
    """Matches if current position is at the beginning of the parse string"""
    def __init__(self):
        super(StringStart, self).__init__()
        self.errmsg = "Expected start of text"
    def parseImpl(self, instring, loc, doActions=True):
        # position 0 always matches; any other position matches only if all
        # text before it is skippable whitespace/ignorables (i.e. preParse
        # from position 0 lands exactly on loc)
        if loc != 0 and loc != self.preParse(instring, 0):
            exc = self.myException
            exc.loc = loc
            exc.pstr = instring
            raise exc
        return loc, []
class StringEnd(_PositionToken):
    """Matches if current position is at the end of the parse string"""
    def __init__( self ):
        super(StringEnd,self).__init__()
        self.errmsg = "Expected end of text"
    def parseImpl( self, instring, loc, doActions=True ):
        """Succeed (with no tokens) only at or beyond the end of the input.

        Raises the cached per-instance exception when characters remain.
        The original code had a trailing ``else: raise`` after testing
        ``<``, ``==`` and ``>`` - that branch was unreachable and has been
        folded away.
        """
        if loc < len(instring):
            # text remains - not at the end; reuse the cached exception object
            exc = self.myException
            exc.loc = loc
            exc.pstr = instring
            raise exc
        elif loc == len(instring):
            # exactly at the end - consume the virtual "end" position
            return loc+1, []
        else:
            # loc already past the end (e.g. a previous LineEnd/StringEnd
            # advanced beyond it) still counts as end-of-text
            return loc, []
class WordStart(_PositionToken):
    """Matches if the current position is at the beginning of a Word, and
       is not preceded by any character in a given set of wordChars
       (default=C{printables}). To emulate the C{\b} behavior of regular expressions,
       use C{WordStart(alphanums)}. C{WordStart} will also match at the beginning of
       the string being parsed, or at the beginning of a line.
    """
    def __init__(self, wordChars=printables):
        super(WordStart, self).__init__()
        self.wordChars = set(wordChars)
        self.errmsg = "Not at the start of a word"
    def parseImpl(self, instring, loc, doActions=True):
        # position 0 is always a word start; elsewhere the previous char must
        # lie outside wordChars while the current char lies inside it
        if loc != 0:
            prev_is_word = instring[loc-1] in self.wordChars
            cur_not_word = instring[loc] not in self.wordChars
            if prev_is_word or cur_not_word:
                exc = self.myException
                exc.loc = loc
                exc.pstr = instring
                raise exc
        return loc, []
class WordEnd(_PositionToken):
    """Matches if the current position is at the end of a Word, and
       is not followed by any character in a given set of wordChars
       (default=C{printables}). To emulate the C{\b} behavior of regular expressions,
       use C{WordEnd(alphanums)}. C{WordEnd} will also match at the end of
       the string being parsed, or at the end of a line.
    """
    def __init__(self, wordChars=printables):
        super(WordEnd, self).__init__()
        self.wordChars = set(wordChars)
        # a word may end right before whitespace; do not skip it first
        self.skipWhitespace = False
        self.errmsg = "Not at the end of a word"
    def parseImpl(self, instring, loc, doActions=True):
        instrlen = len(instring)
        # end of string always qualifies as a word end; otherwise the previous
        # char must be a word char and the current char must not be
        if 0 < instrlen and loc < instrlen:
            cur_is_word = instring[loc] in self.wordChars
            prev_not_word = instring[loc-1] not in self.wordChars
            if cur_is_word or prev_not_word:
                exc = self.myException
                exc.loc = loc
                exc.pstr = instring
                raise exc
        return loc, []
class ParseExpression(ParserElement):
    """Abstract subclass of ParserElement, for combining and post-processing parsed tokens.

    Holds a list of contained expressions in self.exprs; concrete subclasses
    (And, Or, MatchFirst, Each) define how those expressions combine.
    """
    def __init__( self, exprs, savelist = False ):
        # normalize exprs to a list: accept a list as-is, wrap a single
        # string as a Literal, expand any other iterable, or wrap a single
        # expression in a one-element list
        super(ParseExpression,self).__init__(savelist)
        if isinstance( exprs, list ):
            self.exprs = exprs
        elif isinstance( exprs, basestring ):
            self.exprs = [ Literal( exprs ) ]
        else:
            try:
                self.exprs = list( exprs )
            except TypeError:
                self.exprs = [ exprs ]
        self.callPreparse = False
    def __getitem__( self, i ):
        # index directly into the contained expressions
        return self.exprs[i]
    def append( self, other ):
        # add another contained expression; invalidate the cached repr
        self.exprs.append( other )
        self.strRepr = None
        return self
    def leaveWhitespace( self ):
        """Extends leaveWhitespace defined in base class, and also invokes leaveWhitespace on
           all contained expressions."""
        self.skipWhitespace = False
        # copy contained exprs first so other grammars sharing them keep
        # their original whitespace behavior
        self.exprs = [ e.copy() for e in self.exprs ]
        for e in self.exprs:
            e.leaveWhitespace()
        return self
    def ignore( self, other ):
        # register the ignorable on self, then propagate the registered
        # (possibly wrapped) expression to all contained expressions
        if isinstance( other, Suppress ):
            if other not in self.ignoreExprs:
                super( ParseExpression, self).ignore( other )
                for e in self.exprs:
                    e.ignore( self.ignoreExprs[-1] )
        else:
            super( ParseExpression, self).ignore( other )
            for e in self.exprs:
                e.ignore( self.ignoreExprs[-1] )
        return self
    def __str__( self ):
        try:
            return super(ParseExpression,self).__str__()
        except:
            pass
        if self.strRepr is None:
            self.strRepr = "%s:(%s)" % ( self.__class__.__name__, _ustr(self.exprs) )
        return self.strRepr
    def streamline( self ):
        super(ParseExpression,self).streamline()
        for e in self.exprs:
            e.streamline()
        # collapse nested And's of the form And( And( And( a,b), c), d) to And( a,b,c,d )
        # but only if there are no parse actions or resultsNames on the nested And's
        # (likewise for Or's and MatchFirst's)
        if ( len(self.exprs) == 2 ):
            other = self.exprs[0]
            if ( isinstance( other, self.__class__ ) and
                  not(other.parseAction) and
                  other.resultsName is None and
                  not other.debug ):
                self.exprs = other.exprs[:] + [ self.exprs[1] ]
                self.strRepr = None
                self.mayReturnEmpty |= other.mayReturnEmpty
                self.mayIndexError |= other.mayIndexError
            other = self.exprs[-1]
            if ( isinstance( other, self.__class__ ) and
                  not(other.parseAction) and
                  other.resultsName is None and
                  not other.debug ):
                self.exprs = self.exprs[:-1] + other.exprs[:]
                self.strRepr = None
                self.mayReturnEmpty |= other.mayReturnEmpty
                self.mayIndexError |= other.mayIndexError
        return self
    def setResultsName( self, name, listAllMatches=False ):
        ret = super(ParseExpression,self).setResultsName(name,listAllMatches)
        return ret
    def validate( self, validateTrace=[] ):
        # recursively validate contained expressions, then check this level
        tmp = validateTrace[:]+[self]
        for e in self.exprs:
            e.validate(tmp)
        self.checkRecursion( [] )
class And(ParseExpression):
    """Requires all given C{ParseExpressions} to be found in the given order.
       Expressions may be separated by whitespace.
       May be constructed using the '+' operator.
    """
    class _ErrorStop(Empty):
        # marker expression - parse failures after this point in the And are
        # raised as non-backtracking ParseSyntaxExceptions
        def __init__(self, *args, **kwargs):
            super(Empty,self).__init__(*args, **kwargs)
            self.leaveWhitespace()
    def __init__( self, exprs, savelist = True ):
        super(And,self).__init__(exprs, savelist)
        # And can only match empty if every contained expression can
        self.mayReturnEmpty = True
        for e in self.exprs:
            if not e.mayReturnEmpty:
                self.mayReturnEmpty = False
                break
        # inherit leading-whitespace behavior from the first expression
        self.setWhitespaceChars( exprs[0].whiteChars )
        self.skipWhitespace = exprs[0].skipWhitespace
        self.callPreparse = True
    def parseImpl( self, instring, loc, doActions=True ):
        # pass False as last arg to _parse for first element, since we already
        # pre-parsed the string as part of our And pre-parsing
        loc, resultlist = self.exprs[0]._parse( instring, loc, doActions, callPreParse=False )
        errorStop = False
        for e in self.exprs[1:]:
            if isinstance(e, And._ErrorStop):
                errorStop = True
                continue
            if errorStop:
                # past the error stop: wrap ordinary parse errors so callers
                # cannot backtrack around them
                try:
                    loc, exprtokens = e._parse( instring, loc, doActions )
                except ParseSyntaxException:
                    raise
                except ParseBaseException:
                    pe = sys.exc_info()[1]
                    raise ParseSyntaxException(pe)
                except IndexError:
                    raise ParseSyntaxException( ParseException(instring, len(instring), self.errmsg, self) )
            else:
                loc, exprtokens = e._parse( instring, loc, doActions )
            if exprtokens or exprtokens.keys():
                resultlist += exprtokens
        return loc, resultlist
    def __iadd__(self, other ):
        # 'expr += other' appends in place rather than nesting a new And
        if isinstance( other, basestring ):
            other = Literal( other )
        return self.append( other ) #And( [ self, other ] )
    def checkRecursion( self, parseElementList ):
        subRecCheckList = parseElementList[:] + [ self ]
        for e in self.exprs:
            e.checkRecursion( subRecCheckList )
            # once an element must consume input, later elements cannot be
            # reached at the same position, so recursion checking can stop
            if not e.mayReturnEmpty:
                break
    def __str__( self ):
        if hasattr(self,"name"):
            return self.name
        if self.strRepr is None:
            self.strRepr = "{" + " ".join( [ _ustr(e) for e in self.exprs ] ) + "}"
        return self.strRepr
class Or(ParseExpression):
    """Requires that at least one C{ParseExpression} is found.
       If two expressions match, the expression that matches the longest string will be used.
       May be constructed using the '^' operator.
    """
    def __init__( self, exprs, savelist = False ):
        super(Or,self).__init__(exprs, savelist)
        # Or can match empty if any alternative can
        self.mayReturnEmpty = False
        for e in self.exprs:
            if e.mayReturnEmpty:
                self.mayReturnEmpty = True
                break
    def parseImpl( self, instring, loc, doActions=True ):
        # try every alternative, remembering the longest successful match
        # and the furthest-advancing failure (for best error reporting)
        maxExcLoc = -1
        maxMatchLoc = -1
        maxException = None
        for e in self.exprs:
            try:
                loc2 = e.tryParse( instring, loc )
            except ParseException:
                err = sys.exc_info()[1]
                if err.loc > maxExcLoc:
                    maxException = err
                    maxExcLoc = err.loc
            except IndexError:
                if len(instring) > maxExcLoc:
                    maxException = ParseException(instring,len(instring),e.errmsg,self)
                    maxExcLoc = len(instring)
            else:
                if loc2 > maxMatchLoc:
                    maxMatchLoc = loc2
                    maxMatchExp = e
        if maxMatchLoc < 0:
            if maxException is not None:
                raise maxException
            else:
                raise ParseException(instring, loc, "no defined alternatives to match", self)
        # re-parse the winning alternative for real (running parse actions)
        return maxMatchExp._parse( instring, loc, doActions )
    def __ixor__(self, other ):
        # 'expr ^= other' appends in place rather than nesting a new Or
        if isinstance( other, basestring ):
            other = Literal( other )
        return self.append( other ) #Or( [ self, other ] )
    def __str__( self ):
        if hasattr(self,"name"):
            return self.name
        if self.strRepr is None:
            self.strRepr = "{" + " ^ ".join( [ _ustr(e) for e in self.exprs ] ) + "}"
        return self.strRepr
    def checkRecursion( self, parseElementList ):
        # every alternative is reachable at the same position
        subRecCheckList = parseElementList[:] + [ self ]
        for e in self.exprs:
            e.checkRecursion( subRecCheckList )
class MatchFirst(ParseExpression):
    """Requires that at least one C{ParseExpression} is found.
       If two expressions match, the first one listed is the one that will match.
       May be constructed using the '|' operator.
    """
    def __init__( self, exprs, savelist = False ):
        super(MatchFirst,self).__init__(exprs, savelist)
        # with no alternatives, matching trivially succeeds with empty
        if exprs:
            self.mayReturnEmpty = False
            for e in self.exprs:
                if e.mayReturnEmpty:
                    self.mayReturnEmpty = True
                    break
        else:
            self.mayReturnEmpty = True
    def parseImpl( self, instring, loc, doActions=True ):
        # return the FIRST alternative that parses; on total failure report
        # the exception that advanced the furthest
        maxExcLoc = -1
        maxException = None
        for e in self.exprs:
            try:
                ret = e._parse( instring, loc, doActions )
                return ret
            except ParseException as err:
                if err.loc > maxExcLoc:
                    maxException = err
                    maxExcLoc = err.loc
            except IndexError:
                if len(instring) > maxExcLoc:
                    maxException = ParseException(instring,len(instring),e.errmsg,self)
                    maxExcLoc = len(instring)
        # only got here if no expression matched, raise exception for match that made it the furthest
        else:
            if maxException is not None:
                raise maxException
            else:
                raise ParseException(instring, loc, "no defined alternatives to match", self)
    def __ior__(self, other ):
        # 'expr |= other' appends in place rather than nesting a new MatchFirst
        if isinstance( other, basestring ):
            other = Literal( other )
        return self.append( other ) #MatchFirst( [ self, other ] )
    def __str__( self ):
        if hasattr(self,"name"):
            return self.name
        if self.strRepr is None:
            self.strRepr = "{" + " | ".join( [ _ustr(e) for e in self.exprs ] ) + "}"
        return self.strRepr
    def checkRecursion( self, parseElementList ):
        # every alternative is reachable at the same position
        subRecCheckList = parseElementList[:] + [ self ]
        for e in self.exprs:
            e.checkRecursion( subRecCheckList )
class Each(ParseExpression):
    """Requires all given C{ParseExpressions} to be found, but in any order.
       Expressions may be separated by whitespace.
       May be constructed using the '&' operator.
    """
    def __init__( self, exprs, savelist = True ):
        super(Each,self).__init__(exprs, savelist)
        self.mayReturnEmpty = True
        for e in self.exprs:
            if not e.mayReturnEmpty:
                self.mayReturnEmpty = False
                break
        self.skipWhitespace = True
        # defer partitioning exprs into required/optional until first parse
        self.initExprGroups = True
    def parseImpl( self, instring, loc, doActions=True ):
        if self.initExprGroups:
            # one-time partition of contained expressions by repetition type
            # NOTE(review): opt2 compares wrapper exprs against the unwrapped
            # contents of opt1 (e.expr), so the 'e not in opt1' test looks
            # like it can never exclude anything - verify intent
            opt1 = [ e.expr for e in self.exprs if isinstance(e,Optional) ]
            opt2 = [ e for e in self.exprs if e.mayReturnEmpty and e not in opt1 ]
            self.optionals = opt1 + opt2
            self.multioptionals = [ e.expr for e in self.exprs if isinstance(e,ZeroOrMore) ]
            self.multirequired = [ e.expr for e in self.exprs if isinstance(e,OneOrMore) ]
            self.required = [ e for e in self.exprs if not isinstance(e,(Optional,ZeroOrMore,OneOrMore)) ]
            self.required += self.multirequired
            self.initExprGroups = False
        tmpLoc = loc
        tmpReqd = self.required[:]
        tmpOpt = self.optionals[:]
        matchOrder = []
        # repeatedly sweep the remaining expressions, recording the order in
        # which they match, until a full sweep matches nothing
        keepMatching = True
        while keepMatching:
            tmpExprs = tmpReqd + tmpOpt + self.multioptionals + self.multirequired
            failed = []
            for e in tmpExprs:
                try:
                    tmpLoc = e.tryParse( instring, tmpLoc )
                except ParseException:
                    failed.append(e)
                else:
                    matchOrder.append(e)
                    if e in tmpReqd:
                        tmpReqd.remove(e)
                    elif e in tmpOpt:
                        tmpOpt.remove(e)
            if len(failed) == len(tmpExprs):
                keepMatching = False
        if tmpReqd:
            missing = ", ".join( [ _ustr(e) for e in tmpReqd ] )
            raise ParseException(instring,loc,"Missing one or more required elements (%s)" % missing )
        # add any unmatched Optionals, in case they have default values defined
        matchOrder += list(e for e in self.exprs if isinstance(e,Optional) and e.expr in tmpOpt)
        # now re-parse for real, in the discovered order
        resultlist = []
        for e in matchOrder:
            loc,results = e._parse(instring,loc,doActions)
            resultlist.append(results)
        # merge the per-expression results; duplicate keys are accumulated
        finalResults = ParseResults([])
        for r in resultlist:
            dups = {}
            for k in r.keys():
                if k in finalResults.keys():
                    tmp = ParseResults(finalResults[k])
                    tmp += ParseResults(r[k])
                    dups[k] = tmp
            finalResults += ParseResults(r)
            for k,v in dups.items():
                finalResults[k] = v
        return loc, finalResults
    def __str__( self ):
        if hasattr(self,"name"):
            return self.name
        if self.strRepr is None:
            self.strRepr = "{" + " & ".join( [ _ustr(e) for e in self.exprs ] ) + "}"
        return self.strRepr
    def checkRecursion( self, parseElementList ):
        subRecCheckList = parseElementList[:] + [ self ]
        for e in self.exprs:
            e.checkRecursion( subRecCheckList )
class ParseElementEnhance(ParserElement):
    """Abstract subclass of C{ParserElement}, for combining and post-processing parsed tokens.

    Wraps a single contained expression in self.expr (which may be None,
    e.g. an unassigned Forward) and delegates parsing to it.
    """
    def __init__( self, expr, savelist=False ):
        super(ParseElementEnhance,self).__init__(savelist)
        if isinstance( expr, basestring ):
            expr = Literal(expr)
        self.expr = expr
        self.strRepr = None
        if expr is not None:
            # adopt the wrapped expression's parsing characteristics
            self.mayIndexError = expr.mayIndexError
            self.mayReturnEmpty = expr.mayReturnEmpty
            self.setWhitespaceChars( expr.whiteChars )
            self.skipWhitespace = expr.skipWhitespace
            self.saveAsList = expr.saveAsList
            self.callPreparse = expr.callPreparse
            self.ignoreExprs.extend(expr.ignoreExprs)
    def parseImpl( self, instring, loc, doActions=True ):
        # delegate to the wrapped expression; fail outright if none assigned
        if self.expr is not None:
            return self.expr._parse( instring, loc, doActions, callPreParse=False )
        else:
            raise ParseException("",loc,self.errmsg,self)
    def leaveWhitespace( self ):
        self.skipWhitespace = False
        # BUGFIX: guard the None case before copying - previously
        # self.expr.copy() ran unconditionally and raised AttributeError
        # when expr was None (e.g. a not-yet-assigned Forward)
        if self.expr is not None:
            self.expr = self.expr.copy()
            self.expr.leaveWhitespace()
        return self
    def ignore( self, other ):
        # register the ignorable on self, then propagate the registered
        # expression into the wrapped expression (if any)
        if isinstance( other, Suppress ):
            if other not in self.ignoreExprs:
                super( ParseElementEnhance, self).ignore( other )
                if self.expr is not None:
                    self.expr.ignore( self.ignoreExprs[-1] )
        else:
            super( ParseElementEnhance, self).ignore( other )
            if self.expr is not None:
                self.expr.ignore( self.ignoreExprs[-1] )
        return self
    def streamline( self ):
        super(ParseElementEnhance,self).streamline()
        if self.expr is not None:
            self.expr.streamline()
        return self
    def checkRecursion( self, parseElementList ):
        # re-encountering this wrapper within its own expansion means the
        # grammar is left-recursive
        if self in parseElementList:
            raise RecursiveGrammarException( parseElementList+[self] )
        subRecCheckList = parseElementList[:] + [ self ]
        if self.expr is not None:
            self.expr.checkRecursion( subRecCheckList )
    def validate( self, validateTrace=[] ):
        tmp = validateTrace[:]+[self]
        if self.expr is not None:
            self.expr.validate(tmp)
        self.checkRecursion( [] )
    def __str__( self ):
        try:
            return super(ParseElementEnhance,self).__str__()
        except Exception:
            # fall through to the locally-built representation
            pass
        if self.strRepr is None and self.expr is not None:
            self.strRepr = "%s:(%s)" % ( self.__class__.__name__, _ustr(self.expr) )
        return self.strRepr
class FollowedBy(ParseElementEnhance):
    """Lookahead matching of the given parse expression. C{FollowedBy}
    does *not* advance the parsing position within the input string, it only
    verifies that the specified parse expression matches at the current
    position. C{FollowedBy} always returns a null token list."""
    def __init__(self, expr):
        super(FollowedBy, self).__init__(expr)
        # a pure lookahead consumes nothing, so an empty match is fine
        self.mayReturnEmpty = True
    def parseImpl(self, instring, loc, doActions=True):
        # any failure from tryParse propagates; on success, stay put and
        # return no tokens
        self.expr.tryParse(instring, loc)
        return loc, []
class NotAny(ParseElementEnhance):
    """Lookahead to disallow matching with the given parse expression. C{NotAny}
    does *not* advance the parsing position within the input string, it only
    verifies that the specified parse expression does *not* match at the current
    position. Also, C{NotAny} does *not* skip over leading whitespace. C{NotAny}
    always returns a null token list. May be constructed using the '~' operator."""
    def __init__(self, expr):
        super(NotAny, self).__init__(expr)
        # assign skipWhitespace directly - calling self.leaveWhitespace()
        # would propagate the setting into the wrapped expression
        self.skipWhitespace = False
        self.mayReturnEmpty = True
        self.errmsg = "Found unwanted token, "+_ustr(self.expr)
    def parseImpl(self, instring, loc, doActions=True):
        # success of the inner expression is a failure for NotAny
        inner_matched = True
        try:
            self.expr.tryParse(instring, loc)
        except (ParseException, IndexError):
            inner_matched = False
        if inner_matched:
            exc = self.myException
            exc.loc = loc
            exc.pstr = instring
            raise exc
        return loc, []
    def __str__(self):
        if hasattr(self, "name"):
            return self.name
        if self.strRepr is None:
            self.strRepr = "~{" + _ustr(self.expr) + "}"
        return self.strRepr
class ZeroOrMore(ParseElementEnhance):
    """Optional repetition of zero or more of the given expression."""
    def __init__( self, expr ):
        super(ZeroOrMore,self).__init__(expr)
        # zero repetitions is a valid (empty) match
        self.mayReturnEmpty = True
    def parseImpl( self, instring, loc, doActions=True ):
        tokens = []
        try:
            loc, tokens = self.expr._parse( instring, loc, doActions, callPreParse=False )
            hasIgnoreExprs = ( len(self.ignoreExprs) > 0 )
            while 1:
                if hasIgnoreExprs:
                    preloc = self._skipIgnorables( instring, loc )
                else:
                    preloc = loc
                loc, tmptokens = self.expr._parse( instring, preloc, doActions )
                if tmptokens or tmptokens.keys():
                    tokens += tmptokens
        except (ParseException,IndexError):
            # the first failure ends the repetition; zero matches is fine
            pass
        return loc, tokens
    def __str__( self ):
        if hasattr(self,"name"):
            return self.name
        if self.strRepr is None:
            self.strRepr = "[" + _ustr(self.expr) + "]..."
        return self.strRepr
    def setResultsName( self, name, listAllMatches=False ):
        # repeated matches are always collected as a list under the name
        ret = super(ZeroOrMore,self).setResultsName(name,listAllMatches)
        ret.saveAsList = True
        return ret
class OneOrMore(ParseElementEnhance):
    """Repetition of one or more of the given expression."""
    def parseImpl( self, instring, loc, doActions=True ):
        # must be at least one
        loc, tokens = self.expr._parse( instring, loc, doActions, callPreParse=False )
        try:
            hasIgnoreExprs = ( len(self.ignoreExprs) > 0 )
            while 1:
                if hasIgnoreExprs:
                    preloc = self._skipIgnorables( instring, loc )
                else:
                    preloc = loc
                loc, tmptokens = self.expr._parse( instring, preloc, doActions )
                if tmptokens or tmptokens.keys():
                    tokens += tmptokens
        except (ParseException,IndexError):
            # further repetitions failed - keep what matched so far
            pass
        return loc, tokens
    def __str__( self ):
        if hasattr(self,"name"):
            return self.name
        if self.strRepr is None:
            self.strRepr = "{" + _ustr(self.expr) + "}..."
        return self.strRepr
    def setResultsName( self, name, listAllMatches=False ):
        # repeated matches are always collected as a list under the name
        ret = super(OneOrMore,self).setResultsName(name,listAllMatches)
        ret.saveAsList = True
        return ret
class _NullToken(object):
def __bool__(self):
return False
__nonzero__ = __bool__
def __str__(self):
return ""
_optionalNotMatched = _NullToken()
class Optional(ParseElementEnhance):
    """Optional matching of the given expression.
       A default return string can also be specified, if the optional expression
       is not found.
    """
    def __init__( self, exprs, default=_optionalNotMatched ):
        super(Optional,self).__init__( exprs, savelist=False )
        # _optionalNotMatched sentinel distinguishes "no default given"
        # from an explicit default of None/""/0
        self.defaultValue = default
        self.mayReturnEmpty = True
    def parseImpl( self, instring, loc, doActions=True ):
        try:
            loc, tokens = self.expr._parse( instring, loc, doActions, callPreParse=False )
        except (ParseException,IndexError):
            # expression absent: substitute the default value, if one was given
            if self.defaultValue is not _optionalNotMatched:
                if self.expr.resultsName:
                    # keep the wrapped expr's results name on the default
                    tokens = ParseResults([ self.defaultValue ])
                    tokens[self.expr.resultsName] = self.defaultValue
                else:
                    tokens = [ self.defaultValue ]
            else:
                tokens = []
        return loc, tokens
    def __str__( self ):
        if hasattr(self,"name"):
            return self.name
        if self.strRepr is None:
            self.strRepr = "[" + _ustr(self.expr) + "]"
        return self.strRepr
class SkipTo(ParseElementEnhance):
    """Token for skipping over all undefined text until the matched expression is found.
       If C{include} is set to true, the matched expression is also parsed (the skipped text
       and matched expression are returned as a 2-element list). The C{ignore}
       argument is used to define grammars (typically quoted strings and comments) that
       might contain false matches.
    """
    def __init__( self, other, include=False, ignore=None, failOn=None ):
        super( SkipTo, self ).__init__( other )
        self.ignoreExpr = ignore
        self.mayReturnEmpty = True
        self.mayIndexError = False
        self.includeMatch = include
        self.asList = False
        # failOn aborts the scan if seen before the target expression
        if failOn is not None and isinstance(failOn, basestring):
            self.failOn = Literal(failOn)
        else:
            self.failOn = failOn
        self.errmsg = "No match found for "+_ustr(self.expr)
        #self.myException = ParseException("",0,self.errmsg,self)
    def parseImpl( self, instring, loc, doActions=True ):
        # advance one character at a time until the target expression parses,
        # a failOn expression is found, or the input is exhausted
        startLoc = loc
        instrlen = len(instring)
        expr = self.expr
        failParse = False
        while loc <= instrlen:
            try:
                if self.failOn:
                    # seeing failOn before the target aborts the whole SkipTo
                    try:
                        self.failOn.tryParse(instring, loc)
                    except ParseBaseException:
                        pass
                    else:
                        failParse = True
                        raise ParseException(instring, loc, "Found expression " + str(self.failOn))
                failParse = False
                if self.ignoreExpr is not None:
                    # jump over ignorable regions (e.g. quoted strings) that
                    # could contain false matches of the target
                    while 1:
                        try:
                            loc = self.ignoreExpr.tryParse(instring,loc)
                            # print("found ignoreExpr, advance to", loc)
                        except ParseBaseException:
                            break
                expr._parse( instring, loc, doActions=False, callPreParse=False )
                skipText = instring[startLoc:loc]
                if self.includeMatch:
                    # also consume the target expr and return it after the skipped text
                    loc,mat = expr._parse(instring,loc,doActions,callPreParse=False)
                    if mat:
                        skipRes = ParseResults( skipText )
                        skipRes += mat
                        return loc, [ skipRes ]
                    else:
                        return loc, [ skipText ]
                else:
                    return loc, [ skipText ]
            except (ParseException,IndexError):
                if failParse:
                    raise
                else:
                    loc += 1
        # scanned to the end without finding the target expression
        exc = self.myException
        exc.loc = loc
        exc.pstr = instring
        raise exc
class Forward(ParseElementEnhance):
    """Forward declaration of an expression to be defined later -
       used for recursive grammars, such as algebraic infix notation.
       When the expression is known, it is assigned to the C{Forward} variable using the '<<' operator.

       Note: take care when assigning to C{Forward} not to overlook precedence of operators.
       Specifically, '|' has a lower precedence than '<<', so that::
          fwdExpr << a | b | c
       will actually be evaluated as::
          (fwdExpr << a) | b | c
       thereby leaving b and c out as parseable alternatives. It is recommended that you
       explicitly group the values inserted into the C{Forward}::
          fwdExpr << (a | b | c)
    """
    def __init__( self, other=None ):
        super(Forward,self).__init__( other, savelist=False )
    def __lshift__( self, other ):
        # bind the deferred expression and adopt its parsing characteristics
        if isinstance( other, basestring ):
            other = Literal(other)
        self.expr = other
        self.mayReturnEmpty = other.mayReturnEmpty
        self.strRepr = None
        self.mayIndexError = self.expr.mayIndexError
        self.mayReturnEmpty = self.expr.mayReturnEmpty
        self.setWhitespaceChars( self.expr.whiteChars )
        self.skipWhitespace = self.expr.skipWhitespace
        self.saveAsList = self.expr.saveAsList
        self.ignoreExprs.extend(self.expr.ignoreExprs)
        # returns None deliberately, so '<<' cannot be chained
        return None
    def leaveWhitespace( self ):
        # only affects this Forward, not the (possibly shared) bound expr
        self.skipWhitespace = False
        return self
    def streamline( self ):
        if not self.streamlined:
            self.streamlined = True
            if self.expr is not None:
                self.expr.streamline()
        return self
    def validate( self, validateTrace=[] ):
        # guard against infinite recursion through self-referencing grammars
        if self not in validateTrace:
            tmp = validateTrace[:]+[self]
            if self.expr is not None:
                self.expr.validate(tmp)
        self.checkRecursion([])
    def __str__( self ):
        if hasattr(self,"name"):
            return self.name
        # temporarily swap in _ForwardNoRecurse (whose __str__ is "...") so
        # a recursive grammar cannot recurse forever while stringifying
        self._revertClass = self.__class__
        self.__class__ = _ForwardNoRecurse
        try:
            if self.expr is not None:
                retString = _ustr(self.expr)
            else:
                retString = "None"
        finally:
            self.__class__ = self._revertClass
        return self.__class__.__name__ + ": " + retString
    def copy(self):
        if self.expr is not None:
            return super(Forward,self).copy()
        else:
            # unbound Forward: return a new Forward deferring to this one
            ret = Forward()
            ret << self
            return ret
class _ForwardNoRecurse(Forward):
    """Temporary stand-in used by Forward.__str__ to break infinite recursion."""
    def __str__(self):
        return "..."
class TokenConverter(ParseElementEnhance):
    """Abstract subclass of ParseExpression, for converting parsed results."""
    def __init__(self, expr, savelist=False):
        # savelist is accepted for signature compatibility but not forwarded
        super(TokenConverter, self).__init__(expr)
        self.saveAsList = False
class Upcase(TokenConverter):
    """Converter to upper case all matching tokens.

    Deprecated - use the upcaseTokens parse action instead.
    """
    def __init__(self, *args):
        super(Upcase,self).__init__(*args)
        warnings.warn("Upcase class is deprecated, use upcaseTokens parse action instead",
                      DeprecationWarning,stacklevel=2)
    def postParse( self, instring, loc, tokenlist ):
        # call each token's own upper() method - the previous
        # map(string.upper, ...) breaks on Python 3, where the string
        # module no longer has an upper() function
        return [ tok.upper() for tok in tokenlist ]
class Combine(TokenConverter):
    """Converter to concatenate all matching tokens to a single string.
       By default, the matching patterns must also be contiguous in the input string;
       this can be disabled by specifying C{'adjacent=False'} in the constructor.
    """
    def __init__( self, expr, joinString="", adjacent=True ):
        super(Combine,self).__init__( expr )
        # suppress whitespace-stripping in contained parse expressions, but re-enable it on the Combine itself
        if adjacent:
            self.leaveWhitespace()
        self.adjacent = adjacent
        self.skipWhitespace = True
        self.joinString = joinString
        self.callPreparse = True
    def ignore( self, other ):
        # when adjacent, ignorables would break contiguity, so register the
        # ignore expression on self only, not on the contained expression
        if self.adjacent:
            ParserElement.ignore(self, other)
        else:
            super( Combine, self).ignore( other )
        return self
    def postParse( self, instring, loc, tokenlist ):
        # replace all matched tokens with one joined string token, keeping
        # the original ParseResults' named results
        retToks = tokenlist.copy()
        del retToks[:]
        retToks += ParseResults([ "".join(tokenlist._asStringList(self.joinString)) ], modal=self.modalResults)
        if self.resultsName and len(retToks.keys())>0:
            return [ retToks ]
        else:
            return retToks
class Group(TokenConverter):
    """Converter to return the matched tokens as a list - useful for returning tokens of ZeroOrMore and OneOrMore expressions."""
    def __init__(self, expr):
        super(Group, self).__init__(expr)
        # grouped results stay nested when given a results name
        self.saveAsList = True
    def postParse(self, instring, loc, tokenlist):
        # wrap the entire token list in a single-element list
        return [tokenlist]
class Dict(TokenConverter):
    """Converter to return a repetitive expression as a list, but also as a dictionary.
       Each element can also be referenced using the first token in the expression as its key.
       Useful for tabular report scraping when the first column can be used as a item key.
    """
    def __init__( self, exprs ):
        super(Dict,self).__init__( exprs )
        self.saveAsList = True
    def postParse( self, instring, loc, tokenlist ):
        # install each sub-token group into the results using the group's
        # first token as the key
        for i,tok in enumerate(tokenlist):
            if len(tok) == 0:
                continue
            ikey = tok[0]
            if isinstance(ikey,int):
                # keys must be strings
                ikey = _ustr(tok[0]).strip()
            if len(tok)==1:
                # key with no value
                tokenlist[ikey] = _ParseResultsWithOffset("",i)
            elif len(tok)==2 and not isinstance(tok[1],ParseResults):
                # simple key-value pair
                tokenlist[ikey] = _ParseResultsWithOffset(tok[1],i)
            else:
                # multi-token value: store everything after the key
                dictvalue = tok.copy() #ParseResults(i)
                del dictvalue[0]
                if len(dictvalue)!= 1 or (isinstance(dictvalue,ParseResults) and dictvalue.keys()):
                    tokenlist[ikey] = _ParseResultsWithOffset(dictvalue,i)
                else:
                    tokenlist[ikey] = _ParseResultsWithOffset(dictvalue[0],i)
        if self.resultsName:
            return [ tokenlist ]
        else:
            return tokenlist
class Suppress(TokenConverter):
    """Converter for ignoring the results of a parsed expression."""
    def postParse(self, instring, loc, tokenlist):
        # drop every matched token from the results
        return []
    def suppress(self):
        # already suppressed - nothing more to do
        return self
class OnlyOnce(object):
    """Wrapper for parse actions, to ensure they are only called once."""
    def __init__(self, methodCall):
        self.callable = ParserElement._normalizeParseActionArgs(methodCall)
        self.called = False
    def __call__(self,s,l,t):
        # first invocation runs the wrapped action; subsequent invocations
        # fail the parse until reset() is called
        if not self.called:
            results = self.callable(s,l,t)
            self.called = True
            return results
        raise ParseException(s,l,"")
    def reset(self):
        # re-arm so the wrapped action may fire once more
        self.called = False
def traceParseAction(f):
    """Decorator for debugging parse actions.

    Wraps parse action f so that entry (with line, location and tokens)
    and exit (return value or exception) are logged to sys.stderr.
    """
    f = ParserElement._normalizeParseActionArgs(f)
    def z(*paArgs):
        # func_name exists only on Python 2; fall back to __name__ (and a
        # placeholder for callables lacking both) so this works on Python 3
        thisFunc = getattr(f, "func_name", getattr(f, "__name__", "<parse action>"))
        s,l,t = paArgs[-3:]
        if len(paArgs)>3:
            # bound method: prefix with the owning class name
            thisFunc = paArgs[0].__class__.__name__ + '.' + thisFunc
        sys.stderr.write( ">>entering %s(line: '%s', %d, %s)\n" % (thisFunc,line(l,s),l,t) )
        try:
            ret = f(*paArgs)
        except Exception:
            exc = sys.exc_info()[1]
            sys.stderr.write( "<<leaving %s (exception: %s)\n" % (thisFunc,exc) )
            raise
        sys.stderr.write( "<<leaving %s (ret: %s)\n" % (thisFunc,ret) )
        return ret
    try:
        z.__name__ = f.__name__
    except AttributeError:
        pass
    return z
#
# global helpers
#
def delimitedList(expr, delim=",", combine=False):
    """Helper to define a delimited list of expressions - the delimiter defaults to ','.
    By default, the list elements and delimiters can have intervening whitespace, and
    comments, but this can be overridden by passing C{combine=True} in the constructor.
    If C{combine} is set to True, the matching tokens are returned as a single token
    string, with the delimiters included; otherwise, the matching tokens are returned
    as a list of tokens, with the delimiters suppressed.
    """
    dlName = "%s [%s %s]..." % (_ustr(expr), _ustr(delim), _ustr(expr))
    if combine:
        listExpr = Combine(expr + ZeroOrMore(delim + expr))
    else:
        listExpr = expr + ZeroOrMore(Suppress(delim) + expr)
    return listExpr.setName(dlName)
def countedArray( expr ):
    """Helper to define a counted list of expressions.
       This helper defines a pattern of the form::
           integer expr expr expr...
       where the leading integer tells how many expr expressions follow.
       The matched tokens returns the array of expr tokens as a list - the leading count token is suppressed.
    """
    arrayExpr = Forward()
    def countFieldParseAction(s,l,t):
        # bind the just-parsed count n into the Forward: exactly n exprs
        # (or an empty match when n is 0); the count token itself is dropped
        n = int(t[0])
        arrayExpr << (n and Group(And([expr]*n)) or Group(empty))
        return []
    return ( Word(nums).setName("arrayLen").setParseAction(countFieldParseAction, callDuringTry=True) + arrayExpr )
def _flatten(L):
if type(L) is not list: return [L]
if L == []: return L
return _flatten(L[0]) + _flatten(L[1:])
def matchPreviousLiteral(expr):
    """Helper to define an expression that is indirectly defined from
       the tokens matched in a previous expression, that is, it looks
       for a 'repeat' of a previous expression. For example::
           first = Word(nums)
           second = matchPreviousLiteral(first)
           matchExpr = first + ":" + second
       will match C{"1:1"}, but not C{"1:2"}. Because this matches a
       previous literal, will also match the leading C{"1:1"} in C{"1:10"}.
       If this is not desired, use C{matchPreviousExpr}.
       Do *not* use with packrat parsing enabled.
    """
    rep = Forward()
    def copyTokenToRepeater(s,l,t):
        # rebind rep to literal(s) for exactly the text just matched
        if t:
            if len(t) == 1:
                rep << t[0]
            else:
                # flatten t tokens
                tflat = _flatten(t.asList())
                rep << And( [ Literal(tt) for tt in tflat ] )
        else:
            # previous expression matched no tokens - repeat matches empty
            rep << Empty()
    expr.addParseAction(copyTokenToRepeater, callDuringTry=True)
    return rep
def matchPreviousExpr(expr):
    """Helper to define an expression that is indirectly defined from
       the tokens matched in a previous expression, that is, it looks
       for a 'repeat' of a previous expression. For example::
           first = Word(nums)
           second = matchPreviousExpr(first)
           matchExpr = first + ":" + second
       will match C{"1:1"}, but not C{"1:2"}. Because this matches by
       expressions, will *not* match the leading C{"1:1"} in C{"1:10"};
       the expressions are evaluated first, and then compared, so
       C{"1"} is compared with C{"10"}.
       Do *not* use with packrat parsing enabled.
    """
    rep = Forward()
    # rep parses with a copy of the original expression...
    e2 = expr.copy()
    rep << e2
    def copyTokenToRepeater(s,l,t):
        # ...and after expr matches, a parse action is installed on rep that
        # requires rep's tokens to equal the tokens expr just produced
        matchTokens = _flatten(t.asList())
        def mustMatchTheseTokens(s,l,t):
            theseTokens = _flatten(t.asList())
            if theseTokens != matchTokens:
                raise ParseException("",0,"")
        rep.setParseAction( mustMatchTheseTokens, callDuringTry=True )
    expr.addParseAction(copyTokenToRepeater, callDuringTry=True)
    return rep
def _escapeRegexRangeChars(s):
    """Backslash-escape the characters that are special inside a regex
    character class (backslash, '^', '-', ']'), and encode literal
    newline/tab as their escape sequences."""
    for special in "\\^-]":
        s = s.replace(special, _bslash + special)
    s = s.replace("\n", r"\n").replace("\t", r"\t")
    return _ustr(s)
def oneOf( strs, caseless=False, useRegex=True ):
    """Helper to quickly define a set of alternative Literals, and makes sure to do
    longest-first testing when there is a conflict, regardless of the input order,
    but returns a C{MatchFirst} for best performance.
    Parameters:
     - strs - a string of space-delimited literals, or a list of string literals
     - caseless - (default=False) - treat all literals as caseless
     - useRegex - (default=True) - as an optimization, will generate a Regex
       object; otherwise, will generate a C{MatchFirst} object (if C{caseless=True}, or
       if creating a C{Regex} raises an exception)
    """
    if caseless:
        isequal = ( lambda a,b: a.upper() == b.upper() )
        masks = ( lambda a,b: b.upper().startswith(a.upper()) )
        parseElementClass = CaselessLiteral
    else:
        isequal = ( lambda a,b: a == b )
        masks = ( lambda a,b: b.startswith(a) )
        parseElementClass = Literal
    if isinstance(strs,(list,tuple)):
        symbols = list(strs[:])
    elif isinstance(strs,basestring):
        symbols = strs.split()
    else:
        warnings.warn("Invalid argument to oneOf, expected string or list",
                SyntaxWarning, stacklevel=2)
        # BUGFIX: previously `symbols` was left unbound here, so an invalid
        # argument raised NameError below instead of degrading gracefully
        symbols = []
    # remove duplicates, and hoist any symbol that another symbol is a
    # prefix of, so that matching is effectively longest-first
    i = 0
    while i < len(symbols)-1:
        cur = symbols[i]
        for j,other in enumerate(symbols[i+1:]):
            if ( isequal(other, cur) ):
                del symbols[i+j+1]
                break
            elif ( masks(cur, other) ):
                del symbols[i+j+1]
                symbols.insert(i,other)
                cur = other
                break
        else:
            i += 1
    if not caseless and useRegex:
        #~ print (strs,"->", "|".join( [ _escapeRegexChars(sym) for sym in symbols] ))
        try:
            if len(symbols)==len("".join(symbols)):
                # all symbols are single characters - a character class is fastest
                return Regex( "[%s]" % "".join( [ _escapeRegexRangeChars(sym) for sym in symbols] ) )
            else:
                return Regex( "|".join( [ re.escape(sym) for sym in symbols] ) )
        except Exception:
            # BUGFIX: was a bare `except:` - narrowed so KeyboardInterrupt and
            # SystemExit are no longer swallowed while falling back to MatchFirst
            warnings.warn("Exception creating Regex for oneOf, building MatchFirst",
                    SyntaxWarning, stacklevel=2)
    # last resort, just use MatchFirst
    return MatchFirst( [ parseElementClass(sym) for sym in symbols ] )
def dictOf( key, value ):
    """Build a C{Dict} matcher from separate key and value patterns, taking
    care of assembling the C{Dict}, C{ZeroOrMore}, and C{Group} tokens in
    the proper order. The key pattern can include delimiting markers or
    punctuation, as long as they are suppressed, thereby leaving the
    significant key text. The value pattern can include named results, so
    that the C{Dict} results can include named token fields.
    """
    entry = Group( key + value )
    return Dict( ZeroOrMore( entry ) )
def originalTextFor(expr, asString=True):
    """Helper to return the original, untokenized text for a given expression. Useful to
    restore the parsed fields of an HTML start tag into the raw tag text itself, or to
    revert separate tokens with intervening whitespace back to the original matching
    input text. Simpler to use than the parse action C{keepOriginalText}, and does not
    require the inspect module to chase up the call stack. By default, returns a
    string containing the original parsed text.
    If the optional C{asString} argument is passed as False, then the return value is a
    C{ParseResults} containing any results names that were originally matched, and a
    single token containing the original matched text from the input string. So if
    the expression passed to C{originalTextFor} contains expressions with defined
    results names, you must set C{asString} to False if you want to preserve those
    results name values."""
    # bracket expr with markers that record the start and end input offsets
    startMarker = Empty().setParseAction(lambda s,loc,t: loc)
    endMarker = startMarker.copy()
    endMarker.callPreparse = False
    matchExpr = startMarker("_original_start") + expr + endMarker("_original_end")
    if asString:
        def extractText(s,l,t):
            # replace all tokens with the raw input slice
            return s[t._original_start:t._original_end]
    else:
        def extractText(s,l,t):
            # keep a ParseResults: single raw-text token, marker names removed
            del t[:]
            t.insert(0, s[t._original_start:t._original_end])
            del t["_original_start"]
            del t["_original_end"]
    matchExpr.setParseAction(extractText)
    return matchExpr
# convenience constants for positional expressions
empty = Empty().setName("empty")
lineStart = LineStart().setName("lineStart")
lineEnd = LineEnd().setName("lineEnd")
stringStart = StringStart().setName("stringStart")
stringEnd = StringEnd().setName("stringEnd")
# building blocks for srange(): a mini-grammar for regex-style [] set contents
# an escaped punctuation char such as \] or \- ; parse action drops the backslash
_escapedPunc = Word( _bslash, r"\[]-*.$+^?()~ ", exact=2 ).setParseAction(lambda s,l,t:t[0][1])
_printables_less_backslash = "".join([ c for c in printables if c not in r"\]" ])
# \0x41-style hex escape, converted to the corresponding character
_escapedHexChar = Combine( Suppress(_bslash + "0x") + Word(hexnums) ).setParseAction(lambda s,l,t:unichr(int(t[0],16)))
# \041-style octal escape, converted to the corresponding character
_escapedOctChar = Combine( Suppress(_bslash) + Word("0","01234567") ).setParseAction(lambda s,l,t:unichr(int(t[0],8)))
_singleChar = _escapedPunc | _escapedHexChar | _escapedOctChar | Word(_printables_less_backslash,exact=1)
# a dash-separated pair of single chars, e.g. a-z
_charRange = Group(_singleChar + Suppress("-") + _singleChar)
_reBracketExpr = Literal("[") + Optional("^").setResultsName("negate") + Group( OneOrMore( _charRange | _singleChar ) ).setResultsName("body") + "]"
# expand a (start,end) char-range pair into the full run of characters;
# single characters pass through unchanged
_expanded = lambda p: (isinstance(p,ParseResults) and ''.join([ unichr(c) for c in range(ord(p[0]),ord(p[1])+1) ]) or p)
def srange(s):
    r"""Helper to easily define string ranges for use in Word construction. Borrows
    syntax from regexp '[]' string range definitions::
        srange("[0-9]") -> "0123456789"
        srange("[a-z]") -> "abcdefghijklmnopqrstuvwxyz"
        srange("[a-z$_]") -> "abcdefghijklmnopqrstuvwxyz$_"
    The input string must be enclosed in []'s, and the returned string is the expanded
    character set joined into a single string.
    The values enclosed in the []'s may be::
        a single character
        an escaped character with a leading backslash (such as \- or \])
        an escaped hex character with a leading '\0x' (\0x21, which is a '!' character)
        an escaped octal character with a leading '\0' (\041, which is a '!' character)
        a range of any of the above, separated by a dash ('a-z', etc.)
        any combination of the above ('aeiouy', 'a-zA-Z0-9_$', etc.)
    Returns "" if the input cannot be parsed as a bracketed range expression.
    """
    try:
        return "".join([_expanded(part) for part in _reBracketExpr.parseString(s).body])
    except Exception:
        # BUGFIX: was a bare `except:` - narrowed so KeyboardInterrupt and
        # SystemExit are not silently converted into an empty result
        return ""
def matchOnlyAtCol(n):
    """Helper for building parse actions that only succeed when the match
    begins at input column C{n}.
    """
    def _checkColumn(strg,locn,toks):
        # col() is 1-based; fail the parse for any other starting column
        if col(locn,strg) != n:
            raise ParseException(strg,locn,"matched token not at column %d" % n)
    return _checkColumn
def replaceWith(replStr):
    """Create a parse action that replaces whatever matched with the fixed
    value C{replStr}. Especially useful when used with C{transformString()}.
    """
    def _replacer(*args):
        # ignore the (s, loc, toks) arguments entirely
        return [replStr]
    return _replacer
def removeQuotes(s,l,t):
    """Parse action that strips the enclosing quotation marks from a parsed
    quoted string. To use, add this parse action to a quoted string::
        quotedString.setParseAction( removeQuotes )
    """
    quoted = t[0]
    # drop the first and last character (the quote marks)
    return quoted[1:-1]
def upcaseTokens(s,l,t):
    """Parse action that converts every matched token to upper case."""
    return [ _ustr(tt).upper() for tt in t ]
def downcaseTokens(s,l,t):
    """Parse action that converts every matched token to lower case."""
    return [ _ustr(tt).lower() for tt in t ]
def keepOriginalText(s,startLoc,t):
    """DEPRECATED - use new helper method C{originalTextFor}.
    Helper parse action to preserve original parsed text,
    overriding any nested parse actions."""
    try:
        # walk the interpreter call stack to find where this match ended
        endloc = getTokensEndLoc()
    except ParseException:
        raise ParseFatalException("incorrect usage of keepOriginalText - may only be called as a parse action")
    # replace all tokens with the raw matched slice of the input string
    del t[:]
    t += ParseResults(s[startLoc:endloc])
    return t
def getTokensEndLoc():
    """Method to be called from within a parse action to determine the end
    location of the parsed tokens."""
    import inspect
    fstack = inspect.stack()
    try:
        # search up the stack (through intervening argument normalizers) for correct calling routine
        for f in fstack[2:]:
            if f[3] == "_parseNoCache":
                # found the parser frame; its local 'loc' is the current end location
                endloc = f[0].f_locals["loc"]
                return endloc
        else:
            # for-else: loop completed without finding the parser frame
            raise ParseFatalException("incorrect usage of getTokensEndLoc - may only be called from within a parse action")
    finally:
        # break the reference cycle created by inspect.stack()
        del fstack
def _makeTags(tagStr, xml):
    """Internal helper to construct opening and closing tag expressions, given a tag name"""
    if isinstance(tagStr,basestring):
        resname = tagStr
        # XML tag names are case-sensitive; HTML tag names are not
        tagStr = Keyword(tagStr, caseless=not xml)
    else:
        # already a pyparsing expression; use its name for results naming
        resname = tagStr.name
    # attribute names: letters, digits, '_', '-', and ':' (namespace prefixes)
    tagAttrName = Word(alphas,alphanums+"_-:")
    if (xml):
        # XML mode: values must be double-quoted and '=' is required
        tagAttrValue = dblQuotedString.copy().setParseAction( removeQuotes )
        openTag = Suppress("<") + tagStr("tag") + \
                Dict(ZeroOrMore(Group( tagAttrName + Suppress("=") + tagAttrValue ))) + \
                Optional("/",default=[False]).setResultsName("empty").setParseAction(lambda s,l,t:t[0]=='/') + Suppress(">")
    else:
        # HTML mode: values may be quoted or bare, '=' plus value is optional,
        # and attribute names are normalized to lower case
        printablesLessRAbrack = "".join( [ c for c in printables if c not in ">" ] )
        tagAttrValue = quotedString.copy().setParseAction( removeQuotes ) | Word(printablesLessRAbrack)
        openTag = Suppress("<") + tagStr + \
                Dict(ZeroOrMore(Group( tagAttrName.setParseAction(downcaseTokens) + \
                Optional( Suppress("=") + tagAttrValue ) ))) + \
                Optional("/",default=[False]).setResultsName("empty").setParseAction(lambda s,l,t:t[0]=='/') + Suppress(">")
    closeTag = Combine(_L("</") + tagStr + ">")
    # expose results as e.g. "startDiv"/"endDiv" for a "div" tag
    openTag = openTag.setResultsName("start"+"".join(resname.replace(":"," ").title().split())).setName("<%s>" % tagStr)
    closeTag = closeTag.setResultsName("end"+"".join(resname.replace(":"," ").title().split())).setName("</%s>" % tagStr)
    openTag.tag = resname
    closeTag.tag = resname
    return openTag, closeTag
def makeHTMLTags(tagStr):
    """Helper to construct opening and closing tag expressions for HTML, given a tag name."""
    xml = False  # HTML mode: lenient attribute quoting, case-insensitive names
    return _makeTags(tagStr, xml)
def makeXMLTags(tagStr):
    """Helper to construct opening and closing tag expressions for XML, given a tag name."""
    xml = True  # XML mode: strict attribute quoting, case-sensitive names
    return _makeTags(tagStr, xml)
def withAttribute(*args,**attrDict):
    """Helper to create a validating parse action to be used with start tags created
    with makeXMLTags or makeHTMLTags. Use withAttribute to qualify a starting tag
    with a required attribute value, to avoid false matches on common tags such as
    <TD> or <DIV>.
    Call withAttribute with a series of attribute names and values. Specify the list
    of filter attributes names and values as:
     - keyword arguments, as in (class="Customer",align="right"), or
     - a list of name-value tuples, as in ( ("ns1:class", "Customer"), ("ns2:align","right") )
    For attribute names with a namespace prefix, you must use the second form. Attribute
    names are matched insensitive to upper/lower case.
    To verify that the attribute exists, but without specifying a value, pass
    withAttribute.ANY_VALUE as the value.
    """
    # positional tuples win over keyword arguments
    if args:
        attrs = list(args)
    else:
        attrs = list(attrDict.items())
    def _verifyAttrs(s,l,tokens):
        for attrName,attrValue in attrs:
            if attrName not in tokens:
                raise ParseException(s,l,"no matching attribute " + attrName)
            if attrValue != withAttribute.ANY_VALUE and tokens[attrName] != attrValue:
                raise ParseException(s,l,"attribute '%s' has value '%s', must be '%s'" %
                                        (attrName, tokens[attrName], attrValue))
    return _verifyAttrs
withAttribute.ANY_VALUE = object()
# associativity constants for operatorPrecedence(); compared by identity
opAssoc = _Constants()
opAssoc.LEFT = object()
opAssoc.RIGHT = object()
def operatorPrecedence( baseExpr, opList ):
    """Helper method for constructing grammars of expressions made up of
    operators working in a precedence hierarchy. Operators may be unary or
    binary, left- or right-associative. Parse actions can also be attached
    to operator expressions.
    Parameters:
     - baseExpr - expression representing the most basic element for the nested
     - opList - list of tuples, one for each operator precedence level in the
       expression grammar; each tuple is of the form
       (opExpr, numTerms, rightLeftAssoc, parseAction), where:
       - opExpr is the pyparsing expression for the operator;
         may also be a string, which will be converted to a Literal;
         if numTerms is 3, opExpr is a tuple of two expressions, for the
         two operators separating the 3 terms
       - numTerms is the number of terms for this operator (must
         be 1, 2, or 3)
       - rightLeftAssoc is the indicator whether the operator is
         right or left associative, using the pyparsing-defined
         constants opAssoc.RIGHT and opAssoc.LEFT.
       - parseAction is the parse action to be associated with
         expressions matching this operator expression (the
         parse action tuple member may be omitted)
    """
    ret = Forward()
    # tightest-binding level: the base element, or the full expression in parens
    lastExpr = baseExpr | ( Suppress('(') + ret + Suppress(')') )
    for i,operDef in enumerate(opList):
        # pad the level tuple with None so the parse-action member is optional
        opExpr,arity,rightLeftAssoc,pa = (operDef + (None,))[:4]
        if arity == 3:
            if opExpr is None or len(opExpr) != 2:
                raise ValueError("if numterms=3, opExpr must be a tuple or list of two expressions")
            opExpr1, opExpr2 = opExpr
        thisExpr = Forward()#.setName("expr%d" % i)
        if rightLeftAssoc == opAssoc.LEFT:
            if arity == 1:
                # postfix unary: operand followed by one or more operators
                matchExpr = FollowedBy(lastExpr + opExpr) + Group( lastExpr + OneOrMore( opExpr ) )
            elif arity == 2:
                if opExpr is not None:
                    matchExpr = FollowedBy(lastExpr + opExpr + lastExpr) + Group( lastExpr + OneOrMore( opExpr + lastExpr ) )
                else:
                    # no operator expression: operands are simply juxtaposed
                    matchExpr = FollowedBy(lastExpr+lastExpr) + Group( lastExpr + OneOrMore(lastExpr) )
            elif arity == 3:
                matchExpr = FollowedBy(lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr) + \
                            Group( lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr )
            else:
                raise ValueError("operator must be unary (1), binary (2), or ternary (3)")
        elif rightLeftAssoc == opAssoc.RIGHT:
            if arity == 1:
                # try to avoid LR with this extra test
                if not isinstance(opExpr, Optional):
                    opExpr = Optional(opExpr)
                matchExpr = FollowedBy(opExpr.expr + thisExpr) + Group( opExpr + thisExpr )
            elif arity == 2:
                if opExpr is not None:
                    # right associativity: recurse into thisExpr on the right side
                    matchExpr = FollowedBy(lastExpr + opExpr + thisExpr) + Group( lastExpr + OneOrMore( opExpr + thisExpr ) )
                else:
                    matchExpr = FollowedBy(lastExpr + thisExpr) + Group( lastExpr + OneOrMore( thisExpr ) )
            elif arity == 3:
                matchExpr = FollowedBy(lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr) + \
                            Group( lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr )
            else:
                raise ValueError("operator must be unary (1), binary (2), or ternary (3)")
        else:
            raise ValueError("operator must indicate right or left associativity")
        if pa:
            matchExpr.setParseAction( pa )
        # each level matches its own pattern, or falls through to the tighter level
        thisExpr << ( matchExpr | lastExpr )
        lastExpr = thisExpr
    ret << lastExpr
    return ret
# predefined quoted-string expressions; each allows doubled-quote ("" / '')
# and backslash (including \x hex) escapes within the string body
dblQuotedString = Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\x[0-9a-fA-F]+)|(?:\\.))*"').setName("string enclosed in double quotes")
sglQuotedString = Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\x[0-9a-fA-F]+)|(?:\\.))*'").setName("string enclosed in single quotes")
quotedString = Regex(r'''(?:"(?:[^"\n\r\\]|(?:"")|(?:\\x[0-9a-fA-F]+)|(?:\\.))*")|(?:'(?:[^'\n\r\\]|(?:'')|(?:\\x[0-9a-fA-F]+)|(?:\\.))*')''').setName("quotedString using single or double quotes")
# a quoted string with a leading 'u' prefix, e.g. u"text"
unicodeString = Combine(_L('u') + quotedString.copy())
def nestedExpr(opener="(", closer=")", content=None, ignoreExpr=quotedString.copy()):
    """Helper method for defining nested lists enclosed in opening and closing
    delimiters ("(" and ")" are the default).
    Parameters:
     - opener - opening character for a nested list (default="("); can also be a pyparsing expression
     - closer - closing character for a nested list (default=")"); can also be a pyparsing expression
     - content - expression for items within the nested lists (default=None)
     - ignoreExpr - expression for ignoring opening and closing delimiters (default=quotedString)
    If an expression is not provided for the content argument, the nested
    expression will capture all whitespace-delimited content between delimiters
    as a list of separate values.
    Use the ignoreExpr argument to define expressions that may contain
    opening or closing characters that should not be treated as opening
    or closing characters for nesting, such as quotedString or a comment
    expression. Specify multiple expressions using an Or or MatchFirst.
    The default is quotedString, but if no expressions are to be ignored,
    then pass None for this argument.
    NOTE: the ignoreExpr default is evaluated once, at function definition
    time, so every call relying on the default shares one quotedString copy.
    """
    if opener == closer:
        raise ValueError("opening and closing strings cannot be the same")
    if content is None:
        # no content expression supplied - build one that captures everything
        # between the delimiters, split on whitespace
        if isinstance(opener,basestring) and isinstance(closer,basestring):
            if len(opener) == 1 and len(closer)==1:
                # single-char delimiters can be excluded directly via CharsNotIn
                if ignoreExpr is not None:
                    content = (Combine(OneOrMore(~ignoreExpr +
                                    CharsNotIn(opener+closer+ParserElement.DEFAULT_WHITE_CHARS,exact=1))
                                ).setParseAction(lambda t:t[0].strip()))
                else:
                    content = (empty.copy()+CharsNotIn(opener+closer+ParserElement.DEFAULT_WHITE_CHARS
                                ).setParseAction(lambda t:t[0].strip()))
            else:
                # multi-char delimiters require negative lookahead on each char
                if ignoreExpr is not None:
                    content = (Combine(OneOrMore(~ignoreExpr +
                                    ~Literal(opener) + ~Literal(closer) +
                                    CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS,exact=1))
                                ).setParseAction(lambda t:t[0].strip()))
                else:
                    content = (Combine(OneOrMore(~Literal(opener) + ~Literal(closer) +
                                    CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS,exact=1))
                                ).setParseAction(lambda t:t[0].strip()))
        else:
            raise ValueError("opening and closing arguments must be strings if no content expression is given")
    # the grammar is recursive: a nested list may itself contain nested lists
    ret = Forward()
    if ignoreExpr is not None:
        ret << Group( Suppress(opener) + ZeroOrMore( ignoreExpr | ret | content ) + Suppress(closer) )
    else:
        ret << Group( Suppress(opener) + ZeroOrMore( ret | content ) + Suppress(closer) )
    return ret
def indentedBlock(blockStatementExpr, indentStack, indent=True):
    """Helper method for defining space-delimited indentation blocks, such as
    those used to define block statements in Python source code.
    Parameters:
     - blockStatementExpr - expression defining syntax of statement that
       is repeated within the indented block
     - indentStack - list created by caller to manage indentation stack
       (multiple statementWithIndentedBlock expressions within a single grammar
       should share a common indentStack)
     - indent - boolean indicating whether block must be indented beyond the
       the current level; set to False for block of left-most statements
       (default=True)
    A valid block must contain at least one blockStatement.
    """
    def checkPeerIndent(s,l,t):
        # nothing to check at end of input
        if l >= len(s): return
        curCol = col(l,s)
        # a peer statement must start at exactly the current indent level
        if curCol != indentStack[-1]:
            if curCol > indentStack[-1]:
                raise ParseFatalException(s,l,"illegal nesting")
            raise ParseException(s,l,"not a peer entry")
    def checkSubIndent(s,l,t):
        curCol = col(l,s)
        if curCol > indentStack[-1]:
            # deeper than current level: push a new indentation level
            indentStack.append( curCol )
        else:
            raise ParseException(s,l,"not a subentry")
    def checkUnindent(s,l,t):
        if l >= len(s): return
        curCol = col(l,s)
        # an unindent must return to (at most) the enclosing indentation level
        if not(indentStack and curCol < indentStack[-1] and curCol <= indentStack[-2]):
            raise ParseException(s,l,"not an unindent")
        indentStack.pop()
    NL = OneOrMore(LineEnd().setWhitespaceChars("\t ").suppress())
    INDENT = Empty() + Empty().setParseAction(checkSubIndent)
    PEER = Empty().setParseAction(checkPeerIndent)
    UNDENT = Empty().setParseAction(checkUnindent)
    if indent:
        smExpr = Group( Optional(NL) +
            #~ FollowedBy(blockStatementExpr) +
            INDENT + (OneOrMore( PEER + Group(blockStatementExpr) + Optional(NL) )) + UNDENT)
    else:
        smExpr = Group( Optional(NL) +
            (OneOrMore( PEER + Group(blockStatementExpr) + Optional(NL) )) )
    # allow backslash-newline line continuations within block statements
    blockStatementExpr.ignore(_bslash + LineEnd())
    return smExpr
# 8-bit Latin-1 letters and punctuation, for extended-ASCII grammars
alphas8bit = srange(r"[\0xc0-\0xd6\0xd8-\0xf6\0xf8-\0xff]")
punc8bit = srange(r"[\0xa1-\0xbf\0xd7\0xf7]")
# generic open/close tag matchers accepting any HTML tag name
anyOpenTag,anyCloseTag = makeHTMLTags(Word(alphas,alphanums+"_:"))
commonHTMLEntity = Combine(_L("&") + oneOf("gt lt amp nbsp quot").setResultsName("entity") +";").streamline()
_htmlEntityMap = dict(zip("gt lt amp nbsp quot".split(),'><& "'))
# parse action: map a matched HTML entity back to its literal character
replaceHTMLEntity = lambda t : t.entity in _htmlEntityMap and _htmlEntityMap[t.entity] or None
# it's easy to get these comment structures wrong - they're very common, so may as well make them available
cStyleComment = Regex(r"/\*(?:[^*]*\*+)+?/").setName("C style comment")
htmlComment = Regex(r"<!--[\s\S]*?-->")
restOfLine = Regex(r".*").leaveWhitespace()
dblSlashComment = Regex(r"\/\/(\\\n|.)*").setName("// comment")
cppStyleComment = Regex(r"/(?:\*(?:[^*]*\*+)+?/|/[^\n]*(?:\n[^\n]*)*?(?:(?<!\\)|\Z))").setName("C++ style comment")
javaStyleComment = cppStyleComment
pythonStyleComment = Regex(r"#.*").setName("Python style comment")
_noncomma = "".join( [ c for c in printables if c != "," ] )
# one element of a comma-separated list: words optionally joined by embedded
# spaces/tabs, as long as no comma or line end follows the whitespace
_commasepitem = Combine(OneOrMore(Word(_noncomma) +
                                  Optional( Word(" \t") +
                                            ~Literal(",") + ~LineEnd() ) ) ).streamline().setName("commaItem")
commaSeparatedList = delimitedList( Optional( quotedString.copy() | _commasepitem, default="") ).setName("commaSeparatedList")
if __name__ == "__main__":
    # simple smoke test: parse sample SQL statements with a tiny grammar
    def test( teststring ):
        # parse the sample and dump the results, or show the error location
        try:
            tokens = simpleSQL.parseString( teststring )
            tokenlist = tokens.asList()
            print (teststring + "->" + str(tokenlist))
            print ("tokens = " + str(tokens))
            print ("tokens.columns = " + str(tokens.columns))
            print ("tokens.tables = " + str(tokens.tables))
            print (tokens.asXML("SQL",True))
        except ParseBaseException:
            # print the offending line with a caret under the error column
            err = sys.exc_info()[1]
            print (teststring + "->")
            print (err.line)
            print (" "*(err.column-1) + "^")
            print (err)
        print()
    # minimal SELECT ... FROM ... grammar used for the self-test
    selectToken = CaselessLiteral( "select" )
    fromToken = CaselessLiteral( "from" )
    ident = Word( alphas, alphanums + "_$" )
    columnName = delimitedList( ident, ".", combine=True ).setParseAction( upcaseTokens )
    columnNameList = Group( delimitedList( columnName ) )#.setName("columns")
    tableName = delimitedList( ident, ".", combine=True ).setParseAction( upcaseTokens )
    tableNameList = Group( delimitedList( tableName ) )#.setName("tables")
    simpleSQL = ( selectToken + \
                  ( '*' | columnNameList ).setResultsName( "columns" ) + \
                  fromToken + \
                  tableNameList.setResultsName( "tables" ) )
    # exercise both successful parses and error reporting
    test( "SELECT * from XYZZY, ABC" )
    test( "select * from SYS.XYZZY" )
    test( "Select A from Sys.dual" )
    test( "Select AA,BB,CC from Sys.dual" )
    test( "Select A, B, C from Sys.dual" )
    test( "Select A, B, C from Sys.dual" )
    test( "Xelect A, B, C from Sys.dual" )
    test( "Select A, B, C frox Sys.dual" )
    test( "Select" )
    test( "Select ^^^ frox Sys.dual" )
    test( "Select A, B, C from Sys.dual, Table2 " )
| Python |
# module pyparsing.py
#
# Copyright (c) 2003-2010 Paul T. McGuire
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
#from __future__ import generators
__doc__ = \
"""
pyparsing module - Classes and methods to define and execute parsing grammars
The pyparsing module is an alternative approach to creating and executing simple grammars,
vs. the traditional lex/yacc approach, or the use of regular expressions. With pyparsing, you
don't need to learn a new syntax for defining grammars or matching expressions - the parsing module
provides a library of classes that you use to construct the grammar directly in Python.
Here is a program to parse "Hello, World!" (or any greeting of the form C{"<salutation>, <addressee>!"})::
from pyparsing import Word, alphas
# define grammar of a greeting
greet = Word( alphas ) + "," + Word( alphas ) + "!"
hello = "Hello, World!"
print hello, "->", greet.parseString( hello )
The program outputs the following::
Hello, World! -> ['Hello', ',', 'World', '!']
The Python representation of the grammar is quite readable, owing to the self-explanatory
class names, and the use of '+', '|' and '^' operators.
The parsed results returned from C{parseString()} can be accessed as a nested list, a dictionary, or an
object with named attributes.
The pyparsing module handles some of the problems that are typically vexing when writing text parsers:
- extra or missing whitespace (the above program will also handle "Hello,World!", "Hello , World !", etc.)
- quoted strings
- embedded comments
"""
__version__ = "1.5.5"
__versionTime__ = "12 Aug 2010 03:56"
__author__ = "Paul McGuire <ptmcg@users.sourceforge.net>"
import string
from weakref import ref as wkref
import copy
import sys
import warnings
import re
import sre_constants
#~ sys.stderr.write( "testing pyparsing module, version %s, %s\n" % (__version__,__versionTime__ ) )
__all__ = [
'And', 'CaselessKeyword', 'CaselessLiteral', 'CharsNotIn', 'Combine', 'Dict', 'Each', 'Empty',
'FollowedBy', 'Forward', 'GoToColumn', 'Group', 'Keyword', 'LineEnd', 'LineStart', 'Literal',
'MatchFirst', 'NoMatch', 'NotAny', 'OneOrMore', 'OnlyOnce', 'Optional', 'Or',
'ParseBaseException', 'ParseElementEnhance', 'ParseException', 'ParseExpression', 'ParseFatalException',
'ParseResults', 'ParseSyntaxException', 'ParserElement', 'QuotedString', 'RecursiveGrammarException',
'Regex', 'SkipTo', 'StringEnd', 'StringStart', 'Suppress', 'Token', 'TokenConverter', 'Upcase',
'White', 'Word', 'WordEnd', 'WordStart', 'ZeroOrMore',
'alphanums', 'alphas', 'alphas8bit', 'anyCloseTag', 'anyOpenTag', 'cStyleComment', 'col',
'commaSeparatedList', 'commonHTMLEntity', 'countedArray', 'cppStyleComment', 'dblQuotedString',
'dblSlashComment', 'delimitedList', 'dictOf', 'downcaseTokens', 'empty', 'getTokensEndLoc', 'hexnums',
'htmlComment', 'javaStyleComment', 'keepOriginalText', 'line', 'lineEnd', 'lineStart', 'lineno',
'makeHTMLTags', 'makeXMLTags', 'matchOnlyAtCol', 'matchPreviousExpr', 'matchPreviousLiteral',
'nestedExpr', 'nullDebugAction', 'nums', 'oneOf', 'opAssoc', 'operatorPrecedence', 'printables',
'punc8bit', 'pythonStyleComment', 'quotedString', 'removeQuotes', 'replaceHTMLEntity',
'replaceWith', 'restOfLine', 'sglQuotedString', 'srange', 'stringEnd',
'stringStart', 'traceParseAction', 'unicodeString', 'upcaseTokens', 'withAttribute',
'indentedBlock', 'originalTextFor',
]
"""
Detect if we are running version 3.X and make appropriate changes
Robert A. Clark
"""
_PY3K = sys.version_info[0] > 2
if _PY3K:
_MAX_INT = sys.maxsize
basestring = str
unichr = chr
_ustr = str
_str2dict = set
alphas = string.ascii_lowercase + string.ascii_uppercase
else:
_MAX_INT = sys.maxint
range = xrange
def _ustr(obj):
"""Drop-in replacement for str(obj) that tries to be Unicode friendly. It first tries
str(obj). If that fails with a UnicodeEncodeError, then it tries unicode(obj). It
then < returns the unicode object | encodes it with the default encoding | ... >.
"""
if isinstance(obj,unicode):
return obj
try:
# If this works, then _ustr(obj) has the same behaviour as str(obj), so
# it won't break any existing code.
return str(obj)
except UnicodeEncodeError:
# The Python docs (http://docs.python.org/ref/customization.html#l2h-182)
# state that "The return value must be a string object". However, does a
# unicode object (being a subclass of basestring) count as a "string
# object"?
# If so, then return a unicode object:
return unicode(obj)
# Else encode it... but how? There are many choices... :)
# Replace unprintables with escape codes?
#return unicode(obj).encode(sys.getdefaultencoding(), 'backslashreplace_errors')
# Replace unprintables with question marks?
#return unicode(obj).encode(sys.getdefaultencoding(), 'replace')
# ...
def _str2dict(strg):
return dict( [(c,0) for c in strg] )
alphas = string.lowercase + string.uppercase
# build list of single arg builtins, tolerant of Python version, that can be used as parse actions
singleArgBuiltins = []
import __builtin__
for fname in "sum len enumerate sorted reversed list tuple set any all".split():
    try:
        singleArgBuiltins.append(getattr(__builtin__,fname))
    except AttributeError:
        # builtin not available in this Python version - skip it
        continue
def _xml_escape(data):
"""Escape &, <, >, ", ', etc. in a string of data."""
# ampersand must be replaced first
from_symbols = '&><"\''
to_symbols = ['&'+s+';' for s in "amp gt lt quot apos".split()]
for from_,to_ in zip(from_symbols, to_symbols):
data = data.replace(from_, to_)
return data
class _Constants(object):
    # bare namespace class; instances just carry named constant attributes
    # (see opAssoc.LEFT / opAssoc.RIGHT)
    pass
# character-class constants used throughout the module
nums = string.digits
hexnums = nums + "ABCDEFabcdef"
alphanums = alphas + nums
# a literal backslash, spelled via chr() to avoid escaping confusion
_bslash = chr(92)
# every printable, non-whitespace character
printables = "".join( [ c for c in string.printable if c not in string.whitespace ] )
class ParseBaseException(Exception):
    """base exception class for all parsing runtime exceptions"""
    # Performance tuning: we construct a *lot* of these, so keep this
    # constructor as small and fast as possible
    def __init__( self, pstr, loc=0, msg=None, elem=None ):
        self.loc = loc
        if msg is None:
            # single-argument form: the string is the message, not the input
            self.msg = pstr
            self.pstr = ""
        else:
            self.msg = msg
            self.pstr = pstr
        self.parserElement = elem
    def __getattr__( self, aname ):
        """supported attributes by name are:
            - lineno - returns the line number of the exception text
            - col - returns the column number of the exception text
            - line - returns the line containing the exception text
        """
        if( aname == "lineno" ):
            return lineno( self.loc, self.pstr )
        elif( aname in ("col", "column") ):
            return col( self.loc, self.pstr )
        elif( aname == "line" ):
            return line( self.loc, self.pstr )
        else:
            raise AttributeError(aname)
    def __str__( self ):
        return "%s (at char %d), (line:%d, col:%d)" % \
                ( self.msg, self.loc, self.lineno, self.column )
    def __repr__( self ):
        return _ustr(self)
    def markInputline( self, markerString = ">!<" ):
        """Extracts the exception line from the input string, and marks
        the location of the exception with a special symbol.
        """
        line_str = self.line
        line_column = self.column - 1
        if markerString:
            line_str = "".join( [line_str[:line_column],
                                markerString, line_str[line_column:]])
        return line_str.strip()
    def __dir__(self):
        # BUGFIX: previously listed "markInputLine" (capital L), a method
        # that does not exist; the actual method is markInputline
        return "loc msg pstr parserElement lineno col line " \
               "markInputline __str__ __repr__".split()
class ParseException(ParseBaseException):
    """exception thrown when parse expressions don't match class;
    supported attributes by name are:
     - lineno - returns the line number of the exception text
     - col - returns the column number of the exception text
     - line - returns the line containing the exception text
    """
    pass
class ParseFatalException(ParseBaseException):
    """user-throwable exception thrown when inconsistent parse content
    is found; stops all parsing immediately"""
    pass
class ParseSyntaxException(ParseFatalException):
    """just like C{ParseFatalException}, but thrown internally when an
    C{ErrorStop} ('-' operator) indicates that parsing is to stop immediately because
    an unbacktrackable syntax error has been found"""
    def __init__(self, pe):
        # clone the fields of the originating ParseBaseException
        super(ParseSyntaxException, self).__init__(
            pe.pstr, pe.loc, pe.msg, pe.parserElement)
#~ class ReparseException(ParseBaseException):
#~ """Experimental class - parse actions can raise this exception to cause
#~ pyparsing to reparse the input string:
#~ - with a modified input string, and/or
#~ - with a modified start location
#~ Set the values of the ReparseException in the constructor, and raise the
#~ exception in a parse action to cause pyparsing to use the new string/location.
#~ Setting the values as None causes no change to be made.
#~ """
#~ def __init_( self, newstring, restartLoc ):
#~ self.newParseText = newstring
#~ self.reparseLoc = restartLoc
class RecursiveGrammarException(Exception):
    """Raised by C{validate()} when the grammar appears to be improperly
    recursive (an expression cycle with no way to terminate)."""
    def __init__( self, parseElementList ):
        # the chain of parse elements forming the suspected cycle
        self.parseElementTrace = parseElementList

    def __str__( self ):
        return "RecursiveGrammarException: %s" % self.parseElementTrace
class _ParseResultsWithOffset(object):
def __init__(self,p1,p2):
self.tup = (p1,p2)
def __getitem__(self,i):
return self.tup[i]
def __repr__(self):
return repr(self.tup)
def setOffset(self,i):
self.tup = (self.tup[0],i)
class ParseResults(object):
    """Structured parse results, to provide multiple means of access to the parsed data:
       - as a list (C{len(results)})
       - by list index (C{results[0], results[1]}, etc.)
       - by attribute (C{results.<resultsName>})

    Internally keeps a positional token list (C{__toklist}) plus a mapping of
    results names to C{_ParseResultsWithOffset} entries (C{__tokdict}); names in
    C{__accumNames} are "list all matches" (non-modal) names.
    """
    #~ __slots__ = ( "__toklist", "__tokdict", "__doinit", "__name", "__parent", "__accumNames", "__weakref__" )
    def __new__(cls, toklist, name=None, asList=True, modal=True ):
        # Passing an existing ParseResults through returns it unchanged;
        # __doinit tells __init__ whether this is a fresh instance.
        if isinstance(toklist, cls):
            return toklist
        retobj = object.__new__(cls)
        retobj.__doinit = True
        return retobj
    # Performance tuning: we construct a *lot* of these, so keep this
    # constructor as small and fast as possible
    def __init__( self, toklist, name=None, asList=True, modal=True, isinstance=isinstance ):
        # NOTE: isinstance is bound as a default arg purely as a local-lookup speedup.
        if self.__doinit:
            self.__doinit = False
            self.__name = None
            self.__parent = None          # weakref to enclosing ParseResults, set by __setitem__
            self.__accumNames = {}
            if isinstance(toklist, list):
                self.__toklist = toklist[:]
            else:
                self.__toklist = [toklist]
            self.__tokdict = dict()
        # Attach the results name (if any) to the stored tokens.
        if name is not None and name:
            if not modal:
                self.__accumNames[name] = 0
            if isinstance(name,int):
                name = _ustr(name) # will always return a str, but use _ustr for consistency
            self.__name = name
            if not toklist in (None,'',[]):
                if isinstance(toklist,basestring):
                    toklist = [ toklist ]
                if asList:
                    if isinstance(toklist,ParseResults):
                        self[name] = _ParseResultsWithOffset(toklist.copy(),0)
                    else:
                        self[name] = _ParseResultsWithOffset(ParseResults(toklist[0]),0)
                    self[name].__name = name
                else:
                    try:
                        self[name] = toklist[0]
                    except (KeyError,TypeError,IndexError):
                        self[name] = toklist
    def __getitem__( self, i ):
        # int/slice -> positional lookup; otherwise named lookup (last match for
        # modal names, all matches for accumulated names).
        if isinstance( i, (int,slice) ):
            return self.__toklist[i]
        else:
            if i not in self.__accumNames:
                return self.__tokdict[i][-1][0]
            else:
                return ParseResults([ v[0] for v in self.__tokdict[i] ])
    def __setitem__( self, k, v, isinstance=isinstance ):
        if isinstance(v,_ParseResultsWithOffset):
            self.__tokdict[k] = self.__tokdict.get(k,list()) + [v]
            sub = v[0]
        elif isinstance(k,int):
            self.__toklist[k] = v
            sub = v
        else:
            self.__tokdict[k] = self.__tokdict.get(k,list()) + [_ParseResultsWithOffset(v,0)]
            sub = v
        # link nested results back to their container (weakref avoids cycles)
        if isinstance(sub,ParseResults):
            sub.__parent = wkref(self)
    def __delitem__( self, i ):
        if isinstance(i,(int,slice)):
            mylen = len( self.__toklist )
            del self.__toklist[i]
            # convert int to slice
            if isinstance(i, int):
                if i < 0:
                    i += mylen
                i = slice(i, i+1)
            # get removed indices
            removed = list(range(*i.indices(mylen)))
            removed.reverse()
            # fixup indices in token dictionary
            for name in self.__tokdict:
                occurrences = self.__tokdict[name]
                for j in removed:
                    for k, (value, position) in enumerate(occurrences):
                        occurrences[k] = _ParseResultsWithOffset(value, position - (position > j))
        else:
            del self.__tokdict[i]
    def __contains__( self, k ):
        # membership tests results *names*, not token values
        return k in self.__tokdict
    def __len__( self ): return len( self.__toklist )
    def __bool__(self): return len( self.__toklist ) > 0
    __nonzero__ = __bool__
    def __iter__( self ): return iter( self.__toklist )
    def __reversed__( self ): return iter( self.__toklist[::-1] )
    def keys( self ):
        """Returns all named result keys."""
        return self.__tokdict.keys()
    def pop( self, index=-1 ):
        """Removes and returns item at specified index (default=last).
        Will work with either numeric indices or dict-key indicies."""
        ret = self[index]
        del self[index]
        return ret
    def get(self, key, defaultValue=None):
        """Returns named result matching the given key, or if there is no
        such name, then returns the given C{defaultValue} or C{None} if no
        C{defaultValue} is specified."""
        if key in self:
            return self[key]
        else:
            return defaultValue
    def insert( self, index, insStr ):
        """Inserts new element at location index in the list of parsed tokens."""
        self.__toklist.insert(index, insStr)
        # fixup indices in token dictionary
        for name in self.__tokdict:
            occurrences = self.__tokdict[name]
            for k, (value, position) in enumerate(occurrences):
                occurrences[k] = _ParseResultsWithOffset(value, position + (position > index))
    def items( self ):
        """Returns all named result keys and values as a list of tuples."""
        return [(k,self[k]) for k in self.__tokdict]
    def values( self ):
        """Returns all named result values."""
        return [ v[-1][0] for v in self.__tokdict.values() ]
    def __getattr__( self, name ):
        # attribute access on unknown names returns "" rather than raising,
        # so grammars can probe optional named results cheaply
        if True: #name not in self.__slots__:
            if name in self.__tokdict:
                if name not in self.__accumNames:
                    return self.__tokdict[name][-1][0]
                else:
                    return ParseResults([ v[0] for v in self.__tokdict[name] ])
            else:
                return ""
        return None
    def __add__( self, other ):
        ret = self.copy()
        ret += other
        return ret
    def __iadd__( self, other ):
        # merge other's named results, shifting their offsets past our tokens
        if other.__tokdict:
            offset = len(self.__toklist)
            addoffset = ( lambda a: (a<0 and offset) or (a+offset) )
            otheritems = other.__tokdict.items()
            otherdictitems = [(k, _ParseResultsWithOffset(v[0],addoffset(v[1])) )
                                for (k,vlist) in otheritems for v in vlist]
            for k,v in otherdictitems:
                self[k] = v
                if isinstance(v[0],ParseResults):
                    v[0].__parent = wkref(self)
        self.__toklist += other.__toklist
        self.__accumNames.update( other.__accumNames )
        return self
    def __radd__(self, other):
        # supports sum([...ParseResults...]) which starts from int 0
        # NOTE(review): falls through (returns None) for any other left operand — confirm intended
        if isinstance(other,int) and other == 0:
            return self.copy()
    def __repr__( self ):
        return "(%s, %s)" % ( repr( self.__toklist ), repr( self.__tokdict ) )
    def __str__( self ):
        out = "["
        sep = ""
        for i in self.__toklist:
            if isinstance(i, ParseResults):
                out += sep + _ustr(i)
            else:
                out += sep + repr(i)
            sep = ", "
        out += "]"
        return out
    def _asStringList( self, sep='' ):
        # flatten nested results into a flat list of strings, optionally separated
        out = []
        for item in self.__toklist:
            if out and sep:
                out.append(sep)
            if isinstance( item, ParseResults ):
                out += item._asStringList()
            else:
                out.append( _ustr(item) )
        return out
    def asList( self ):
        """Returns the parse results as a nested list of matching tokens, all converted to strings."""
        out = []
        for res in self.__toklist:
            if isinstance(res,ParseResults):
                out.append( res.asList() )
            else:
                out.append( res )
        return out
    def asDict( self ):
        """Returns the named parse results as dictionary."""
        return dict( self.items() )
    def copy( self ):
        """Returns a new copy of a C{ParseResults} object."""
        # NOTE: shallow — nested ParseResults values are shared, and the name
        # dict itself is copied while its entry lists are shared.
        ret = ParseResults( self.__toklist )
        ret.__tokdict = self.__tokdict.copy()
        ret.__parent = self.__parent
        ret.__accumNames.update( self.__accumNames )
        ret.__name = self.__name
        return ret
    def asXML( self, doctag=None, namedItemsOnly=False, indent="", formatted=True ):
        """Returns the parse results as XML. Tags are created for tokens and lists that have defined results names."""
        nl = "\n"
        out = []
        # invert name dict: token offset -> results name, for per-token tagging
        namedItems = dict( [ (v[1],k) for (k,vlist) in self.__tokdict.items()
                for v in vlist ] )
        nextLevelIndent = indent + " "
        # collapse out indents if formatting is not desired
        if not formatted:
            indent = ""
            nextLevelIndent = ""
            nl = ""
        selfTag = None
        if doctag is not None:
            selfTag = doctag
        else:
            if self.__name:
                selfTag = self.__name
        if not selfTag:
            if namedItemsOnly:
                return ""
            else:
                selfTag = "ITEM"
        out += [ nl, indent, "<", selfTag, ">" ]
        worklist = self.__toklist
        for i,res in enumerate(worklist):
            if isinstance(res,ParseResults):
                if i in namedItems:
                    out += [ res.asXML(namedItems[i],
                                        namedItemsOnly and doctag is None,
                                        nextLevelIndent,
                                        formatted)]
                else:
                    out += [ res.asXML(None,
                                        namedItemsOnly and doctag is None,
                                        nextLevelIndent,
                                        formatted)]
            else:
                # individual token, see if there is a name for it
                resTag = None
                if i in namedItems:
                    resTag = namedItems[i]
                if not resTag:
                    if namedItemsOnly:
                        continue
                    else:
                        resTag = "ITEM"
                xmlBodyText = _xml_escape(_ustr(res))
                out += [ nl, nextLevelIndent, "<", resTag, ">",
                                xmlBodyText,
                                                "</", resTag, ">" ]
        out += [ nl, indent, "</", selfTag, ">" ]
        return "".join(out)
    def __lookup(self,sub):
        # reverse lookup: find the results name under which `sub` is stored
        for k,vlist in self.__tokdict.items():
            for v,loc in vlist:
                if sub is v:
                    return k
        return None
    def getName(self):
        """Returns the results name for this token expression."""
        if self.__name:
            return self.__name
        elif self.__parent:
            par = self.__parent()
            if par:
                return par.__lookup(self)
            else:
                return None
        elif (len(self) == 1 and
               len(self.__tokdict) == 1 and
               # NOTE(review): dict.values()[0]/keys()[0] are py2-only; on py3
               # dict views are not indexable — confirm which interpreters this
               # file still targets before relying on this branch.
               self.__tokdict.values()[0][0][1] in (0,-1)):
            return self.__tokdict.keys()[0]
        else:
            return None
    def dump(self,indent='',depth=0):
        """Diagnostic method for listing out the contents of a C{ParseResults}.
        Accepts an optional C{indent} argument so that this string can be embedded
        in a nested display of other data."""
        out = []
        out.append( indent+_ustr(self.asList()) )
        # NOTE(review): items() returns a list here (see above) so .sort() works
        # on py2; on py3 keys() below also returns a view — confirm targets.
        keys = self.items()
        keys.sort()
        for k,v in keys:
            if out:
                out.append('\n')
            out.append( "%s%s- %s: " % (indent,(' '*depth), k) )
            if isinstance(v,ParseResults):
                if v.keys():
                    out.append( v.dump(indent,depth+1) )
                else:
                    out.append(_ustr(v))
            else:
                out.append(_ustr(v))
        return "".join(out)
    # add support for pickle protocol
    def __getstate__(self):
        # parent weakref is dereferenced for pickling (weakrefs don't pickle)
        return ( self.__toklist,
                 ( self.__tokdict.copy(),
                   self.__parent is not None and self.__parent() or None,
                   self.__accumNames,
                   self.__name ) )
    def __setstate__(self,state):
        self.__toklist = state[0]
        self.__tokdict, \
        par, \
        inAccumNames, \
        self.__name = state[1]
        self.__accumNames = {}
        self.__accumNames.update(inAccumNames)
        if par is not None:
            self.__parent = wkref(par)
        else:
            self.__parent = None
    def __dir__(self):
        # NOTE(review): on py3, keys() is a view, and list + view raises —
        # confirm this is only exercised on py2.
        return dir(super(ParseResults,self)) + self.keys()
def col (loc, strg):
    """Return the 1-based column of C{loc} within C{strg}, treating newlines
    as line separators; a location sitting on a newline reports column 1.

    Note: parsing expands tabs by default, so C{loc} values refer to the
    tab-expanded string — see C{ParserElement.parseString}.
    """
    if loc < len(strg) and strg[loc] == '\n':
        return 1
    # rfind returns -1 when no newline precedes loc, giving loc+1 as desired
    return loc - strg.rfind("\n", 0, loc)
def lineno(loc, strg):
    """Return the 1-based line number of C{loc} within C{strg}, counting
    newlines as line separators.

    Note: parsing expands tabs by default, so C{loc} values refer to the
    tab-expanded string — see C{ParserElement.parseString}.
    """
    # one more than the number of newlines strictly before loc
    return 1 + strg.count("\n", 0, loc)
def line( loc, strg ):
    """Return the full line of text within C{strg} that contains position C{loc}."""
    start = strg.rfind("\n", 0, loc) + 1   # 0 when loc is on the first line
    stop = strg.find("\n", loc)
    return strg[start:] if stop < 0 else strg[start:stop]
def _defaultStartDebugAction( instring, loc, expr ):
    # Default "match attempt" debug hook: announce the expression and the
    # attempt location as both an offset and (line, column).
    print ("Match " + _ustr(expr) + " at loc " + _ustr(loc) + "(%d,%d)" % ( lineno(loc,instring), col(loc,instring) ))
def _defaultSuccessDebugAction( instring, startloc, endloc, expr, toks ):
    # Default "match succeeded" debug hook: show the expression and its tokens.
    print ("Matched " + _ustr(expr) + " -> " + str(toks.asList()))
def _defaultExceptionDebugAction( instring, loc, expr, exc ):
    # Default "match failed" debug hook: report the raised parse exception.
    print ("Exception raised:" + _ustr(exc))
def nullDebugAction(*args):
    """'Do-nothing' debug action, to suppress debugging output during parsing."""
    return None
class ParserElement(object):
"""Abstract base level parser element class."""
DEFAULT_WHITE_CHARS = " \n\t\r"
verbose_stacktrace = False
    def setDefaultWhitespaceChars( chars ):
        """Overrides the default whitespace chars
        """
        # Affects only ParserElements constructed AFTER this call; existing
        # instances keep the whiteChars they copied at construction time.
        ParserElement.DEFAULT_WHITE_CHARS = chars
    setDefaultWhitespaceChars = staticmethod(setDefaultWhitespaceChars)
    def __init__( self, savelist=False ):
        """Base initializer for all parser elements; C{savelist} controls
        whether matched tokens are grouped as a sub-list in the results."""
        self.parseAction = list()
        self.failAction = None
        #~ self.name = "<unknown>" # don't define self.name, let subclasses try/except upcall
        self.strRepr = None                     # cached __str__ representation
        self.resultsName = None
        self.saveAsList = savelist
        self.skipWhitespace = True
        self.whiteChars = ParserElement.DEFAULT_WHITE_CHARS
        self.copyDefaultWhiteChars = True
        self.mayReturnEmpty = False # used when checking for left-recursion
        self.keepTabs = False
        self.ignoreExprs = list()
        self.debug = False
        self.streamlined = False
        self.mayIndexError = True # used to optimize exception handling for subclasses that don't advance parse index
        self.errmsg = ""
        self.modalResults = True # used to mark results names as modal (report only last) or cumulative (list all)
        self.debugActions = ( None, None, None ) #custom debug actions
        self.re = None                          # compiled regex, where a subclass uses one
        self.callPreparse = True # used to avoid redundant calls to preParse
        self.callDuringTry = False              # run parse actions even during lookahead/tryParse
def copy( self ):
"""Make a copy of this C{ParserElement}. Useful for defining different parse actions
for the same parsing pattern, using copies of the original parse element."""
cpy = copy.copy( self )
cpy.parseAction = self.parseAction[:]
cpy.ignoreExprs = self.ignoreExprs[:]
if self.copyDefaultWhiteChars:
cpy.whiteChars = ParserElement.DEFAULT_WHITE_CHARS
return cpy
def setName( self, name ):
"""Define name for this expression, for use in debugging."""
self.name = name
self.errmsg = "Expected " + self.name
if hasattr(self,"exception"):
self.exception.msg = self.errmsg
return self
def setResultsName( self, name, listAllMatches=False ):
"""Define name for referencing matching tokens as a nested attribute
of the returned parse results.
NOTE: this returns a *copy* of the original C{ParserElement} object;
this is so that the client can define a basic element, such as an
integer, and reference it in multiple places with different names.
You can also set results names using the abbreviated syntax,
C{expr("name")} in place of C{expr.setResultsName("name")} -
see L{I{__call__}<__call__>}.
"""
newself = self.copy()
newself.resultsName = name
newself.modalResults = not listAllMatches
return newself
    def setBreak(self,breakFlag = True):
        """Method to invoke the Python pdb debugger when this element is
        about to be parsed. Set C{breakFlag} to True to enable, False to
        disable.
        """
        if breakFlag:
            # wrap the current _parse in a closure that drops into pdb first
            _parseMethod = self._parse
            def breaker(instring, loc, doActions=True, callPreParse=True):
                import pdb
                pdb.set_trace()
                return _parseMethod( instring, loc, doActions, callPreParse )
            # remember the original so it can be restored by setBreak(False)
            breaker._originalParseMethod = _parseMethod
            self._parse = breaker
        else:
            if hasattr(self._parse,"_originalParseMethod"):
                self._parse = self._parse._originalParseMethod
        return self
    def _normalizeParseActionArgs( f ):
        """Internal method used to decorate parse actions that take fewer than 3 arguments,
        so that all parse actions can be called as C{f(s,l,t)}.

        Inspects C{f}'s code object to count its positional args (subtracting
        one for bound methods), then wraps it so the standard 3-arg calling
        convention works. Functions declared with C{*args} are passed through
        unchanged."""
        STAR_ARGS = 4   # CO_VARARGS flag bit in co_flags
        # special handling for single-argument builtins
        if (f in singleArgBuiltins):
            numargs = 1
        else:
            try:
                restore = None
                if isinstance(f,type):
                    # for classes, inspect the constructor's signature instead
                    restore = f
                    f = f.__init__
                if not _PY3K:
                    codeObj = f.func_code
                else:
                    # NOTE(review): py3 functions expose __code__, not .code —
                    # this line likely always raises AttributeError on py3,
                    # pushing py3 callables into the fallback below; confirm
                    # this flow is intended before "fixing" it.
                    codeObj = f.code
                if codeObj.co_flags & STAR_ARGS:
                    return f
                numargs = codeObj.co_argcount
                if not _PY3K:
                    if hasattr(f,"im_self"):
                        numargs -= 1
                else:
                    if hasattr(f,"__self__"):
                        numargs -= 1
                if restore:
                    f = restore
            except AttributeError:
                try:
                    if not _PY3K:
                        call_im_func_code = f.__call__.im_func.func_code
                    else:
                        call_im_func_code = f.__code__
                    # not a function, must be a callable object, get info from the
                    # im_func binding of its bound __call__ method
                    if call_im_func_code.co_flags & STAR_ARGS:
                        return f
                    numargs = call_im_func_code.co_argcount
                    if not _PY3K:
                        if hasattr(f.__call__,"im_self"):
                            numargs -= 1
                    else:
                        if hasattr(f.__call__,"__self__"):
                            # NOTE(review): subtracts 0 where the three parallel
                            # branches subtract 1 — possibly compensating for the
                            # py3 path using f.__code__ above (whose argcount has
                            # no implicit self); confirm before changing.
                            numargs -= 0
                except AttributeError:
                    if not _PY3K:
                        call_func_code = f.__call__.func_code
                    else:
                        call_func_code = f.__call__.__code__
                    # not a bound method, get info directly from __call__ method
                    if call_func_code.co_flags & STAR_ARGS:
                        return f
                    numargs = call_func_code.co_argcount
                    if not _PY3K:
                        if hasattr(f.__call__,"im_self"):
                            numargs -= 1
                    else:
                        if hasattr(f.__call__,"__self__"):
                            numargs -= 1
        #~ print ("adding function %s with %d args" % (f.func_name,numargs))
        if numargs == 3:
            return f
        else:
            # build a 3-arg adapter that drops the arguments f doesn't take
            if numargs > 3:
                def tmp(s,l,t):
                    # NOTE(review): >3-arg case re-supplies the bound __self__
                    # explicitly — only meaningful for callable objects; confirm.
                    return f(f.__call__.__self__, s,l,t)
            if numargs == 2:
                def tmp(s,l,t):
                    return f(l,t)
            elif numargs == 1:
                def tmp(s,l,t):
                    return f(t)
            else: #~ numargs == 0:
                def tmp(s,l,t):
                    return f()
            # preserve the wrapped function's identity for debugging/introspection
            try:
                tmp.__name__ = f.__name__
            except (AttributeError,TypeError):
                # no need for special handling if attribute doesnt exist
                pass
            try:
                tmp.__doc__ = f.__doc__
            except (AttributeError,TypeError):
                # no need for special handling if attribute doesnt exist
                pass
            try:
                tmp.__dict__.update(f.__dict__)
            except (AttributeError,TypeError):
                # no need for special handling if attribute doesnt exist
                pass
            return tmp
    _normalizeParseActionArgs = staticmethod(_normalizeParseActionArgs)
def setParseAction( self, *fns, **kwargs ):
"""Define action to perform when successfully matching parse element definition.
Parse action fn is a callable method with 0-3 arguments, called as C{fn(s,loc,toks)},
C{fn(loc,toks)}, C{fn(toks)}, or just C{fn()}, where:
- s = the original string being parsed (see note below)
- loc = the location of the matching substring
- toks = a list of the matched tokens, packaged as a ParseResults object
If the functions in fns modify the tokens, they can return them as the return
value from fn, and the modified list of tokens will replace the original.
Otherwise, fn does not need to return any value.
Note: the default parsing behavior is to expand tabs in the input string
before starting the parsing process. See L{I{parseString}<parseString>} for more information
on parsing strings containing <TAB>s, and suggested methods to maintain a
consistent view of the parsed string, the parse location, and line and column
positions within the parsed string.
"""
self.parseAction = list(map(self._normalizeParseActionArgs, list(fns)))
self.callDuringTry = ("callDuringTry" in kwargs and kwargs["callDuringTry"])
return self
def addParseAction( self, *fns, **kwargs ):
"""Add parse action to expression's list of parse actions. See L{I{setParseAction}<setParseAction>}."""
self.parseAction += list(map(self._normalizeParseActionArgs, list(fns)))
self.callDuringTry = self.callDuringTry or ("callDuringTry" in kwargs and kwargs["callDuringTry"])
return self
def setFailAction( self, fn ):
"""Define action to perform if parsing fails at this expression.
Fail acton fn is a callable function that takes the arguments
C{fn(s,loc,expr,err)} where:
- s = string being parsed
- loc = location where expression match was attempted and failed
- expr = the parse expression that failed
- err = the exception thrown
The function returns no value. It may throw C{ParseFatalException}
if it is desired to stop parsing immediately."""
self.failAction = fn
return self
def _skipIgnorables( self, instring, loc ):
exprsFound = True
while exprsFound:
exprsFound = False
for e in self.ignoreExprs:
try:
while 1:
loc,dummy = e._parse( instring, loc )
exprsFound = True
except ParseException:
pass
return loc
def preParse( self, instring, loc ):
if self.ignoreExprs:
loc = self._skipIgnorables( instring, loc )
if self.skipWhitespace:
wt = self.whiteChars
instrlen = len(instring)
while loc < instrlen and instring[loc] in wt:
loc += 1
return loc
    def parseImpl( self, instring, loc, doActions=True ):
        # Default matching hook: match nothing and consume nothing.
        # Subclasses override this with their actual matching logic.
        return loc, []
    def postParse( self, instring, loc, tokenlist ):
        # Post-match hook: subclasses may transform the raw token list here;
        # the default passes tokens through unchanged.
        return tokenlist
    #~ @profile
    def _parseNoCache( self, instring, loc, doActions=True, callPreParse=True ):
        # Core match driver: preParse -> parseImpl -> postParse -> parse actions,
        # with debug/fail hooks. The debugging path duplicates the plain path so
        # the common (non-debug) case stays as fast as possible.
        debugging = ( self.debug ) #and doActions )
        if debugging or self.failAction:
            #~ print ("Match",self,"at loc",loc,"(%d,%d)" % ( lineno(loc,instring), col(loc,instring) ))
            if (self.debugActions[0] ):
                self.debugActions[0]( instring, loc, self )
            if callPreParse and self.callPreparse:
                preloc = self.preParse( instring, loc )
            else:
                preloc = loc
            tokensStart = preloc
            try:
                try:
                    loc,tokens = self.parseImpl( instring, preloc, doActions )
                except IndexError:
                    # ran off the end of the input: report as a ParseException at EOS
                    raise ParseException( instring, len(instring), self.errmsg, self )
            except ParseBaseException:
                #~ print ("Exception raised:", err)
                err = None
                if self.debugActions[2]:
                    err = sys.exc_info()[1]
                    self.debugActions[2]( instring, tokensStart, self, err )
                if self.failAction:
                    if err is None:
                        err = sys.exc_info()[1]
                    self.failAction( instring, tokensStart, self, err )
                raise
        else:
            # fast path: no debugging, no fail action
            if callPreParse and self.callPreparse:
                preloc = self.preParse( instring, loc )
            else:
                preloc = loc
            tokensStart = preloc
            if self.mayIndexError or loc >= len(instring):
                try:
                    loc,tokens = self.parseImpl( instring, preloc, doActions )
                except IndexError:
                    raise ParseException( instring, len(instring), self.errmsg, self )
            else:
                # subclass guarantees no IndexError: skip the try/except overhead
                loc,tokens = self.parseImpl( instring, preloc, doActions )
        tokens = self.postParse( instring, loc, tokens )
        retTokens = ParseResults( tokens, self.resultsName, asList=self.saveAsList, modal=self.modalResults )
        if self.parseAction and (doActions or self.callDuringTry):
            if debugging:
                try:
                    for fn in self.parseAction:
                        tokens = fn( instring, tokensStart, retTokens )
                        if tokens is not None:
                            # action returned replacement tokens: rewrap them
                            retTokens = ParseResults( tokens,
                                                      self.resultsName,
                                                      asList=self.saveAsList and isinstance(tokens,(ParseResults,list)),
                                                      modal=self.modalResults )
                except ParseBaseException:
                    #~ print "Exception raised in user parse action:", err
                    if (self.debugActions[2] ):
                        err = sys.exc_info()[1]
                        self.debugActions[2]( instring, tokensStart, self, err )
                    raise
            else:
                for fn in self.parseAction:
                    tokens = fn( instring, tokensStart, retTokens )
                    if tokens is not None:
                        retTokens = ParseResults( tokens,
                                                  self.resultsName,
                                                  asList=self.saveAsList and isinstance(tokens,(ParseResults,list)),
                                                  modal=self.modalResults )
        if debugging:
            #~ print ("Matched",self,"->",retTokens.asList())
            if (self.debugActions[1] ):
                self.debugActions[1]( instring, tokensStart, loc, self, retTokens )
        return loc, retTokens
def tryParse( self, instring, loc ):
try:
return self._parse( instring, loc, doActions=False )[0]
except ParseFatalException:
raise ParseException( instring, loc, self.errmsg, self)
# this method gets repeatedly called during backtracking with the same arguments -
# we can cache these arguments and save ourselves the trouble of re-parsing the contained expression
def _parseCache( self, instring, loc, doActions=True, callPreParse=True ):
lookup = (self,instring,loc,callPreParse,doActions)
if lookup in ParserElement._exprArgCache:
value = ParserElement._exprArgCache[ lookup ]
if isinstance(value,Exception):
raise value
return value
else:
try:
value = self._parseNoCache( instring, loc, doActions, callPreParse )
ParserElement._exprArgCache[ lookup ] = (value[0],value[1].copy())
return value
except ParseBaseException:
pe = sys.exc_info()[1]
ParserElement._exprArgCache[ lookup ] = pe
raise
_parse = _parseNoCache
# argument cache for optimizing repeated calls when backtracking through recursive expressions
_exprArgCache = {}
    def resetCache():
        """Clear the packrat memo cache (shared by all ParserElements)."""
        ParserElement._exprArgCache.clear()
    resetCache = staticmethod(resetCache)
    # global flag: has packrat memoization been switched on?
    _packratEnabled = False
    def enablePackrat():
        """Enables "packrat" parsing, which adds memoizing to the parsing logic.
        Repeated parse attempts at the same string location (which happens
        often in many complex grammars) can immediately return a cached value,
        instead of re-executing parsing/validating code. Memoizing is done of
        both valid results and parsing exceptions.
        This speedup may break existing programs that use parse actions that
        have side-effects. For this reason, packrat parsing is disabled when
        you first import pyparsing. To activate the packrat feature, your
        program must call the class method C{ParserElement.enablePackrat()}. If
        your program uses C{psyco} to "compile as you go", you must call
        C{enablePackrat} before calling C{psyco.full()}. If you do not do this,
        Python will crash. For best results, call C{enablePackrat()} immediately
        after importing pyparsing.
        """
        if not ParserElement._packratEnabled:
            ParserElement._packratEnabled = True
            # reroute the shared dispatch point to the memoizing implementation
            ParserElement._parse = ParserElement._parseCache
    enablePackrat = staticmethod(enablePackrat)
def parseString( self, instring, parseAll=False ):
"""Execute the parse expression with the given string.
This is the main interface to the client code, once the complete
expression has been built.
If you want the grammar to require that the entire input string be
successfully parsed, then set C{parseAll} to True (equivalent to ending
the grammar with C{StringEnd()}).
Note: C{parseString} implicitly calls C{expandtabs()} on the input string,
in order to report proper column numbers in parse actions.
If the input string contains tabs and
the grammar uses parse actions that use the C{loc} argument to index into the
string being parsed, you can ensure you have a consistent view of the input
string by:
- calling C{parseWithTabs} on your grammar before calling C{parseString}
(see L{I{parseWithTabs}<parseWithTabs>})
- define your parse action using the full C{(s,loc,toks)} signature, and
reference the input string using the parse action's C{s} argument
- explictly expand the tabs in your input string before calling
C{parseString}
"""
ParserElement.resetCache()
if not self.streamlined:
self.streamline()
#~ self.saveAsList = True
for e in self.ignoreExprs:
e.streamline()
if not self.keepTabs:
instring = instring.expandtabs()
try:
loc, tokens = self._parse( instring, 0 )
if parseAll:
#loc = self.preParse( instring, loc )
se = StringEnd()
se._parse( instring, loc )
except ParseBaseException:
if ParserElement.verbose_stacktrace:
raise
else:
# catch and re-raise exception from here, clears out pyparsing internal stack trace
exc = sys.exc_info()[1]
raise exc
else:
return tokens
    def scanString( self, instring, maxMatches=_MAX_INT ):
        """Scan the input string for expression matches. Each match will return the
        matching tokens, start location, and end location. May be called with optional
        C{maxMatches} argument, to clip scanning after 'n' matches are found.
        Note that the start and end locations are reported relative to the string
        being parsed. See L{I{parseString}<parseString>} for more information on parsing
        strings with embedded tabs.

        This is a generator yielding C{(tokens, startLoc, endLoc)} triples."""
        if not self.streamlined:
            self.streamline()
        for e in self.ignoreExprs:
            e.streamline()
        if not self.keepTabs:
            instring = _ustr(instring).expandtabs()
        instrlen = len(instring)
        loc = 0
        # hoist attribute lookups out of the scanning loop
        preparseFn = self.preParse
        parseFn = self._parse
        ParserElement.resetCache()
        matches = 0
        try:
            while loc <= instrlen and matches < maxMatches:
                try:
                    preloc = preparseFn( instring, loc )
                    nextLoc,tokens = parseFn( instring, preloc, callPreParse=False )
                except ParseException:
                    # NOTE(review): assumes preParse itself never raises
                    # ParseException (otherwise preloc would be unbound /
                    # stale here) — confirm.
                    loc = preloc+1
                else:
                    if nextLoc > loc:
                        matches += 1
                        yield tokens, preloc, nextLoc
                        loc = nextLoc
                    else:
                        # zero-width match: step forward to avoid an infinite loop
                        loc = preloc+1
        except ParseBaseException:
            if ParserElement.verbose_stacktrace:
                raise
            else:
                # catch and re-raise exception from here, clears out pyparsing internal stack trace
                exc = sys.exc_info()[1]
                raise exc
def transformString( self, instring ):
"""Extension to C{scanString}, to modify matching text with modified tokens that may
be returned from a parse action. To use C{transformString}, define a grammar and
attach a parse action to it that modifies the returned token list.
Invoking C{transformString()} on a target string will then scan for matches,
and replace the matched text patterns according to the logic in the parse
action. C{transformString()} returns the resulting transformed string."""
out = []
lastE = 0
# force preservation of <TAB>s, to minimize unwanted transformation of string, and to
# keep string locs straight between transformString and scanString
self.keepTabs = True
try:
for t,s,e in self.scanString( instring ):
out.append( instring[lastE:s] )
if t:
if isinstance(t,ParseResults):
out += t.asList()
elif isinstance(t,list):
out += t
else:
out.append(t)
lastE = e
out.append(instring[lastE:])
return "".join(map(_ustr,_flatten(out)))
except ParseBaseException:
if ParserElement.verbose_stacktrace:
raise
else:
# catch and re-raise exception from here, clears out pyparsing internal stack trace
exc = sys.exc_info()[1]
raise exc
def searchString( self, instring, maxMatches=_MAX_INT ):
"""Another extension to C{scanString}, simplifying the access to the tokens found
to match the given parse expression. May be called with optional
C{maxMatches} argument, to clip searching after 'n' matches are found.
"""
try:
return ParseResults([ t for t,s,e in self.scanString( instring, maxMatches ) ])
except ParseBaseException:
if ParserElement.verbose_stacktrace:
raise
else:
# catch and re-raise exception from here, clears out pyparsing internal stack trace
exc = sys.exc_info()[1]
raise exc
def __add__(self, other ):
"""Implementation of + operator - returns And"""
if isinstance( other, basestring ):
other = Literal( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return And( [ self, other ] )
def __radd__(self, other ):
"""Implementation of + operator when left operand is not a C{ParserElement}"""
if isinstance( other, basestring ):
other = Literal( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return other + self
def __sub__(self, other):
"""Implementation of - operator, returns C{And} with error stop"""
if isinstance( other, basestring ):
other = Literal( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return And( [ self, And._ErrorStop(), other ] )
def __rsub__(self, other ):
"""Implementation of - operator when left operand is not a C{ParserElement}"""
if isinstance( other, basestring ):
other = Literal( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return other - self
    def __mul__(self,other):
        """Implementation of * operator, allows use of C{expr * 3} in place of
        C{expr + expr + expr}. Expressions may also me multiplied by a 2-integer
        tuple, similar to C{{min,max}} multipliers in regular expressions. Tuples
        may also include C{None} as in:
         - C{expr*(n,None)} or C{expr*(n,)} is equivalent
           to C{expr*n + ZeroOrMore(expr)}
           (read as "at least n instances of C{expr}")
         - C{expr*(None,n)} is equivalent to C{expr*(0,n)}
           (read as "0 to n instances of C{expr}")
         - C{expr*(None,None)} is equivalent to C{ZeroOrMore(expr)}
         - C{expr*(1,None)} is equivalent to C{OneOrMore(expr)}
        Note that C{expr*(None,n)} does not raise an exception if
        more than n exprs exist in the input stream; that is,
        C{expr*(None,n)} does not enforce a maximum number of expr
        occurrences. If this behavior is desired, then write
        C{expr*(None,n) + ~expr}
        """
        # Normalize `other` into (minElements, optElements) where optElements
        # counts the optional repetitions beyond the mandatory minimum.
        if isinstance(other,int):
            minElements, optElements = other,0
        elif isinstance(other,tuple):
            # pad 1-tuples to 2-tuples: expr*(n,) == expr*(n, None)
            other = (other + (None, None))[:2]
            if other[0] is None:
                other = (0, other[1])
            if isinstance(other[0],int) and other[1] is None:
                # open-ended: (n, None)
                if other[0] == 0:
                    return ZeroOrMore(self)
                if other[0] == 1:
                    return OneOrMore(self)
                else:
                    return self*other[0] + ZeroOrMore(self)
            elif isinstance(other[0],int) and isinstance(other[1],int):
                minElements, optElements = other
                optElements -= minElements
            else:
                raise TypeError("cannot multiply 'ParserElement' and ('%s','%s') objects", type(other[0]),type(other[1]))
        else:
            raise TypeError("cannot multiply 'ParserElement' and '%s' objects", type(other))
        # Validate the normalized bounds.
        if minElements < 0:
            raise ValueError("cannot multiply ParserElement by negative value")
        if optElements < 0:
            raise ValueError("second tuple value must be greater or equal to first tuple value")
        if minElements == optElements == 0:
            raise ValueError("cannot multiply ParserElement by 0 or (0,0)")
        if (optElements):
            # nest Optionals so that matches greedily consume up to the maximum
            def makeOptionalList(n):
                if n>1:
                    return Optional(self + makeOptionalList(n-1))
                else:
                    return Optional(self)
            if minElements:
                if minElements == 1:
                    ret = self + makeOptionalList(optElements)
                else:
                    ret = And([self]*minElements) + makeOptionalList(optElements)
            else:
                ret = makeOptionalList(optElements)
        else:
            if minElements == 1:
                ret = self
            else:
                ret = And([self]*minElements)
        return ret
def __rmul__(self, other):
return self.__mul__(other)
def __or__(self, other ):
"""Implementation of | operator - returns C{MatchFirst}"""
if isinstance( other, basestring ):
other = Literal( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return MatchFirst( [ self, other ] )
def __ror__(self, other ):
"""Implementation of | operator when left operand is not a C{ParserElement}"""
if isinstance( other, basestring ):
other = Literal( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return other | self
def __xor__(self, other ):
"""Implementation of ^ operator - returns C{Or}"""
if isinstance( other, basestring ):
other = Literal( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return Or( [ self, other ] )
def __rxor__(self, other ):
"""Implementation of ^ operator when left operand is not a C{ParserElement}"""
if isinstance( other, basestring ):
other = Literal( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return other ^ self
def __and__(self, other ):
"""Implementation of & operator - returns C{Each}"""
if isinstance( other, basestring ):
other = Literal( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return Each( [ self, other ] )
def __rand__(self, other ):
"""Implementation of & operator when left operand is not a C{ParserElement}"""
if isinstance( other, basestring ):
other = Literal( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return other & self
    def __invert__( self ):
        """Implementation of ~ operator - returns C{NotAny} (a negative lookahead on this element)."""
        return NotAny( self )
    def __call__(self, name):
        """Shortcut for C{setResultsName}, with C{listAllMatches=default}::
             userdata = Word(alphas).setResultsName("name") + Word(nums+"-").setResultsName("socsecno")
           could be written as::
             userdata = Word(alphas)("name") + Word(nums+"-")("socsecno")
        """
        return self.setResultsName(name)
    def suppress( self ):
        """Suppresses the output of this C{ParserElement}; useful to keep punctuation from
           cluttering up returned output.  Returns a C{Suppress} wrapper around this element.
        """
        return Suppress( self )
    def leaveWhitespace( self ):
        """Disables the skipping of whitespace before matching the characters in the
           C{ParserElement}'s defined pattern.  This is normally only used internally by
           the pyparsing module, but may be needed in some whitespace-sensitive grammars.
        """
        self.skipWhitespace = False
        # return self so calls can be chained during grammar construction
        return self
    def setWhitespaceChars( self, chars ):
        """Overrides the default whitespace chars
        """
        self.skipWhitespace = True
        self.whiteChars = chars
        # NOTE(review): presumably marks the chars as explicitly chosen so they
        # are not replaced by enclosing expressions' defaults - confirm in copy().
        self.copyDefaultWhiteChars = False
        return self
    def parseWithTabs( self ):
        """Overrides default behavior to expand <TAB>s to spaces before parsing the input string.
           Must be called before C{parseString} when the input grammar contains elements that
           match <TAB> characters."""
        self.keepTabs = True
        return self
def ignore( self, other ):
"""Define expression to be ignored (e.g., comments) while doing pattern
matching; may be called repeatedly, to define multiple comment or other
ignorable patterns.
"""
if isinstance( other, Suppress ):
if other not in self.ignoreExprs:
self.ignoreExprs.append( other.copy() )
else:
self.ignoreExprs.append( Suppress( other.copy() ) )
return self
    def setDebugActions( self, startAction, successAction, exceptionAction ):
        """Enable display of debugging messages while doing pattern matching.
           Any action passed as None (or another false value) falls back to the
           corresponding module-level default action."""
        self.debugActions = (startAction or _defaultStartDebugAction,
                             successAction or _defaultSuccessDebugAction,
                             exceptionAction or _defaultExceptionDebugAction)
        self.debug = True
        return self
def setDebug( self, flag=True ):
"""Enable display of debugging messages while doing pattern matching.
Set C{flag} to True to enable, False to disable."""
if flag:
self.setDebugActions( _defaultStartDebugAction, _defaultSuccessDebugAction, _defaultExceptionDebugAction )
else:
self.debug = False
return self
    def __str__( self ):
        """Use the element's assigned name as its string form."""
        return self.name
    def __repr__( self ):
        """repr() mirrors str(), converted through the module's _ustr helper."""
        return _ustr(self)
    def streamline( self ):
        """Mark this element as streamlined and invalidate its cached string form."""
        self.streamlined = True
        self.strRepr = None
        return self
    def checkRecursion( self, parseElementList ):
        """Recursion-check hook; a no-op for leaf elements (expression containers override this)."""
        pass
def validate( self, validateTrace=[] ):
"""Check defined expressions for valid structure, check for infinite recursive definitions."""
self.checkRecursion( [] )
def parseFile( self, file_or_filename, parseAll=False ):
"""Execute the parse expression on the given file or filename.
If a filename is specified (instead of a file object),
the entire file is opened, read, and closed before parsing.
"""
try:
file_contents = file_or_filename.read()
except AttributeError:
f = open(file_or_filename, "rb")
file_contents = f.read()
f.close()
try:
return self.parseString(file_contents, parseAll)
except ParseBaseException:
# catch and re-raise exception from here, clears out pyparsing internal stack trace
exc = sys.exc_info()[1]
raise exc
    def getException(self):
        """Build a ParseException pre-filled with this element's error message."""
        return ParseException("",0,self.errmsg,self)
def __getattr__(self,aname):
if aname == "myException":
self.myException = ret = self.getException();
return ret;
else:
raise AttributeError("no such attribute " + aname)
    def __eq__(self,other):
        """Equality: identity or identical state for two elements; for a string,
           test whether this element exactly parses the entire string."""
        if isinstance(other, ParserElement):
            return self is other or self.__dict__ == other.__dict__
        elif isinstance(other, basestring):
            try:
                self.parseString(_ustr(other), parseAll=True)
                return True
            except ParseBaseException:
                return False
        else:
            # NOTE(review): this compares the super() proxy object itself with
            # C{==} (not super().__eq__(other)); almost certainly always False -
            # confirm the intent before changing.
            return super(ParserElement,self)==other
    def __ne__(self,other):
        """Inequality defined as the negation of C{__eq__}."""
        return not (self == other)
    def __hash__(self):
        """Identity-based hash, so elements stay usable as dict/set keys."""
        return hash(id(self))
    def __req__(self,other):
        """Reflected equality helper; delegates to C{__eq__}."""
        return self == other
    def __rne__(self,other):
        """Reflected inequality helper; negation of C{__eq__}."""
        return not (self == other)
class Token(ParserElement):
    """Abstract C{ParserElement} subclass, for defining atomic matching patterns."""

    def __init__(self):
        # Atomic tokens never accumulate sub-results.
        super(Token, self).__init__(savelist=False)

    def setName(self, name):
        # Refresh the error message so failures report the new name.
        renamed = super(Token, self).setName(name)
        self.errmsg = "Expected " + self.name
        return renamed
class Empty(Token):
    """An empty token, will always match."""

    def __init__(self):
        super(Empty, self).__init__()
        self.name = "Empty"
        # Matching nothing always succeeds and never reads past the input.
        self.mayReturnEmpty = True
        self.mayIndexError = False
class NoMatch(Token):
    """A token that will never match."""

    def __init__(self):
        super(NoMatch, self).__init__()
        self.name = "NoMatch"
        self.mayReturnEmpty = True
        self.mayIndexError = False
        self.errmsg = "Unmatchable token"

    def parseImpl(self, instring, loc, doActions=True):
        # Unconditionally fail, reusing the element's cached exception object.
        failure = self.myException
        failure.loc = loc
        failure.pstr = instring
        raise failure
class Literal(Token):
    """Token to exactly match a specified string."""

    def __init__(self, matchString):
        super(Literal, self).__init__()
        self.match = matchString
        self.matchLen = len(matchString)
        if self.matchLen == 0:
            # Degenerate case: behave like Empty, as the warning suggests.
            warnings.warn("null string passed to Literal; use Empty() instead",
                            SyntaxWarning, stacklevel=2)
            self.__class__ = Empty
        else:
            self.firstMatchChar = matchString[0]
        self.name = '"%s"' % _ustr(self.match)
        self.errmsg = "Expected " + self.name
        self.mayReturnEmpty = False
        self.mayIndexError = False

    # Performance tuning: this routine gets called a *lot*.  For a one-character
    # match string whose first character matches, short-circuit as quickly as
    # possible and avoid calling startswith.
    def parseImpl(self, instring, loc, doActions=True):
        if instring[loc] == self.firstMatchChar and \
           (self.matchLen == 1 or instring.startswith(self.match, loc)):
            return loc + self.matchLen, self.match
        failure = self.myException
        failure.loc = loc
        failure.pstr = instring
        raise failure
_L = Literal
class Keyword(Token):
    """Token to exactly match a specified string as a keyword, that is, it must be
       immediately followed by a non-keyword character.  Compare with C{Literal}::
         Literal("if") will match the leading 'if' in 'ifAndOnlyIf'.
         Keyword("if") will not; it will only match the leading 'if in 'if x=1', or 'if(y==2)'
       Accepts two optional constructor arguments in addition to the keyword string:
       C{identChars} is a string of characters that would be valid identifier characters,
       defaulting to all alphanumerics + "_" and "$"; C{caseless} allows case-insensitive
       matching, default is False.
    """
    DEFAULT_KEYWORD_CHARS = alphanums+"_$"
    def __init__( self, matchString, identChars=DEFAULT_KEYWORD_CHARS, caseless=False ):
        super(Keyword,self).__init__()
        self.match = matchString
        self.matchLen = len(matchString)
        try:
            self.firstMatchChar = matchString[0]
        except IndexError:
            # NOTE(review): unlike Literal, an empty matchString only warns;
            # firstMatchChar stays unset, so a later parse attempt would raise
            # AttributeError - confirm whether this should mirror Literal's
            # swap to Empty.
            warnings.warn("null string passed to Keyword; use Empty() instead",
                            SyntaxWarning, stacklevel=2)
        self.name = '"%s"' % self.match
        self.errmsg = "Expected " + self.name
        self.mayReturnEmpty = False
        #self.myException.msg = self.errmsg
        self.mayIndexError = False
        self.caseless = caseless
        if caseless:
            # Pre-fold the keyword and identifier charset once, so each parse
            # attempt only has to upper-case the input slice.
            self.caselessmatch = matchString.upper()
            identChars = identChars.upper()
        self.identChars = _str2dict(identChars)
    def parseImpl( self, instring, loc, doActions=True ):
        # A keyword matches only when the text matches AND the neighboring
        # characters (if any) are not identifier characters.
        if self.caseless:
            if ( (instring[ loc:loc+self.matchLen ].upper() == self.caselessmatch) and
                 (loc >= len(instring)-self.matchLen or instring[loc+self.matchLen].upper() not in self.identChars) and
                 (loc == 0 or instring[loc-1].upper() not in self.identChars) ):
                return loc+self.matchLen, self.match
        else:
            if (instring[loc] == self.firstMatchChar and
                (self.matchLen==1 or instring.startswith(self.match,loc)) and
                (loc >= len(instring)-self.matchLen or instring[loc+self.matchLen] not in self.identChars) and
                (loc == 0 or instring[loc-1] not in self.identChars) ):
                return loc+self.matchLen, self.match
        #~ raise ParseException( instring, loc, self.errmsg )
        exc = self.myException
        exc.loc = loc
        exc.pstr = instring
        raise exc
    def copy(self):
        # Copies deliberately revert to the class-wide default identifier charset.
        c = super(Keyword,self).copy()
        c.identChars = Keyword.DEFAULT_KEYWORD_CHARS
        return c
    def setDefaultKeywordChars( chars ):
        """Overrides the default Keyword chars
        """
        Keyword.DEFAULT_KEYWORD_CHARS = chars
    setDefaultKeywordChars = staticmethod(setDefaultKeywordChars)
class CaselessLiteral(Literal):
    """Token to match a specified string, ignoring case of letters.
       Note: the matched results will always be in the case of the given
       match string, NOT the case of the input text.
    """

    def __init__(self, matchString):
        # Match against the upper-cased text, but report the original spelling.
        super(CaselessLiteral, self).__init__(matchString.upper())
        # Preserve the defining literal.
        self.returnString = matchString
        self.name = "'%s'" % self.returnString
        self.errmsg = "Expected " + self.name

    def parseImpl(self, instring, loc, doActions=True):
        if instring[loc:loc+self.matchLen].upper() == self.match:
            return loc + self.matchLen, self.returnString
        failure = self.myException
        failure.loc = loc
        failure.pstr = instring
        raise failure
class CaselessKeyword(Keyword):
    """Caseless version of C{Keyword}: matches the keyword regardless of case,
       returning the spelling given at construction time.
    """
    def __init__( self, matchString, identChars=Keyword.DEFAULT_KEYWORD_CHARS ):
        super(CaselessKeyword,self).__init__( matchString, identChars, caseless=True )
    def parseImpl( self, instring, loc, doActions=True ):
        # Bug fix: also require a non-identifier character (or string start)
        # *before* the match, mirroring Keyword.parseImpl's caseless branch.
        # Previously CaselessKeyword("if") could match inside "zif".
        if ( (instring[ loc:loc+self.matchLen ].upper() == self.caselessmatch) and
             (loc >= len(instring)-self.matchLen or instring[loc+self.matchLen].upper() not in self.identChars) and
             (loc == 0 or instring[loc-1].upper() not in self.identChars) ):
            return loc+self.matchLen, self.match
        exc = self.myException
        exc.loc = loc
        exc.pstr = instring
        raise exc
class Word(Token):
    """Token for matching words composed of allowed character sets.
       Defined with string containing all allowed initial characters,
       an optional string containing allowed body characters (if omitted,
       defaults to the initial character set), and an optional minimum,
       maximum, and/or exact length.  The default value for C{min} is 1 (a
       minimum value < 1 is not valid); the default values for C{max} and C{exact}
       are 0, meaning no maximum or exact length restriction.
    """
    def __init__( self, initChars, bodyChars=None, min=1, max=0, exact=0, asKeyword=False ):
        super(Word,self).__init__()
        self.initCharsOrig = initChars
        self.initChars = _str2dict(initChars)
        if bodyChars :
            self.bodyCharsOrig = bodyChars
            self.bodyChars = _str2dict(bodyChars)
        else:
            # No separate body set: body characters default to the initial set.
            self.bodyCharsOrig = initChars
            self.bodyChars = _str2dict(initChars)
        self.maxSpecified = max > 0
        if min < 1:
            raise ValueError("cannot specify a minimum length < 1; use Optional(Word()) if zero-length word is permitted")
        self.minLen = min
        if max > 0:
            self.maxLen = max
        else:
            self.maxLen = _MAX_INT
        if exact > 0:
            # exact overrides both min and max.
            self.maxLen = exact
            self.minLen = exact
        self.name = _ustr(self)
        self.errmsg = "Expected " + self.name
        #self.myException.msg = self.errmsg
        self.mayIndexError = False
        self.asKeyword = asKeyword
        # Fast path: with no length constraints and no spaces in the charsets,
        # the whole word can be matched by a single compiled regex.
        # NOTE(review): self.re appears to be assigned only on this fast path -
        # confirm that a default exists when the condition below is false.
        if ' ' not in self.initCharsOrig+self.bodyCharsOrig and (min==1 and max==0 and exact==0):
            if self.bodyCharsOrig == self.initCharsOrig:
                self.reString = "[%s]+" % _escapeRegexRangeChars(self.initCharsOrig)
            elif len(self.bodyCharsOrig) == 1:
                self.reString = "%s[%s]*" % \
                                      (re.escape(self.initCharsOrig),
                                      _escapeRegexRangeChars(self.bodyCharsOrig),)
            else:
                self.reString = "[%s][%s]*" % \
                                      (_escapeRegexRangeChars(self.initCharsOrig),
                                      _escapeRegexRangeChars(self.bodyCharsOrig),)
            if self.asKeyword:
                self.reString = r"\b"+self.reString+r"\b"
            try:
                self.re = re.compile( self.reString )
            except:
                # Regex construction failed: fall back to the manual scan below.
                self.re = None
    def parseImpl( self, instring, loc, doActions=True ):
        if self.re:
            # Regex fast path.
            result = self.re.match(instring,loc)
            if not result:
                exc = self.myException
                exc.loc = loc
                exc.pstr = instring
                raise exc
            loc = result.end()
            return loc,result.group()
        if not(instring[ loc ] in self.initChars):
            #~ raise ParseException( instring, loc, self.errmsg )
            exc = self.myException
            exc.loc = loc
            exc.pstr = instring
            raise exc
        start = loc
        loc += 1
        instrlen = len(instring)
        bodychars = self.bodyChars
        maxloc = start + self.maxLen
        maxloc = min( maxloc, instrlen )
        # Consume body characters up to the maximum allowed length.
        while loc < maxloc and instring[loc] in bodychars:
            loc += 1
        throwException = False
        if loc - start < self.minLen:
            throwException = True
        if self.maxSpecified and loc < instrlen and instring[loc] in bodychars:
            # A max length was given but more body characters follow: the word
            # is longer than allowed, so the match must fail.
            throwException = True
        if self.asKeyword:
            # Keyword mode: the word must not butt up against body characters.
            if (start>0 and instring[start-1] in bodychars) or (loc<instrlen and instring[loc] in bodychars):
                throwException = True
        if throwException:
            #~ raise ParseException( instring, loc, self.errmsg )
            exc = self.myException
            exc.loc = loc
            exc.pstr = instring
            raise exc
        return loc, instring[start:loc]
    def __str__( self ):
        try:
            return super(Word,self).__str__()
        except:
            # No explicit name set; build the abbreviated representation below.
            pass
        if self.strRepr is None:
            def charsAsStr(s):
                # Abbreviate long character sets for readability.
                if len(s)>4:
                    return s[:4]+"..."
                else:
                    return s
            if ( self.initCharsOrig != self.bodyCharsOrig ):
                self.strRepr = "W:(%s,%s)" % ( charsAsStr(self.initCharsOrig), charsAsStr(self.bodyCharsOrig) )
            else:
                self.strRepr = "W:(%s)" % charsAsStr(self.initCharsOrig)
        return self.strRepr
class Regex(Token):
    """Token for matching strings that match a given regular expression.
       Defined with string specifying the regular expression in a form recognized by the inbuilt Python re module.
    """
    # Used to recognize pre-compiled pattern objects passed to the constructor.
    compiledREtype = type(re.compile("[A-Z]"))
    def __init__( self, pattern, flags=0):
        """The parameters pattern and flags are passed to the re.compile() function as-is.  See the Python re module for an explanation of the acceptable patterns and flags."""
        super(Regex,self).__init__()
        if isinstance(pattern, basestring):
            if len(pattern) == 0:
                warnings.warn("null string passed to Regex; use Empty() instead",
                        SyntaxWarning, stacklevel=2)
            self.pattern = pattern
            self.flags = flags
            try:
                self.re = re.compile(self.pattern, self.flags)
                self.reString = self.pattern
            except sre_constants.error:
                warnings.warn("invalid pattern (%s) passed to Regex" % pattern,
                    SyntaxWarning, stacklevel=2)
                raise
        elif isinstance(pattern, Regex.compiledREtype):
            # Pre-compiled pattern: adopt it directly; the flags argument is
            # stored for reference only and is not re-applied.
            self.re = pattern
            self.pattern = \
                self.reString = str(pattern)
            self.flags = flags
        else:
            raise ValueError("Regex may only be constructed with a string or a compiled RE object")
        self.name = _ustr(self)
        self.errmsg = "Expected " + self.name
        #self.myException.msg = self.errmsg
        self.mayIndexError = False
        self.mayReturnEmpty = True
    def parseImpl( self, instring, loc, doActions=True ):
        result = self.re.match(instring,loc)
        if not result:
            exc = self.myException
            exc.loc = loc
            exc.pstr = instring
            raise exc
        loc = result.end()
        d = result.groupdict()
        ret = ParseResults(result.group())
        if d:
            # Expose named regex groups as named parse results.
            for k in d:
                ret[k] = d[k]
        return loc,ret
    def __str__( self ):
        try:
            return super(Regex,self).__str__()
        except:
            # No explicit name set; fall back to the pattern representation.
            pass
        if self.strRepr is None:
            self.strRepr = "Re:(%s)" % repr(self.pattern)
        return self.strRepr
class QuotedString(Token):
    """Token for matching strings that are delimited by quoting characters.
    """
    def __init__( self, quoteChar, escChar=None, escQuote=None, multiline=False, unquoteResults=True, endQuoteChar=None):
        """
           Defined with the following parameters:
            - quoteChar - string of one or more characters defining the quote delimiting string
            - escChar - character to escape quotes, typically backslash (default=None)
            - escQuote - special quote sequence to escape an embedded quote string (such as SQL's "" to escape an embedded ") (default=None)
            - multiline - boolean indicating whether quotes can span multiple lines (default=False)
            - unquoteResults - boolean indicating whether the matched text should be unquoted (default=True)
            - endQuoteChar - string of one or more characters defining the end of the quote delimited string (default=None => same as quoteChar)
        """
        super(QuotedString,self).__init__()
        # remove white space from quote chars - wont work anyway
        quoteChar = quoteChar.strip()
        if len(quoteChar) == 0:
            warnings.warn("quoteChar cannot be the empty string",SyntaxWarning,stacklevel=2)
            raise SyntaxError()
        if endQuoteChar is None:
            endQuoteChar = quoteChar
        else:
            endQuoteChar = endQuoteChar.strip()
            if len(endQuoteChar) == 0:
                warnings.warn("endQuoteChar cannot be the empty string",SyntaxWarning,stacklevel=2)
                raise SyntaxError()
        self.quoteChar = quoteChar
        self.quoteCharLen = len(quoteChar)
        self.firstQuoteChar = quoteChar[0]
        self.endQuoteChar = endQuoteChar
        self.endQuoteCharLen = len(endQuoteChar)
        self.escChar = escChar
        self.escQuote = escQuote
        self.unquoteResults = unquoteResults
        # Build a regex: open quote, then a run of characters that are not the
        # end quote (or escape constructs), then the close quote.  Non-multiline
        # mode additionally excludes bare newlines inside the quotes.
        if multiline:
            self.flags = re.MULTILINE | re.DOTALL
            self.pattern = r'%s(?:[^%s%s]' % \
                ( re.escape(self.quoteChar),
                  _escapeRegexRangeChars(self.endQuoteChar[0]),
                  (escChar is not None and _escapeRegexRangeChars(escChar) or '') )
        else:
            self.flags = 0
            self.pattern = r'%s(?:[^%s\n\r%s]' % \
                ( re.escape(self.quoteChar),
                  _escapeRegexRangeChars(self.endQuoteChar[0]),
                  (escChar is not None and _escapeRegexRangeChars(escChar) or '') )
        if len(self.endQuoteChar) > 1:
            # Multi-character end quote: also allow any proper prefix of it
            # that is not followed by the remainder of the end quote.
            self.pattern += (
                '|(?:' + ')|(?:'.join(["%s[^%s]" % (re.escape(self.endQuoteChar[:i]),
                                               _escapeRegexRangeChars(self.endQuoteChar[i]))
                                    for i in range(len(self.endQuoteChar)-1,0,-1)]) + ')'
                )
        if escQuote:
            self.pattern += (r'|(?:%s)' % re.escape(escQuote))
        if escChar:
            self.pattern += (r'|(?:%s.)' % re.escape(escChar))
            self.escCharReplacePattern = re.escape(self.escChar)+"(.)"
        self.pattern += (r')*%s' % re.escape(self.endQuoteChar))
        try:
            self.re = re.compile(self.pattern, self.flags)
            self.reString = self.pattern
        except sre_constants.error:
            warnings.warn("invalid pattern (%s) passed to Regex" % self.pattern,
                SyntaxWarning, stacklevel=2)
            raise
        self.name = _ustr(self)
        self.errmsg = "Expected " + self.name
        #self.myException.msg = self.errmsg
        self.mayIndexError = False
        self.mayReturnEmpty = True
    def parseImpl( self, instring, loc, doActions=True ):
        # Cheap first-character test before running the full regex.
        result = instring[loc] == self.firstQuoteChar and self.re.match(instring,loc) or None
        if not result:
            exc = self.myException
            exc.loc = loc
            exc.pstr = instring
            raise exc
        loc = result.end()
        ret = result.group()
        if self.unquoteResults:
            # strip off quotes
            ret = ret[self.quoteCharLen:-self.endQuoteCharLen]
            if isinstance(ret,basestring):
                # replace escaped characters
                if self.escChar:
                    ret = re.sub(self.escCharReplacePattern,"\g<1>",ret)
                # replace escaped quotes
                if self.escQuote:
                    ret = ret.replace(self.escQuote, self.endQuoteChar)
        return loc, ret
    def __str__( self ):
        try:
            return super(QuotedString,self).__str__()
        except:
            pass
        if self.strRepr is None:
            self.strRepr = "quoted string, starting with %s ending with %s" % (self.quoteChar, self.endQuoteChar)
        return self.strRepr
class CharsNotIn(Token):
    """Token for matching words composed of characters *not* in a given set.
       Defined with string containing all disallowed characters, and an optional
       minimum, maximum, and/or exact length.  The default value for C{min} is 1 (a
       minimum value < 1 is not valid); the default values for C{max} and C{exact}
       are 0, meaning no maximum or exact length restriction.
    """

    def __init__(self, notChars, min=1, max=0, exact=0):
        super(CharsNotIn, self).__init__()
        # The disallowed characters themselves are significant, so never skip
        # leading whitespace.
        self.skipWhitespace = False
        self.notChars = notChars
        if min < 1:
            raise ValueError("cannot specify a minimum length < 1; use Optional(CharsNotIn()) if zero-length char group is permitted")
        if exact > 0:
            # An exact length overrides both min and max.
            self.minLen = self.maxLen = exact
        else:
            self.minLen = min
            if max > 0:
                self.maxLen = max
            else:
                self.maxLen = _MAX_INT
        self.name = _ustr(self)
        self.errmsg = "Expected " + self.name
        self.mayReturnEmpty = (self.minLen == 0)
        self.mayIndexError = False

    def parseImpl(self, instring, loc, doActions=True):
        if instring[loc] in self.notChars:
            failure = self.myException
            failure.loc = loc
            failure.pstr = instring
            raise failure
        start = loc
        loc += 1
        forbidden = self.notChars
        # Scan forward until a disallowed character, the max length, or the
        # end of the input is reached.
        scan_limit = min(start + self.maxLen, len(instring))
        while loc < scan_limit and instring[loc] not in forbidden:
            loc += 1
        if loc - start < self.minLen:
            failure = self.myException
            failure.loc = loc
            failure.pstr = instring
            raise failure
        return loc, instring[start:loc]

    def __str__(self):
        try:
            return super(CharsNotIn, self).__str__()
        except:
            pass
        if self.strRepr is None:
            # Abbreviate long character sets for readability.
            if len(self.notChars) > 4:
                self.strRepr = "!W:(%s...)" % self.notChars[:4]
            else:
                self.strRepr = "!W:(%s)" % self.notChars
        return self.strRepr
class White(Token):
    """Special matching class for matching whitespace.  Normally, whitespace is ignored
       by pyparsing grammars.  This class is included when some whitespace structures
       are significant.  Define with a string containing the whitespace characters to be
       matched; default is C{" \\t\\r\\n"}.  Also takes optional C{min}, C{max}, and C{exact} arguments,
       as defined for the C{Word} class."""
    whiteStrs = {
        " " : "<SPC>",
        "\t": "<TAB>",
        "\n": "<LF>",
        "\r": "<CR>",
        "\f": "<FF>",
        }

    def __init__(self, ws=" \t\r\n", min=1, max=0, exact=0):
        super(White, self).__init__()
        self.matchWhite = ws
        # Every character to be matched here is significant, so remove it from
        # the set of characters skipped before matching.
        self.setWhitespaceChars("".join([c for c in self.whiteChars if c not in self.matchWhite]))
        self.name = "".join([White.whiteStrs[c] for c in self.matchWhite])
        self.mayReturnEmpty = True
        self.errmsg = "Expected " + self.name
        if exact > 0:
            # An exact length overrides both min and max.
            self.minLen = self.maxLen = exact
        else:
            self.minLen = min
            if max > 0:
                self.maxLen = max
            else:
                self.maxLen = _MAX_INT

    def parseImpl(self, instring, loc, doActions=True):
        if instring[loc] not in self.matchWhite:
            failure = self.myException
            failure.loc = loc
            failure.pstr = instring
            raise failure
        start = loc
        loc += 1
        # Consume matching whitespace up to the maximum allowed length.
        scan_limit = min(start + self.maxLen, len(instring))
        while loc < scan_limit and instring[loc] in self.matchWhite:
            loc += 1
        if loc - start < self.minLen:
            failure = self.myException
            failure.loc = loc
            failure.pstr = instring
            raise failure
        return loc, instring[start:loc]
class _PositionToken(Token):
    """Abstract base for zero-width tokens that assert a position in the input."""

    def __init__(self):
        super(_PositionToken, self).__init__()
        # Name the token after the concrete subclass for error messages.
        self.name = self.__class__.__name__
        self.mayReturnEmpty = True
        self.mayIndexError = False
class GoToColumn(_PositionToken):
    """Token to advance to a specific column of input text; useful for tabular report scraping."""
    def __init__( self, colno ):
        super(GoToColumn,self).__init__()
        self.col = colno
    def preParse( self, instring, loc ):
        # Skip whitespace (and ignorable expressions) until the target column
        # is reached or the input ends.
        if col(loc,instring) != self.col:
            instrlen = len(instring)
            if self.ignoreExprs:
                loc = self._skipIgnorables( instring, loc )
            while loc < instrlen and instring[loc].isspace() and col( loc, instring ) != self.col :
                loc += 1
        return loc
    def parseImpl( self, instring, loc, doActions=True ):
        thiscol = col( loc, instring )
        if thiscol > self.col:
            # Already past the target column: cannot rewind.
            raise ParseException( instring, loc, "Text not in expected column", self )
        # Return the skipped text as the match.
        newloc = loc + self.col - thiscol
        ret = instring[ loc: newloc ]
        return newloc, ret
class LineStart(_PositionToken):
    """Matches if current position is at the beginning of a line within the parse string"""
    def __init__( self ):
        super(LineStart,self).__init__()
        # Newlines are significant here, so do not skip them as whitespace.
        self.setWhitespaceChars( ParserElement.DEFAULT_WHITE_CHARS.replace("\n","") )
        self.errmsg = "Expected start of line"
        #self.myException.msg = self.errmsg
    def preParse( self, instring, loc ):
        preloc = super(LineStart,self).preParse(instring,loc)
        if instring[preloc] == "\n":
            # NOTE(review): advances the *original* loc, not preloc - looks
            # suspicious; confirm against the intended whitespace handling.
            loc += 1
        return loc
    def parseImpl( self, instring, loc, doActions=True ):
        # Succeed at offset 0, at the first non-whitespace position of the
        # input, or immediately after a newline.
        if not( loc==0 or
            (loc == self.preParse( instring, 0 )) or
            (instring[loc-1] == "\n") ): #col(loc, instring) != 1:
            #~ raise ParseException( instring, loc, "Expected start of line" )
            exc = self.myException
            exc.loc = loc
            exc.pstr = instring
            raise exc
        return loc, []
class LineEnd(_PositionToken):
    """Matches if current position is at the end of a line within the parse string"""
    def __init__( self ):
        super(LineEnd,self).__init__()
        # Newlines are significant here, so do not skip them as whitespace.
        self.setWhitespaceChars( ParserElement.DEFAULT_WHITE_CHARS.replace("\n","") )
        self.errmsg = "Expected end of line"
        #self.myException.msg = self.errmsg
    def parseImpl( self, instring, loc, doActions=True ):
        if loc<len(instring):
            if instring[loc] == "\n":
                # Consume the newline and return it as the matched text.
                return loc+1, "\n"
            else:
                #~ raise ParseException( instring, loc, "Expected end of line" )
                exc = self.myException
                exc.loc = loc
                exc.pstr = instring
                raise exc
        elif loc == len(instring):
            # At end of input: match empty, advancing past the virtual EOL.
            return loc+1, []
        else:
            # Position beyond the end of the string: fail.
            exc = self.myException
            exc.loc = loc
            exc.pstr = instring
            raise exc
class StringStart(_PositionToken):
    """Matches if current position is at the beginning of the parse string"""

    def __init__(self):
        super(StringStart, self).__init__()
        self.errmsg = "Expected start of text"

    def parseImpl(self, instring, loc, doActions=True):
        # Succeed at offset 0, or when everything before loc is just
        # whitespace and ignorable expressions.
        if loc != 0 and loc != self.preParse(instring, 0):
            failure = self.myException
            failure.loc = loc
            failure.pstr = instring
            raise failure
        return loc, []
class StringEnd(_PositionToken):
    """Matches if current position is at the end of the parse string"""
    def __init__( self ):
        super(StringEnd,self).__init__()
        self.errmsg = "Expected end of text"
    def parseImpl( self, instring, loc, doActions=True ):
        # Cleanup: the original had a fourth branch after covering loc <, ==,
        # and > len(instring) - unreachable dead code, now removed.
        if loc < len(instring):
            exc = self.myException
            exc.loc = loc
            exc.pstr = instring
            raise exc
        elif loc == len(instring):
            # Match empty, advancing past the virtual end-of-string position.
            return loc+1, []
        else:
            # Already past the end: trivially matches without advancing.
            return loc, []
class WordStart(_PositionToken):
    """Matches if the current position is at the beginning of a Word, and
       is not preceded by any character in a given set of wordChars
       (default=C{printables}).  Emulates regex word-boundary behavior when
       constructed as C{WordStart(alphanums)}.  C{WordStart} will also match
       at the beginning of the string being parsed, or at the beginning of a line.
    """

    def __init__(self, wordChars = printables):
        super(WordStart, self).__init__()
        self.wordChars = _str2dict(wordChars)
        self.errmsg = "Not at the start of a word"

    def parseImpl(self, instring, loc, doActions=True):
        # Position 0 is always a word start; elsewhere require a word char
        # here and a non-word char immediately before.
        if loc != 0 and (instring[loc-1] in self.wordChars
                         or instring[loc] not in self.wordChars):
            failure = self.myException
            failure.loc = loc
            failure.pstr = instring
            raise failure
        return loc, []
class WordEnd(_PositionToken):
    """Matches if the current position is at the end of a Word, and
       is not followed by any character in a given set of wordChars
       (default=C{printables}).  Emulates regex word-boundary behavior when
       constructed as C{WordEnd(alphanums)}.  C{WordEnd} will also match at the end of
       the string being parsed, or at the end of a line.
    """
    def __init__(self, wordChars = printables):
        super(WordEnd,self).__init__()
        self.wordChars = _str2dict(wordChars)
        # Leading whitespace must not be skipped, or the boundary test below
        # would inspect the wrong position.
        self.skipWhitespace = False
        self.errmsg = "Not at the end of a word"
    def parseImpl(self, instring, loc, doActions=True ):
        instrlen = len(instring)
        if instrlen>0 and loc<instrlen:
            # Require a non-word char here and a word char immediately before.
            # NOTE(review): at loc==0 the instring[loc-1] lookup wraps to the
            # *last* character of the input - confirm this edge case is intended.
            if (instring[loc] in self.wordChars or
                instring[loc-1] not in self.wordChars):
                #~ raise ParseException( instring, loc, "Expected end of word" )
                exc = self.myException
                exc.loc = loc
                exc.pstr = instring
                raise exc
        return loc, []
class ParseExpression(ParserElement):
    """Abstract subclass of ParserElement, for combining and post-processing parsed tokens."""
    def __init__( self, exprs, savelist = False ):
        super(ParseExpression,self).__init__(savelist)
        # Normalize the constructor argument to a list of ParserElements.
        if isinstance( exprs, list ):
            self.exprs = exprs
        elif isinstance( exprs, basestring ):
            self.exprs = [ Literal( exprs ) ]
        else:
            try:
                self.exprs = list( exprs )
            except TypeError:
                # Not iterable: wrap the single expression.
                self.exprs = [ exprs ]
        self.callPreparse = False
    def __getitem__( self, i ):
        return self.exprs[i]
    def append( self, other ):
        self.exprs.append( other )
        # Invalidate the cached string representation.
        self.strRepr = None
        return self
    def leaveWhitespace( self ):
        """Extends leaveWhitespace defined in base class, and also invokes leaveWhitespace on
           all contained expressions."""
        self.skipWhitespace = False
        # Copy children before mutating them, so sub-expressions shared with
        # other parts of the grammar keep their own whitespace behavior.
        self.exprs = [ e.copy() for e in self.exprs ]
        for e in self.exprs:
            e.leaveWhitespace()
        return self
    def ignore( self, other ):
        # Register the ignorable on this expression, then propagate the stored
        # copy (ignoreExprs[-1]) to every child.  A Suppress is added at most once.
        if isinstance( other, Suppress ):
            if other not in self.ignoreExprs:
                super( ParseExpression, self).ignore( other )
                for e in self.exprs:
                    e.ignore( self.ignoreExprs[-1] )
        else:
            super( ParseExpression, self).ignore( other )
            for e in self.exprs:
                e.ignore( self.ignoreExprs[-1] )
        return self
    def __str__( self ):
        try:
            return super(ParseExpression,self).__str__()
        except:
            pass
        if self.strRepr is None:
            self.strRepr = "%s:(%s)" % ( self.__class__.__name__, _ustr(self.exprs) )
        return self.strRepr
    def streamline( self ):
        super(ParseExpression,self).streamline()
        for e in self.exprs:
            e.streamline()
        # collapse nested And's of the form And( And( And( a,b), c), d) to And( a,b,c,d )
        # but only if there are no parse actions or resultsNames on the nested And's
        # (likewise for Or's and MatchFirst's)
        if ( len(self.exprs) == 2 ):
            # Merge a nested first element of the same class into this one.
            other = self.exprs[0]
            if ( isinstance( other, self.__class__ ) and
                  not(other.parseAction) and
                  other.resultsName is None and
                  not other.debug ):
                self.exprs = other.exprs[:] + [ self.exprs[1] ]
                self.strRepr = None
                self.mayReturnEmpty |= other.mayReturnEmpty
                self.mayIndexError |= other.mayIndexError
            # Likewise for a nested last element of the same class.
            other = self.exprs[-1]
            if ( isinstance( other, self.__class__ ) and
                  not(other.parseAction) and
                  other.resultsName is None and
                  not other.debug ):
                self.exprs = self.exprs[:-1] + other.exprs[:]
                self.strRepr = None
                self.mayReturnEmpty |= other.mayReturnEmpty
                self.mayIndexError |= other.mayIndexError
        return self
    def setResultsName( self, name, listAllMatches=False ):
        # No extra behavior; kept as an explicit override point.
        ret = super(ParseExpression,self).setResultsName(name,listAllMatches)
        return ret
    def validate( self, validateTrace=[] ):
        # NOTE: the mutable default is safe here - the list is copied below,
        # never mutated in place.
        tmp = validateTrace[:]+[self]
        for e in self.exprs:
            e.validate(tmp)
        self.checkRecursion( [] )
class And(ParseExpression):
    """Requires all given C{ParseExpressions} to be found in the given order.
       Expressions may be separated by whitespace.
       May be constructed using the '+' operator.
    """
    class _ErrorStop(Empty):
        # Sentinel expression marking the point after which parse failures are
        # raised as non-backtrackable ParseSyntaxExceptions (see parseImpl).
        def __init__(self, *args, **kwargs):
            # NOTE(review): calls super(Empty,...) rather than
            # super(And._ErrorStop,...), skipping Empty.__init__ entirely -
            # confirm this is intentional.
            super(Empty,self).__init__(*args, **kwargs)
            self.leaveWhitespace()
    def __init__( self, exprs, savelist = True ):
        super(And,self).__init__(exprs, savelist)
        # An And can only match the empty string if every element can.
        self.mayReturnEmpty = True
        for e in self.exprs:
            if not e.mayReturnEmpty:
                self.mayReturnEmpty = False
                break
        # Adopt the leading element's whitespace handling for pre-parsing.
        self.setWhitespaceChars( exprs[0].whiteChars )
        self.skipWhitespace = exprs[0].skipWhitespace
        self.callPreparse = True
    def parseImpl( self, instring, loc, doActions=True ):
        # pass False as last arg to _parse for first element, since we already
        # pre-parsed the string as part of our And pre-parsing
        loc, resultlist = self.exprs[0]._parse( instring, loc, doActions, callPreParse=False )
        errorStop = False
        for e in self.exprs[1:]:
            if isinstance(e, And._ErrorStop):
                errorStop = True
                continue
            if errorStop:
                # Past the error stop: convert ordinary parse failures into
                # non-backtrackable ParseSyntaxExceptions.
                try:
                    loc, exprtokens = e._parse( instring, loc, doActions )
                except ParseSyntaxException:
                    raise
                except ParseBaseException:
                    # sys.exc_info() is the py2/py3-compatible way to capture
                    # the active exception here.
                    pe = sys.exc_info()[1]
                    raise ParseSyntaxException(pe)
                except IndexError:
                    raise ParseSyntaxException( ParseException(instring, len(instring), self.errmsg, self) )
            else:
                loc, exprtokens = e._parse( instring, loc, doActions )
            # Only append tokens that carry content or named results.
            if exprtokens or exprtokens.keys():
                resultlist += exprtokens
        return loc, resultlist
    def __iadd__(self, other ):
        # In-place += : append (promoting bare strings to Literal tokens).
        if isinstance( other, basestring ):
            other = Literal( other )
        return self.append( other ) #And( [ self, other ] )
    def checkRecursion( self, parseElementList ):
        subRecCheckList = parseElementList[:] + [ self ]
        for e in self.exprs:
            e.checkRecursion( subRecCheckList )
            # Once an element must consume input, later elements cannot
            # left-recurse back to this And's starting position.
            if not e.mayReturnEmpty:
                break
    def __str__( self ):
        if hasattr(self,"name"):
            return self.name
        if self.strRepr is None:
            self.strRepr = "{" + " ".join( [ _ustr(e) for e in self.exprs ] ) + "}"
        return self.strRepr
class Or(ParseExpression):
    """Requires that at least one C{ParseExpression} is found.
    If two expressions match, the expression that matches the longest string will be used.
    May be constructed using the '^' operator.
    """
    def __init__( self, exprs, savelist = False ):
        super(Or,self).__init__(exprs, savelist)
        # an Or may match empty if any single alternative may
        self.mayReturnEmpty = False
        for e in self.exprs:
            if e.mayReturnEmpty:
                self.mayReturnEmpty = True
                break
    def parseImpl( self, instring, loc, doActions=True ):
        # Try every alternative and keep the one matching the longest span
        # ("longest match wins"); ties go to the earliest alternative because
        # a later match must be strictly longer to replace the current best.
        maxExcLoc = -1
        maxMatchLoc = -1
        maxException = None
        for e in self.exprs:
            try:
                loc2 = e.tryParse( instring, loc )
            except ParseException:
                err = sys.exc_info()[1]
                # remember the failure that advanced furthest, for error reporting
                if err.loc > maxExcLoc:
                    maxException = err
                    maxExcLoc = err.loc
            except IndexError:
                if len(instring) > maxExcLoc:
                    maxException = ParseException(instring,len(instring),e.errmsg,self)
                    maxExcLoc = len(instring)
            else:
                if loc2 > maxMatchLoc:
                    maxMatchLoc = loc2
                    maxMatchExp = e
        if maxMatchLoc < 0:
            # no alternative matched - re-raise the most advanced failure
            if maxException is not None:
                raise maxException
            else:
                raise ParseException(instring, loc, "no defined alternatives to match", self)
        # re-parse the winning alternative, this time running parse actions
        return maxMatchExp._parse( instring, loc, doActions )
    def __ixor__(self, other ):
        # implement expr ^= other by appending in place
        if isinstance( other, basestring ):
            other = Literal( other )
        return self.append( other ) #Or( [ self, other ] )
    def __str__( self ):
        if hasattr(self,"name"):
            return self.name
        if self.strRepr is None:
            self.strRepr = "{" + " ^ ".join( [ _ustr(e) for e in self.exprs ] ) + "}"
        return self.strRepr
    def checkRecursion( self, parseElementList ):
        subRecCheckList = parseElementList[:] + [ self ]
        for e in self.exprs:
            e.checkRecursion( subRecCheckList )
class MatchFirst(ParseExpression):
    """Requires that at least one C{ParseExpression} is found.
       If two expressions match, the first one listed is the one that will match.
       May be constructed using the '|' operator.
    """
    def __init__( self, exprs, savelist = False ):
        super(MatchFirst,self).__init__(exprs, savelist)
        if exprs:
            # a MatchFirst may match empty if any single alternative may
            self.mayReturnEmpty = False
            for e in self.exprs:
                if e.mayReturnEmpty:
                    self.mayReturnEmpty = True
                    break
        else:
            # with no alternatives at all, treat as matching empty
            self.mayReturnEmpty = True
    def parseImpl( self, instring, loc, doActions=True ):
        # try alternatives in listed order; first successful parse wins
        maxExcLoc = -1
        maxException = None
        for e in self.exprs:
            try:
                ret = e._parse( instring, loc, doActions )
                return ret
            except ParseException:
                # Consistency/py3-compat fix: retrieve the exception via
                # sys.exc_info() as the rest of this module does, instead of
                # the Python-2-only "except ParseException, err" syntax.
                err = sys.exc_info()[1]
                # remember the failure that advanced furthest, for error reporting
                if err.loc > maxExcLoc:
                    maxException = err
                    maxExcLoc = err.loc
            except IndexError:
                if len(instring) > maxExcLoc:
                    maxException = ParseException(instring,len(instring),e.errmsg,self)
                    maxExcLoc = len(instring)

        # only got here if no expression matched, raise exception for match that made it the furthest
        else:
            if maxException is not None:
                raise maxException
            else:
                raise ParseException(instring, loc, "no defined alternatives to match", self)
    def __ior__(self, other ):
        # implement expr |= other by appending in place
        if isinstance( other, basestring ):
            other = Literal( other )
        return self.append( other ) #MatchFirst( [ self, other ] )
    def __str__( self ):
        if hasattr(self,"name"):
            return self.name
        if self.strRepr is None:
            self.strRepr = "{" + " | ".join( [ _ustr(e) for e in self.exprs ] ) + "}"
        return self.strRepr
    def checkRecursion( self, parseElementList ):
        subRecCheckList = parseElementList[:] + [ self ]
        for e in self.exprs:
            e.checkRecursion( subRecCheckList )
class Each(ParseExpression):
    """Requires all given C{ParseExpressions} to be found, but in any order.
    Expressions may be separated by whitespace.
    May be constructed using the '&' operator.
    """
    def __init__( self, exprs, savelist = True ):
        super(Each,self).__init__(exprs, savelist)
        # an Each may only match empty if every contained expression may
        self.mayReturnEmpty = True
        for e in self.exprs:
            if not e.mayReturnEmpty:
                self.mayReturnEmpty = False
                break
        self.skipWhitespace = True
        # defer partitioning of exprs into required/optional groups until first parse
        self.initExprGroups = True
    def parseImpl( self, instring, loc, doActions=True ):
        if self.initExprGroups:
            # one-time partition of contained expressions by required multiplicity:
            # optionals (Optional wrappers, or anything that may match empty),
            # zero-or-more, one-or-more, and plain required expressions
            opt1 = [ e.expr for e in self.exprs if isinstance(e,Optional) ]
            opt2 = [ e for e in self.exprs if e.mayReturnEmpty and e not in opt1 ]
            self.optionals = opt1 + opt2
            self.multioptionals = [ e.expr for e in self.exprs if isinstance(e,ZeroOrMore) ]
            self.multirequired = [ e.expr for e in self.exprs if isinstance(e,OneOrMore) ]
            self.required = [ e for e in self.exprs if not isinstance(e,(Optional,ZeroOrMore,OneOrMore)) ]
            self.required += self.multirequired
            self.initExprGroups = False
        tmpLoc = loc
        tmpReqd = self.required[:]
        tmpOpt  = self.optionals[:]
        matchOrder = []
        # Repeatedly sweep the remaining candidates, recording the order in
        # which they match; stop when a full sweep matches nothing new.
        keepMatching = True
        while keepMatching:
            tmpExprs = tmpReqd + tmpOpt + self.multioptionals + self.multirequired
            failed = []
            for e in tmpExprs:
                try:
                    tmpLoc = e.tryParse( instring, tmpLoc )
                except ParseException:
                    failed.append(e)
                else:
                    matchOrder.append(e)
                    if e in tmpReqd:
                        tmpReqd.remove(e)
                    elif e in tmpOpt:
                        tmpOpt.remove(e)
            if len(failed) == len(tmpExprs):
                keepMatching = False
        # any required expressions left unmatched is a parse failure
        if tmpReqd:
            missing = ", ".join( [ _ustr(e) for e in tmpReqd ] )
            raise ParseException(instring,loc,"Missing one or more required elements (%s)" % missing )
        # add any unmatched Optionals, in case they have default values defined
        matchOrder += [e for e in self.exprs if isinstance(e,Optional) and e.expr in tmpOpt]
        # re-parse in the discovered order, this time running parse actions
        resultlist = []
        for e in matchOrder:
            loc,results = e._parse(instring,loc,doActions)
            resultlist.append(results)
        # merge all partial results; duplicate result names are accumulated
        # into combined ParseResults rather than overwritten
        finalResults = ParseResults([])
        for r in resultlist:
            dups = {}
            for k in r.keys():
                if k in finalResults.keys():
                    tmp = ParseResults(finalResults[k])
                    tmp += ParseResults(r[k])
                    dups[k] = tmp
            finalResults += ParseResults(r)
            for k,v in dups.items():
                finalResults[k] = v
        return loc, finalResults
    def __str__( self ):
        if hasattr(self,"name"):
            return self.name
        if self.strRepr is None:
            self.strRepr = "{" + " & ".join( [ _ustr(e) for e in self.exprs ] ) + "}"
        return self.strRepr
    def checkRecursion( self, parseElementList ):
        subRecCheckList = parseElementList[:] + [ self ]
        for e in self.exprs:
            e.checkRecursion( subRecCheckList )
class ParseElementEnhance(ParserElement):
    """Abstract subclass of C{ParserElement}, for combining and post-processing parsed tokens."""
    def __init__( self, expr, savelist=False ):
        super(ParseElementEnhance,self).__init__(savelist)
        if isinstance( expr, basestring ):
            expr = Literal(expr)
        self.expr = expr
        self.strRepr = None
        if expr is not None:
            # inherit parsing characteristics from the wrapped expression
            self.mayIndexError = expr.mayIndexError
            self.mayReturnEmpty = expr.mayReturnEmpty
            self.setWhitespaceChars( expr.whiteChars )
            self.skipWhitespace = expr.skipWhitespace
            self.saveAsList = expr.saveAsList
            self.callPreparse = expr.callPreparse
            self.ignoreExprs.extend(expr.ignoreExprs)
    def parseImpl( self, instring, loc, doActions=True ):
        if self.expr is not None:
            return self.expr._parse( instring, loc, doActions, callPreParse=False )
        else:
            raise ParseException("",loc,self.errmsg,self)
    def leaveWhitespace( self ):
        """Suppress whitespace skipping for this element and (a private copy
        of) its contained expression; returns self for call chaining."""
        self.skipWhitespace = False
        if self.expr is not None:
            # Bug fix: copy and propagate only when a contained expression
            # exists.  Previously self.expr.copy() executed *before* the
            # None check, raising AttributeError for enhancers constructed
            # without an expression (e.g. an as-yet-unassigned Forward).
            # The copy keeps other references to the original expression
            # unaffected by the whitespace change.
            self.expr = self.expr.copy()
            self.expr.leaveWhitespace()
        return self
    def ignore( self, other ):
        # Suppress-wrapped ignorables are deduplicated; in all cases the
        # accepted ignorable is also propagated into the wrapped expression.
        if isinstance( other, Suppress ):
            if other not in self.ignoreExprs:
                super( ParseElementEnhance, self).ignore( other )
                if self.expr is not None:
                    self.expr.ignore( self.ignoreExprs[-1] )
        else:
            super( ParseElementEnhance, self).ignore( other )
            if self.expr is not None:
                self.expr.ignore( self.ignoreExprs[-1] )
        return self
    def streamline( self ):
        super(ParseElementEnhance,self).streamline()
        if self.expr is not None:
            self.expr.streamline()
        return self
    def checkRecursion( self, parseElementList ):
        # encountering ourselves again without consuming input means the
        # grammar is left-recursive
        if self in parseElementList:
            raise RecursiveGrammarException( parseElementList+[self] )
        subRecCheckList = parseElementList[:] + [ self ]
        if self.expr is not None:
            self.expr.checkRecursion( subRecCheckList )
    def validate( self, validateTrace=[] ):
        tmp = validateTrace[:]+[self]
        if self.expr is not None:
            self.expr.validate(tmp)
        self.checkRecursion( [] )
    def __str__( self ):
        try:
            return super(ParseElementEnhance,self).__str__()
        except:
            # base-class __str__ may fail before attributes are fully set up
            pass
        if self.strRepr is None and self.expr is not None:
            self.strRepr = "%s:(%s)" % ( self.__class__.__name__, _ustr(self.expr) )
        return self.strRepr
class FollowedBy(ParseElementEnhance):
    """Positive lookahead for the wrapped parse expression.

    Succeeds only if the wrapped expression matches at the current position,
    but never advances the parse location and always yields an empty token
    list.
    """
    def __init__( self, expr ):
        super(FollowedBy,self).__init__(expr)
        # a lookahead consumes nothing, so it can always "match empty"
        self.mayReturnEmpty = True
    def parseImpl( self, instring, loc, doActions=True ):
        # tryParse raises on failure; on success, deliberately discard the
        # advanced location and report no tokens
        self.expr.tryParse( instring, loc )
        return loc, []
class NotAny(ParseElementEnhance):
    """Lookahead to disallow matching with the given parse expression. C{NotAny}
    does *not* advance the parsing position within the input string, it only
    verifies that the specified parse expression does *not* match at the current
    position.  Also, C{NotAny} does *not* skip over leading whitespace. C{NotAny}
    always returns a null token list.  May be constructed using the '~' operator."""
    def __init__( self, expr ):
        super(NotAny,self).__init__(expr)
        #~ self.leaveWhitespace()
        self.skipWhitespace = False  # do NOT use self.leaveWhitespace(), don't want to propagate to exprs
        self.mayReturnEmpty = True
        self.errmsg = "Found unwanted token, "+_ustr(self.expr)
        #self.myException = ParseException("",0,self.errmsg,self)
    def parseImpl( self, instring, loc, doActions=True ):
        # succeed (returning no tokens) only when the wrapped expression fails
        try:
            self.expr.tryParse( instring, loc )
        except (ParseException,IndexError):
            pass
        else:
            #~ raise ParseException(instring, loc, self.errmsg )
            # NOTE(review): the myException assignment in __init__ is commented
            # out, so self.myException is presumably supplied by ParserElement -
            # confirm before refactoring this error path.
            exc = self.myException
            exc.loc = loc
            exc.pstr = instring
            raise exc
        return loc, []
    def __str__( self ):
        if hasattr(self,"name"):
            return self.name
        if self.strRepr is None:
            self.strRepr = "~{" + _ustr(self.expr) + "}"
        return self.strRepr
class ZeroOrMore(ParseElementEnhance):
    """Optional repetition of zero or more of the given expression."""
    def __init__( self, expr ):
        super(ZeroOrMore,self).__init__(expr)
        # zero repetitions is a legal match, so this may always match empty
        self.mayReturnEmpty = True
    def parseImpl( self, instring, loc, doActions=True ):
        tokens = []
        try:
            # first repetition (the caller has already done the pre-parse)
            loc, tokens = self.expr._parse( instring, loc, doActions, callPreParse=False )
            checkIgnore = bool(self.ignoreExprs)
            while 1:
                if checkIgnore:
                    startLoc = self._skipIgnorables( instring, loc )
                else:
                    startLoc = loc
                loc, reptokens = self.expr._parse( instring, startLoc, doActions )
                if reptokens or reptokens.keys():
                    tokens += reptokens
        except (ParseException,IndexError):
            # a failed repetition simply ends the (possibly empty) sequence
            pass
        return loc, tokens
    def __str__( self ):
        if hasattr(self,"name"):
            return self.name
        if self.strRepr is None:
            self.strRepr = "[" + _ustr(self.expr) + "]..."
        return self.strRepr
    def setResultsName( self, name, listAllMatches=False ):
        # repeated matches are naturally list-like, so force list-style results
        ret = super(ZeroOrMore,self).setResultsName(name,listAllMatches)
        ret.saveAsList = True
        return ret
class OneOrMore(ParseElementEnhance):
    """Repetition of one or more of the given expression."""
    def parseImpl( self, instring, loc, doActions=True ):
        # the first occurrence is mandatory - let its exception propagate
        loc, tokens = self.expr._parse( instring, loc, doActions, callPreParse=False )
        try:
            checkIgnore = bool(self.ignoreExprs)
            while 1:
                if checkIgnore:
                    startLoc = self._skipIgnorables( instring, loc )
                else:
                    startLoc = loc
                loc, reptokens = self.expr._parse( instring, startLoc, doActions )
                if reptokens or reptokens.keys():
                    tokens += reptokens
        except (ParseException,IndexError):
            # failure after the first match just terminates the repetition
            pass
        return loc, tokens
    def __str__( self ):
        if hasattr(self,"name"):
            return self.name
        if self.strRepr is None:
            self.strRepr = "{" + _ustr(self.expr) + "}..."
        return self.strRepr
    def setResultsName( self, name, listAllMatches=False ):
        # repeated matches are naturally list-like, so force list-style results
        ret = super(OneOrMore,self).setResultsName(name,listAllMatches)
        ret.saveAsList = True
        return ret
class _NullToken(object):
def __bool__(self):
return False
__nonzero__ = __bool__
def __str__(self):
return ""
_optionalNotMatched = _NullToken()
class Optional(ParseElementEnhance):
    """Optional matching of the given expression.
    A default return string can also be specified, if the optional expression
    is not found.
    """
    def __init__( self, exprs, default=_optionalNotMatched ):
        super(Optional,self).__init__( exprs, savelist=False )
        self.defaultValue = default
        # absence is acceptable, so this element may always match empty
        self.mayReturnEmpty = True
    def parseImpl( self, instring, loc, doActions=True ):
        try:
            loc, tokens = self.expr._parse( instring, loc, doActions, callPreParse=False )
        except (ParseException,IndexError):
            # no match - substitute the default value, if one was supplied
            if self.defaultValue is _optionalNotMatched:
                tokens = []
            elif self.expr.resultsName:
                # propagate the wrapped expression's results name onto the default
                tokens = ParseResults([ self.defaultValue ])
                tokens[self.expr.resultsName] = self.defaultValue
            else:
                tokens = [ self.defaultValue ]
        return loc, tokens
    def __str__( self ):
        if hasattr(self,"name"):
            return self.name
        if self.strRepr is None:
            self.strRepr = "[" + _ustr(self.expr) + "]"
        return self.strRepr
class SkipTo(ParseElementEnhance):
    """Token for skipping over all undefined text until the matched expression is found.
    If C{include} is set to true, the matched expression is also parsed (the skipped text
    and matched expression are returned as a 2-element list).  The C{ignore}
    argument is used to define grammars (typically quoted strings and comments) that
    might contain false matches.
    """
    def __init__( self, other, include=False, ignore=None, failOn=None ):
        super( SkipTo, self ).__init__( other )
        self.ignoreExpr = ignore
        self.mayReturnEmpty = True
        self.mayIndexError = False
        self.includeMatch = include
        self.asList = False
        # failOn may be given as a plain string - promote it to a Literal
        if failOn is not None and isinstance(failOn, basestring):
            self.failOn = Literal(failOn)
        else:
            self.failOn = failOn
        self.errmsg = "No match found for "+_ustr(self.expr)
        #self.myException = ParseException("",0,self.errmsg,self)
    def parseImpl( self, instring, loc, doActions=True ):
        startLoc = loc
        instrlen = len(instring)
        expr = self.expr
        failParse = False
        # scan forward one character at a time until the target expression matches
        while loc <= instrlen:
            try:
                if self.failOn:
                    # abort the whole SkipTo if the fail-on expression appears first
                    try:
                        self.failOn.tryParse(instring, loc)
                    except ParseBaseException:
                        pass
                    else:
                        failParse = True
                        raise ParseException(instring, loc, "Found expression " + str(self.failOn))
                    failParse = False
                if self.ignoreExpr is not None:
                    # hop over ignorable content (comments, quoted strings, ...)
                    while 1:
                        try:
                            loc = self.ignoreExpr.tryParse(instring,loc)
                            # print "found ignoreExpr, advance to", loc
                        except ParseBaseException:
                            break
                # probe for the target expression at the current position
                expr._parse( instring, loc, doActions=False, callPreParse=False )
                skipText = instring[startLoc:loc]
                if self.includeMatch:
                    # re-parse with actions and append the match to the skipped text
                    loc,mat = expr._parse(instring,loc,doActions,callPreParse=False)
                    if mat:
                        skipRes = ParseResults( skipText )
                        skipRes += mat
                        return loc, [ skipRes ]
                    else:
                        return loc, [ skipText ]
                else:
                    return loc, [ skipText ]
            except (ParseException,IndexError):
                if failParse:
                    # the failOn expression matched - do not keep scanning
                    raise
                else:
                    loc += 1
        # NOTE(review): the myException assignment in __init__ is commented
        # out, so self.myException is presumably supplied by ParserElement -
        # confirm before refactoring this error path.
        exc = self.myException
        exc.loc = loc
        exc.pstr = instring
        raise exc
class Forward(ParseElementEnhance):
    """Forward declaration of an expression to be defined later -
    used for recursive grammars, such as algebraic infix notation.
    When the expression is known, it is assigned to the C{Forward} variable using the '<<' operator.
    Note: take care when assigning to C{Forward} not to overlook precedence of operators.
    Specifically, '|' has a lower precedence than '<<', so that::
        fwdExpr << a | b | c
    will actually be evaluated as::
        (fwdExpr << a) | b | c
    thereby leaving b and c out as parseable alternatives.  It is recommended that you
    explicitly group the values inserted into the C{Forward}::
        fwdExpr << (a | b | c)
    """
    def __init__( self, other=None ):
        super(Forward,self).__init__( other, savelist=False )
    def __lshift__( self, other ):
        # bind the deferred expression and adopt its parsing characteristics
        if isinstance( other, basestring ):
            other = Literal(other)
        self.expr = other
        self.mayReturnEmpty = other.mayReturnEmpty
        self.strRepr = None
        self.mayIndexError = self.expr.mayIndexError
        self.mayReturnEmpty = self.expr.mayReturnEmpty
        self.setWhitespaceChars( self.expr.whiteChars )
        self.skipWhitespace = self.expr.skipWhitespace
        self.saveAsList = self.expr.saveAsList
        self.ignoreExprs.extend(self.expr.ignoreExprs)
        return None
    def leaveWhitespace( self ):
        # unlike ParseElementEnhance, do NOT copy or propagate into self.expr
        self.skipWhitespace = False
        return self
    def streamline( self ):
        if not self.streamlined:
            self.streamlined = True
            if self.expr is not None:
                self.expr.streamline()
        return self
    def validate( self, validateTrace=[] ):
        # guard against infinite recursion through self-referential grammars
        if self not in validateTrace:
            tmp = validateTrace[:]+[self]
            if self.expr is not None:
                self.expr.validate(tmp)
        self.checkRecursion([])
    def __str__( self ):
        if hasattr(self,"name"):
            return self.name
        # temporarily swap our class to _ForwardNoRecurse so that printing a
        # recursive grammar renders "..." instead of recursing forever
        self._revertClass = self.__class__
        self.__class__ = _ForwardNoRecurse
        try:
            if self.expr is not None:
                retString = _ustr(self.expr)
            else:
                retString = "None"
        finally:
            self.__class__ = self._revertClass
        return self.__class__.__name__ + ": " + retString
    def copy(self):
        if self.expr is not None:
            return super(Forward,self).copy()
        else:
            # not yet assigned - return a new Forward deferring to this one
            ret = Forward()
            ret << self
            return ret
class _ForwardNoRecurse(Forward):
    """Temporary stand-in class used by Forward.__str__ to break the infinite
    recursion that printing a self-referential grammar would otherwise cause."""
    def __str__( self ):
        return "..."
class TokenConverter(ParseElementEnhance):
    """Abstract subclass of ParseExpression, for converting parsed results."""
    def __init__( self, expr, savelist=False ):
        # savelist is accepted for interface compatibility but deliberately
        # not forwarded; converters manage their own result-list behavior
        super(TokenConverter,self).__init__( expr )#, savelist )
        self.saveAsList = False
class Upcase(TokenConverter):
    """Converter to upper case all matching tokens.

    DEPRECATED - use the upcaseTokens parse action instead."""
    def __init__(self, *args):
        super(Upcase,self).__init__(*args)
        warnings.warn("Upcase class is deprecated, use upcaseTokens parse action instead",
                       DeprecationWarning,stacklevel=2)
    def postParse( self, instring, loc, tokenlist ):
        # applies the Python 2 string.upper function to each matched token
        return list(map( string.upper, tokenlist ))
class Combine(TokenConverter):
    """Converter to concatenate all matching tokens to a single string.
    By default, the matching patterns must also be contiguous in the input string;
    this can be disabled by specifying C{'adjacent=False'} in the constructor.
    """
    def __init__( self, expr, joinString="", adjacent=True ):
        super(Combine,self).__init__( expr )
        # suppress whitespace-stripping in contained parse expressions, but re-enable it on the Combine itself
        if adjacent:
            self.leaveWhitespace()
        self.adjacent = adjacent
        self.skipWhitespace = True
        self.joinString = joinString
        self.callPreparse = True
    def ignore( self, other ):
        # in adjacent mode, ignorables must not propagate into the contained
        # expression or they would break the contiguity requirement
        if self.adjacent:
            ParserElement.ignore(self, other)
        else:
            super( Combine, self).ignore( other )
        return self
    def postParse( self, instring, loc, tokenlist ):
        # collapse all matched tokens into one string joined by joinString
        retToks = tokenlist.copy()
        del retToks[:]
        retToks += ParseResults([ "".join(tokenlist._asStringList(self.joinString)) ], modal=self.modalResults)
        if self.resultsName and len(retToks.keys())>0:
            return [ retToks ]
        else:
            return retToks
class Group(TokenConverter):
    """Wraps the matched tokens in a nested sub-list.

    Useful for keeping the results of ZeroOrMore and OneOrMore expressions
    grouped together instead of flattened into the surrounding token list."""
    def __init__( self, expr ):
        super(Group,self).__init__( expr )
        self.saveAsList = True
    def postParse( self, instring, loc, tokenlist ):
        # nest the entire token list as a single sub-list element
        return [ tokenlist ]
class Dict(TokenConverter):
    """Converter to return a repetitive expression as a list, but also as a dictionary.
    Each element can also be referenced using the first token in the expression as its key.
    Useful for tabular report scraping when the first column can be used as a item key.
    """
    def __init__( self, exprs ):
        super(Dict,self).__init__( exprs )
        self.saveAsList = True
    def postParse( self, instring, loc, tokenlist ):
        # use each sub-token-list's first token as its dictionary key
        for i,tok in enumerate(tokenlist):
            if len(tok) == 0:
                continue
            ikey = tok[0]
            if isinstance(ikey,int):
                # integer keys are converted to their string form
                ikey = _ustr(tok[0]).strip()
            if len(tok)==1:
                # bare key with no value - store an empty-string value
                tokenlist[ikey] = _ParseResultsWithOffset("",i)
            elif len(tok)==2 and not isinstance(tok[1],ParseResults):
                # simple key/value pair
                tokenlist[ikey] = _ParseResultsWithOffset(tok[1],i)
            else:
                # multi-token value - store everything after the key
                dictvalue = tok.copy() #ParseResults(i)
                del dictvalue[0]
                if len(dictvalue)!= 1 or (isinstance(dictvalue,ParseResults) and dictvalue.keys()):
                    tokenlist[ikey] = _ParseResultsWithOffset(dictvalue,i)
                else:
                    tokenlist[ikey] = _ParseResultsWithOffset(dictvalue[0],i)
        if self.resultsName:
            return [ tokenlist ]
        else:
            return tokenlist
class Suppress(TokenConverter):
    """Matches the wrapped expression, but drops its tokens from the results."""
    def postParse( self, instring, loc, tokenlist ):
        # discard every matched token
        return []
    def suppress( self ):
        # already suppressing - nothing further to wrap
        return self
class OnlyOnce(object):
    """Wrapper for parse actions, to ensure they are only called once."""
    def __init__(self, methodCall):
        self.callable = ParserElement._normalizeParseActionArgs(methodCall)
        self.called = False
    def __call__(self,s,l,t):
        # guard clause: any invocation after the first raises
        if self.called:
            raise ParseException(s,l,"")
        # note: 'called' is only latched after a successful invocation, so a
        # parse action that raises may be retried
        results = self.callable(s,l,t)
        self.called = True
        return results
    def reset(self):
        # re-arm the wrapper so the action may fire once more
        self.called = False
def traceParseAction(f):
    """Decorator for debugging parse actions.

    Writes an entry line (with the source line, location, and tokens) to
    sys.stderr before the wrapped parse action runs, and an exit line with
    the return value (or the exception) afterwards.
    """
    f = ParserElement._normalizeParseActionArgs(f)
    def z(*paArgs):
        # Consistency/py3-compat fix: use __name__ (as the z.__name__
        # assignment below already does) instead of the Python-2-only
        # func_name attribute.
        thisFunc = f.__name__
        s,l,t = paArgs[-3:]
        if len(paArgs)>3:
            # bound-method form: prefix with the owning class name
            thisFunc = paArgs[0].__class__.__name__ + '.' + thisFunc
        sys.stderr.write( ">>entering %s(line: '%s', %d, %s)\n" % (thisFunc,line(l,s),l,t) )
        try:
            ret = f(*paArgs)
        except Exception:
            exc = sys.exc_info()[1]
            sys.stderr.write( "<<leaving %s (exception: %s)\n" % (thisFunc,exc) )
            raise
        sys.stderr.write( "<<leaving %s (ret: %s)\n" % (thisFunc,ret) )
        return ret
    try:
        z.__name__ = f.__name__
    except AttributeError:
        pass
    return z
#
# global helpers
#
def delimitedList( expr, delim=",", combine=False ):
    """Helper to define a delimited list of expressions - the delimiter defaults to ','.
    By default, the list elements and delimiters can have intervening whitespace, and
    comments, but this can be overridden by passing C{combine=True} in the constructor.
    If C{combine} is set to True, the matching tokens are returned as a single token
    string, with the delimiters included; otherwise, the matching tokens are returned
    as a list of tokens, with the delimiters suppressed.
    """
    # display name of the form "expr [delim expr]..."
    dlName = "%s [%s %s]..." % (_ustr(expr), _ustr(delim), _ustr(expr))
    if combine:
        # keep delimiters and merge everything into one token
        return Combine( expr + ZeroOrMore( delim + expr ) ).setName(dlName)
    # default: suppress the delimiters, return the elements as a token list
    return ( expr + ZeroOrMore( Suppress( delim ) + expr ) ).setName(dlName)
def countedArray( expr ):
    """Helper to define a counted list of expressions.
    This helper defines a pattern of the form::
        integer expr expr expr...
    where the leading integer tells how many expr expressions follow.
    The matched tokens returns the array of expr tokens as a list - the leading count token is suppressed.
    """
    arrayExpr = Forward()
    def countFieldParseAction(s,l,t):
        # rebind arrayExpr to expect exactly the number of items just parsed
        count = int(t[0])
        if count:
            arrayExpr << Group(And([expr]*count))
        else:
            arrayExpr << Group(empty)
        # suppress the count token itself
        return []
    return ( Word(nums).setName("arrayLen").setParseAction(countFieldParseAction, callDuringTry=True) + arrayExpr )
def _flatten(L):
if type(L) is not list: return [L]
if L == []: return L
return _flatten(L[0]) + _flatten(L[1:])
def matchPreviousLiteral(expr):
    """Helper to define an expression that is indirectly defined from
    the tokens matched in a previous expression, that is, it looks
    for a 'repeat' of a previous expression.  For example::
        first = Word(nums)
        second = matchPreviousLiteral(first)
        matchExpr = first + ":" + second
    will match C{"1:1"}, but not C{"1:2"}.  Because this matches a
    previous literal, will also match the leading C{"1:1"} in C{"1:10"}.
    If this is not desired, use C{matchPreviousExpr}.
    Do *not* use with packrat parsing enabled.
    """
    rep = Forward()
    def copyTokenToRepeater(s,l,t):
        # rebind rep to literally match whatever tokens were just parsed
        if t:
            if len(t) == 1:
                rep << t[0]
            else:
                # flatten t tokens
                tflat = _flatten(t.asList())
                rep << And( [ Literal(tt) for tt in tflat ] )
        else:
            # nothing was matched - the repeat expression matches empty
            rep << Empty()
    expr.addParseAction(copyTokenToRepeater, callDuringTry=True)
    return rep
def matchPreviousExpr(expr):
    """Helper to define an expression that is indirectly defined from
    the tokens matched in a previous expression, that is, it looks
    for a 'repeat' of a previous expression.  For example::
        first = Word(nums)
        second = matchPreviousExpr(first)
        matchExpr = first + ":" + second
    will match C{"1:1"}, but not C{"1:2"}.  Because this matches by
    expressions, will *not* match the leading C{"1:1"} in C{"1:10"};
    the expressions are evaluated first, and then compared, so
    C{"1"} is compared with C{"10"}.
    Do *not* use with packrat parsing enabled.
    """
    rep = Forward()
    e2 = expr.copy()
    rep << e2
    def copyTokenToRepeater(s,l,t):
        # capture the tokens just matched, then constrain rep's copy of the
        # expression to accept only an identical token sequence
        matchTokens = _flatten(t.asList())
        def mustMatchTheseTokens(s,l,t):
            theseTokens = _flatten(t.asList())
            if  theseTokens != matchTokens:
                raise ParseException("",0,"")
        rep.setParseAction( mustMatchTheseTokens, callDuringTry=True )
    expr.addParseAction(copyTokenToRepeater, callDuringTry=True)
    return rep
def _escapeRegexRangeChars(s):
    """Backslash-escape the characters that are special inside a regex
    character class (backslash, '^', '-', ']'), and spell newline and tab
    as the literal escapes \\n and \\t."""
    #~ escape these chars: ^-]
    for special in r"\^-]":
        s = s.replace(special, _bslash + special)
    s = s.replace("\n", r"\n").replace("\t", r"\t")
    return _ustr(s)
def oneOf( strs, caseless=False, useRegex=True ):
    """Helper to quickly define a set of alternative Literals, and makes sure to do
       longest-first testing when there is a conflict, regardless of the input order,
       but returns a C{MatchFirst} for best performance.

       Parameters:
        - strs - a string of space-delimited literals, or a list of string literals
        - caseless - (default=False) - treat all literals as caseless
        - useRegex - (default=True) - as an optimization, will generate a Regex
          object; otherwise, will generate a C{MatchFirst} object (if C{caseless=True}, or
          if creating a C{Regex} raises an exception)
    """
    if caseless:
        isequal = ( lambda a,b: a.upper() == b.upper() )
        masks = ( lambda a,b: b.upper().startswith(a.upper()) )
        parseElementClass = CaselessLiteral
    else:
        isequal = ( lambda a,b: a == b )
        masks = ( lambda a,b: b.startswith(a) )
        parseElementClass = Literal

    if isinstance(strs,(list,tuple)):
        symbols = list(strs[:])
    elif isinstance(strs,basestring):
        symbols = strs.split()
    else:
        warnings.warn("Invalid argument to oneOf, expected string or list",
                SyntaxWarning, stacklevel=2)
        # Bug fix: fall back to an empty symbol set; previously the code
        # continued with 'symbols' unbound and crashed with a NameError.
        symbols = []

    # remove duplicates, and re-order so that no symbol is "masked" by an
    # earlier symbol that is a prefix of it (longest-first within conflicts)
    i = 0
    while i < len(symbols)-1:
        cur = symbols[i]
        for j,other in enumerate(symbols[i+1:]):
            if ( isequal(other, cur) ):
                del symbols[i+j+1]
                break
            elif ( masks(cur, other) ):
                del symbols[i+j+1]
                symbols.insert(i,other)
                cur = other
                break
        else:
            i += 1

    if not caseless and useRegex:
        #~ print (strs,"->", "|".join( [ _escapeRegexChars(sym) for sym in symbols] ))
        try:
            if len(symbols)==len("".join(symbols)):
                # all symbols are single characters - a character class is fastest
                return Regex( "[%s]" % "".join( [ _escapeRegexRangeChars(sym) for sym in symbols] ) )
            else:
                return Regex( "|".join( [ re.escape(sym) for sym in symbols] ) )
        except:
            warnings.warn("Exception creating Regex for oneOf, building MatchFirst",
                    SyntaxWarning, stacklevel=2)

    # last resort, just use MatchFirst
    return MatchFirst( [ parseElementClass(sym) for sym in symbols ] )
def dictOf( key, value ):
    """Helper to easily and clearly define a dictionary by specifying the respective patterns
    for the key and value.  Takes care of defining the C{Dict}, C{ZeroOrMore}, and C{Group}
    tokens in the proper order.  The key pattern can include delimiting markers or
    punctuation, as long as they are suppressed, thereby leaving the significant key text.
    The value pattern can include named results, so that the C{Dict} results can include
    named token fields.
    """
    # each key+value pair becomes one grouped entry; Dict indexes them by key
    entry = Group( key + value )
    return Dict( ZeroOrMore( entry ) )
def originalTextFor(expr, asString=True):
    """Helper to return the original, untokenized text for a given expression.  Useful to
    restore the parsed fields of an HTML start tag into the raw tag text itself, or to
    revert separate tokens with intervening whitespace back to the original matching
    input text.  Simpler to use than the parse action C{keepOriginalText}, and does not
    require the inspect module to chase up the call stack.  By default, returns a
    string containing the original parsed text.

    If the optional C{asString} argument is passed as False, then the return value is a
    C{ParseResults} containing any results names that were originally matched, and a
    single token containing the original matched text from the input string.  So if
    the expression passed to C{originalTextFor} contains expressions with defined
    results names, you must set C{asString} to False if you want to preserve those
    results name values."""
    # zero-width markers that record the parse location before and after expr
    startMarker = Empty().setParseAction(lambda s,loc,t: loc)
    endMarker = startMarker.copy()
    endMarker.callPreparse = False
    matchExpr = startMarker("_original_start") + expr + endMarker("_original_end")
    if asString:
        def extractText(s,l,t):
            # return just the raw slice between the two recorded locations
            return s[t._original_start:t._original_end]
    else:
        def extractText(s,l,t):
            # replace the token list with the raw slice, keeping results names
            del t[:]
            t.insert(0, s[t._original_start:t._original_end])
            del t["_original_start"]
            del t["_original_end"]
    matchExpr.setParseAction(extractText)
    return matchExpr
# convenience constants for positional expressions
empty = Empty().setName("empty")
lineStart = LineStart().setName("lineStart")
lineEnd = LineEnd().setName("lineEnd")
stringStart = StringStart().setName("stringStart")
stringEnd = StringEnd().setName("stringEnd")

# internal grammar used by srange() to parse regex-style [...] bodies:
# an escaped punctuation character (e.g. \]) - yields the bare character
_escapedPunc = Word( _bslash, r"\[]-*.$+^?()~ ", exact=2 ).setParseAction(lambda s,l,t:t[0][1])
# every printable character except backslash and ]
_printables_less_backslash = "".join([ c for c in printables if c not in  r"\]" ])
# \0x41-style hex escapes and \041-style octal escapes
_escapedHexChar = Combine( Suppress(_bslash + "0x") + Word(hexnums) ).setParseAction(lambda s,l,t:unichr(int(t[0],16)))
_escapedOctChar = Combine( Suppress(_bslash) + Word("0","01234567") ).setParseAction(lambda s,l,t:unichr(int(t[0],8)))
_singleChar = _escapedPunc | _escapedHexChar | _escapedOctChar | Word(_printables_less_backslash,exact=1)
# a-z style two-endpoint character range
_charRange = Group(_singleChar + Suppress("-") + _singleChar)
_reBracketExpr = Literal("[") + Optional("^").setResultsName("negate") + Group( OneOrMore( _charRange | _singleChar ) ).setResultsName("body") + "]"
# expand a (start,end) range pair into the full run of characters; single characters pass through
_expanded = lambda p: (isinstance(p,ParseResults) and ''.join([ unichr(c) for c in range(ord(p[0]),ord(p[1])+1) ]) or p)
def srange(s):
    r"""Helper to easily define string ranges for use in Word construction.  Borrows
    syntax from regexp '[]' string range definitions::
        srange("[0-9]")   -> "0123456789"
        srange("[a-z]")   -> "abcdefghijklmnopqrstuvwxyz"
        srange("[a-z$_]") -> "abcdefghijklmnopqrstuvwxyz$_"
    The input string must be enclosed in []'s, and the returned string is the expanded
    character set joined into a single string.
    The values enclosed in the []'s may be::
        a single character
        an escaped character with a leading backslash (such as \- or \])
        an escaped hex character with a leading '\0x' (\0x21, which is a '!' character)
        an escaped octal character with a leading '\0' (\041, which is a '!' character)
        a range of any of the above, separated by a dash ('a-z', etc.)
        any combination of the above ('aeiouy', 'a-zA-Z0-9_$', etc.)
    """
    try:
        parts = _reBracketExpr.parseString(s).body
        expandedPieces = [_expanded(part) for part in parts]
        return "".join(expandedPieces)
    except:
        # any parse failure yields an empty result rather than an exception
        return ""
def matchOnlyAtCol(n):
    """Helper method for defining parse actions that require matching at a specific
    column in the input text.
    """
    def verifyCol(strg,locn,toks):
        # col() computes the column of locn within strg
        actualCol = col(locn,strg)
        if actualCol != n:
            raise ParseException(strg,locn,"matched token not at column %d" % n)
    return verifyCol
def replaceWith(replStr):
    """Helper method for common parse actions that simply return a literal value.
    Especially useful when used with C{transformString()}.
    """
    # the returned action ignores whatever was matched and substitutes replStr
    def _replFunc(*args):
        return [replStr]
    return _replFunc
def removeQuotes(s,l,t):
    """Helper parse action for removing quotation marks from parsed quoted strings.
    To use, add this parse action to quoted string using::
        quotedString.setParseAction( removeQuotes )
    """
    # strip the first and last characters (the quote marks) of the first token
    quoted = t[0]
    return quoted[1:-1]
def upcaseTokens(s,l,t):
    """Helper parse action to convert tokens to upper case."""
    # normalize each token to a string, then upper-case it
    return [ _ustr(tt).upper() for tt in t ]
def downcaseTokens(s,l,t):
    """Helper parse action to convert tokens to lower case."""
    # normalize each token to a string, then lower-case it
    return [ _ustr(tt).lower() for tt in t ]
def keepOriginalText(s,startLoc,t):
    """DEPRECATED - use new helper method C{originalTextFor}.
    Helper parse action to preserve original parsed text,
    overriding any nested parse actions."""
    try:
        # walk the interpreter stack to find where the match ended
        endloc = getTokensEndLoc()
    except ParseException:
        raise ParseFatalException("incorrect usage of keepOriginalText - may only be called as a parse action")
    # replace whatever tokens were produced with the raw matched substring
    del t[:]
    t += ParseResults(s[startLoc:endloc])
    return t
def getTokensEndLoc():
    """Method to be called from within a parse action to determine the end
       location of the parsed tokens."""
    import inspect
    fstack = inspect.stack()
    try:
        # search up the stack (through intervening argument normalizers) for correct calling routine
        for f in fstack[2:]:
            if f[3] == "_parseNoCache":
                # the 'loc' local of _parseNoCache is the end location
                endloc = f[0].f_locals["loc"]
                return endloc
        else:
            # for-else: the loop found no _parseNoCache frame, so this was
            # not called from within a parse action
            raise ParseFatalException("incorrect usage of getTokensEndLoc - may only be called from within a parse action")
    finally:
        # drop the stack reference promptly to avoid keeping frames alive
        del fstack
def _makeTags(tagStr, xml):
    """Internal helper to construct opening and closing tag expressions, given a tag name"""
    # Accept either a plain tag name or a ready-made expression; HTML tag
    # names match case-insensitively, XML ones exactly.
    if isinstance(tagStr,basestring):
        resname = tagStr
        tagStr = Keyword(tagStr, caseless=not xml)
    else:
        resname = tagStr.name
    tagAttrName = Word(alphas,alphanums+"_-:")
    if (xml):
        # XML: attribute values must be double-quoted; names keep their case
        tagAttrValue = dblQuotedString.copy().setParseAction( removeQuotes )
        openTag = Suppress("<") + tagStr("tag") + \
                Dict(ZeroOrMore(Group( tagAttrName + Suppress("=") + tagAttrValue ))) + \
                Optional("/",default=[False]).setResultsName("empty").setParseAction(lambda s,l,t:t[0]=='/') + Suppress(">")
    else:
        # HTML: values may be quoted or bare; attribute names lower-cased,
        # and a valueless attribute is allowed
        printablesLessRAbrack = "".join( [ c for c in printables if c not in ">" ] )
        tagAttrValue = quotedString.copy().setParseAction( removeQuotes ) | Word(printablesLessRAbrack)
        openTag = Suppress("<") + tagStr + \
                Dict(ZeroOrMore(Group( tagAttrName.setParseAction(downcaseTokens) + \
                Optional( Suppress("=") + tagAttrValue ) ))) + \
                Optional("/",default=[False]).setResultsName("empty").setParseAction(lambda s,l,t:t[0]=='/') + Suppress(">")
    closeTag = Combine(_L("</") + tagStr + ">")
    # results names like "startDiv"/"endDiv"; ":" in the tag becomes CamelCase
    openTag = openTag.setResultsName("start"+"".join(resname.replace(":"," ").title().split())).setName("<%s>" % tagStr)
    closeTag = closeTag.setResultsName("end"+"".join(resname.replace(":"," ").title().split())).setName("</%s>" % tagStr)
    openTag.tag = resname
    closeTag.tag = resname
    return openTag, closeTag
def makeHTMLTags(tagStr):
    """Return (openTag, closeTag) expressions for an HTML tag named *tagStr*
    (case-insensitive, attribute values optionally quoted)."""
    return _makeTags( tagStr, xml=False )
def makeXMLTags(tagStr):
    """Return (openTag, closeTag) expressions for an XML tag named *tagStr*
    (case-sensitive, attribute values double-quoted)."""
    return _makeTags( tagStr, xml=True )
def withAttribute(*args,**attrDict):
    """Helper to create a validating parse action to be used with start tags created
    with makeXMLTags or makeHTMLTags. Use withAttribute to qualify a starting tag
    with a required attribute value, to avoid false matches on common tags such as
    <TD> or <DIV>.
    Call withAttribute with a series of attribute names and values. Specify the list
    of filter attributes names and values as:
    - keyword arguments, as in (class="Customer",align="right"), or
    - a list of name-value tuples, as in ( ("ns1:class", "Customer"), ("ns2:align","right") )
    For attribute names with a namespace prefix, you must use the second form. Attribute
    names are matched insensitive to upper/lower case.
    To verify that the attribute exists, but without specifying a value, pass
    withAttribute.ANY_VALUE as the value.
    """
    # positional tuples win over keyword arguments when both are given
    if args:
        attrs = args[:]
    else:
        attrs = attrDict.items()
    # normalize to a concrete list of (name, value) pairs for the closure
    attrs = [(k,v) for k,v in attrs]
    def pa(s,l,tokens):
        for attrName,attrValue in attrs:
            if attrName not in tokens:
                raise ParseException(s,l,"no matching attribute " + attrName)
            # ANY_VALUE is a unique sentinel: presence check only
            if attrValue != withAttribute.ANY_VALUE and tokens[attrName] != attrValue:
                raise ParseException(s,l,"attribute '%s' has value '%s', must be '%s'" %
                                    (attrName, tokens[attrName], attrValue))
    return pa
withAttribute.ANY_VALUE = object()
# Associativity constants for operatorPrecedence(); unique sentinel objects
# compared by identity.
opAssoc = _Constants()
opAssoc.LEFT = object()
opAssoc.RIGHT = object()
def operatorPrecedence( baseExpr, opList ):
    """Helper method for constructing grammars of expressions made up of
    operators working in a precedence hierarchy. Operators may be unary or
    binary, left- or right-associative. Parse actions can also be attached
    to operator expressions.
    Parameters:
    - baseExpr - expression representing the most basic element for the nested
    - opList - list of tuples, one for each operator precedence level in the
      expression grammar; each tuple is of the form
      (opExpr, numTerms, rightLeftAssoc, parseAction), where:
      - opExpr is the pyparsing expression for the operator;
        may also be a string, which will be converted to a Literal;
        if numTerms is 3, opExpr is a tuple of two expressions, for the
        two operators separating the 3 terms
      - numTerms is the number of terms for this operator (must
        be 1, 2, or 3)
      - rightLeftAssoc is the indicator whether the operator is
        right or left associative, using the pyparsing-defined
        constants opAssoc.RIGHT and opAssoc.LEFT.
      - parseAction is the parse action to be associated with
        expressions matching this operator expression (the
        parse action tuple member may be omitted)
    """
    ret = Forward()
    # the innermost level: the base element or a parenthesized full expression
    lastExpr = baseExpr | ( Suppress('(') + ret + Suppress(')') )
    # each precedence level wraps the previous one (lastExpr)
    for i,operDef in enumerate(opList):
        # pad with None so a 3-tuple (no parse action) unpacks cleanly
        opExpr,arity,rightLeftAssoc,pa = (operDef + (None,))[:4]
        if arity == 3:
            if opExpr is None or len(opExpr) != 2:
                raise ValueError("if numterms=3, opExpr must be a tuple or list of two expressions")
            opExpr1, opExpr2 = opExpr
        thisExpr = Forward()#.setName("expr%d" % i)
        if rightLeftAssoc == opAssoc.LEFT:
            # left-associative: flat repetition at this level
            if arity == 1:
                matchExpr = FollowedBy(lastExpr + opExpr) + Group( lastExpr + OneOrMore( opExpr ) )
            elif arity == 2:
                if opExpr is not None:
                    matchExpr = FollowedBy(lastExpr + opExpr + lastExpr) + Group( lastExpr + OneOrMore( opExpr + lastExpr ) )
                else:
                    # None operator: juxtaposition (e.g. implicit multiplication)
                    matchExpr = FollowedBy(lastExpr+lastExpr) + Group( lastExpr + OneOrMore(lastExpr) )
            elif arity == 3:
                matchExpr = FollowedBy(lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr) + \
                            Group( lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr )
            else:
                raise ValueError("operator must be unary (1), binary (2), or ternary (3)")
        elif rightLeftAssoc == opAssoc.RIGHT:
            # right-associative: recurse into thisExpr on the right-hand side
            if arity == 1:
                # try to avoid LR with this extra test
                if not isinstance(opExpr, Optional):
                    opExpr = Optional(opExpr)
                matchExpr = FollowedBy(opExpr.expr + thisExpr) + Group( opExpr + thisExpr )
            elif arity == 2:
                if opExpr is not None:
                    matchExpr = FollowedBy(lastExpr + opExpr + thisExpr) + Group( lastExpr + OneOrMore( opExpr + thisExpr ) )
                else:
                    matchExpr = FollowedBy(lastExpr + thisExpr) + Group( lastExpr + OneOrMore( thisExpr ) )
            elif arity == 3:
                matchExpr = FollowedBy(lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr) + \
                            Group( lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr )
            else:
                raise ValueError("operator must be unary (1), binary (2), or ternary (3)")
        else:
            raise ValueError("operator must indicate right or left associativity")
        if pa:
            matchExpr.setParseAction( pa )
        # fall through to the next-lower level when this operator is absent
        thisExpr << ( matchExpr | lastExpr )
        lastExpr = thisExpr
    ret << lastExpr
    return ret
# Pre-built quoted-string expressions.  Each allows doubled quotes, \xNN
# escapes and backslash escapes, but no bare newlines inside the quotes.
dblQuotedString = Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\x[0-9a-fA-F]+)|(?:\\.))*"').setName("string enclosed in double quotes")
sglQuotedString = Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\x[0-9a-fA-F]+)|(?:\\.))*'").setName("string enclosed in single quotes")
quotedString = Regex(r'''(?:"(?:[^"\n\r\\]|(?:"")|(?:\\x[0-9a-fA-F]+)|(?:\\.))*")|(?:'(?:[^'\n\r\\]|(?:'')|(?:\\x[0-9a-fA-F]+)|(?:\\.))*')''').setName("quotedString using single or double quotes")
# a quoted string with a leading 'u' prefix, as in u"..."
unicodeString = Combine(_L('u') + quotedString.copy())
def nestedExpr(opener="(", closer=")", content=None, ignoreExpr=quotedString.copy()):
    """Helper method for defining nested lists enclosed in opening and closing
    delimiters ("(" and ")" are the default).
    Parameters:
    - opener - opening character for a nested list (default="("); can also be a pyparsing expression
    - closer - closing character for a nested list (default=")"); can also be a pyparsing expression
    - content - expression for items within the nested lists (default=None)
    - ignoreExpr - expression for ignoring opening and closing delimiters (default=quotedString)
    If an expression is not provided for the content argument, the nested
    expression will capture all whitespace-delimited content between delimiters
    as a list of separate values.
    Use the ignoreExpr argument to define expressions that may contain
    opening or closing characters that should not be treated as opening
    or closing characters for nesting, such as quotedString or a comment
    expression. Specify multiple expressions using an Or or MatchFirst.
    The default is quotedString, but if no expressions are to be ignored,
    then pass None for this argument.
    """
    if opener == closer:
        raise ValueError("opening and closing strings cannot be the same")
    if content is None:
        # build a default content expression from the delimiter characters
        if isinstance(opener,basestring) and isinstance(closer,basestring):
            if len(opener) == 1 and len(closer)==1:
                # single-character delimiters can simply be excluded per-char
                if ignoreExpr is not None:
                    content = (Combine(OneOrMore(~ignoreExpr +
                                    CharsNotIn(opener+closer+ParserElement.DEFAULT_WHITE_CHARS,exact=1))
                                ).setParseAction(lambda t:t[0].strip()))
                else:
                    content = (empty.copy()+CharsNotIn(opener+closer+ParserElement.DEFAULT_WHITE_CHARS
                                ).setParseAction(lambda t:t[0].strip()))
            else:
                # multi-character delimiters need explicit negative lookahead
                if ignoreExpr is not None:
                    content = (Combine(OneOrMore(~ignoreExpr +
                                    ~Literal(opener) + ~Literal(closer) +
                                    CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS,exact=1))
                                ).setParseAction(lambda t:t[0].strip()))
                else:
                    content = (Combine(OneOrMore(~Literal(opener) + ~Literal(closer) +
                                    CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS,exact=1))
                                ).setParseAction(lambda t:t[0].strip()))
        else:
            raise ValueError("opening and closing arguments must be strings if no content expression is given")
    # recursive grammar: a group may contain further groups
    ret = Forward()
    if ignoreExpr is not None:
        ret << Group( Suppress(opener) + ZeroOrMore( ignoreExpr | ret | content ) + Suppress(closer) )
    else:
        ret << Group( Suppress(opener) + ZeroOrMore( ret | content ) + Suppress(closer) )
    return ret
def indentedBlock(blockStatementExpr, indentStack, indent=True):
    """Helper method for defining space-delimited indentation blocks, such as
    those used to define block statements in Python source code.
    Parameters:
    - blockStatementExpr - expression defining syntax of statement that
      is repeated within the indented block
    - indentStack - list created by caller to manage indentation stack
      (multiple statementWithIndentedBlock expressions within a single grammar
      should share a common indentStack)
    - indent - boolean indicating whether block must be indented beyond the
      the current level; set to False for block of left-most statements
      (default=True)
    A valid block must contain at least one blockStatement.
    """
    def checkPeerIndent(s,l,t):
        # a peer statement starts at exactly the current indent level
        if l >= len(s): return
        curCol = col(l,s)
        if curCol != indentStack[-1]:
            if curCol > indentStack[-1]:
                raise ParseFatalException(s,l,"illegal nesting")
            raise ParseException(s,l,"not a peer entry")
    def checkSubIndent(s,l,t):
        # entering a block: push the deeper indent level
        curCol = col(l,s)
        if curCol > indentStack[-1]:
            indentStack.append( curCol )
        else:
            raise ParseException(s,l,"not a subentry")
    def checkUnindent(s,l,t):
        # leaving a block: the new column must match an outer level
        if l >= len(s): return
        curCol = col(l,s)
        if not(indentStack and curCol < indentStack[-1] and curCol <= indentStack[-2]):
            raise ParseException(s,l,"not an unindent")
        indentStack.pop()
    NL = OneOrMore(LineEnd().setWhitespaceChars("\t ").suppress())
    INDENT = Empty() + Empty().setParseAction(checkSubIndent)
    PEER  = Empty().setParseAction(checkPeerIndent)
    UNDENT = Empty().setParseAction(checkUnindent)
    if indent:
        smExpr = Group( Optional(NL) +
            #~ FollowedBy(blockStatementExpr) +
            INDENT + (OneOrMore( PEER + Group(blockStatementExpr) + Optional(NL) )) + UNDENT)
    else:
        smExpr = Group( Optional(NL) +
            (OneOrMore( PEER + Group(blockStatementExpr) + Optional(NL) )) )
    # allow backslash line-continuations inside the block statements
    blockStatementExpr.ignore(_bslash + LineEnd())
    return smExpr
# Latin-1 letter and punctuation ranges beyond 7-bit ASCII.
alphas8bit = srange(r"[\0xc0-\0xd6\0xd8-\0xf6\0xf8-\0xff]")
punc8bit = srange(r"[\0xa1-\0xbf\0xd7\0xf7]")
# generic open/close tag matchers for any HTML tag name
anyOpenTag,anyCloseTag = makeHTMLTags(Word(alphas,alphanums+"_:"))
# the common named HTML entities and their replacement characters
commonHTMLEntity = Combine(_L("&") + oneOf("gt lt amp nbsp quot").setResultsName("entity") +";").streamline()
_htmlEntityMap = dict(zip("gt lt amp nbsp quot".split(),'><& "'))
replaceHTMLEntity = lambda t : t.entity in _htmlEntityMap and _htmlEntityMap[t.entity] or None
# it's easy to get these comment structures wrong - they're very common, so may as well make them available
cStyleComment = Regex(r"/\*(?:[^*]*\*+)+?/").setName("C style comment")
htmlComment = Regex(r"<!--[\s\S]*?-->")
restOfLine = Regex(r".*").leaveWhitespace()
dblSlashComment = Regex(r"\/\/(\\\n|.)*").setName("// comment")
cppStyleComment = Regex(r"/(?:\*(?:[^*]*\*+)+?/|/[^\n]*(?:\n[^\n]*)*?(?:(?<!\\)|\Z))").setName("C++ style comment")
javaStyleComment = cppStyleComment
pythonStyleComment = Regex(r"#.*").setName("Python style comment")
# a comma-separated list item: any non-comma runs, possibly blank-joined
_noncomma = "".join( [ c for c in printables if c != "," ] )
_commasepitem = Combine(OneOrMore(Word(_noncomma) +
                        Optional( Word(" \t") +
                        ~Literal(",") + ~LineEnd() ) ) ).streamline().setName("commaItem")
commaSeparatedList = delimitedList( Optional( quotedString.copy() | _commasepitem, default="") ).setName("commaSeparatedList")
if __name__ == "__main__":
    # Self-test: build a miniature SQL SELECT grammar and run it over a
    # handful of valid and deliberately invalid statements.
    def test( teststring ):
        # Parse one statement and dump the token structure, or show the
        # failure position with a caret under the offending column.
        try:
            tokens = simpleSQL.parseString( teststring )
            tokenlist = tokens.asList()
            print (teststring + "->" + str(tokenlist))
            print ("tokens = " + str(tokens))
            print ("tokens.columns = " + str(tokens.columns))
            print ("tokens.tables = " + str(tokens.tables))
            print (tokens.asXML("SQL",True))
        except ParseBaseException:
            err = sys.exc_info()[1]
            print (teststring + "->")
            print (err.line)
            print (" "*(err.column-1) + "^")
            print (err)
        print()
    selectToken    = CaselessLiteral( "select" )
    fromToken      = CaselessLiteral( "from" )
    ident          = Word( alphas, alphanums + "_$" )
    # column/table names: dotted identifiers, normalized to upper case
    columnName     = delimitedList( ident, ".", combine=True ).setParseAction( upcaseTokens )
    columnNameList = Group( delimitedList( columnName ) )#.setName("columns")
    tableName      = delimitedList( ident, ".", combine=True ).setParseAction( upcaseTokens )
    tableNameList  = Group( delimitedList( tableName ) )#.setName("tables")
    simpleSQL      = ( selectToken + \
                     ( '*' | columnNameList ).setResultsName( "columns" ) + \
                     fromToken + \
                     tableNameList.setResultsName( "tables" ) )
    test( "SELECT * from XYZZY, ABC" )
    test( "select * from SYS.XYZZY" )
    test( "Select A from Sys.dual" )
    test( "Select AA,BB,CC from Sys.dual" )
    test( "Select A, B, C from Sys.dual" )
    test( "Select A, B, C from Sys.dual" )
    test( "Xelect A, B, C from Sys.dual" )
    test( "Select A, B, C frox Sys.dual" )
    test( "Select" )
    test( "Select ^^^ frox Sys.dual" )
    test( "Select A, B, C from Sys.dual, Table2 " )
| Python |
#!/usr/bin/python
'''Usage: %s [OPTIONS] <input file(s)>
Generate test source file for CxxTest.
-v, --version Write CxxTest version
-o, --output=NAME Write output to file NAME
--runner=CLASS Create a main() function that runs CxxTest::CLASS
--gui=CLASS Like --runner, with GUI component
--error-printer Same as --runner=ErrorPrinter
--abort-on-fail Abort tests on failed asserts (like xUnit)
--have-std Use standard library (even if not found in tests)
--no-std Don\'t use standard library (even if found in tests)
--have-eh Use exception handling (even if not found in tests)
--no-eh Don\'t use exception handling (even if found in tests)
--longlong=[TYPE] Use TYPE (default: long long) as long long
--template=TEMPLATE Use TEMPLATE file to generate the test runner
--include=HEADER Include HEADER in test runner before other headers
--root Write CxxTest globals
--part Don\'t write CxxTest globals
--no-static-init Don\'t rely on static initialization
'''
import re
import sys
import getopt
import glob
import string
# Global variables
suites = []                 # completed suite descriptors, in scan order
suite = None                # suite currently being scanned (None between suites)
inBlock = 0                 # inside a CXXTEST_CODE() block of a generated suite?
outputFileName = None       # --output destination; None means stdout
runner = None               # --runner / --error-printer class name
gui = None                  # --gui class name
root = None                 # --root: write the CxxTest globals
part = None                 # --part: omit the CxxTest globals
noStaticInit = None         # --no-static-init: emit CxxTest::initialize()
templateFileName = None     # --template file, if any
headers = []                # extra --include headers for the preamble
haveExceptionHandling = 0   # exception handling detected in tests or forced
noExceptionHandling = 0     # --no-eh overrides detection
haveStandardLibrary = 0     # std library usage detected in tests or forced
noStandardLibrary = 0       # --no-std overrides detection
abortOnFail = 0             # --abort-on-fail
factor = 0                  # --factor
longlong = 0                # --longlong type name, or 0 when unused
def main():
    '''Drive the generator: parse options, scan the sources, write the runner.'''
    inputFiles = parseCommandline()
    scanInputFiles( inputFiles )
    writeOutput()
def usage( problem = None ):
    '''Print usage info and exit.

    With no *problem* the usage text goes to stdout and the exit status is 0;
    otherwise it goes to stderr and abort() reports the problem (status 2).
    Fixed: the Python 2-only "print" statement is replaced with explicit
    stream writes, consistent with the sys.stderr.write() calls elsewhere.
    '''
    if problem is None:
        sys.stdout.write( usageString() )
        sys.stdout.write( '\n' )
        sys.exit(0)
    else:
        sys.stderr.write( usageString() )
        abort( problem )
def usageString():
    '''Fill the module usage text in with the program name.'''
    programName = sys.argv[0]
    return __doc__ % programName
def abort( problem ):
    '''Report *problem* on stderr, blank-line padded, and exit with status 2.'''
    sys.stderr.write( '\n' )
    sys.stderr.write( '%s\n\n' % problem )
    sys.exit(2)
def parseCommandline():
    '''Analyze command line arguments.

    Returns the list of input files.  Fixed: "except E, v" is Python 2-only
    syntax; "except E as v" works on Python 2.6+ and Python 3.
    '''
    try:
        options, patterns = getopt.getopt( sys.argv[1:], 'o:r:',
                                           ['version', 'output=', 'runner=', 'gui=',
                                            'error-printer', 'abort-on-fail', 'have-std', 'no-std',
                                            'have-eh', 'no-eh', 'template=', 'include=',
                                            'root', 'part', 'no-static-init', 'factor', 'longlong='] )
    except getopt.error as problem:
        # usage() exits the process, so the names below are always bound
        usage( problem )
    setOptions( options )
    return setFiles( patterns )
def setOptions( options ):
'''Set options specified on command line'''
global outputFileName, templateFileName, runner, gui, haveStandardLibrary, factor, longlong
global haveExceptionHandling, noExceptionHandling, abortOnFail, headers, root, part, noStaticInit
for o, a in options:
if o in ('-v', '--version'):
printVersion()
elif o in ('-o', '--output'):
outputFileName = a
elif o == '--template':
templateFileName = a
elif o == '--runner':
runner = a
elif o == '--gui':
gui = a
elif o == '--include':
if not re.match( r'^["<].*[>"]$', a ):
a = ('"%s"' % a)
headers.append( a )
elif o == '--error-printer':
runner = 'ErrorPrinter'
haveStandardLibrary = 1
elif o == '--abort-on-fail':
abortOnFail = 1
elif o == '--have-std':
haveStandardLibrary = 1
elif o == '--no-std':
noStandardLibrary = 1
elif o == '--have-eh':
haveExceptionHandling = 1
elif o == '--no-eh':
noExceptionHandling = 1
elif o == '--root':
root = 1
elif o == '--part':
part = 1
elif o == '--no-static-init':
noStaticInit = 1
elif o == '--factor':
factor = 1
elif o == '--longlong':
if a:
longlong = a
else:
longlong = 'long long'
if noStaticInit and (root or part):
abort( '--no-static-init cannot be used with --root/--part' )
if gui and not runner:
runner = 'StdioPrinter'
def printVersion():
    '''Write the CxxTest version banner and exit successfully.'''
    banner = "This is CxxTest version 3.10.1.\n"
    sys.stdout.write( banner )
    sys.exit(0)
def setFiles( patterns ):
    '''Expand the command-line patterns and return the input file list.

    Exits via usage() when nothing matched, unless --root was given (a
    root-only run legitimately has no inputs).  Fixed: "len(files) is 0"
    compared by object identity, which only worked because of CPython's
    small-integer caching; use truthiness instead.
    '''
    files = expandWildcards( patterns )
    if not files and not root:
        usage( "No input files found" )
    return files
def expandWildcards( patterns ):
    '''Glob every pattern and return the matching names, slashes normalized.'''
    fileNames = []
    for pattern in patterns:
        for match in glob.glob( pattern ):
            fileNames.append( fixBackslashes( match ) )
    return fileNames
def fixBackslashes( fileName ):
    '''Normalize Windows path separators: every backslash becomes a slash.'''
    return re.sub( r'\\', '/', fileName )
def scanInputFiles(files):
    '''Scan every input file for test suites; abort when none were found.

    A run with --root may legitimately define no tests.  Fixed:
    "len(suites) is 0" compared by identity (CPython small-int caching);
    truthiness is the correct test.  The "global suites" declaration was
    dropped - it is only read here, never rebound.
    '''
    for fileName in files:
        scanInputFile(fileName)
    if not suites and not root:
        abort( 'No tests defined' )
def scanInputFile(fileName):
    '''Scan a single input file, line by line, for test suites.

    Fixed: the handle was opened without a "with" block, so an exception
    raised while scanning leaked the open file; enumerate() replaces the
    manual readline/counter loop.
    '''
    with open(fileName) as source:
        for lineNo, line in enumerate(source, 1):
            scanInputLine( fileName, lineNo, line )
    # flush any suite still open at end-of-file
    closeSuite()
def scanInputLine( fileName, lineNo, line ):
    '''Run every per-line scanner over one input line.'''
    scanLineForExceptionHandling( line )
    scanLineForStandardLibrary( line )
    scanLineForSuiteStart( fileName, lineNo, line )
    # "suite" is module state: set once a suite declaration has been seen
    if suite:
        scanLineInsideSuite( suite, lineNo, line )
def scanLineInsideSuite( suite, lineNo, line ):
    '''Analyze a line that is part of the current suite: look for test
    methods and createSuite()/destroySuite() declarations.

    Fixed: removed a dead "global inBlock" declaration - inBlock is never
    read or assigned in this function (lineBelongsToSuite manages it).
    '''
    if lineBelongsToSuite( suite, lineNo, line ):
        scanLineForTest( suite, lineNo, line )
        scanLineForCreate( suite, lineNo, line )
        scanLineForDestroy( suite, lineNo, line )
def lineBelongsToSuite( suite, lineNo, line ):
    '''Returns whether current line is part of the current suite.
    This can be false when we are in a generated suite outside of CXXTEST_CODE() blocks
    If the suite is generated, adds the line to the list of lines'''
    # non-generated suites own every line until the suite closes
    if not suite['generated']:
        return 1
    # generated suites only own lines inside CXXTEST_CODE() blocks;
    # inBlock is module state tracking whether we are inside one
    global inBlock
    if not inBlock:
        inBlock = lineStartsBlock( line )
    if inBlock:
        # addLineToBlock returns false once the closing }} is consumed
        inBlock = addLineToBlock( suite, lineNo, line )
    return inBlock
# detects C++ standard-library usage: std::, CXXTEST_STD, "using namespace
# std", or an #include of a <lowercase> system header
std_re = re.compile( r"\b(std\s*::|CXXTEST_STD|using\s+namespace\s+std\b|^\s*\#\s*include\s+<[a-z0-9]+>)" )
def scanLineForStandardLibrary( line ):
    '''Check if current line uses standard library'''
    global haveStandardLibrary, noStandardLibrary
    # latch haveStandardLibrary once, unless --no-std overrides detection
    if not haveStandardLibrary and std_re.search(line):
        if not noStandardLibrary:
            haveStandardLibrary = 1
# detects exception usage: throw/try/catch keywords or the *_ASSERT_THROWS*
# family of CxxTest macros
exception_re = re.compile( r"\b(throw|try|catch|TSM?_ASSERT_THROWS[A-Z_]*)\b" )
def scanLineForExceptionHandling( line ):
    '''Check if current line uses exception handling'''
    global haveExceptionHandling, noExceptionHandling
    # latch haveExceptionHandling once, unless --no-eh overrides detection
    if not haveExceptionHandling and exception_re.search(line):
        if not noExceptionHandling:
            haveExceptionHandling = 1
# a conventional suite: "class X : public CxxTest::TestSuite"
suite_re = re.compile( r'\bclass\s+(\w+)\s*:\s*public\s+((::)?\s*CxxTest\s*::\s*)?TestSuite\b' )
# a deprecated inline suite: CXXTEST_SUITE(X)
generatedSuite_re = re.compile( r'\bCXXTEST_SUITE\s*\(\s*(\w*)\s*\)' )
def scanLineForSuiteStart( fileName, lineNo, line ):
    '''Check if current line starts a new test suite'''
    m = suite_re.search( line )
    if m:
        startSuite( m.group(1), fileName, lineNo, 0 )
    m = generatedSuite_re.search( line )
    if m:
        sys.stdout.write( "%s:%s: Warning: Inline test suites are deprecated.\n" % (fileName, lineNo) )
        startSuite( m.group(1), fileName, lineNo, 1 )
def startSuite( name, file, line, generated ):
    '''Begin scanning a new suite, first flushing any suite in progress.'''
    global suite
    closeSuite()
    descriptor = dict( name=name,
                       file=file,
                       cfile=cstr(file),
                       line=line,
                       generated=generated,
                       tests=[],
                       lines=[] )
    # generated C++ identifiers derived from the suite name
    descriptor['object']  = 'suite_%s' % name
    descriptor['dobject'] = 'suiteDescription_%s' % name
    descriptor['tlist']   = 'Tests_%s' % name
    suite = descriptor
def lineStartsBlock( line ):
    '''Report whether *line* opens a CXXTEST_CODE() block.'''
    match = re.search( r'\bCXXTEST_CODE\s*\(', line )
    return match is not None
# a test method: "void testSomething()" / "void TestSomething(void)" that is
# not hidden behind a // comment
test_re = re.compile( r'^([^/]|/[^/])*\bvoid\s+([Tt]est\w+)\s*\(\s*(void)?\s*\)' )
def scanLineForTest( suite, lineNo, line ):
    '''Register a test when *line* declares a void test*() method.'''
    declaration = test_re.search( line )
    if declaration is not None:
        addTest( suite, declaration.group(2), lineNo )
def addTest( suite, name, line ):
    '''Register one test function on *suite*, recording the generated
    C++ identifiers for its description class and object.'''
    qualified = '%s_%s' % (suite['name'], name)
    suite['tests'].append( { 'name'   : name,
                             'suite'  : suite,
                             'class'  : 'TestDescription_' + qualified,
                             'object' : 'testDescription_' + qualified,
                             'line'   : line,
                             } )
def addLineToBlock( suite, lineNo, line ):
    '''Append the line to the current CXXTEST_CODE() block.

    Returns a true value while the block remains open, false once the
    closing }} was seen on this line.'''
    line = fixBlockLine( suite, lineNo, line )
    # strip everything up to and including the opening {{
    line = re.sub( r'^.*\{\{', '', line )
    # keep only the text before a closing }}, if present
    e = re.search( r'\}\}', line )
    if e:
        line = line[:e.start()]
    suite['lines'].append( line )
    return e is None
def fixBlockLine( suite, lineNo, line):
    '''Rewrite [E]TS(M)_ASSERT*/_FAIL macros to their underscore-prefixed
    forms, injecting the suite source file and line number.'''
    replacement = r'_\1(%s,%s,' % (suite['cfile'], lineNo)
    return re.sub( r'\b(E?TSM?_(ASSERT[A-Z_]*|FAIL))\s*\(', replacement, line, 0 )
# a dynamic-suite factory: "static T *createSuite()" / "(void)"
create_re = re.compile( r'\bstatic\s+\w+\s*\*\s*createSuite\s*\(\s*(void)?\s*\)' )
def scanLineForCreate( suite, lineNo, line ):
    '''Record a createSuite() declaration on the current suite.'''
    if create_re.search( line ) is not None:
        addSuiteCreateDestroy( suite, 'create', lineNo )
# a dynamic-suite destructor: "static void destroySuite(T *s)"
destroy_re = re.compile( r'\bstatic\s+void\s+destroySuite\s*\(\s*\w+\s*\*\s*\w*\s*\)' )
def scanLineForDestroy( suite, lineNo, line ):
    '''Record a destroySuite() declaration on the current suite.'''
    if destroy_re.search( line ) is not None:
        addSuiteCreateDestroy( suite, 'destroy', lineNo )
def cstr( s ):
    '''Return *s* as a double-quoted C string literal, backslashes escaped.

    Fixed: string.replace() was removed from the string module in
    Python 3; the equivalent str method works everywhere.  The parameter
    no longer shadows the builtin "str".
    '''
    return '"' + s.replace( '\\', '\\\\' ) + '"'
def addSuiteCreateDestroy( suite, which, line ):
    '''Record the line number of createSuite()/destroySuite() on the suite.

    Aborts when the function was already declared.  Fixed: dict.has_key()
    was removed in Python 3; the "in" membership test is equivalent.
    '''
    if which in suite:
        abort( '%s:%s: %sSuite() already declared' % ( suite['file'], str(line), which ) )
    suite[which] = line
def closeSuite():
    '''Flush the suite being scanned into the global list, if it has tests.

    Fixed: "len(...) is not 0" compared by object identity (only worked
    thanks to CPython small-int caching); truthiness is the correct test.
    '''
    global suite
    if suite is not None:
        if suite['tests']:
            verifySuite(suite)
            rememberSuite(suite)
        suite = None
def verifySuite(suite):
    '''Abort unless createSuite() and destroySuite() come as a pair.

    Fixed: dict.has_key() was removed in Python 3; membership tests are
    equivalent.
    '''
    if 'create' in suite and 'destroy' not in suite:
        abort( '%s:%s: Suite %s has createSuite() but no destroySuite()' %
               (suite['file'], suite['create'], suite['name']) )
    if 'destroy' in suite and 'create' not in suite:
        abort( '%s:%s: Suite %s has destroySuite() but no createSuite()' %
               (suite['file'], suite['destroy'], suite['name']) )
def rememberSuite(suite):
    '''Append a completed suite descriptor to the global suite list.'''
    global suites
    suites.append( suite )
def writeOutput():
    '''Emit the generated runner, via the template when one was given.'''
    emit = writeTemplateOutput if templateFileName else writeSimpleOutput
    emit()
def writeSimpleOutput():
    '''Generate the runner without a template: preamble, main(), world.'''
    output = startOutputFile()
    for writeSection in (writePreamble, writeMain, writeWorld):
        writeSection( output )
    output.close()
include_re = re.compile( r"\s*\#\s*include\s+<cxxtest/" )
preamble_re = re.compile( r"^\s*<CxxTest\s+preamble>\s*$" )
world_re = re.compile( r"^\s*<CxxTest\s+world>\s*$" )
def writeTemplateOutput():
    '''Create output based on the template file.

    Copies the template through, replacing the <CxxTest preamble> and
    <CxxTest world> marker lines, and injecting the preamble ahead of
    the first cxxtest #include.  Fixed: the template handle was leaked
    when a write raised (now a "with" block), and a stray ";" after
    "break" was removed.
    '''
    output = startOutputFile()
    with open(templateFileName) as template:
        for line in template:
            if include_re.search( line ):
                # preamble must precede any cxxtest header
                writePreamble( output )
                output.write( line )
            elif preamble_re.search( line ):
                writePreamble( output )
            elif world_re.search( line ):
                writeWorld( output )
            else:
                output.write( line )
    output.close()
def startOutputFile():
    '''Open the destination (or use stdout) and emit the generated banner.'''
    if outputFileName is None:
        output = sys.stdout
    else:
        output = open( outputFileName, 'w' )
    output.write( "/* Generated file, do not edit */\n\n" )
    return output
wrotePreamble = 0   # latch so the preamble is emitted at most once
def writePreamble( output ):
    '''Write the CxxTest header (#includes and #defines)'''
    global wrotePreamble, headers, longlong
    # idempotent: template processing may request the preamble repeatedly
    if wrotePreamble: return
    output.write( "#ifndef CXXTEST_RUNNING\n" )
    output.write( "#define CXXTEST_RUNNING\n" )
    output.write( "#endif\n" )
    output.write( "\n" )
    # feature #defines reflect the flags gathered during scanning/options
    if haveStandardLibrary:
        output.write( "#define _CXXTEST_HAVE_STD\n" )
    if haveExceptionHandling:
        output.write( "#define _CXXTEST_HAVE_EH\n" )
    if abortOnFail:
        output.write( "#define _CXXTEST_ABORT_TEST_ON_FAIL\n" )
    if longlong:
        output.write( "#define _CXXTEST_LONGLONG %s\n" % longlong )
    if factor:
        output.write( "#define _CXXTEST_FACTOR\n" )
    # user-requested --include headers come before the cxxtest ones
    for header in headers:
        output.write( "#include %s\n" % header )
    output.write( "#include <cxxtest/TestListener.h>\n" )
    output.write( "#include <cxxtest/TestTracker.h>\n" )
    output.write( "#include <cxxtest/TestRunner.h>\n" )
    output.write( "#include <cxxtest/RealDescriptions.h>\n" )
    if runner:
        output.write( "#include <cxxtest/%s.h>\n" % runner )
    if gui:
        output.write( "#include <cxxtest/%s.h>\n" % gui )
    output.write( "\n" )
    wrotePreamble = 1
def writeMain( output ):
    '''Write the main() function for the test runner.

    Writes nothing when neither --gui nor --runner was requested (the
    user then supplies main() elsewhere).'''
    if gui:
        # GUI wraps the text runner inside a GuiTuiRunner
        output.write( 'int main( int argc, char *argv[] ) {\n' )
        if noStaticInit:
            output.write( ' CxxTest::initialize();\n' )
        output.write( ' return CxxTest::GuiTuiRunner<CxxTest::%s, CxxTest::%s>( argc, argv ).run();\n' % (gui, runner) )
        output.write( '}\n' )
    elif runner:
        output.write( 'int main() {\n' )
        if noStaticInit:
            output.write( ' CxxTest::initialize();\n' )
        output.write( ' return CxxTest::%s().run();\n' % runner )
        output.write( '}\n' )
wroteWorld = 0   # latch so the world definitions are emitted at most once
def writeWorld( output ):
    '''Write the world definitions'''
    global wroteWorld, part
    if wroteWorld: return
    # the preamble must precede the world; writePreamble is idempotent
    writePreamble( output )
    writeSuites( output )
    # --part omits the globals; --root forces them
    if root or not part:
        writeRoot( output )
    if noStaticInit:
        writeInitialize( output )
    wroteWorld = 1
def writeSuites(output):
    '''Write all TestDescriptions and SuiteDescriptions'''
    # For each scanned suite: include its source file, emit the suite class
    # (generated suites only), its object or pointer, its test list, and the
    # description objects that register suite and tests with CxxTest.
    for suite in suites:
        writeInclude( output, suite['file'] )
        if isGenerated(suite):
            generateSuite( output, suite )
        if isDynamic(suite):
            writeSuitePointer( output, suite )
        else:
            writeSuiteObject( output, suite )
        writeTestList( output, suite )
        writeSuiteDescription( output, suite )
        writeTestDescriptions( output, suite )
def isGenerated(suite):
    '''True when the suite was declared inline with CXXTEST_SUITE() and its
    class body must therefore be emitted by the generator.'''
    return suite['generated']
def isDynamic(suite):
    '''True when the suite declares createSuite() (instantiated at run time).

    Fixed: dict.has_key() was removed in Python 3; "in" is equivalent.
    '''
    return 'create' in suite
lastIncluded = ''   # last file emitted by writeInclude(), to skip repeats
def writeInclude(output, file):
    '''Add #include "file" statement'''
    global lastIncluded
    # consecutive suites from the same source share a single #include
    if file == lastIncluded: return
    output.writelines( [ '#include "', file, '"\n\n' ] )
    lastIncluded = file
def generateSuite( output, suite ):
    '''Emit the class definition for a suite declared with CXXTEST_SUITE(),
    reassembling the lines collected from its CXXTEST_CODE() blocks.'''
    output.write( 'class %s : public CxxTest::TestSuite {\npublic:\n' % suite['name'] )
    for bodyLine in suite['lines']:
        output.write( bodyLine )
    output.write( '};\n\n' )
def writeSuitePointer( output, suite ):
    '''Emit the static pointer that will later hold a dynamic suite instance.
    With static initialization it is zero-initialized explicitly.'''
    initializer = '' if noStaticInit else ' = 0'
    output.write( 'static %s *%s%s;\n\n' % (suite['name'], suite['object'], initializer) )
def writeSuiteObject( output, suite ):
    '''Emit the static instance definition for a non-dynamic suite.'''
    output.write( "static %s %s;\n\n" % (suite['name'], suite['object']) )
def writeTestList( output, suite ):
    '''Emit the List object heading a suite's linked list of tests.'''
    if noStaticInit:
        template = 'static CxxTest::List %s;\n'
    else:
        template = 'static CxxTest::List %s = { 0, 0 };\n'
    output.write( template % suite['tlist'] )
def writeTestDescriptions( output, suite ):
    '''Emit one TestDescription class per test of the suite.'''
    for testInfo in suite['tests']:
        writeTestDescription( output, suite, testInfo )
def writeTestDescription( output, suite, test ):
    '''Write test description object'''
    # One RealTestDescription subclass per test; with static initialization
    # its constructor links it into the suite's test list, otherwise
    # CxxTest::initialize() (see writeInitialize) does that at run time.
    output.write( 'static class %s : public CxxTest::RealTestDescription {\n' % test['class'] )
    output.write( 'public:\n' )
    if not noStaticInit:
        output.write( ' %s() : CxxTest::RealTestDescription( %s, %s, %s, "%s" ) {}\n' %
                      (test['class'], suite['tlist'], suite['dobject'], test['line'], test['name']) )
    # runTest() body differs for dynamic vs. static suites (see runBody)
    output.write( ' void runTest() { %s }\n' % runBody( suite, test ) )
    output.write( '} %s;\n\n' % test['object'] )
def runBody( suite, test ):
    '''Return the C++ statement forming TestDescription::runTest().'''
    generator = dynamicRun if isDynamic(suite) else staticRun
    return generator( suite, test )
def dynamicRun( suite, test ):
    '''runTest() body for a dynamic suite: guard against a null suite pointer.'''
    pointer = suite['object']
    return 'if ( %s ) %s->%s();' % (pointer, pointer, test['name'])
def staticRun( suite, test ):
    '''runTest() body for a static suite: direct member call on the object.'''
    return '%s.%s();' % (suite['object'], test['name'])
def writeSuiteDescription( output, suite ):
    '''Emit the SuiteDescription matching the suite's kind.'''
    writer = writeDynamicDescription if isDynamic( suite ) else writeStaticDescription
    writer( output, suite )
def writeDynamicDescription( output, suite ):
    '''Write SuiteDescription for a dynamic suite'''
    output.write( 'CxxTest::DynamicSuiteDescription<%s> %s' % (suite['name'], suite['dobject']) )
    # with static initialization the constructor wires up the suite;
    # otherwise writeInitialize() emits the equivalent initialize() call
    if not noStaticInit:
        output.write( '( %s, %s, "%s", %s, %s, %s, %s )' %
                      (suite['cfile'], suite['line'], suite['name'], suite['tlist'],
                       suite['object'], suite['create'], suite['destroy']) )
    output.write( ';\n\n' )
def writeStaticDescription( output, suite ):
    '''Write SuiteDescription for a static suite'''
    output.write( 'CxxTest::StaticSuiteDescription %s' % suite['dobject'] )
    # constructor arguments only with static initialization (see above)
    if not noStaticInit:
        output.write( '( %s, %s, "%s", %s, %s )' %
                      (suite['cfile'], suite['line'], suite['name'], suite['object'], suite['tlist']) )
    output.write( ';\n\n' )
def writeRoot(output):
    '''Emit the include pulling in CxxTest's static class members.'''
    output.write( '#include <cxxtest/Root.cpp>\n' )
def writeInitialize(output):
    '''Write CxxTest::initialize(), which replaces static initialization'''
    # With --no-static-init the description objects are default-constructed;
    # this generated function wires them together explicitly at run time.
    output.write( 'namespace CxxTest {\n' )
    output.write( ' void initialize()\n' )
    output.write( ' {\n' )
    for suite in suites:
        output.write( '  %s.initialize();\n' % suite['tlist'] )
        if isDynamic(suite):
            # dynamic suites start with a null pointer and carry their
            # create/destroy hooks into the description
            output.write( '  %s = 0;\n' % suite['object'] )
            output.write( '  %s.initialize( %s, %s, "%s", %s, %s, %s, %s );\n' %
                          (suite['dobject'], suite['cfile'], suite['line'], suite['name'],
                           suite['tlist'], suite['object'], suite['create'], suite['destroy']) )
        else:
            output.write( '  %s.initialize( %s, %s, "%s", %s, %s );\n' %
                          (suite['dobject'], suite['cfile'], suite['line'], suite['name'],
                           suite['object'], suite['tlist']) )
        for test in suite['tests']:
            output.write( '  %s.initialize( %s, %s, %s, "%s" );\n' %
                          (test['object'], suite['tlist'], suite['dobject'], test['line'], test['name']) )
    output.write( ' }\n' )
    output.write( '}\n' )
# Script entry point: parse the command line, scan inputs, emit the runner.
main()
| Python |
#!/opt/ActivePython-3.2/bin/python3
#This program is the master GUI for the Manta Ray project.
#version 1.14
#########################COPYRIGHT INFORMATION############################
#Copyright (C) 2013 Kevin.Murphy@ManTech.com #
#This program is free software: you can redistribute it and/or modify #
#it under the terms of the GNU General Public License as published by #
#the Free Software Foundation, either version 3 of the License, or #
#(at your option) any later version. #
#
#This program is distributed in the hope that it will be useful, #
#but WITHOUT ANY WARRANTY; without even the implied warranty of #
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
#GNU General Public License for more details. #
#
#You should have received a copy of the GNU General Public License #
#along with this program. If not, see http://www.gnu.org/licenses/. #
#########################COPYRIGHT INFORMATION############################
from easygui import *
import os
import io
import sys
import string
import logging
import subprocess
import datetime
from easygui import *
from be_mr import *
from check_for_folder import *
from jumplist_mr import *
from entropy_mr import *
from extract_registry_hives_mr import *
from mr_registry import *
from GUI_Timeline_mr import *
from remove_duplicates_mr import *
from carve_unallocated_mr import *
from volatility_mr import *
from exifdata_mr import *
from done import *
from extract_ntfs_artifacts_mr import *
from create_kml_from_exif_mr import *
from plist_processor import *
### SPLASHSCREEN ###########################################################################
def splashscreen():
    '''Show the MantaRay splash screen and return the user's final choice.

    The informational buttons (About / License / Support / System
    Requirements) open the matching text file in a zenity dialog and then
    re-display the splash screen.  The recursive result is returned, so the
    caller always receives the choice that finally dismissed the splash
    (bug fix: the original discarded the recursive call's result and fell
    through to return the stale first selection).  "Exit" terminates the
    program.
    '''
    title = 'MantaRay - ManTech Triage & Analysis System MantaRayForensics.com'
    intro_splashscreen = buttonbox(
        msg='',
        image='/usr/local/src/Manta_Ray/images/Mantaray_Logo_Template_Full_Screen.gif',
        title=title,
        choices=('Continue', 'About', 'License', 'Support', 'System Requirements', 'Exit'))
    if intro_splashscreen == "Exit":
        sys.exit(0)
    # Each informational choice maps to (text file, dialog height); this
    # replaces four near-identical copies of the same zenity invocation.
    info_pages = {
        'About': ('about.txt', 625),
        'License': ('MR_GNU_License.txt', 400),
        'Support': ('support.txt', 400),
        'System Requirements': ('sys_requirements.txt', 400),
    }
    if intro_splashscreen in info_pages:
        filename, height = info_pages[intro_splashscreen]
        try:
            subprocess.call(
                ['zenity --text-info --filename=/usr/local/src/Manta_Ray/Tools/Python/%s '
                 '--title "%s" --width 800 --height %d' % (filename, title, height)],
                shell=True, universal_newlines=True)
        except Exception:  # narrowed from bare except:
            print ("Cancel/Exit chosen")
            sys.exit(0)
        # Re-display the splash and propagate the eventual selection.
        return splashscreen()
    return intro_splashscreen
############################################################################################
# Record the run's start timestamp (reused below for the case folder name).
now = datetime.datetime.now()
intro_splashscreen = splashscreen()
# Disable auto-mount in Nautilus - this stops a Nautilus window from popping
# up every time the mount command is executed.
cmd_false = ("sudo gsettings set org.gnome.desktop.media-handling automount false && "
             "sudo gsettings set org.gnome.desktop.media-handling automount-open false")
try:
    subprocess.call([cmd_false], shell=True)
except Exception:  # narrowed from bare except: (best-effort; failure is non-fatal)
    print("Automount false failed")  # fixed typo in message ("Autmount")
##Enter Case Information
if intro_splashscreen:
    try:
        msg = "Case Information"
        title = "MantaRay - ManTech Triage & Analysis System"
        fieldNames = ["Case Number", "Evidence Number", "Examiner Name", "Notes"]
        fieldValues = multenterbox(msg, title, fieldNames)
        ## Verify fields are not blank: re-prompt until every field has
        ## content (multenterbox returns None when the user cancels).
        while True:
            if fieldValues is None:
                break
            errmsg = ""
            for field_name, field_value in zip(fieldNames, fieldValues):
                if field_value.strip() == "":
                    errmsg += ('"%s" is a required field.\n\n' % field_name)
            if errmsg == "":
                break  # no problems found
            fieldValues = multenterbox(errmsg, title, fieldNames, fieldValues)
    except Exception:  # narrowed from bare except:
        print ("Cancel/Exit chosen")
        sys.exit(0)
if fieldValues:
    case_number = fieldValues[0]
    # Sanitize the case number for use as a folder name: drop spaces and
    # substitute _ for / and \.  Uses plain membership tests and
    # str.replace instead of re.search: `re` was never imported directly
    # and only worked because a wildcard import leaked it into scope.
    if " " in case_number:
        case_number = case_number.replace(" ", "")
        print(case_number)
    if "/" in case_number:
        case_number = case_number.replace("/", "_")
        print(case_number)
    if "\\" in case_number:
        case_number = case_number.replace("\\", "_")
        print(case_number)
    evidence_number = fieldValues[1]
    examiner_name = fieldValues[2]
    case_notes = fieldValues[3]
    # Make the case folder name unique by appending the start timestamp.
    case_number = case_number + "-" + evidence_number + "-MantaRay_" + now.strftime("%Y-%m-%d_%H_%M_%S_%f")
    print("Case Number: " + case_number)
    print("Evidence Number: " + evidence_number)
    print("Examiner Name: " + examiner_name)
    print("Case notes: " + case_notes)
else:
    print ("Cancel/Exit chosen.")
    sys.exit(0)
##Choose Evidence Type - Image Type
# Radio-list dialog: the single selected evidence type drives which
# processing branch (and which tool menu) runs further below.
if fieldValues:
    try:
        evidence_type = subprocess.check_output(['zenity --list --radiolist --title "MantaRay - ManTech Triage & Analysis System MantaRayForensics.com" --column="Selection" --column="Evidence Type" --column="Description" --separator="," TRUE "Bit-Stream Image" ".dd, .img, .001, .E01" FALSE "Directory" "Logical Directory" FALSE "EnCase Logical Evidence File" ".L01" FALSE "Memory Image" "Forensic Image of RAM" FALSE "Single File" "Individual File" --text="Evidence Type Selection" --width 800 --height 400'], shell=True, universal_newlines=True)
    except:
        # zenity exits non-zero on Cancel/close, making check_output raise;
        # treated as a user abort.
        print ("Cancel/Exit chosen")
        sys.exit(0)
if evidence_type:
    # check_output returns the selection with a trailing newline.
    evidence_type = evidence_type.strip()
    print("Evidence Type:" + evidence_type)
else:
    print ("No evidence type was selected.")
    subprocess.call(['zenity --info --title "MantaRay - ManTech Triage & Analysis System" --text="No evidence type was selected. Evidence type is required."'], shell=True, universal_newlines=True)
    sys.exit(0)
##Choose Output Directory
if evidence_type:
    try:
        root_output_dir = subprocess.check_output(['sudo zenity --file-selection --directory --filename="/mnt/hgfs/" --title "Select Root Output Directory" --text="Select Output Directory"'], shell=True, universal_newlines=True)
    except:
        print ("Cancel/Exit chosen")
        sys.exit(0)
if root_output_dir:
    print("Output Directory:" + root_output_dir.strip())
    #create output directory
    folder_path = root_output_dir.strip() + "/" + case_number
    # check_for_folder (wildcard-imported helper) creates/returns the folder.
    folder_path = check_for_folder(folder_path, "NONE")
else:
    print ("No output directory was selected.")
    sys.exit(0)
##Create and open log file
# The log handle stays open for the rest of the run; every subsequent
# selection is appended to it.
gui_log_file = folder_path + "/" + case_number + "_MantaRay_logfile.txt"
gui_outfile = open(gui_log_file, 'a')
##Log previous user created information
now = datetime.datetime.now()
gui_outfile.write(now.strftime("%Y-%m-%d %H:%M:%S")+ "\n\n")
gui_outfile.write("Case Number:"+"\t\t"+case_number+"\n"+"Evidence Number:"+"\t"+fieldValues[1]+"\n"+"Examiner Name:"+"\t\t"+fieldValues[2]+"\n\n")
gui_outfile.write("Notes:" +"\t" + fieldValues[3] + "\n\n")
gui_outfile.write("Evidence Type:" + "\t" + evidence_type + "\n")
gui_outfile.write("Output Folder:" + "\t" + folder_path + "\n")
##Choose Processing Scripts
# Bit-Stream Image branch: checklist of tools, then selection of the image
# file to process.  Both the console and the case log record each choice.
if evidence_type == "Bit-Stream Image":
    try:
        processing_scripts = subprocess.check_output(['zenity --list --checklist --title "MantaRay - ManTech Triage & Analysis System MantaRayForensics.com" --column="Selection" --column="Processing Tool" --column="Description" --separator="," FALSE "BulkExtractor" "Scans for a large number of pre-defined regular expressions" FALSE "Calculate Entropy" "Pseudorandom number sequence test (ENT)" FALSE "Create KML from JPG EXIF Data" "Create Google Earth .kml file from EXIF data found in JPG images" FALSE "EXIF Tool" "Read meta information in files" FALSE "Foremost" "Recover files from a disk image based on headers and footers (Unallocated Space)" FALSE "Jumplist Parser" "Windows Vista/7 Jumplist Exploitation" FALSE "NTFS Artifact Extractor" "\$MFT/\$LogFile/((\$USNJRNL•\$J (Vista/7 Only)) Overt & Shadow Volume Extraction" FALSE "PLIST Processor" "Extracts triage data from selected .plist files" FALSE "Registry Hive Extractor//Regripper" "Extract Registry from overt, deleted, unallocated, shadow volumes, restore-points & process with RegRipper" FALSE "Super Timeline" "Parse various log files and artifacts for timeline analysis" --text="Processing Tool Selection | Evidence Type: Bit-Stream Image" --width 1100 --height 400'], shell=True, universal_newlines=True)
    except:
        # zenity exits non-zero on Cancel/close; log and abort.
        print ("Cancel/Exit chosen")
        gui_outfile.write("Bit-Stream Image: Processing Tool Selection - Aborted by user - Cancel/Exit chosen")
        sys.exit(0)
    if processing_scripts:
        print("Processing Scripts: " + processing_scripts.strip())
        gui_outfile.write("Processing Scripts:" + "\t" + processing_scripts.strip() + "\n")
    else:
        print ("No processing scripts were selected.")
        gui_outfile.write("Bit-Stream Image: Script Selection - No processing scripts were selected.")
        sys.exit(0)
    # File picker filtered to common bit-stream image extensions.
    try:
        evidence_path = subprocess.check_output(['zenity --file-selection --filename="/mnt/hgfs/" --file-filter=""*.DD" "*.dd" "*.IMG" "*.img" "*.001" "*.E01"" --title "Select Bit-Stream Image to Process"'], shell=True, universal_newlines=True)
    except:
        print ("Cancel/Exit chosen")
        gui_outfile.write("Bit-Stream Image: Image Selection - Aborted by user - Cancel/Exit chosen")
        sys.exit(0)
    if evidence_path:
        print("Bit-Stream Image: " + evidence_path.strip())
        gui_outfile.write("Bit-Stream Image:" + "\t" + evidence_path.strip() + "\n")
    else:
        print ("No Image was selected.")
        gui_outfile.write("Bit-Stream Image: Bit-Stream Image Selection - No Bit-stream Image was selected.")
        sys.exit(0)
# EnCase Logical Evidence File branch: tool checklist, then .L01 file picker.
if evidence_type == "EnCase Logical Evidence File":
    try:
        processing_scripts = subprocess.check_output(['zenity --list --checklist --title "MantaRay - ManTech Triage & Analysis System MantaRayForensics.com" --column="Selection" --column="Processing Tool" --column="Description" --separator="," FALSE "BulkExtractor" "Scans for a large number of pre-defined regular expressions." FALSE "Calculate Entropy" "Pseudorandom number sequence test (ENT)" FALSE "Create KML from JPG EXIF Data" "Create Google Earth .kml file from EXIF data found in JPG images" FALSE "PLIST Processor" "Extracts triage data from selected .plist files" FALSE "Super Timeline" "Parse various log files and artifacts for timeline analysis" --text="Processing Tool Selection | Evidence Type: EnCase Logical Evidence File" --width 1100 --height 400'], shell=True, universal_newlines=True)
    except Exception:  # narrowed from bare except:
        print ("Cancel/Exit chosen")
        gui_outfile.write("EnCase Logical Evidence File: Processing Tool Selection - Aborted by user - Cancel/Exit chosen")
        sys.exit(0)
    if processing_scripts:
        # .strip() added for consistency with every other evidence-type
        # branch: keeps the stray trailing newline from zenity out of the
        # console output and the case log.
        print("Processing Scripts: " + processing_scripts.strip())
        gui_outfile.write("Processing Scripts:" + "\t" + processing_scripts.strip() + "\n")
    else:
        print ("No processing scripts were selected.")
        gui_outfile.write("EnCase Logical Evidence File: Processing Tool Selection - No processing scripts were selected.")
        sys.exit(0)
    try:
        evidence_path = subprocess.check_output(['zenity --file-selection --filename="/mnt/hgfs/" --file-filter=""*.L01" "*.l01"" --title "Select EnCase Logical Evidence File to Process"'], shell=True, universal_newlines=True)
    except Exception:  # narrowed from bare except:
        print ("Cancel/Exit chosen")
        gui_outfile.write("EnCase Logical Evidence File: File Selection - Aborted by user - Cancel/Exit chosen")
        sys.exit(0)
# Directory branch: tool checklist, then directory picker.
if evidence_type == "Directory":
    try:
        # NOTE(review): this command passes --text twice; zenity appears to
        # honor only the last one — the first is redundant.  Confirm before
        # removing.
        processing_scripts = subprocess.check_output(['zenity --list --checklist --title "MantaRay - ManTech Triage & Analysis System MantaRayForensics.com" --column="Selection" --column="Processing Tool" --column="Description" --separator="," FALSE "BulkExtractor" "Scans for a large number of pre-defined regular expressions" FALSE "Calculate Entropy" "Pseudorandom number sequence test (ENT)" FALSE "Create KML from JPG EXIF Data" "Create Google Earth .kml file from EXIF data found in JPG images" FALSE "Delete Duplicate Files " "Delete duplicate files from the selected directory (Recursive)" FALSE "EXIF Tool" "Read meta information in files" FALSE "PLIST Processor" "Extracts triage data from selected .plist files" FALSE "Super Timeline" "Parse various log files and artifacts for timeline analysis" --text="Processing Tool Selection" --text="Processing Tool Selection | Evidence Type: Directory" --width 1100 --height 400'], shell=True, universal_newlines=True)
    except:
        print ("Cancel/Exit chosen")
        gui_outfile.write("Directory: Processing Tool Selection - Aborted by user - Cancel/Exit chosen")
        sys.exit(0)
    if processing_scripts:
        print("Processing Scripts: " + processing_scripts.strip())
        gui_outfile.write("Processing Scripts:" + "\t" + processing_scripts.strip() + "\n")
    else:
        print ("No processing scripts were selected.")
        gui_outfile.write("Directory: Processing Tool Selection - No processing scripts were selected.")
        sys.exit(0)
    # NOTE(review): unlike the Bit-Stream branch, evidence_path is not
    # validated after selection here (nor in the two branches below).
    try:
        evidence_path = subprocess.check_output(['zenity --file-selection --filename="/mnt/hgfs/" --directory --title "Select Directory to Process"'], shell=True, universal_newlines=True)
    except:
        print ("Cancel/Exit chosen")
        gui_outfile.write("Directory: Directory Selection - Aborted by user - Cancel/Exit chosen")
        sys.exit(0)
# Memory Image branch: tool checklist, then memory-image file picker.
if evidence_type == "Memory Image":
    try:
        processing_scripts = subprocess.check_output(['zenity --list --checklist --title "MantaRay - ManTech Triage & Analysis System MantaRayForensics.com" --column="Selection" --column="Processing Tool" --column="Description" --separator="," FALSE "BulkExtractor" "Scans for a large number of pre-defined regular expressions" FALSE "Volatility" "Extraction of digital artifacts from volatile memory - Requires user input - best run alone" --text="Processing Tool Selection | Evidence Type: Memory Image" --width 1100 --height 400'], shell=True, universal_newlines=True)
    except:
        print ("Cancel/Exit chosen")
        gui_outfile.write("Memory Image: Processing Tool Selection - Aborted by user - Cancel/Exit chosen")
        sys.exit(0)
    if processing_scripts:
        print("Processing Scripts: " + processing_scripts.strip())
        gui_outfile.write("Processing Scripts:" + "\t" + processing_scripts.strip() + "\n")
    else:
        print ("No processing scripts were selected.")
        gui_outfile.write("Memory Image: Processing Tool Selection - No processing scripts were selected.")
        sys.exit(0)
    try:
        evidence_path = subprocess.check_output(['zenity --file-selection --filename="/mnt/hgfs/" --file-filter=""*.DD" "*.dd" "*.IMG" "*.img" "*.001" "*.BIN" "*.bin" "*.MEM" "*.mem" " --title "Select Memory Image to Process"'], shell=True, universal_newlines=True)
    except:
        print ("Cancel/Exit chosen")
        gui_outfile.write("Memory Image: Directory Selection - Aborted by user - Cancel/Exit chosen")
        sys.exit(0)
# Single File branch: tool checklist, then unfiltered file picker.
if evidence_type == "Single File":
    try:
        processing_scripts = subprocess.check_output(['zenity --list --checklist --title "MantaRay - ManTech Triage & Analysis System MantaRayForensics.com" --column="Selection" --column="Processing Tool" --column="Description" --separator="," FALSE "BulkExtractor" "Scans for a large number of pre-defined regular expressions." FALSE "Calculate Entropy" "Pseudorandom number sequence test (ENT)" FALSE "Create KML from JPG EXIF Data" "Create Google Earth .kml file from EXIF data found in JPG images" --text="Processing Tool Selection | Evidence Type: Single File" --width 1100 --height 400'], shell=True, universal_newlines=True)
    except:
        print ("Cancel/Exit chosen")
        gui_outfile.write("Single File: Processing Tool Selection - Aborted by user - Cancel/Exit chosen")
        sys.exit(0)
    if processing_scripts:
        print("Processing Scripts: " + processing_scripts.strip())
        gui_outfile.write("Processing Scripts:" + "\t" + processing_scripts.strip() + "\n")
    else:
        print ("No processing scripts were selected.")
        gui_outfile.write("Single File: Processing Tool Selection - No processing scripts were selected.")
        sys.exit(0)
    try:
        evidence_path = subprocess.check_output(['zenity --file-selection --filename="/mnt/hgfs/" --title "Select Single File to Process"'], shell=True, universal_newlines=True)
    except:
        print ("Cancel/Exit chosen")
        gui_outfile.write("Single File: Directory Selection - Aborted by user - Cancel/Exit chosen")
        sys.exit(0)
#split string on comma
# zenity joins the chosen tools with "," (--separator above); the list is
# consumed by the tool-options loop that follows.
processing_scripts = processing_scripts.strip()
processing_scripts_list = processing_scripts.split(",")
#set debug option
# Radio-list: OFF (default) or ON (verbose error logging; processing stops
# at the first error, per the dialog's warning column).
try:
    debug_mode = subprocess.check_output(['zenity --list --radiolist --title "MantaRay - ManTech Triage & Analysis System MantaRayForensics.com" --column="Selection" --column="Debug Option" --column="Description" --column="Warning" --separator="," TRUE "OFF" "Default mode, no verbose error logging" "" FALSE "ON" "Debugging mode, verbose error logging" "All processes will stop at first error" --text="Debugging Option Selection" --width 800 --height 175'], shell=True, universal_newlines=True)
except:
    print ("Cancel/Exit chosen")
    gui_outfile.write("Debug Options: Aborted by user - Cancel/Exit chosen")
    sys.exit(0)
if debug_mode:
    debug_mode = debug_mode.strip()
    print("Debug Mode: " + debug_mode)
    gui_outfile.write("Debug Mode:" + "\t" + debug_mode + "\n")
#Gather User Specified Options for Processing Tools
for x in processing_scripts_list:
if x == 'BulkExtractor':
try:
bulkextractor_options = subprocess.check_output(['zenity --list --checklist --title "MantaRay - ManTech Triage & Analysis System MantaRayForensics.com" --column="Selection" --column="Processing Option" --column="Description" --separator="," FALSE "Keyword List" "Search for case specific keyword list" FALSE "Whitelist" "Remove known features (artifacts) from process output" --text="Processing Options - BulkExtractor" --width 800 --height 400'], shell=True, universal_newlines=True)
except:
print ("Cancel/Exit chosen")
gui_outfile.write("BulkExtractor Options: Processing Options - Aborted by user - Cancel/Exit chosen")
sys.exit(0)
if bulkextractor_options:
print("BulkExtractor Options: " + bulkextractor_options.strip())
gui_outfile.write("BulkExtractor:" + "\t" + bulkextractor_options.strip() + "\n")
else:
print ("BulkExtractor Options: No options were selected.")
gui_outfile.write("BulkExtractor Options: Processing Options - No processing scripts were selected.")
#sys.exit(0)
bulkextractor_options = bulkextractor_options.strip()
bulkextractor_options_list = bulkextractor_options.split(",")
#initialize bulk_extractor options
keyword_list_path = "NONE"
whitelist_path = "NONE"
for item in bulkextractor_options_list:
if item == 'Keyword List':
keyword_list_path = subprocess.check_output(['zenity --file-selection --filename="/mnt/hgfs/" --title "Select BulkExtractor Keyword List"'], shell=True, universal_newlines=True)
print("Keyword List: " + keyword_list_path.strip())
gui_outfile.write("Keyword List:" + "\t" + keyword_list_path.strip() + "\n")
if item == 'Whitelist':
whitelist_path = subprocess.check_output(['zenity --file-selection --filename="/mnt/hgfs/" --title "Select BulkExtractor Whitelist"'], shell=True, universal_newlines=True)
print("Whitelist: " + whitelist_path.strip())
gui_outfile.write("Whitelist:" + "\t" + whitelist_path.strip() + "\n")
try:
bulkextractor_processor = subprocess.check_output(['zenity --list --radiolist --title "MantaRay - ManTech Triage & Analysis System MantaRayForensics.com" --column="Selection" --column="Processor Performance" --column="Description" --separator="," FALSE "Speed-Slow" "Minimum Processing Cores" TRUE "Speed-Med" "Medium Processing Cores (Recommended)" FALSE "Speed-Fast" "Maximum Processing Cores (Warning - Processor Intensive)" --text="Processing Performance - BulkExtractor" --width 800 --height 400'], shell=True, universal_newlines=True)
except:
print ("Cancel/Exit chosen")
gui_outfile.write("BulkExtractor Processor: Processor Options - Aborted by user - Cancel/Exit chosen")
sys.exit(0)
print("BulkExtractor Performance: " + bulkextractor_processor.strip())
gui_outfile.write("BulkExtractor Performance:" + "\t" + bulkextractor_processor.strip() + "\n")
#pass variables to bulk_extractor module
elif x == 'Foremost':
try:
foremost_options = subprocess.check_output(['zenity --list --radiolist --title "MantaRay - ManTech Triage & Analysis System MantaRayForensics.com" --column="Selection" --column="Processing Option" --column="Description" --separator="," TRUE "Default File Signatures" "jpg,gif,png,bmp,avi,exe,mpg,wav,riff,wmv,mov,pdf,ole,doc,zip,rar,htm,cpp" FALSE "Configuration File" "Use configuration file - (/etc/foremost.conf)" --text="Processing Options - Foremost" --width 800 --height 400'], shell=True, universal_newlines=True)
except:
print ("Cancel/Exit chosen")
gui_outfile.write("Foremost Options: Processing Options - Aborted by user - Cancel/Exit chosen")
sys.exit(0)
print("Foremost Options: " + foremost_options.strip())
gui_outfile.write("Foremost Options:" + "\t" + foremost_options.strip() + "\n")
#Let user select individual file signatures to carve for
if(re.search('Default', foremost_options)):
cmd_string = "jpg,gif,bmp,avi,exe,mpg,wav,mov,pdf,ole,doc,zip,rar,htm,wmv,png,mp4"
elif x == 'Super Timeline':
try:
select_timezone = subprocess.check_output(['zenity --question --title "MantaRay - ManTech Triage & Analysis System MantaRayForensics.com" --text="Non-english unicode timezones must be set manually. If there is a chance the case has non-english timezones, verify timezone using other methods and set this option manually. A future release of MantaRay will provide automatic verification of all timezones prior to this selction option. Do you want to set the SuperTimeline timezone manually?" --width 800 --height 200'], shell=True, universal_newlines=True)
user_defined_timezone = subprocess.check_output(['zenity --list --radiolist --title "MantaRay - ManTech Triage & Analysis System MantaRayForensics.com" --column="Selection" --column="Timezone" --separator="," TRUE "UTC" FALSE "AKST9AKDT" FALSE "Africa/Abidjan" FALSE "Africa/Accra" FALSE "Africa/Addis_Ababa" FALSE "Africa/Algiers" FALSE "Africa/Asmara" FALSE "Africa/Asmera" FALSE "Africa/Bamako" FALSE "Africa/Bangui" FALSE "Africa/Banjul" FALSE "Africa/Bissau" FALSE "Africa/Blantyre" FALSE "Africa/Brazzaville" FALSE "Africa/Bujumbura" FALSE "Africa/Cairo" FALSE "Africa/Casablanca" FALSE "Africa/Ceuta" FALSE "Africa/Conakry" FALSE "Africa/Dakar" FALSE "Africa/Dar_es_Salaam" FALSE "Africa/Djibouti" FALSE "Africa/Douala" FALSE "Africa/El_Aaiun" FALSE "Africa/Freetown" FALSE "Africa/Gaborone" FALSE "Africa/Harare" FALSE "Africa/Johannesburg" FALSE "Africa/Juba" FALSE "Africa/Kampala" FALSE "Africa/Khartoum" FALSE "Africa/Kigali" FALSE "Africa/Kinshasa" FALSE "Africa/Lagos" FALSE "Africa/Libreville" FALSE "Africa/Lome" FALSE "Africa/Luanda" FALSE "Africa/Lubumbashi" FALSE "Africa/Lusaka" FALSE "Africa/Malabo" FALSE "Africa/Maputo" FALSE "Africa/Maseru" FALSE "Africa/Mbabane" FALSE "Africa/Mogadishu" FALSE "Africa/Monrovia" FALSE "Africa/Nairobi" FALSE "Africa/Ndjamena" FALSE "Africa/Niamey" FALSE "Africa/Nouakchott" FALSE "Africa/Ouagadougou" FALSE "Africa/Porto-Novo" FALSE "Africa/Sao_Tome" FALSE "Africa/Timbuktu" FALSE "Africa/Tripoli" FALSE "Africa/Tunis" FALSE "Africa/Windhoek" FALSE "America/Adak" FALSE "America/Anchorage" FALSE "America/Anguilla" FALSE "America/Antigua" FALSE "America/Araguaina" FALSE "America/Argentina/Buenos_Aires" FALSE "America/Argentina/Catamarca" FALSE "America/Argentina/ComodRivadavia" FALSE "America/Argentina/Cordoba" FALSE "America/Argentina/Jujuy" FALSE "America/Argentina/La_Rioja" FALSE "America/Argentina/Mendoza" FALSE "America/Argentina/Rio_Gallegos" FALSE "America/Argentina/Salta" FALSE "America/Argentina/San_Juan" FALSE 
"America/Argentina/San_Luis" FALSE "America/Argentina/Tucuman" FALSE "America/Argentina/Ushuaia" FALSE "America/Aruba" FALSE "America/Asuncion" FALSE "America/Atikokan" FALSE "America/Atka" FALSE "America/Bahia" FALSE "America/Bahia_Banderas" FALSE "America/Barbados" FALSE "America/Belem" FALSE "America/Belize" FALSE "America/Blanc-Sablon" FALSE "America/Boa_Vista" FALSE "America/Bogota" FALSE "America/Boise" FALSE "America/Buenos_Aires" FALSE "America/Cambridge_Bay" FALSE "America/Campo_Grande" FALSE "America/Cancun" FALSE "America/Caracas" FALSE "America/Catamarca" FALSE "America/Cayenne" FALSE "America/Cayman" FALSE "America/Chicago" FALSE "America/Chihuahua" FALSE "America/Coral_Harbour" FALSE "America/Cordoba" FALSE "America/Costa_Rica" FALSE "America/Cuiaba" FALSE "America/Curacao" FALSE "America/Danmarkshavn" FALSE "America/Dawson" FALSE "America/Dawson_Creek" FALSE "America/Denver" FALSE "America/Detroit" FALSE "America/Dominica" FALSE "America/Edmonton" FALSE "America/Eirunepe" FALSE "America/El_Salvador" FALSE "America/Ensenada" FALSE "America/Fort_Wayne" FALSE "America/Fortaleza" FALSE "America/Glace_Bay" FALSE "America/Godthab" FALSE "America/Goose_Bay" FALSE "America/Grand_Turk" FALSE "America/Grenada" FALSE "America/Guadeloupe" FALSE "America/Guatemala" FALSE "America/Guayaquil" FALSE "America/Guyana" FALSE "America/Halifax" FALSE "America/Havana" FALSE "America/Hermosillo" FALSE "America/Indiana/Indianapolis" FALSE "America/Indiana/Knox" FALSE "America/Indiana/Marengo" FALSE "America/Indiana/Petersburg" FALSE "America/Indiana/Tell_City" FALSE "America/Indiana/Vevay" FALSE "America/Indiana/Vincennes" FALSE "America/Indiana/Winamac" FALSE "America/Indianapolis" FALSE "America/Inuvik" FALSE "America/Iqaluit" FALSE "America/Jamaica" FALSE "America/Jujuy" FALSE "America/Juneau" FALSE "America/Kentucky/Louisville" FALSE "America/Kentucky/Monticello" FALSE "America/Knox_IN" FALSE "America/Kralendijk" FALSE "America/La_Paz" FALSE "America/Lima" FALSE 
"America/Los_Angeles" FALSE "America/Louisville" FALSE "America/Lower_Princes" FALSE "America/Maceio" FALSE "America/Managua" FALSE "America/Manaus" FALSE "America/Marigot" FALSE "America/Martinique" FALSE "America/Matamoros" FALSE "America/Mazatlan" FALSE "America/Mendoza" FALSE "America/Menominee" FALSE "America/Merida" FALSE "America/Metlakatla" FALSE "America/Mexico_City" FALSE "America/Miquelon" FALSE "America/Moncton" FALSE "America/Monterrey" FALSE "America/Montevideo" FALSE "America/Montreal" FALSE "America/Montserrat" FALSE "America/Nassau" FALSE "America/New_York" FALSE "America/Nipigon" FALSE "America/Nome" FALSE "America/Noronha" FALSE "America/North_Dakota/Beulah" FALSE "America/North_Dakota/Center" FALSE "America/North_Dakota/New_Salem" FALSE "America/Ojinaga" FALSE "America/Panama" FALSE "America/Pangnirtung" FALSE "America/Paramaribo" FALSE "America/Phoenix" FALSE "America/Port-au-Prince" FALSE "America/Port_of_Spain" FALSE "America/Porto_Acre" FALSE "America/Porto_Velho" FALSE "America/Puerto_Rico" FALSE "America/Rainy_River" FALSE "America/Rankin_Inlet" FALSE "America/Recife" FALSE "America/Regina" FALSE "America/Resolute" FALSE "America/Rio_Branco" FALSE "America/Rosario" FALSE "America/Santa_Isabel" FALSE "America/Santarem" FALSE "America/Santiago" FALSE "America/Santo_Domingo" FALSE "America/Sao_Paulo" FALSE "America/Scoresbysund" FALSE "America/Shiprock" FALSE "America/Sitka" FALSE "America/St_Barthelemy" FALSE "America/St_Johns" FALSE "America/St_Kitts" FALSE "America/St_Lucia" FALSE "America/St_Thomas" FALSE "America/St_Vincent" FALSE "America/Swift_Current" FALSE "America/Tegucigalpa" FALSE "America/Thule" FALSE "America/Thunder_Bay" FALSE "America/Tijuana" FALSE "America/Toronto" FALSE "America/Tortola" FALSE "America/Vancouver" FALSE "America/Virgin" FALSE "America/Whitehorse" FALSE "America/Winnipeg" FALSE "America/Yakutat" FALSE "America/Yellowknife" FALSE "Antarctica/Casey" FALSE "Antarctica/Davis" FALSE "Antarctica/DumontDUrville" 
FALSE "Antarctica/Macquarie" FALSE "Antarctica/Mawson" FALSE "Antarctica/McMurdo" FALSE "Antarctica/Palmer" FALSE "Antarctica/Rothera" FALSE "Antarctica/South_Pole" FALSE "Antarctica/Syowa" FALSE "Antarctica/Vostok" FALSE "Arctic/Longyearbyen" FALSE "Asia/Aden" FALSE "Asia/Almaty" FALSE "Asia/Amman" FALSE "Asia/Anadyr" FALSE "Asia/Aqtau" FALSE "Asia/Aqtobe" FALSE "Asia/Ashgabat" FALSE "Asia/Ashkhabad" FALSE "Asia/Baghdad" FALSE "Asia/Bahrain" FALSE "Asia/Baku" FALSE "Asia/Bangkok" FALSE "Asia/Beirut" FALSE "Asia/Bishkek" FALSE "Asia/Brunei" FALSE "Asia/Calcutta" FALSE "Asia/Choibalsan" FALSE "Asia/Chongqing" FALSE "Asia/Chungking" FALSE "Asia/Colombo" FALSE "Asia/Dacca" FALSE "Asia/Damascus" FALSE "Asia/Dhaka" FALSE "Asia/Dili" FALSE "Asia/Dubai" FALSE "Asia/Dushanbe" FALSE "Asia/Gaza" FALSE "Asia/Harbin" FALSE "Asia/Hebron" FALSE "Asia/Ho_Chi_Minh" FALSE "Asia/Hong_Kong" FALSE "Asia/Hovd" FALSE "Asia/Irkutsk" FALSE "Asia/Istanbul" FALSE "Asia/Jakarta" FALSE "Asia/Jayapura" FALSE "Asia/Jerusalem" FALSE "Asia/Kabul" FALSE "Asia/Kamchatka" FALSE "Asia/Karachi" FALSE "Asia/Kashgar" FALSE "Asia/Kathmandu" FALSE "Asia/Katmandu" FALSE "Asia/Kolkata" FALSE "Asia/Krasnoyarsk" FALSE "Asia/Kuala_Lumpur" FALSE "Asia/Kuching" FALSE "Asia/Kuwait" FALSE "Asia/Macao" FALSE "Asia/Macau" FALSE "Asia/Magadan" FALSE "Asia/Makassar" FALSE "Asia/Manila" FALSE "Asia/Muscat" FALSE "Asia/Nicosia" FALSE "Asia/Novokuznetsk" FALSE "Asia/Novosibirsk" FALSE "Asia/Omsk" FALSE "Asia/Oral" FALSE "Asia/Phnom_Penh" FALSE "Asia/Pontianak" FALSE "Asia/Pyongyang" FALSE "Asia/Qatar" FALSE "Asia/Qyzylorda" FALSE "Asia/Rangoon" FALSE "Asia/Riyadh" FALSE "Asia/Saigon" FALSE "Asia/Sakhalin" FALSE "Asia/Samarkand" FALSE "Asia/Seoul" FALSE "Asia/Shanghai" FALSE "Asia/Singapore" FALSE "Asia/Taipei" FALSE "Asia/Tashkent" FALSE "Asia/Tbilisi" FALSE "Asia/Tehran" FALSE "Asia/Tel_Aviv" FALSE "Asia/Thimbu" FALSE "Asia/Thimphu" FALSE "Asia/Tokyo" FALSE "Asia/Ujung_Pandang" FALSE "Asia/Ulaanbaatar" FALSE 
"Asia/Ulan_Bator" FALSE "Asia/Urumqi" FALSE "Asia/Vientiane" FALSE "Asia/Vladivostok" FALSE "Asia/Yakutsk" FALSE "Asia/Yekaterinburg" FALSE "Asia/Yerevan" FALSE "Atlantic/Azores" FALSE "Atlantic/Bermuda" FALSE "Atlantic/Canary" FALSE "Atlantic/Cape_Verde" FALSE "Atlantic/Faeroe" FALSE "Atlantic/Faroe" FALSE "Atlantic/Jan_Mayen" FALSE "Atlantic/Madeira" FALSE "Atlantic/Reykjavik" FALSE "Atlantic/South_Georgia" FALSE "Atlantic/St_Helena" FALSE "Atlantic/Stanley" FALSE "Australia/ACT" FALSE "Australia/Adelaide" FALSE "Australia/Brisbane" FALSE "Australia/Broken_Hill" FALSE "Australia/Canberra" FALSE "Australia/Currie" FALSE "Australia/Darwin" FALSE "Australia/Eucla" FALSE "Australia/Hobart" FALSE "Australia/LHI" FALSE "Australia/Lindeman" FALSE "Australia/Lord_Howe" FALSE "Australia/Melbourne" FALSE "Australia/NSW" FALSE "Australia/North" FALSE "Australia/Perth" FALSE "Australia/Queensland" FALSE "Australia/South" FALSE "Australia/Sydney" FALSE "Australia/Tasmania" FALSE "Australia/Victoria" FALSE "Australia/West" FALSE "Australia/Yancowinna" FALSE "Brazil/Acre" FALSE "Brazil/DeNoronha" FALSE "Brazil/East" FALSE "Brazil/West" FALSE "CET" FALSE "CST6CDT" FALSE "Canada/Atlantic" FALSE "Canada/Central" FALSE "Canada/East-Saskatchewan" FALSE "Canada/Eastern" FALSE "Canada/Mountain" FALSE "Canada/Newfoundland" FALSE "Canada/Pacific" FALSE "Canada/Saskatchewan" FALSE "Canada/Yukon" FALSE "Chile/Continental" FALSE "Chile/EasterIsland" FALSE "Cuba" FALSE "EET" FALSE "EST" FALSE "EST5EDT" FALSE "Egypt" FALSE "Eire" FALSE "Etc/GMT" FALSE "Etc/GMT+0" FALSE "Etc/UCT" FALSE "Etc/UTC" FALSE "Etc/Universal" FALSE "Etc/Zulu" FALSE "Europe/Amsterdam" FALSE "Europe/Andorra" FALSE "Europe/Athens" FALSE "Europe/Belfast" FALSE "Europe/Belgrade" FALSE "Europe/Berlin" FALSE "Europe/Bratislava" FALSE "Europe/Brussels" FALSE "Europe/Bucharest" FALSE "Europe/Budapest" FALSE "Europe/Chisinau" FALSE "Europe/Copenhagen" FALSE "Europe/Dublin" FALSE "Europe/Gibraltar" FALSE "Europe/Guernsey" FALSE 
"Europe/Helsinki" FALSE "Europe/Isle_of_Man" FALSE "Europe/Istanbul" FALSE "Europe/Jersey" FALSE "Europe/Kaliningrad" FALSE "Europe/Kiev" FALSE "Europe/Lisbon" FALSE "Europe/Ljubljana" FALSE "Europe/London" FALSE "Europe/Luxembourg" FALSE "Europe/Madrid" FALSE "Europe/Malta" FALSE "Europe/Mariehamn" FALSE "Europe/Minsk" FALSE "Europe/Monaco" FALSE "Europe/Moscow" FALSE "Europe/Nicosia" FALSE "Europe/Oslo" FALSE "Europe/Paris" FALSE "Europe/Podgorica" FALSE "Europe/Prague" FALSE "Europe/Riga" FALSE "Europe/Rome" FALSE "Europe/Samara" FALSE "Europe/San_Marino" FALSE "Europe/Sarajevo" FALSE "Europe/Simferopol" FALSE "Europe/Skopje" FALSE "Europe/Sofia" FALSE "Europe/Stockholm" FALSE "Europe/Tallinn" FALSE "Europe/Tirane" FALSE "Europe/Tiraspol" FALSE "Europe/Uzhgorod" FALSE "Europe/Vaduz" FALSE "Europe/Vatican" FALSE "Europe/Vienna" FALSE "Europe/Vilnius" FALSE "Europe/Volgograd" FALSE "Europe/Warsaw" FALSE "Europe/Zagreb" FALSE "Europe/Zaporozhye" FALSE "Europe/Zurich" FALSE "GB" FALSE "GB-Eire" FALSE "GMT" FALSE "GMT+0" FALSE "GMT-0" FALSE "GMT0" FALSE "Greenwich" FALSE "HST" FALSE "Hongkong" FALSE "Iceland" FALSE "Indian/Antananarivo" FALSE "Indian/Chagos" FALSE "Indian/Christmas" FALSE "Indian/Cocos" FALSE "Indian/Comoro" FALSE "Indian/Kerguelen" FALSE "Indian/Mahe" FALSE "Indian/Maldives" FALSE "Indian/Mauritius" FALSE "Indian/Mayotte" FALSE "Indian/Reunion" FALSE "Iran" FALSE "Israel" FALSE "JST-9" FALSE "Jamaica" FALSE "Japan" FALSE "Kwajalein" FALSE "Libya" FALSE "MET" FALSE "MST" FALSE "MST7MDT" FALSE "Mexico/BajaNorte" FALSE "Mexico/BajaSur" FALSE "Mexico/General" FALSE "NZ" FALSE "NZ-CHAT" FALSE "Navajo" FALSE "PRC" FALSE "PST8PDT" FALSE "Pacific/Apia" FALSE "Pacific/Auckland" FALSE "Pacific/Chatham" FALSE "Pacific/Chuuk" FALSE "Pacific/Easter" FALSE "Pacific/Efate" FALSE "Pacific/Enderbury" FALSE "Pacific/Fakaofo" FALSE "Pacific/Fiji" FALSE "Pacific/Funafuti" FALSE "Pacific/Galapagos" FALSE "Pacific/Gambier" FALSE "Pacific/Guadalcanal" FALSE "Pacific/Guam" 
FALSE "Pacific/Honolulu" FALSE "Pacific/Johnston" FALSE "Pacific/Kiritimati" FALSE "Pacific/Kosrae" FALSE "Pacific/Kwajalein" FALSE "Pacific/Majuro" FALSE "Pacific/Marquesas" FALSE "Pacific/Midway" FALSE "Pacific/Nauru" FALSE "Pacific/Niue" FALSE "Pacific/Norfolk" FALSE "Pacific/Noumea" FALSE "Pacific/Pago_Pago" FALSE "Pacific/Palau" FALSE "Pacific/Pitcairn" FALSE "Pacific/Pohnpei" FALSE "Pacific/Ponape" FALSE "Pacific/Port_Moresby" FALSE "Pacific/Rarotonga" FALSE "Pacific/Saipan" FALSE "Pacific/Samoa" FALSE "Pacific/Tahiti" FALSE "Pacific/Tarawa" FALSE "Pacific/Tongatapu" FALSE "Pacific/Truk" FALSE "Pacific/Wake" FALSE "Pacific/Wallis" FALSE "Pacific/Yap" FALSE "Poland" FALSE "Portugal" FALSE "ROC" FALSE "ROK" FALSE "Singapore" FALSE "Turkey" FALSE "UCT" FALSE "US/Alaska" FALSE "US/Aleutian" FALSE "US/Arizona" FALSE "US/Central" FALSE "US/East-Indiana" FALSE "US/Eastern" FALSE "US/Hawaii" FALSE "US/Indiana-Starke" FALSE "US/Michigan" FALSE "US/Mountain" FALSE "US/Pacific" FALSE "US/Pacific-New" FALSE "US/Samoa" FALSE "Universal" FALSE "W-SU" FALSE "WET" FALSE "Zulu" --text="Timezone Selection" --width 800 --height 800'], shell=True, universal_newlines=True)
user_defined_timezone = user_defined_timezone.strip()
print("Timezone Option: " + user_defined_timezone)
gui_outfile.write("Timezone option: " + "\t" + user_defined_timezone + "\n")
except:
user_defined_timezone = "NONE"
print("Timezone Option: User selected no/cancel rather than set timezone manually")
gui_outfile.write("Timezone Option: User selected no/cancel rather than set timezone manually" + "\n")
elif x == 'Registry Hive Extractor//Regripper':
try:
registry_extractor_options = subprocess.check_output(['zenity --list --checklist --title "MantaRay - ManTech Triage & Analysis System MantaRayForensics.com" --column="Selection" --column="Processing Option" --column="Description" --separator="," FALSE "Overt,Deleted,Restore-Points" "Overt/Deleted/Restore-Points(WinXP) Registry Hives" FALSE "Unallocated" "Unallocated Registry Hives (regf Header - 50MB Length)" FALSE "Shadow Volumes" "Shadow Volume Registry Hives (Windows Vista/7)" --text="Processing Options - Registry Extractor" --width 800 --height 400'], shell=True, universal_newlines=True)
except:
print ("Cancel/Exit chosen")
gui_outfile.write("Registry Extractor Options: Processing Options - Aborted by user - Cancel/Exit chosen")
sys.exit(0)
if registry_extractor_options:
print("Registry Extractor Options: " + registry_extractor_options.strip())
gui_outfile.write("Registry Extractor Options:" + "\t" + registry_extractor_options.strip() + "\n")
else:
print ("Registry Extractor Options: No options were selected.")
gui_outfile.write("Registry Extractor Options: Processing Options - No processing scripts were selected.")
sys.exit(0)
#add code to Master outfile to break section between input and tool success
gui_outfile.write("\n\n*************************** PROCESSING STATUS ***************************\n")

def _run_module(label, fail_print, fail_log, func, *args):
    """Run one processing module and log the outcome to the master outfile.

    With Debug Mode ON the call runs unguarded so any traceback reaches the
    console; otherwise failures are caught and logged so the remaining
    modules still get a chance to run.  Returns the module's return value,
    or None when the call failed in non-debug mode.
    """
    if debug_mode == "ON":
        result = func(*args)
        gui_outfile.write(label.ljust(35) + "completed successfully".ljust(55) + "\n")
        return result
    try:
        result = func(*args)
        gui_outfile.write(label.ljust(35) + "completed successfully".ljust(55) + "\n")
        return result
    except:
        print(fail_print)
        gui_outfile.write(fail_log)
        return None

#loop through processing_scripts and execute each one passing variables to each script
for x in processing_scripts_list:
    print(x)
    if x == 'BulkExtractor':
        # Empty selections are forwarded to bulk_extractor as the literal
        # "NONE".  This also fixes the original bug where the
        # whitelist=="" / keywords!="" branch tested the undefined name
        # `debub_mode` and crashed with a NameError.
        whitelist_arg = whitelist_path.strip() if whitelist_path != "" else "NONE"
        keyword_arg = keyword_list_path.strip() if keyword_list_path != "" else "NONE"
        _run_module("Bulk_Extractor...",
                    "Call to bulk_extractor failed",
                    "Bulk_Extractor failed...Please reprocess with Debug Mode ON - running MantaRay from command line as root\n",
                    be_mr, evidence_type, case_number, folder_path, evidence_path.strip(),
                    whitelist_arg, bulkextractor_processor, keyword_arg)
    elif x == 'Jumplist Parser':
        _run_module("Jumplist Parser...",
                    "Call to Jumplist parser failed",
                    "Jumplist Parser failed...Please reprocess with Debug Mode ON - running MantaRay from command line as root\n",
                    jumplist_mr, evidence_type, case_number, folder_path, evidence_path.strip())
    elif x == 'Calculate Entropy':
        _run_module("Calculate Entropy...",
                    "Call to Entropy calculator failed",
                    "Calculate Entropy failed...Please reprocess with Debug Mode ON - running MantaRay from command line as root\n",
                    entropy_mr, evidence_type, case_number, folder_path, evidence_path.strip())
    elif x == 'Registry Hive Extractor//Regripper':
        folder_to_process = _run_module("Registry Hive Extractor...",
                    "Call to Extract Registry hives failed",
                    "Registry Hive Extractor failed...Please reprocess with Debug Mode ON - running MantaRay from command line as root\n",
                    extract_registry_hives_mr, evidence_type, case_number, folder_path,
                    evidence_path.strip(), registry_extractor_options.strip())
        #process extracted reg hives w/ rr (runs even if extraction failed,
        #matching the original flow; a None folder then fails and is logged)
        _run_module("Regripper...",
                    "Call to RegRipper failed",
                    "RegRipper failed...Please reprocess with Debug Mode ON - running MantaRay from command line as root\n",
                    mr_registry, case_number, folder_to_process, folder_path)
    elif x == 'Super Timeline':
        _run_module("Super Timeline...",
                    "Call to Super Timeline failed",
                    "Super Timeline failed...Please reprocess with Debug Mode ON - running MantaRay from command line as root\n",
                    GUI_Timeline_mr, evidence_type, case_number, folder_path,
                    evidence_path.strip(), user_defined_timezone)
    elif x == 'Delete Duplicate Files':
        _run_module("Delete Duplicate Files...",
                    "Call to Remove_Duplicates failed",
                    "Remove Duplicates failed...Please reprocess with Debug Mode ON - running MantaRay from command line as root\n",
                    remove_duplicates_mr, folder_path, evidence_path.strip())
    elif x == 'Foremost':
        # Either the default signature list built during option collection or
        # the stock configuration file (/etc/foremost.conf).
        signature_arg = cmd_string if re.search('Default', foremost_options) else 'Configuration File'
        _run_module("Foremost...",
                    "Call to Foremost failed",
                    "Foremost failed...Please reprocess with Debug Mode ON - running MantaRay from command line as root\n",
                    carve_unallocated_mr, evidence_type, case_number, folder_path,
                    evidence_path.strip(), signature_arg)
    elif x == 'Volatility':
        _run_module("Volatility...",
                    "Call to Volatility failed",
                    "Volatility failed...Please reprocess with Debug Mode ON - running MantaRay from command line as root\n",
                    volatility_mr, case_number, folder_path, evidence_path.strip())
    elif x == 'EXIF Tool':
        _run_module("EXIF Tool...",
                    "Call to EXIF Tool failed",
                    "EXIF Tool failed...Please reprocess with Debug Mode ON - running MantaRay from command line as root\n",
                    exifdata_mr, evidence_type, case_number, folder_path, evidence_path.strip())
    elif x == 'NTFS Artifact Extractor':
        _run_module("NTFS Artifact Extractor...",
                    "Call to NTFS Artifact Extractor failed",
                    "NTFS Artifact Extractor failed...Please reprocess with Debug Mode ON - running MantaRay from command line as root\n",
                    extract_ntfs_artifacts_mr, evidence_type, case_number, folder_path, evidence_path.strip())
    elif x == 'Create KML from JPG EXIF Data':
        # NOTE: the failure log says "JPEG" while the label says "JPG";
        # both strings are kept byte-for-byte from the original output.
        _run_module("Create KML from JPG EXIF Data...",
                    "Call to Create KML from JPG EXIF Data failed",
                    "Create KML from JPEG EXIF failed...Please reprocess with Debug Mode ON - running MantaRay from command line as root\n",
                    create_kml_from_exif_mr, evidence_type, case_number, folder_path, evidence_path.strip())
    elif x == 'PLIST Processor':
        _run_module("PLIST Processor...",
                    "Call to PLIST Processor failed",
                    "PLIST Processor failed...Please reprocess with Debug Mode ON - running MantaRay from command line as root\n",
                    plist_processor, evidence_type, case_number, folder_path, evidence_path.strip())

gui_outfile.close()
#tell the user the process is done:
done(folder_path)
| Python |
#!/usr/bin/env python
#
# Copyright 2006, 2007 Google Inc. All Rights Reserved.
# Author: danderson@google.com (David Anderson)
#
# Script for uploading files to a Google Code project.
#
# This is intended to be both a useful script for people who want to
# streamline project uploads and a reference implementation for
# uploading files to Google Code projects.
#
# To upload a file to Google Code, you need to provide a path to the
# file on your local machine, a small summary of what the file is, a
# project name, and a valid account that is a member or owner of that
# project. You can optionally provide a list of labels that apply to
# the file. The file will be uploaded under the same name that it has
# in your local filesystem (that is, the "basename" or last path
# component). Run the script with '--help' to get the exact syntax
# and available options.
#
# Note that the upload script requests that you enter your
# googlecode.com password. This is NOT your Gmail account password!
# This is the password you use on googlecode.com for committing to
# Subversion and uploading files. You can find your password by going
# to http://code.google.com/hosting/settings when logged in with your
# Gmail account. If you have already committed to your project's
# Subversion repository, the script will automatically retrieve your
# credentials from there (unless disabled, see the output of '--help'
# for details).
#
# If you are looking at this script as a reference for implementing
# your own Google Code file uploader, then you should take a look at
# the upload() function, which is the meat of the uploader. You
# basically need to build a multipart/form-data POST request with the
# right fields and send it to https://PROJECT.googlecode.com/files .
# Authenticate the request using HTTP Basic authentication, as is
# shown below.
#
# Licensed under the terms of the Apache Software License 2.0:
# http://www.apache.org/licenses/LICENSE-2.0
#
# Questions, comments, feature requests and patches are most welcome.
# Please direct all of these to the Google Code users group:
# http://groups.google.com/group/google-code-hosting
"""Google Code file uploader script.
"""
__author__ = 'danderson@google.com (David Anderson)'
import httplib
import os.path
import optparse
import getpass
import base64
import sys
def upload(file, project_name, user_name, password, summary, labels=None):
    """Upload a single file to a Google Code project's file host.

    Args:
        file: Local path of the file to send.
        project_name: Google Code project that receives the file.
        user_name: Google account name (a trailing '@gmail.com' is stripped).
        password: The googlecode.com password (NOT the Gmail password).
        summary: Short description attached to the upload.
        labels: Optional iterable of label strings to tag the file with.

    Returns:
        A (http_status, http_reason, file_url) tuple; file_url is the
        uploaded file's URL on success (HTTP 201) and None otherwise.
    """
    # Keep only the local part of user@gmail.com addresses; the API wants
    # the bare account name.
    if user_name.endswith('@gmail.com'):
        user_name = user_name.partition('@gmail.com')[0]

    fields = [('summary', summary)]
    if labels is not None:
        for label in labels:
            fields.append(('label', label.strip()))
    content_type, body = encode_upload_request(fields, file)

    host = '%s.googlecode.com' % project_name
    # HTTP Basic authentication over HTTPS.
    credentials = base64.b64encode('%s:%s' % (user_name, password))
    request_headers = {
        'Authorization': 'Basic %s' % credentials,
        'User-Agent': 'Googlecode.com uploader v0.9.4',
        'Content-Type': content_type,
    }

    connection = httplib.HTTPSConnection(host)
    connection.request('POST', '/files', body, request_headers)
    response = connection.getresponse()
    connection.close()

    # Only a 201 Created response carries the new file's location header.
    location = response.getheader('Location', None) if response.status == 201 else None
    return response.status, response.reason, location
def encode_upload_request(fields, file_path):
    """Encode form fields plus one file into a multipart/form-data body.

    Args:
        fields: Sequence of (name, value) string pairs.
        file_path: Path of the file to embed; it is uploaded under its
            basename.

    Returns:
        (content_type, body) ready for an httplib/http.client request.
        The body is assembled as bytes so text fields and the raw file
        content can be mixed safely: the original joined str parts with
        the bytes read from the 'rb' file handle, which raises TypeError
        on Python 3.  On Python 2 (where bytes is str) the output is
        byte-identical to the original for ASCII input.
    """
    BOUNDARY = '----------Googlecode_boundary_reindeer_flotilla'
    CRLF = b'\r\n'

    def _as_bytes(value):
        # Text pieces are UTF-8 encoded; raw bytes pass through untouched.
        return value if isinstance(value, bytes) else value.encode('utf-8')

    body = []
    # Metadata about the upload comes first, one part per (name, value) pair.
    for key, value in fields:
        body.extend([
            _as_bytes('--' + BOUNDARY),
            _as_bytes('Content-Disposition: form-data; name="%s"' % key),
            b'',
            _as_bytes(value),
        ])

    # Now the file itself; the upload server determines the real mime-type.
    file_name = os.path.basename(file_path)
    with open(file_path, 'rb') as f:  # 'with' guarantees the handle closes
        file_content = f.read()
    body.extend([
        _as_bytes('--' + BOUNDARY),
        _as_bytes('Content-Disposition: form-data; name="filename"; filename="%s"'
                  % file_name),
        b'Content-Type: application/octet-stream',
        b'',
        file_content,
    ])

    # Finalize the form body with the closing boundary and a trailing CRLF.
    body.extend([_as_bytes('--' + BOUNDARY + '--'), b''])
    return 'multipart/form-data; boundary=%s' % BOUNDARY, CRLF.join(body)
def upload_find_auth(file_path, project_name, summary, labels=None,
                     user_name=None, password=None, tries=3):
  """Find credentials and upload a file to a Google Code project's file server.

  file_path, project_name, summary, and labels are passed as-is to upload.

  Args:
    file_path: The local path to the file.
    project_name: The name of your project on Google Code.
    summary: A small description for the file.
    labels: an optional list of label strings with which to tag the file.
    config_dir: Path to Subversion configuration directory, 'none', or None.
    user_name: Your Google account name.
    tries: How many attempts to make.
  """
  # Interactive retry loop: prompt for any missing credential, attempt the
  # upload, and on an authentication failure clear both credentials so the
  # user is re-prompted on the next pass.
  # NOTE(review): if tries <= 0 on entry, the loop never runs and the
  # names in the return statement are unbound -- callers pass tries >= 1.
  while tries > 0:
    if user_name is None:
      # Read username if not specified or loaded from svn config, or on
      # subsequent tries.
      sys.stdout.write('Please enter your googlecode.com username: ')
      sys.stdout.flush()
      user_name = sys.stdin.readline().rstrip()
    if password is None:
      # Read password if not loaded from svn config, or on subsequent tries.
      print 'Please enter your googlecode.com password.'
      print '** Note that this is NOT your Gmail account password! **'
      print 'It is the password you use to access Subversion repositories,'
      print 'and can be found here: http://code.google.com/hosting/settings'
      password = getpass.getpass()
    status, reason, url = upload(file_path, project_name, user_name, password,
                                 summary, labels)
    # Returns 403 Forbidden instead of 401 Unauthorized for bad
    # credentials as of 2007-07-17.
    if status in [httplib.FORBIDDEN, httplib.UNAUTHORIZED]:
      # Rest for another try.
      user_name = password = None
      tries = tries - 1
    else:
      # We're done.
      break
  return status, reason, url
def main():
parser = optparse.OptionParser(usage='googlecode-upload.py -s SUMMARY '
'-p PROJECT [options] FILE')
parser.add_option('-s', '--summary', dest='summary',
help='Short description of the file')
parser.add_option('-p', '--project', dest='project',
help='Google Code project name')
parser.add_option('-u', '--user', dest='user',
help='Your Google Code username')
parser.add_option('-w', '--password', dest='password',
help='Your Google Code password')
parser.add_option('-l', '--labels', dest='labels',
help='An optional list of comma-separated labels to attach '
'to the file')
options, args = parser.parse_args()
if not options.summary:
parser.error('File summary is missing.')
elif not options.project:
parser.error('Project name is missing.')
elif len(args) < 1:
parser.error('File to upload not provided.')
elif len(args) > 1:
parser.error('Only one file may be specified.')
file_path = args[0]
if options.labels:
labels = options.labels.split(',')
else:
labels = None
status, reason, url = upload_find_auth(file_path, options.project,
options.summary, labels,
options.user, options.password)
if url:
print 'The file was uploaded successfully.'
print 'URL: %s' % url
return 0
else:
print 'An error occurred. Your file was not uploaded.'
print 'Google Code upload server said: %s (%s)' % (reason, status)
return 1
if __name__ == '__main__':
sys.exit(main())
| Python |
#! /usr/bin/python
import glob
import shutil
import sys
import subprocess
import os
# Crude host-platform detection: anything that is not Linux is assumed to
# be OS X ('darwin').
if 'linux' in sys.platform:
    platform = 'linux'
else:
    platform = 'darwin'
# Location of the standalone Android NDK toolchain plus pinned versions of
# the libraries staged below.
toolchain = "%s/android-toolchain" % os.getenv("HOME")
openssl_version = "1.0.0n"
encfs_version = "1.7.5"
def cpfile(src, target):
    """Copy a single file, logging the operation to stdout first."""
    message = "Copying %s to %s\n" % (src, target)
    sys.stdout.write(message)
    shutil.copy(src, target)
# Stage the static libraries for both ABIs into obj/local/<abi>.
archs = ["armeabi","armeabi-v7a"]
if encfs_version != "svn":
    encfs_dir = "encfs-%s/encfs-%s" % (encfs_version, encfs_version)
else:
    encfs_dir = "encfs-svn"
for arch in archs:
    try:
        os.makedirs("./obj/local/%s" % arch)
    except os.error:
        # Directory already exists from a previous run.
        pass
    target_dir = "./obj/local/%s/" % arch
    # Release builds of encfs link against boost; the svn build uses
    # protobuf instead.
    # NOTE(review): the if/else grouping here was reconstructed from a
    # flattened source -- confirm which cpfile calls sit in the else branch.
    if encfs_version != "svn":
        cpfile("../boost/boost_1_46_1/android/lib/libboost_filesystem.a", target_dir)
        cpfile("../boost/boost_1_46_1/android/lib/libboost_serialization.a", target_dir)
        cpfile("../boost/boost_1_46_1/android/lib/libboost_system.a", target_dir)
    else:
        cpfile("../protobuf/protobuf-2.4.1/%s/lib/libprotobuf.a" % arch, target_dir)
    cpfile("../tinyxml/tinyxml/%s/libtinyxml.a" % arch, target_dir)
    cpfile("../fuse28/obj/local/%s/libfuse.a" % arch, target_dir)
    cpfile("../rlog/rlog-1.4/%s/lib/librlog.a" % arch, target_dir)
    cpfile("../%s/%s/lib/libencfs.a" % (encfs_dir, arch), target_dir)
    cpfile("../openssl/openssl-%s/%s/libssl.a" % (openssl_version, arch), target_dir)
    cpfile("../openssl/openssl-%s/%s/libcrypto.a" % (openssl_version, arch), target_dir)
    # Pick the GCC runtime sub-directory matching the ABI.
    if arch=="armeabi":
        arch_subdir = ""
    elif arch == "armeabi-v7a":
        arch_subdir = "armv7-a/"
    cpfile("%s/arm-linux-androideabi/lib/%slibstdc++.a" % (toolchain, arch_subdir), target_dir)
    cpfile("%s/lib/gcc/arm-linux-androideabi/4.8/%slibgcc.a" % (toolchain, arch_subdir), target_dir)
# Only the armeabi truecrypt binary is shipped inside the app's assets.
arch = "armeabi"
try:
    os.makedirs("./assets/%s" % arch)
except os.error:
    pass
# Split into 1M chunks for Android <= 2.2:
# truecrypt
p = subprocess.Popen("/usr/bin/split -b 1m truecrypt truecrypt.split",
                     cwd="../tc/truecrypt-7.1a-source/Main",
                     shell=True)
p.wait()
splitfiles = glob.glob("../tc/truecrypt-7.1a-source/Main/truecrypt.split*")
print(splitfiles)
for splitfile in splitfiles:
    cpfile(splitfile, "./assets/%s/" % arch)
| Python |
#! /usr/bin/python
import glob
import shutil
import sys
import subprocess
import os
def cpfile(src, target):
    """Log and perform one file copy."""
    pair = (src, target)
    sys.stdout.write("Copying %s to %s\n" % pair)
    shutil.copy(*pair)
# We only copy the armeabi version of the binary
archs = ["armeabi"]
for arch in archs:
    try:
        os.makedirs("../cryptonite/assets/%s" % arch)
    except os.error:
        # Already staged on a previous run.
        pass
    # Split into 1M chunks for Android <= 2.2:
    # encfs
    p = subprocess.Popen("/usr/bin/split -b 1m encfs encfs.split",
                         cwd="./encfs-1.7.5/%s/bin" % arch,
                         shell=True)
    p.wait()
    splitfiles = glob.glob("./encfs-1.7.5/%s/bin/encfs.split*" % arch)
    print splitfiles
    for splitfile in splitfiles:
        cpfile(splitfile, "../cryptonite/assets/%s/" % arch)
    # encfsctl
    # p = subprocess.Popen("/usr/bin/split -b 1m encfsctl encfsctl.split",
    #                      cwd="./encfs-1.7.5/%s/bin" % arch,
    #                      shell=True)
    # p.wait()
    # splitfiles = glob.glob("./encfs-1.7.5/%s/bin/encfsctl.split*" % arch)
    # print splitfiles
    # for splitfile in splitfiles:
    #     cpfile(splitfile, "../cryptonite/assets/%s/" % arch)
| Python |
#! /usr/bin/python
import glob
import shutil
import sys
import subprocess
import os
def cpfile(src, target):
    """Copy *src* to *target*, reporting the operation on stdout."""
    out = sys.stdout
    out.write("Copying %s to %s\n" % (src, target))
    shutil.copy(src, target)
# We only copy the armeabi version of the binary
archs = ["armeabi"]
for arch in archs:
    try:
        os.makedirs("../cryptonite/assets/%s" % arch)
    except os.error:
        # Already staged on a previous run.
        pass
    # Split into 1M chunks for Android <= 2.2:
    # encfs
    p = subprocess.Popen("/usr/bin/split -b 1m encfs encfs.split",
                         cwd="./%s/bin" % arch,
                         shell=True)
    p.wait()
    splitfiles = glob.glob("./%s/bin/encfs.split*" % arch)
    print splitfiles
    for splitfile in splitfiles:
        cpfile(splitfile, "../cryptonite/assets/%s/" % arch)
    # encfsctl
    # p = subprocess.Popen("/usr/bin/split -b 1m encfsctl encfsctl.split",
    #                      cwd="./%s/bin" % arch,
    #                      shell=True)
    # p.wait()
    # splitfiles = glob.glob("./%s/bin/encfsctl.split*" % arch)
    # print splitfiles
    # for splitfile in splitfiles:
    #     cpfile(splitfile, "../cryptonite/assets/%s/" % arch)
| Python |
#! /usr/bin/python
import glob
import shutil
import sys
import subprocess
import os
def cpfile(src, target):
    """Announce and execute a single file copy."""
    notice = "Copying %s to %s\n" % (src, target)
    sys.stdout.write(notice)
    shutil.copy(src, target)
# We only copy the armeabi version of the binary
archs = ["armeabi"]
for arch in archs:
    try:
        os.makedirs("../cryptonite/assets/%s" % arch)
    except os.error:
        # Already staged on a previous run.
        pass
    # Split into 1M chunks for Android <= 2.2:
    # encfs
    p = subprocess.Popen("/usr/bin/split -b 1m encfs encfs.split",
                         cwd="./encfs-1.7.4/%s/bin" % arch,
                         shell=True)
    p.wait()
    splitfiles = glob.glob("./encfs-1.7.4/%s/bin/encfs.split*" % arch)
    print splitfiles
    for splitfile in splitfiles:
        cpfile(splitfile, "../cryptonite/assets/%s/" % arch)
    # encfsctl
    # p = subprocess.Popen("/usr/bin/split -b 1m encfsctl encfsctl.split",
    #                      cwd="./encfs-1.7.4/%s/bin" % arch,
    #                      shell=True)
    # p.wait()
    # splitfiles = glob.glob("./encfs-1.7.4/%s/bin/encfsctl.split*" % arch)
    # print splitfiles
    # for splitfile in splitfiles:
    #     cpfile(splitfile, "../cryptonite/assets/%s/" % arch)
| Python |
# YOU NEED TO INSERT YOUR APP KEY AND SECRET BELOW!
# Go to dropbox.com/developers/apps to create an app.
app_key = ''  # Dropbox application key (intentionally blank in source control)
app_secret = ''  # Dropbox application secret
# access_type can be 'app_folder' or 'dropbox', depending on
# how you registered your app.
access_type = 'app_folder'
from dropbox import client, rest, session
from getpass import getpass
#from mechanize import Browser, HTTPRedirectHandler, ControlNotFoundError
from pickle import dumps, loads
from shlex import split
from subprocess import CalledProcessError, check_output, STDOUT
from sys import argv, stdin
from keyring import delete_password, errors, get_password, set_password
def get_request_token():
    """Obtain an OAuth request token and drive Dropbox's authorization form.

    Submits the login form (form nr=1) with credentials typed by the user,
    then the 'allow_access' confirmation, so no manual browser step is
    needed. Returns the request token (possibly still unauthorized when the
    form could not be parsed).
    """
    # NOTE(review): in this copy of the script the mechanize import at the
    # top of the file is commented out, so Browser/HTTPRedirectHandler/
    # ControlNotFoundError are undefined here and this function raises
    # NameError when called -- restore the mechanize import to use it.
    print 'Getting request token...'
    sess = session.DropboxSession(app_key, app_secret, access_type)
    request_token = sess.obtain_request_token()
    url = sess.build_authorize_url(request_token)
    browser = Browser()
    browser.set_handle_redirect(HTTPRedirectHandler)
    browser.open(url)
    browser.select_form(nr=1)
    try:
        browser["login_email"] = raw_input('Login Email: ')
        browser["login_password"] = getpass()
        browser.submit()
        browser.select_form(nr=1)
        browser.submit(name='allow_access')
    except ControlNotFoundError:
        print 'Dropbox website cannot be parsed correctly...'
        print '...Possibly because of a wrong username or password.'
    return request_token
def get_access_token():
try:
token_str = get_password('dropbox', app_key)
except ValueError:
print 'Password is incorrect'
if token_str:
key, secret = loads(token_str)
return session.OAuthToken(key, secret)
request_token = get_request_token()
sess = session.DropboxSession(app_key, app_secret, access_type)
access_token = sess.obtain_access_token(request_token)
token_str = dumps((access_token.key, access_token.secret))
set_password('dropbox', app_key, token_str)
return access_token
def get_client():
    """Build an authenticated DropboxClient.

    Returns None (implicitly) when the stored token is disabled or invalid.
    """
    try:
        access_token = get_access_token()
        sess = session.DropboxSession(app_key, app_secret, access_type)
        sess.set_token(access_token.key, access_token.secret)
        dropbox_client = client.DropboxClient(sess)
        return dropbox_client
    except rest.ErrorResponse:
        print 'Token is disabled or invalid'
def help():
print ""
print "This script will allow you to upload output directly to"
print "your Dropbox account. First, it will link your account with"
print "this app (without having to go through a web browser)"
print "and then allow you to run system commands. The output is"
print "uploaded to the Apps/Forensic Upload/[given filepath]"
print "directory of your Dropbox account."
print ""
print "Usage:"
print " forensicupload.py start | pipe"
print ""
print "To use the 'pipe' parameter, pipe the output to this"
print "script along with the filepath to save to in your"
print "Dropbox account. e.g."
print "'ipconfig | forensicupload.py pipe /case123/ipconfig.txt'"
print ""
print "After starting the app with the 'start' paramter, you"
print "can 'link' your Dropbox account, 'unlink' a previously"
print "linked account, or 'run' a system command."
print ""
print " - link = Link your Dropbox account to this app"
print " to allow uploading."
print " - unlink = Remove the session information for your"
print " Dropbox account from this app."
print " - run = Run a system command and upload the output"
print " to your Dropbox account. This command can"
print " take optional parameters to direcly execute"
print " a command. e.g. 'run ipconfig all'. Otherwise,"
print " it will ask for the system command to run."
def link():
print 'Getting account info...'
dropbox_client = get_client()
if dropbox_client == None:
print 'Link has failed.'
else:
print 'linked account:', dropbox_client.account_info()['display_name'],'-',dropbox_client.account_info()['email']
def unlink():
    """Remove the cached Dropbox token pair from the system keyring."""
    print 'Unlinking account info...'
    try:
        delete_password('dropbox', app_key)
        print '...Done'
    except errors.PasswordDeleteError:
        # Nothing was stored for this app key.
        print 'There is no account info to unlink'
def upload(filepath, content):
dropbox_client = get_client()
if dropbox_client == None:
print 'Link has failed.'
else:
if filepath:
dropbox_client.put_file(filepath,content)
else:
print "You must specify a filepath and file."
print "e.g. /case1001/ping.txt"
def run(system_command):
try:
filepath = raw_input('Upload to which filepath? ')
upload(filepath, check_output(system_command, stderr=STDOUT, shell=True))
except CalledProcessError:
'Error while calling ',system_command,'.'
def command_loop():
continue_loop = 1
while(continue_loop):
print ""
command_list = split(raw_input('Please select an option: '))
command = command_list[0]
if command == 'link':
link()
elif command == 'unlink':
unlink()
elif command == 'run':
system_command = None
if len(command_list) > 1:
system_command = command_list[1:len(command_list)]
else:
system_command = split(raw_input('Which system command? '))
run(system_command)
elif command == 'help':
help()
elif command == 'quit':
continue_loop = 0
else:
print 'You can run "link", "unlink", "run [command [command args]]", "help", or "quit"'
def main():
    """Entry point: 'pipe' uploads stdin to argv[2]; 'start' runs the REPL."""
    if len(argv) < 2:
        help()
    elif argv[1] == 'pipe':
        pipe_input = stdin.read()
        if pipe_input:
            # NOTE(review): assumes the destination path was given as
            # argv[2]; 'pipe' without it raises IndexError -- confirm.
            upload(argv[2], pipe_input)
        else:
            print 'No input to upload'
    elif argv[1] == 'start':
        command_loop()
    else:
        help()
if __name__ == '__main__':
main() | Python |
# YOU NEED TO INSERT YOUR APP KEY AND SECRET BELOW!
# Go to dropbox.com/developers/apps to create an app.
app_key = ''  # Dropbox application key (intentionally blank in source control)
app_secret = ''  # Dropbox application secret
# access_type can be 'app_folder' or 'dropbox', depending on
# how you registered your app.
access_type = 'app_folder'
from dropbox import client, rest, session
from getpass import getpass
from mechanize import Browser, HTTPRedirectHandler, ControlNotFoundError
from pickle import dumps, loads
from shlex import split
from subprocess import CalledProcessError, check_output, STDOUT
from sys import argv, stdin
from keyring import delete_password, errors, get_password, set_password
def get_request_token():
    """Obtain an OAuth request token and drive Dropbox's authorization form.

    Submits the login form (form nr=1) with credentials typed by the user,
    then the 'allow_access' confirmation, so no manual browser step is
    needed. Returns the request token (possibly still unauthorized when
    the form could not be parsed).
    """
    print 'Getting request token...'
    sess = session.DropboxSession(app_key, app_secret, access_type)
    request_token = sess.obtain_request_token()
    url = sess.build_authorize_url(request_token)
    browser = Browser()
    browser.set_handle_redirect(HTTPRedirectHandler)
    browser.open(url)
    browser.select_form(nr=1)
    try:
        browser["login_email"] = raw_input('Login Email: ')
        browser["login_password"] = getpass()
        browser.submit()
        browser.select_form(nr=1)
        browser.submit(name='allow_access')
    except ControlNotFoundError:
        # Form layout did not match -- most likely bad credentials or a
        # changed Dropbox page.
        print 'Dropbox website cannot be parsed correctly...'
        print '...Possibly because of a wrong username or password.'
    return request_token
def get_access_token():
try:
token_str = get_password('dropbox', app_key)
except ValueError:
print 'Password is incorrect'
if token_str:
key, secret = loads(token_str)
return session.OAuthToken(key, secret)
request_token = get_request_token()
sess = session.DropboxSession(app_key, app_secret, access_type)
access_token = sess.obtain_access_token(request_token)
token_str = dumps((access_token.key, access_token.secret))
set_password('dropbox', app_key, token_str)
return access_token
def get_client():
    """Build an authenticated DropboxClient.

    Returns None (implicitly) when the stored token is disabled or invalid.
    """
    try:
        access_token = get_access_token()
        sess = session.DropboxSession(app_key, app_secret, access_type)
        sess.set_token(access_token.key, access_token.secret)
        dropbox_client = client.DropboxClient(sess)
        return dropbox_client
    except rest.ErrorResponse:
        print 'Token is disabled or invalid'
def help():
print ""
print "This script will allow you to upload output directly to"
print "your Dropbox account. First, it will link your account with"
print "this app (without having to go through a web browser)"
print "and then allow you to run system commands. The output is"
print "uploaded to the Apps/Forensic Upload/[given filepath]"
print "directory of your Dropbox account."
print ""
print "Usage:"
print " forensicupload.py start | pipe"
print ""
print "To use the 'pipe' parameter, pipe the output to this"
print "script along with the filepath to save to in your"
print "Dropbox account. e.g."
print "'ipconfig | forensicupload.py pipe /case123/ipconfig.txt'"
print ""
print "After starting the app with the 'start' paramter, you"
print "can 'link' your Dropbox account, 'unlink' a previously"
print "linked account, or 'run' a system command."
print ""
print " - link = Link your Dropbox account to this app"
print " to allow uploading."
print " - unlink = Remove the session information for your"
print " Dropbox account from this app."
print " - run = Run a system command and upload the output"
print " to your Dropbox account. This command can"
print " take optional parameters to direcly execute"
print " a command. e.g. 'run ipconfig /all'. Otherwise,"
print " it will ask for the system command to run."
def link():
    """Link the Dropbox account and print its display name and email."""
    print 'Getting account info...'
    dropbox_client = get_client()
    # get_client() returns None when the token is disabled or invalid.
    if dropbox_client == None:
        print 'Link has failed.'
    else:
        print 'linked account:', dropbox_client.account_info()['display_name'],'-',dropbox_client.account_info()['email']
def unlink():
    """Remove the cached Dropbox token pair from the system keyring."""
    print 'Unlinking account info...'
    try:
        delete_password('dropbox', app_key)
        print '...Done'
    except errors.PasswordDeleteError:
        # Nothing was stored for this app key.
        print 'There is no account info to unlink'
def upload(filepath, content):
    """Upload *content* to *filepath* inside the app's Dropbox folder.

    Prints an error instead of uploading when the account link fails or
    no destination path was given.
    """
    dropbox_client = get_client()
    if dropbox_client == None:
        print 'Link has failed.'
    else:
        if filepath:
            dropbox_client.put_file(filepath,content)
        else:
            print "You must specify a filepath and file."
            print "e.g. /case1001/ping.txt"
def run(system_command):
try:
filepath = raw_input('Upload to which filepath? ')
upload(filepath, check_output(system_command, stderr=STDOUT, shell=True))
except CalledProcessError:
'Error while calling ',system_command,'.'
def command_loop():
    """Interactive prompt loop: link / unlink / run / help / quit."""
    continue_loop = 1
    while(continue_loop):
        print ""
        # NOTE(review): a blank input line makes command_list empty and
        # command_list[0] below raises IndexError.
        command_list = split(raw_input('Please select an option: '))
        command = command_list[0]
        if command == 'link':
            link()
        elif command == 'unlink':
            unlink()
        elif command == 'run':
            # Use the rest of the line as the command, or prompt for one.
            system_command = None
            if len(command_list) > 1:
                system_command = command_list[1:len(command_list)]
            else:
                system_command = split(raw_input('Which system command? '))
            run(system_command)
        elif command == 'help':
            help()
        elif command == 'quit':
            continue_loop = 0
        else:
            print 'You can run "link", "unlink", "run [command [command args]]", "help", or "quit"'
def main():
    """Entry point: 'pipe' uploads stdin to argv[2]; 'start' runs the REPL."""
    # Demo if started run as a script...
    # Just print the account info to verify that the authentication worked:
    if len(argv) < 2:
        help()
    elif argv[1] == 'pipe':
        pipe_input = stdin.read()
        if pipe_input:
            # NOTE(review): assumes the destination path was given as
            # argv[2]; 'pipe' without it raises IndexError -- confirm.
            upload(argv[2], pipe_input)
        else:
            print 'No input to upload'
    elif argv[1] == 'start':
        command_loop()
    else:
        help()
if __name__ == '__main__':
main() | Python |
from ._Sonars import *
| Python |
#!/usr/bin/python
import sys
import os
import string
import subprocess
import time
"""
Usage: rpcexec -n n_to_start -f [hostsfile] [program] [options]
To start local only: rpcexec [program] [options]
"""
def escape(s):
    """Return *s* with double and single quotes backslash-escaped for
    embedding inside a shell command."""
    # str methods instead of the deprecated (removed in Python 3)
    # string.replace() module functions.
    s = s.replace('"', '\\"')
    s = s.replace("'", "\\'")
    return s
#enddef
# gui: if xterm should run
# machines: a vector of all the machines
# port: a vector of the port number for ssh to connect to. must be same length as machines
# machineid: The machineid to generate
# prog: program to run
# opts: options for the program
def get_ssh_cmd(gui, machines, port, machineid, prog, opts):
    """Build the shell command that launches *prog* on machines[machineid].

    Local targets run directly with SPAWNNODES/SPAWNID set; remote targets
    are wrapped in ssh (with X forwarding and an xterm when gui is set).
    """
    # ','.join replaces the deprecated string.join module function.
    allmachines = '"' + ','.join(machines) + '"'
    # construct the command line
    cwd = os.getcwd()
    if (gui):
        sshcmd = 'ssh -X -Y -n -q '
    else:
        sshcmd = 'ssh -n -q '
    #endif
    guicmd = ''
    if (gui):
        guicmd = 'xterm -geometry 120x60 -e '
    #endif
    # Fixed: the original indexed machines/port with the *global* loop
    # variable ``i`` instead of the ``machineid`` parameter; that only
    # worked while the caller's loop counter happened to equal machineid.
    if (machines[machineid] == "localhost" or machines[machineid].startswith("127.")):
        cmd = 'env SPAWNNODES=%s SPAWNID=%d %s %s' % (allmachines, machineid, prog, opts)
    elif (port[machineid] == 22):
        cmd = sshcmd + '%s "cd %s ; env SPAWNNODES=%s SPAWNID=%d %s %s %s"' % \
            (machines[machineid], escape(cwd), escape(allmachines), machineid, \
             guicmd, escape(prog), escape(opts))
    else:
        cmd = sshcmd + '-oPort=%d %s "cd %s ; env SPAWNNODES=%s SPAWNID=%d %s %s %s"' % \
            (port[machineid], machines[machineid], escape(cwd), escape(allmachines), \
             machineid, guicmd, escape(prog), escape(opts))
    #endif
    return cmd
#enddef
def get_screen_cmd(gui, machines, port, machineid, prog, opts):
    """Build the command list run inside a screen window for machines[machineid].

    Unlike get_ssh_cmd, remote shells are kept alive afterwards with
    ``bash -il`` so the ssh session (and its environment) survives program
    exit for subsequent manual use.
    """
    # ','.join replaces the deprecated string.join module function.
    allmachines = '"' + ','.join(machines) + '"'
    # construct the command line
    cwd = os.getcwd()
    sshcmd = 'ssh -t '
    #endif
    guicmd = ''
    # Fixed: the original indexed machines/port with the *global* loop
    # variable ``i`` instead of the ``machineid`` parameter.
    if (machines[machineid] == "localhost" or machines[machineid].startswith("127.")):
        cmd = ['export SPAWNNODES=%s SPAWNID=%d ; %s %s' % (allmachines, machineid, prog, opts)]
    elif (port[machineid] == 22):
        cmd = [sshcmd + '%s "cd %s ; export SPAWNNODES=%s SPAWNID=%d; %s %s %s ; bash -il"' % \
            (machines[machineid], escape(cwd), escape(allmachines), machineid, \
             guicmd, escape(prog), escape(opts))]
    else:
        cmd = [sshcmd + '-oPort=%d %s "cd %s ; export SPAWNNODES=%s SPAWNID=%d; %s %s %s ; bash -il"' % \
            (port[machineid], machines[machineid], escape(cwd), escape(allmachines), \
             machineid, guicmd, escape(prog), escape(opts))]
    #endif
    return cmd
#enddef
def shell_popen(cmd):
    """Echo *cmd* and start it in a shell without waiting; returns the Popen."""
    print cmd
    return subprocess.Popen(cmd, shell=True)
#endif
def shell_wait_native(cmd):
    """Echo *cmd*, run it in a shell, and block until it exits."""
    print cmd
    pid = subprocess.Popen(cmd, shell=True)
    os.waitpid(pid.pid, 0)
    #time.sleep(0.5)
#endif
# Defaults, overridden by the hand-rolled argv scan below. Flags may appear
# in any order; the first non-flag token is the program, and everything
# after it becomes its options string.
nmachines = 0
hostsfile = ''
prog = ''
opts = ''
gui = 0
inscreen = 0
screenname = ''
printhelp = 0
i = 1
while(i < len(sys.argv)):
    if sys.argv[i] == '-h' or sys.argv[i] == '--help':
        printhelp = 1
        break
    elif sys.argv[i] == '-n':
        nmachines = int(sys.argv[i+1])
        i = i + 2
    elif sys.argv[i] == '-f':
        hostsfile = sys.argv[i+1]
        i = i + 2
    elif sys.argv[i] == '-g':
        gui = 1
        i = i + 1
    elif sys.argv[i] == '-s':
        inscreen = 1
        screenname = sys.argv[i+1]
        i = i + 2
    else:
        prog = sys.argv[i]
        if (len(sys.argv) > i+1):
            opts = string.join(sys.argv[(i+1):])
        #endif
        break
    #endif
#endwhile
# -s (screen) and -g (xterm) cannot be combined.
if inscreen and gui:
    print ("-s and -g are mutually exclusive")
    exit(0)
#endif
if (printhelp):
    print
    print("Usage: rpcexec -n [n_to_start] -f [hostsfile] [program] [options]")
    print("To start local only: rpcexec [program] [options]")
    print("Optional Arguments:")
    print("-g: Launch the command within Xterm on all machines. ")
    print("-s [screenname] : Launch a screen session and launch the")
    print("    commands in each window in each window. Any ssh connections")
    print("    are preserved on termination of the program with environment")
    print("    properly set up for subsequent executions")
    print("")
    print("Note: -s [screenname] and -g are mutually exclusive")
    exit(0)
#endif
# No -n/-f given: run the program locally as spawn id 0 and wait for it.
if (nmachines == 0 and hostsfile == ''):
    cmd = 'env SPAWNNODES=localhost SPAWNID=0 %s %s' % (prog, opts)
    p = shell_popen(cmd)
    os.waitpid(p.pid, 0)
    exit(0)
#endif
print('Starting ' + str(nmachines) + ' machines')
print('Hosts file: ' + hostsfile)
print('Command Line to run: ' + prog + ' ' + opts)
# open the hosts file and read the machines
try:
    f = open(hostsfile, 'r')
except:
    print
    print("Unable to open hosts file")
    print
    exit(0)
#endtry
machines = [''] * nmachines
port = [22] * nmachines
# Each hosts-file line is "host" or "host:port".
for i in range(nmachines):
    try:
        machines[i] = string.strip(f.readline())
        colonsplit = string.split(machines[i], ':')
        if (len(colonsplit) == 2):
            machines[i] = string.strip(colonsplit[0])
            port[i] = int(colonsplit[1])
        #endif
    except:
        print
        print("Unable to read line " + str(i+1) + " of hosts file")
        print
        exit(0)
#endfor
f.close()
# the commands to run to start for each node
cmd = [None] * nmachines
for i in range(nmachines):
    if (inscreen == 0):
        cmd[i] = get_ssh_cmd(gui, machines, port, i, prog, opts)
    else:
        cmd[i] = get_screen_cmd(gui, machines, port, i, prog, opts)
        print cmd[i]
    #endif
#endfor
if (inscreen == 0):
    # now issue the ssh commands
    procs = [None] * nmachines
    for i in range(nmachines):
        procs[i] = shell_popen(cmd[i])
    #endfor
    # Wait for every remote process to finish before exiting.
    for i in range(nmachines):
        os.waitpid(procs[i].pid, 0)
    #endfor
else:
    # create a new empty screen with the screen name
    shell_wait_native("screen -h 10000 -d -m -S " + screenname)
    shell_wait_native("screen -h 10000 -x %s -p 0 -X title %s" % (screenname, machines[0][0:8]))
    # start a bunch of empty screens
    for i in range(nmachines - 1):
        shell_wait_native("screen -x %s -X screen -t %s" % (screenname, machines[i+1][0:8]))
    #endfor
    # set the titles in each one and run the program
    # we stripe it across windows so if there are ssh commands they will
    # have time to finish running first
    for j in range(2):
        for i in range(nmachines):
            if (len(cmd[i]) > j and cmd[i][j] != None):
                shell_wait_native("screen -x %s -p %d -X stuff %s" % (screenname, i, "'"+cmd[i][j]+"\n'"))
            #endif
        #endfor
    #endfor
#endif
#!/usr/bin/python
import os
import sys
def update_source(filename, oldcopyright, copyright):
    """Ensure *filename* starts with *copyright*, replacing *oldcopyright*.

    oldcopyright may be None when there is no previous header to strip.
    The file is rewritten only when its header actually changes.
    """
    # open() inside a context manager replaces the Python-2-only file()
    # builtin and guarantees the handle is closed.
    with open(filename, "r") as fh:
        fdata = fh.read()
    # If there was a previous copyright remove it
    if (oldcopyright != None):
        if (fdata.startswith(oldcopyright)):
            fdata = fdata[len(oldcopyright):]
    # If the file does not start with the new copyright
    if not (fdata.startswith(copyright)):
        print(" updating: " + filename)
        fdata = copyright + fdata
        with open(filename, "w") as fh:
            fh.write(fdata)
def recursive_traversal(dir, oldcopyright, copyright):
    """Walk *dir* recursively, updating the copyright header of every C++
    source/header file (.cpp/.hpp/.cxx) via update_source()."""
    fns = os.listdir(dir)
    print("Processing directory: "+dir)
    for fn in fns:
        fullfn = os.path.join(dir,fn)
        if (os.path.isdir(fullfn)):
            recursive_traversal(fullfn, oldcopyright, copyright)
        else:
            # endswith() accepts a tuple -- one call instead of an or-chain.
            if fullfn.endswith((".cpp", ".hpp", ".cxx")):
                update_source(fullfn, oldcopyright, copyright)
# Script entry: argv[1] = old header file, argv[2] = new header file,
# argv[3] = root directory to process.
# NOTE(review): file() is the Python-2 builtin; these handles are never
# closed explicitly (process exit cleans them up).
oldcright = file(sys.argv[1],"r+").read()
cright = file(sys.argv[2],"r+").read()
recursive_traversal(sys.argv[3], oldcright, cright)
exit()
| Python |
#!/usr/bin/python
import sys
import string
import subprocess
def left_child(index):
    """Index of the left child in an implicit binary-heap layout."""
    return index * 2 + 1
def right_child(index):
    """Index of the right child (left child plus one)."""
    return left_child(index) + 1
def escape_str(text):
    """Backslash-escape backslashes and double quotes for nesting the text
    inside a double-quoted shell command."""
    escaped = text.replace('\\', '\\\\')
    return escaped.replace('"', '\\"')
def do_send(src, dest, path):
    """Build the parenthesized rsync command pushing path/src to dest:path."""
    source_arg = path + '/' + src
    dest_arg = dest + ':' + path
    return ' ( rsync -avz ' + source_arg + ' ' + dest_arg + ' ) '
def make_str(names, a, b, index) :
    """Recursively build a nested ssh command that fans file *a* out along
    a binary tree over hosts *names*, rooted at *index* (*b* is the remote
    path).

    Each recursion level runs inside the parent's double-quoted ssh
    command, hence escape_str() on the nested fragments; siblings run in
    parallel ('&' plus 'wait').
    """
    assert(left_child(index) < len(names));
    text = '( ssh ' + names[index]
    text = text + ' " hostname; ';
    text = text + \
        do_send(a, names[left_child(index)], b)
    if right_child(index) < len(names) :
        text = text + ' & ' + \
            do_send(a, names[right_child(index)], b) + \
            ' ; wait ; '
    if left_child(left_child(index)) < len(names) :
        text = text + \
            escape_str(make_str(names, a, b, left_child(index)))
    if left_child(right_child(index)) < len(names) :
        text = text + ' & ' + \
            escape_str(make_str(names, a, b, right_child(index))) + \
            ' ; wait ; '
    text = text + ' " ) '
    return text;
# Script entry: argv[1] = hosts file, argv[2] = source path, argv[3] =
# destination path. First push to the tree root, then fan out recursively.
host_filename = sys.argv[1]
source_path = sys.argv[2]
dest_path = sys.argv[3]
textfile = open(host_filename, "r")
lines = textfile.readlines()
machines = [ x.replace('\n', '') for x in lines]
print machines
# Seed the root of the distribution tree directly from this machine.
command = 'rsync -avz ' + source_path + ' ' + \
    machines[0] + ':' + dest_path;
print command;
subprocess.call(command, shell=True)
# Then let the root fan the file out along the binary tree.
command = make_str(machines, source_path, dest_path, 0);
print command
subprocess.call(command, shell=True)
| Python |
# GraphLab post-processing hook: ``graph`` is injected by the host runtime.
# Print the final pagerank of every vertex.
for v in graph.getVertices():
    print(v.value.rank)
import math
# Python implementation of pagerank
# Probability of following a link (standard pagerank damping factor).
damping = 0.85
def update(scope, scheduler):
    """GraphLab update function: recompute this vertex's pagerank from its
    in-neighbors and reschedule the neighborhood until converged."""
    pvertex = scope.getVertex().value
    # NOTE(review): ``e.from`` is not valid Python syntax (``from`` is a
    # keyword); under a stock interpreter this needs getattr(e, 'from') --
    # confirm how the embedding engine parses this snippet.
    sumval = pvertex.rank * pvertex.selfedge + sum([e.value * scope.getNeighbor(e.from).value.rank for e in scope.getInboundEdges()])
    newval = (1-damping)/scope.getNumOfVertices() + damping*sumval
    # Keep iterating while the rank is still moving noticeably.
    if (abs(newval-pvertex.rank)>0.00001):
        scheduler.addTaskToOutbound(scope)
    pvertex.rank = newval
# Entry point: ``scope`` and ``scheduler`` are injected by the host runtime.
update(scope, scheduler)
class pagerank_vertex:
    """Vertex payload for pagerank: current rank plus self-loop weight."""
    def __init__(self, value, selfedge):
        self.rank, self.selfedge = value, selfedge
# Graph loader: ``filename`` and ``graph`` are injected by the host runtime.
f = open(filename, "r")
lines = f.readlines()
# First line is header
header = lines[0]
lines = lines[1:]
nvertices = int(header.split(",")[1])
# Start every vertex at the uniform distribution 1/N with no self loop.
for i in range(0,nvertices):
    # format: first value is value, second is self edge weight
    graph.addVertex(pagerank_vertex(1.0/nvertices, 0.0))
for l in lines:
    t = l.split(",")
    i = int(t[0])-1
    j = int(t[1])-1
    # Each line ends (annoyingly) to \n
    w = float(t[2][:-1])
    if (i != j):
        graph.addEdge(j, i, w)
    else:
        # Self edges are stored on the vertex instead of as graph edges.
        graph.getVertex(i).value.selfedge = w
import math
# L1 regularization weight (lambda in the objective below).
lamb = 0.5
# Python implementation of Lasso Shooting algorithm.
# min ||Ax-y||_2^2 + lambda ||x||_1
def update(scope, scheduler):
    """Shooting update for one variable vertex (vtype == 0).

    Lazily initializes the per-variable covariance 2*||A_j||^2 and the
    correlation term 2*(A'y)_j on first touch, then performs one
    coordinate-descent step and pushes the delta into the connected
    estimate vertices so their (Ax)_i stays consistent.
    """
    # Of class lasso_variable_vertex or lasso_estimate_vertex
    lassov = scope.getVertex().value
    if (lassov.vtype == 0):
        if lassov.initialized == False:
            # Initialize covariance
            lassov.covar = 2.0*sum([e.value*e.value for e in scope.getOutboundEdges()])
            # Initialize (Ay)_i
            lassov.Ay = 2.0*sum([e.value * scope.getNeighbor(e.to).value.observed for e in scope.getOutboundEdges()])
            lassov.initialized = True
        # Compute (Ax)_i
        curest = sum([e.value * scope.getNeighbor(e.to).value.curval for e in scope.getOutboundEdges()])
        newval = soft_threshold(lamb, curest*2 - lassov.covar*lassov.value - lassov.Ay)/lassov.covar
        # if (newval == 0.0):
        #     print("zero!")
        #if (scope.getVertex().getId() % 100 == 0):
        #    print(scope.getVertex().getId(), lassov.value, newval, curest*2 - lassov.covar*lassov.value - lassov.Ay, lassov.covar, lassov.Ay)
        if newval != lassov.value:
            delta = newval-lassov.value
            lassov.value = newval
            # Propagate the coefficient change into every estimate vertex.
            for e in scope.getOutboundEdges():
                scope.getNeighbor(e.to).value.curval += delta * e.value
def soft_threshold(lamb, x):
    """Shooting-algorithm thresholding: map the gradient term x to the new
    coordinate numerator.

    Returns lamb - x for x > lamb, -lamb - x for x < -lamb, and 0.0 inside
    the dead zone |x| <= lamb (this zeroing is what makes lasso produce
    sparse solutions).
    """
    if (x > lamb):
        return (lamb-x)
    elif (x < -lamb):
        # Fixed: was ``x < lamb``, which made the zero branch unreachable
        # (except exactly x == lamb) and returned a non-zero value inside
        # the dead zone, so coefficients could never shrink to zero.
        return (-lamb-x)
    else:
        return 0.0
# Entry point: ``scope`` and ``scheduler`` are injected by the host runtime.
update(scope, scheduler)
# GraphLab engine configuration (``graphlab`` is injected by the host
# runtime): sweep vertices in round-robin order for up to 100 iterations,
# with vertex-level scope locking during each update.
graphlab.setScheduler("round_robin")
graphlab.setIterations(100)
graphlab.setScopeType("vertex")
import math
# Python implementation of pagerank
def update(scope, scheduler):
    """GraphLab pagerank update (un-damped variant):
    new rank = selfEdgeWeight*old + sum over in-edges of weight*value."""
    vertex = scope.getVertex()
    oldval = vertex.value
    newval = vertex.value * vertex.selfEdgeWeight
    newval = newval + sum([e.weight*e.value for e in scope.getInboundEdges()])
    vertex.setValue(newval)
    # Reschedule the neighborhood until the rank stops moving noticeably.
    if (abs(newval-oldval)>0.00001):
        scheduler.addTaskToOutbound(scope)
# Entry point: ``scope`` and ``scheduler`` are injected by the host runtime.
update(scope, scheduler)
# Post-processing: evaluate the lasso objective ||Ax-y||^2 + lambda*||x||_1
# over the bipartite graph (``graph`` is injected by the host runtime).
leastsqr_err = 0.0
penalty = 0.0
lamb = 0.5
for v in graph.getVertices():
    lassov = v.value
    if lassov.vtype == 0:
        # Variable vertex: contributes to the L1 penalty term.
        penalty += lamb * abs(lassov.value)
    else:
        # Estimate vertex: contributes squared prediction error.
        leastsqr_err += pow(lassov.observed - lassov.curval,2)
print("Objective:", penalty + leastsqr_err)
#
# Solve Lasso: min ||Ax-y||_2^2 + \lambda ||x||_1
#
# We present Lasso as a bipartite graph. On the left side, we have
# variables x_i (predictors) and on the right side the current estimates
# for y_i = (Ax)_i. Sides are connected by edges weighted by A_ij
#
#
# Rights side of the graph. Estimate for y_i. We store
# the actual y_i as well (observed), in order to quickly
# compute prediction error.
#
class lasso_estimate_vertex:
    """Right-hand-side vertex: running estimate of (Ax)_i plus the actual
    observation y_i, kept for quick prediction-error computation."""
    def __init__(self, value, observed):
        self.curval = self.lastval = value
        self.observed = observed
        self.vtype = 1
class lasso_variable_vertex:
    """Left-hand-side vertex: one predictor x_j with its lazily-computed
    covariance and correlation statistics."""
    def __init__(self, value):
        self.vtype = 0
        self.value = value
        self.initialized = False
        self.covar = 0
        self.Ay = 0
# Loader: ``filename`` and ``graph`` are injected by the host runtime.
# File layout: "y,<ny>" header, ny observation lines, then "A,<nnz>,<nx>"
# followed by nnz "index,value" lines of column-major sparse entries.
f = open(filename, "r")
lines = f.readlines()
header = lines[0].split(",")
assert(header[0] == "y")
# First read y-values
ny = int(header[1])
for i in range(1,ny+1):
    val = float(lines[i])
    graph.addVertex(lasso_estimate_vertex(0.0, val))
# Remove the first part
lines = lines[ny+1:]
# Read edges
header = lines[0].split(",")
print(header)
assert(header[0] == "A")
n = int(header[1])
nx = int(header[2])
# Create variables
for i in range(0,nx):
    graph.addVertex(lasso_variable_vertex(0.0))
for i in range(1,n+1):
    ln = lines[i].split(",")
    idx = int(ln[0])-1
    val = float(ln[1])
    # Column-major linear index -> (row, col).
    # NOTE(review): ``idx/ny`` relies on Python-2 integer division; under
    # Python 3 this must be ``idx//ny``.
    row = idx%ny
    col = idx/ny
    # Create edge between variable x_col and y_row
    graph.addEdge(col+ny, row, val)
    assert(col+ny>row)
    #print(idx, col, row)
print("Data loaded")
# module pyparsing.py
#
# Copyright (c) 2003-2010 Paul T. McGuire
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
#from __future__ import generators
__doc__ = \
"""
pyparsing module - Classes and methods to define and execute parsing grammars
The pyparsing module is an alternative approach to creating and executing simple grammars,
vs. the traditional lex/yacc approach, or the use of regular expressions. With pyparsing, you
don't need to learn a new syntax for defining grammars or matching expressions - the parsing module
provides a library of classes that you use to construct the grammar directly in Python.
Here is a program to parse "Hello, World!" (or any greeting of the form C{"<salutation>, <addressee>!"})::
from pyparsing import Word, alphas
# define grammar of a greeting
greet = Word( alphas ) + "," + Word( alphas ) + "!"
hello = "Hello, World!"
print hello, "->", greet.parseString( hello )
The program outputs the following::
Hello, World! -> ['Hello', ',', 'World', '!']
The Python representation of the grammar is quite readable, owing to the self-explanatory
class names, and the use of '+', '|' and '^' operators.
The parsed results returned from C{parseString()} can be accessed as a nested list, a dictionary, or an
object with named attributes.
The pyparsing module handles some of the problems that are typically vexing when writing text parsers:
- extra or missing whitespace (the above program will also handle "Hello,World!", "Hello , World !", etc.)
- quoted strings
- embedded comments
"""
__version__ = "1.5.5"
__versionTime__ = "12 Aug 2010 03:56"
__author__ = "Paul McGuire <ptmcg@users.sourceforge.net>"
import string
from weakref import ref as wkref
import copy
import sys
import warnings
import re
import sre_constants
import collections
#~ sys.stderr.write( "testing pyparsing module, version %s, %s\n" % (__version__,__versionTime__ ) )
# Public API of the module; names not listed here are internal helpers.
__all__ = [
'And', 'CaselessKeyword', 'CaselessLiteral', 'CharsNotIn', 'Combine', 'Dict', 'Each', 'Empty',
'FollowedBy', 'Forward', 'GoToColumn', 'Group', 'Keyword', 'LineEnd', 'LineStart', 'Literal',
'MatchFirst', 'NoMatch', 'NotAny', 'OneOrMore', 'OnlyOnce', 'Optional', 'Or',
'ParseBaseException', 'ParseElementEnhance', 'ParseException', 'ParseExpression', 'ParseFatalException',
'ParseResults', 'ParseSyntaxException', 'ParserElement', 'QuotedString', 'RecursiveGrammarException',
'Regex', 'SkipTo', 'StringEnd', 'StringStart', 'Suppress', 'Token', 'TokenConverter', 'Upcase',
'White', 'Word', 'WordEnd', 'WordStart', 'ZeroOrMore',
'alphanums', 'alphas', 'alphas8bit', 'anyCloseTag', 'anyOpenTag', 'cStyleComment', 'col',
'commaSeparatedList', 'commonHTMLEntity', 'countedArray', 'cppStyleComment', 'dblQuotedString',
'dblSlashComment', 'delimitedList', 'dictOf', 'downcaseTokens', 'empty', 'getTokensEndLoc', 'hexnums',
'htmlComment', 'javaStyleComment', 'keepOriginalText', 'line', 'lineEnd', 'lineStart', 'lineno',
'makeHTMLTags', 'makeXMLTags', 'matchOnlyAtCol', 'matchPreviousExpr', 'matchPreviousLiteral',
'nestedExpr', 'nullDebugAction', 'nums', 'oneOf', 'opAssoc', 'operatorPrecedence', 'printables',
'punc8bit', 'pythonStyleComment', 'quotedString', 'removeQuotes', 'replaceHTMLEntity',
'replaceWith', 'restOfLine', 'sglQuotedString', 'srange', 'stringEnd',
'stringStart', 'traceParseAction', 'unicodeString', 'upcaseTokens', 'withAttribute',
'indentedBlock', 'originalTextFor',
]
"""
Detect if we are running version 3.X and make appropriate changes
Robert A. Clark
"""
_PY3K = sys.version_info[0] > 2
if _PY3K:
_MAX_INT = sys.maxsize
basestring = str
unichr = chr
_ustr = str
alphas = string.ascii_lowercase + string.ascii_uppercase
else:
_MAX_INT = sys.maxint
range = xrange
set = lambda s : dict( [(c,0) for c in s] )
alphas = string.lowercase + string.uppercase
def _ustr(obj):
"""Drop-in replacement for str(obj) that tries to be Unicode friendly. It first tries
str(obj). If that fails with a UnicodeEncodeError, then it tries unicode(obj). It
then < returns the unicode object | encodes it with the default encoding | ... >.
"""
if isinstance(obj,unicode):
return obj
try:
# If this works, then _ustr(obj) has the same behaviour as str(obj), so
# it won't break any existing code.
return str(obj)
except UnicodeEncodeError:
# The Python docs (http://docs.python.org/ref/customization.html#l2h-182)
# state that "The return value must be a string object". However, does a
# unicode object (being a subclass of basestring) count as a "string
# object"?
# If so, then return a unicode object:
return unicode(obj)
# Else encode it... but how? There are many choices... :)
# Replace unprintables with escape codes?
#return unicode(obj).encode(sys.getdefaultencoding(), 'backslashreplace_errors')
# Replace unprintables with question marks?
#return unicode(obj).encode(sys.getdefaultencoding(), 'replace')
# ...
# build list of single arg builtins, tolerant of Python version, that can be used as parse actions
singleArgBuiltins = []
# BUGFIX: a bare `import builtins` raises ImportError on Python 2, which this
# module otherwise supports (see the _PY3K compatibility section above); fall
# back to the Python 2 name of the module.
try:
    import builtins                      # Python 3
except ImportError:
    import __builtin__ as builtins       # Python 2
for fname in "sum len enumerate sorted reversed list tuple set any all".split():
    try:
        # not every builtin exists in every supported Python version
        singleArgBuiltins.append(getattr(builtins, fname))
    except AttributeError:
        continue
def _xml_escape(data):
"""Escape &, <, >, ", ', etc. in a string of data."""
# ampersand must be replaced first
for from_,to_ in zip('&><"\'', "amp gt lt quot apos".split()):
data = data.replace(from_, '&'+to_+';')
return data
class _Constants(object):
    """Empty class used elsewhere in the module as a bare attribute namespace
    (e.g. for grouping related constant values)."""
    pass
# Commonly used character classes, exposed as part of the public API.
nums = string.digits
hexnums = nums + "ABCDEFabcdef"
alphanums = alphas + nums
_bslash = chr(92)  # a single backslash, spelled via chr() to sidestep escaping
# every printable, non-whitespace ASCII character
printables = "".join( [ c for c in string.printable if c not in string.whitespace ] )
class ParseBaseException(Exception):
    """base exception class for all parsing runtime exceptions"""

    # Performance tuning: we construct a *lot* of these, so keep this
    # constructor as small and fast as possible
    def __init__( self, pstr, loc=0, msg=None, elem=None ):
        self.loc = loc
        if msg is None:
            # single-argument form: first positional argument is the message
            self.msg = pstr
            self.pstr = ""
        else:
            self.msg = msg
            self.pstr = pstr
        self.parserElement = elem

    def __getattr__( self, aname ):
        """supported attributes by name are:
            - lineno - returns the line number of the exception text
            - col - returns the column number of the exception text
            - line - returns the line containing the exception text
        """
        # Location attributes are computed lazily from (loc, pstr) on demand.
        if aname == "lineno":
            return lineno( self.loc, self.pstr )
        if aname in ("col", "column"):
            return col( self.loc, self.pstr )
        if aname == "line":
            return line( self.loc, self.pstr )
        raise AttributeError(aname)

    def __str__( self ):
        return "%s (at char %d), (line:%d, col:%d)" % \
               ( self.msg, self.loc, self.lineno, self.column )

    def __repr__( self ):
        return _ustr(self)

    def markInputline( self, markerString = ">!<" ):
        """Extracts the exception line from the input string, and marks
           the location of the exception with a special symbol.
        """
        source_line = self.line
        marker_col = self.column - 1
        if markerString:
            source_line = source_line[:marker_col] + markerString + source_line[marker_col:]
        return source_line.strip()

    def __dir__(self):
        return "loc msg pstr parserElement lineno col line " \
               "markInputLine __str__ __repr__".split()
class ParseException(ParseBaseException):
    """Exception thrown when a parse expression fails to match;
       supported attributes by name are:
        - lineno - returns the line number of the exception text
        - col - returns the column number of the exception text
        - line - returns the line containing the exception text
       """
    # All behavior is inherited from ParseBaseException.
    pass
class ParseFatalException(ParseBaseException):
    """user-throwable exception thrown when inconsistent parse content
       is found; stops all parsing immediately"""
    # All behavior is inherited from ParseBaseException.
    pass
class ParseSyntaxException(ParseFatalException):
    """just like C{ParseFatalException}, but thrown internally when an
       C{ErrorStop} ('-' operator) indicates that parsing is to stop immediately because
       an unbacktrackable syntax error has been found"""

    def __init__(self, pe):
        # Re-raise with exactly the location details of the wrapped exception.
        details = (pe.pstr, pe.loc, pe.msg, pe.parserElement)
        super(ParseSyntaxException, self).__init__(*details)
#~ class ReparseException(ParseBaseException):
#~ """Experimental class - parse actions can raise this exception to cause
#~ pyparsing to reparse the input string:
#~ - with a modified input string, and/or
#~ - with a modified start location
#~ Set the values of the ReparseException in the constructor, and raise the
#~ exception in a parse action to cause pyparsing to use the new string/location.
#~ Setting the values as None causes no change to be made.
#~ """
#~ def __init_( self, newstring, restartLoc ):
#~ self.newParseText = newstring
#~ self.reparseLoc = restartLoc
class RecursiveGrammarException(Exception):
    """exception thrown by C{validate()} if the grammar could be improperly recursive"""

    def __init__( self, parseElementList ):
        # The chain of parse elements that forms the recursion cycle.
        self.parseElementTrace = parseElementList

    def __str__( self ):
        return "RecursiveGrammarException: {0}".format(self.parseElementTrace)
class _ParseResultsWithOffset(object):
def __init__(self,p1,p2):
self.tup = (p1,p2)
def __getitem__(self,i):
return self.tup[i]
def __repr__(self):
return repr(self.tup)
def setOffset(self,i):
self.tup = (self.tup[0],i)
class ParseResults(object):
    """Structured parse results, to provide multiple means of access to the parsed data:
       - as a list (C{len(results)})
       - by list index (C{results[0], results[1]}, etc.)
       - by attribute (C{results.<resultsName>})

       Internal representation: C{__toklist} holds the flat list of matched
       tokens; C{__tokdict} maps each results name to a list of
       C{_ParseResultsWithOffset} (value, offset) pairs; C{__accumNames}
       marks names that accumulate every match rather than keeping only the
       most recent one.
       """
    #~ __slots__ = ( "__toklist", "__tokdict", "__doinit", "__name", "__parent", "__accumNames", "__weakref__" )
    def __new__(cls, toklist, name=None, asList=True, modal=True ):
        # An existing ParseResults passes through unchanged; otherwise flag
        # the fresh instance so __init__ knows to build internal containers.
        if isinstance(toklist, cls):
            return toklist
        retobj = object.__new__(cls)
        retobj.__doinit = True
        return retobj

    # Performance tuning: we construct a *lot* of these, so keep this
    # constructor as small and fast as possible
    def __init__( self, toklist, name=None, asList=True, modal=True ):
        if self.__doinit:
            self.__doinit = False
            self.__name = None
            self.__parent = None
            self.__accumNames = {}
            if isinstance(toklist, list):
                self.__toklist = toklist[:]
            else:
                self.__toklist = [toklist]
            self.__tokdict = dict()
        # If a results name was supplied, also file the tokens under that name.
        if name is not None and name:
            if not modal:
                # non-modal name: accumulate every match under this name
                self.__accumNames[name] = 0
            if isinstance(name,int):
                name = _ustr(name) # will always return a str, but use _ustr for consistency
            self.__name = name
            if not toklist in (None,'',[]):
                if isinstance(toklist,basestring):
                    toklist = [ toklist ]
                if asList:
                    if isinstance(toklist,ParseResults):
                        self[name] = _ParseResultsWithOffset(toklist.copy(),0)
                    else:
                        self[name] = _ParseResultsWithOffset(ParseResults(toklist[0]),0)
                    self[name].__name = name
                else:
                    try:
                        self[name] = toklist[0]
                    except (KeyError,TypeError,IndexError):
                        self[name] = toklist

    def __getitem__( self, i ):
        # int/slice -> positional token access; anything else -> named result.
        if isinstance( i, (int,slice) ):
            return self.__toklist[i]
        else:
            if i not in self.__accumNames:
                # modal name: only the most recent match is visible
                return self.__tokdict[i][-1][0]
            else:
                # accumulating name: expose every match
                return ParseResults([ v[0] for v in self.__tokdict[i] ])

    def __setitem__( self, k, v ):
        if isinstance(v,_ParseResultsWithOffset):
            # pre-packaged (value, offset) pair: append to the name's history
            self.__tokdict[k] = self.__tokdict.get(k,list()) + [v]
            sub = v[0]
        elif isinstance(k,int):
            self.__toklist[k] = v
            sub = v
        else:
            self.__tokdict[k] = self.__tokdict.get(k,list()) + [_ParseResultsWithOffset(v,0)]
            sub = v
        if isinstance(sub,ParseResults):
            # only a weak reference, to avoid parent<->child reference cycles
            sub.__parent = wkref(self)

    def __delitem__( self, i ):
        if isinstance(i,(int,slice)):
            mylen = len( self.__toklist )
            del self.__toklist[i]
            # convert int to slice
            if isinstance(i, int):
                if i < 0:
                    i += mylen
                i = slice(i, i+1)
            # get removed indices
            removed = list(range(*i.indices(mylen)))
            removed.reverse()
            # fixup indices in token dictionary
            for name in self.__tokdict:
                occurrences = self.__tokdict[name]
                for j in removed:
                    for k, (value, position) in enumerate(occurrences):
                        occurrences[k] = _ParseResultsWithOffset(value, position - (position > j))
        else:
            del self.__tokdict[i]

    def __contains__( self, k ):
        # membership tests results names, not token values
        return k in self.__tokdict

    def __len__( self ): return len( self.__toklist )
    def __bool__(self): return len( self.__toklist ) > 0
    __nonzero__ = __bool__   # Python 2 truth-testing protocol
    def __iter__( self ): return iter( self.__toklist )
    def __reversed__( self ): return iter( reversed(self.__toklist) )

    def keys( self ):
        """Returns all named result keys."""
        # NOTE(review): a dict_keys view on Python 3, a list on Python 2.
        return self.__tokdict.keys()

    def pop( self, index=-1 ):
        """Removes and returns item at specified index (default=last).
           Will work with either numeric indices or dict-key indicies."""
        ret = self[index]
        del self[index]
        return ret

    def get(self, key, defaultValue=None):
        """Returns named result matching the given key, or if there is no
           such name, then returns the given C{defaultValue} or C{None} if no
           C{defaultValue} is specified."""
        if key in self:
            return self[key]
        else:
            return defaultValue

    def insert( self, index, insStr ):
        """Inserts new element at location index in the list of parsed tokens."""
        self.__toklist.insert(index, insStr)
        # fixup indices in token dictionary
        for name in self.__tokdict:
            occurrences = self.__tokdict[name]
            for k, (value, position) in enumerate(occurrences):
                occurrences[k] = _ParseResultsWithOffset(value, position + (position > index))

    def items( self ):
        """Returns all named result keys and values as a list of tuples."""
        return [(k,self[k]) for k in self.__tokdict]

    def values( self ):
        """Returns all named result values."""
        # for each name, only the most recent match is reported
        return [ v[-1][0] for v in self.__tokdict.values() ]

    def __getattr__( self, name ):
        # attribute access falls back to named results; unknown names yield ""
        if True: #name not in self.__slots__:
            if name in self.__tokdict:
                if name not in self.__accumNames:
                    return self.__tokdict[name][-1][0]
                else:
                    return ParseResults([ v[0] for v in self.__tokdict[name] ])
            else:
                return ""
        return None

    def __add__( self, other ):
        ret = self.copy()
        ret += other
        return ret

    def __iadd__( self, other ):
        if other.__tokdict:
            offset = len(self.__toklist)
            # negative offsets (meaning "last") stay negative; others shift right
            addoffset = ( lambda a: (a<0 and offset) or (a+offset) )
            otheritems = other.__tokdict.items()
            otherdictitems = [(k, _ParseResultsWithOffset(v[0],addoffset(v[1])) )
                              for (k,vlist) in otheritems for v in vlist]
            for k,v in otherdictitems:
                self[k] = v
                if isinstance(v[0],ParseResults):
                    v[0].__parent = wkref(self)
        self.__toklist += other.__toklist
        self.__accumNames.update( other.__accumNames )
        return self

    def __radd__(self, other):
        # supports sum(list_of_parseresults), whose start value is integer 0
        if isinstance(other,int) and other == 0:
            return self.copy()
        # NOTE(review): implicitly returns None (not NotImplemented) for any
        # other operand -- confirm callers only ever evaluate 0 + ParseResults.

    def __repr__( self ):
        return "(%s, %s)" % ( repr( self.__toklist ), repr( self.__tokdict ) )

    def __str__( self ):
        out = "["
        sep = ""
        for i in self.__toklist:
            if isinstance(i, ParseResults):
                out += sep + _ustr(i)
            else:
                out += sep + repr(i)
            sep = ", "
        out += "]"
        return out

    def _asStringList( self, sep='' ):
        # flatten to a list of strings, inserting sep between top-level items
        out = []
        for item in self.__toklist:
            if out and sep:
                out.append(sep)
            if isinstance( item, ParseResults ):
                out += item._asStringList()
            else:
                out.append( _ustr(item) )
        return out

    def asList( self ):
        """Returns the parse results as a nested list of matching tokens, all converted to strings."""
        out = []
        for res in self.__toklist:
            if isinstance(res,ParseResults):
                out.append( res.asList() )
            else:
                out.append( res )
        return out

    def asDict( self ):
        """Returns the named parse results as dictionary."""
        return dict( self.items() )

    def copy( self ):
        """Returns a new copy of a C{ParseResults} object."""
        # shallow copy: the token list and tokdict are copied, values shared
        ret = ParseResults( self.__toklist )
        ret.__tokdict = self.__tokdict.copy()
        ret.__parent = self.__parent
        ret.__accumNames.update( self.__accumNames )
        ret.__name = self.__name
        return ret

    def asXML( self, doctag=None, namedItemsOnly=False, indent="", formatted=True ):
        """Returns the parse results as XML. Tags are created for tokens and lists that have defined results names."""
        nl = "\n"
        out = []
        # map token offset -> results name, used to tag individual tokens below
        namedItems = dict( [ (v[1],k) for (k,vlist) in self.__tokdict.items()
                             for v in vlist ] )
        nextLevelIndent = indent + " "
        # collapse out indents if formatting is not desired
        if not formatted:
            indent = ""
            nextLevelIndent = ""
            nl = ""
        selfTag = None
        if doctag is not None:
            selfTag = doctag
        else:
            if self.__name:
                selfTag = self.__name
        if not selfTag:
            if namedItemsOnly:
                return ""
            else:
                selfTag = "ITEM"
        out += [ nl, indent, "<", selfTag, ">" ]
        worklist = self.__toklist
        for i,res in enumerate(worklist):
            if isinstance(res,ParseResults):
                if i in namedItems:
                    out += [ res.asXML(namedItems[i],
                                       namedItemsOnly and doctag is None,
                                       nextLevelIndent,
                                       formatted)]
                else:
                    out += [ res.asXML(None,
                                       namedItemsOnly and doctag is None,
                                       nextLevelIndent,
                                       formatted)]
            else:
                # individual token, see if there is a name for it
                resTag = None
                if i in namedItems:
                    resTag = namedItems[i]
                if not resTag:
                    if namedItemsOnly:
                        continue
                    else:
                        resTag = "ITEM"
                xmlBodyText = _xml_escape(_ustr(res))
                out += [ nl, nextLevelIndent, "<", resTag, ">",
                         xmlBodyText,
                         "</", resTag, ">" ]
        out += [ nl, indent, "</", selfTag, ">" ]
        return "".join(out)

    def __lookup(self,sub):
        # reverse lookup: find the results name under which `sub` was stored
        for k,vlist in self.__tokdict.items():
            for v,loc in vlist:
                if sub is v:
                    return k
        return None

    def getName(self):
        """Returns the results name for this token expression."""
        if self.__name:
            return self.__name
        elif self.__parent:
            par = self.__parent()
            if par:
                return par.__lookup(self)
            else:
                return None
        # NOTE(review): the branch below subscripts dict.values()/dict.keys(),
        # which assumes Python 2 list semantics; on Python 3 it raises
        # TypeError -- confirm which interpreters this bundled copy targets.
        elif (len(self) == 1 and
              len(self.__tokdict) == 1 and
              self.__tokdict.values()[0][0][1] in (0,-1)):
            return self.__tokdict.keys()[0]
        else:
            return None

    def dump(self,indent='',depth=0):
        """Diagnostic method for listing out the contents of a C{ParseResults}.
           Accepts an optional C{indent} argument so that this string can be embedded
           in a nested display of other data."""
        out = []
        out.append( indent+_ustr(self.asList()) )
        keys = self.items()   # items() builds a list here, so in-place sort is safe
        keys.sort()
        for k,v in keys:
            if out:
                out.append('\n')
            out.append( "%s%s- %s: " % (indent,(' '*depth), k) )
            if isinstance(v,ParseResults):
                if v.keys():
                    out.append( v.dump(indent,depth+1) )
                else:
                    out.append(_ustr(v))
            else:
                out.append(_ustr(v))
        return "".join(out)

    # add support for pickle protocol
    def __getstate__(self):
        # the weakly-referenced parent is materialized (or None) for pickling
        return ( self.__toklist,
                 ( self.__tokdict.copy(),
                   self.__parent is not None and self.__parent() or None,
                   self.__accumNames,
                   self.__name ) )

    def __setstate__(self,state):
        self.__toklist = state[0]
        self.__tokdict, \
        par, \
        inAccumNames, \
        self.__name = state[1]
        self.__accumNames = {}
        self.__accumNames.update(inAccumNames)
        if par is not None:
            # restore the parent link as a weak reference, as in __setitem__
            self.__parent = wkref(par)
        else:
            self.__parent = None

    def __dir__(self):
        # NOTE(review): on Python 3, self.keys() is a dict_keys view and
        # list + view raises TypeError -- verify against targeted versions.
        return dir(super(ParseResults,self)) + self.keys()
# Register ParseResults as a virtual subclass of MutableMapping so that
# isinstance() checks against the mapping ABC succeed.
# BUGFIX: the ABC aliases were removed from the `collections` namespace in
# Python 3.10; use collections.abc and fall back for old interpreters
# (including Python 2, where collections.abc does not exist).
try:
    collections.abc.MutableMapping.register(ParseResults)
except AttributeError:
    collections.MutableMapping.register(ParseResults)
def col (loc,strg):
    """Returns current column within a string, counting newlines as line separators.
       The first column is number 1.

       Note: the default parsing behavior is to expand tabs in the input string
       before starting the parsing process.  See L{I{ParserElement.parseString}<ParserElement.parseString>} for more information
       on parsing strings containing <TAB>s, and suggested methods to maintain a
       consistent view of the parsed string, the parse location, and line and column
       positions within the parsed string.
    """
    # Standing on a newline counts as column 1 of the next line.
    if loc < len(strg) and strg[loc] == '\n':
        return 1
    return loc - strg.rfind("\n", 0, loc)
def lineno(loc,strg):
    """Returns current line number within a string, counting newlines as line separators.
       The first line is number 1.

       Note: the default parsing behavior is to expand tabs in the input string
       before starting the parsing process.  See L{I{ParserElement.parseString}<ParserElement.parseString>} for more information
       on parsing strings containing <TAB>s, and suggested methods to maintain a
       consistent view of the parsed string, the parse location, and line and column
       positions within the parsed string.
    """
    # 1-based: the line number is one more than the newlines preceding loc.
    return 1 + strg.count("\n", 0, loc)
def line( loc, strg ):
    """Returns the line of text containing loc within a string, counting newlines as line separators.
       """
    # Slice between the newline before loc and the newline at/after loc.
    start = strg.rfind("\n", 0, loc) + 1
    end = strg.find("\n", loc)
    return strg[start:] if end < 0 else strg[start:end]
def _defaultStartDebugAction( instring, loc, expr ):
    """Default debug hook: announce that C{expr} is about to be matched at C{loc}."""
    position = "(%d,%d)" % ( lineno(loc,instring), col(loc,instring) )
    print ("Match " + _ustr(expr) + " at loc " + _ustr(loc) + position)
def _defaultSuccessDebugAction( instring, startloc, endloc, expr, toks ):
    """Default debug hook: report a successful match and the tokens it produced."""
    tokenList = toks.asList()
    print ("Matched " + _ustr(expr) + " -> " + str(tokenList))
def _defaultExceptionDebugAction( instring, loc, expr, exc ):
    """Default debug hook: report an exception raised while matching C{expr}."""
    message = "Exception raised:" + _ustr(exc)
    print (message)
def nullDebugAction(*args):
    """'Do-nothing' debug action, to suppress debugging output during parsing."""
    # Intentionally ignores all arguments and returns None.
    return None
class ParserElement(object):
"""Abstract base level parser element class."""
DEFAULT_WHITE_CHARS = " \n\t\r"
verbose_stacktrace = False
def setDefaultWhitespaceChars( chars ):
"""Overrides the default whitespace chars
"""
ParserElement.DEFAULT_WHITE_CHARS = chars
setDefaultWhitespaceChars = staticmethod(setDefaultWhitespaceChars)
def __init__( self, savelist=False ):
self.parseAction = list()
self.failAction = None
#~ self.name = "<unknown>" # don't define self.name, let subclasses try/except upcall
self.strRepr = None
self.resultsName = None
self.saveAsList = savelist
self.skipWhitespace = True
self.whiteChars = ParserElement.DEFAULT_WHITE_CHARS
self.copyDefaultWhiteChars = True
self.mayReturnEmpty = False # used when checking for left-recursion
self.keepTabs = False
self.ignoreExprs = list()
self.debug = False
self.streamlined = False
self.mayIndexError = True # used to optimize exception handling for subclasses that don't advance parse index
self.errmsg = ""
self.modalResults = True # used to mark results names as modal (report only last) or cumulative (list all)
self.debugActions = ( None, None, None ) #custom debug actions
self.re = None
self.callPreparse = True # used to avoid redundant calls to preParse
self.callDuringTry = False
def copy( self ):
"""Make a copy of this C{ParserElement}. Useful for defining different parse actions
for the same parsing pattern, using copies of the original parse element."""
cpy = copy.copy( self )
cpy.parseAction = self.parseAction[:]
cpy.ignoreExprs = self.ignoreExprs[:]
if self.copyDefaultWhiteChars:
cpy.whiteChars = ParserElement.DEFAULT_WHITE_CHARS
return cpy
def setName( self, name ):
"""Define name for this expression, for use in debugging."""
self.name = name
self.errmsg = "Expected " + self.name
if hasattr(self,"exception"):
self.exception.msg = self.errmsg
return self
def setResultsName( self, name, listAllMatches=False ):
"""Define name for referencing matching tokens as a nested attribute
of the returned parse results.
NOTE: this returns a *copy* of the original C{ParserElement} object;
this is so that the client can define a basic element, such as an
integer, and reference it in multiple places with different names.
You can also set results names using the abbreviated syntax,
C{expr("name")} in place of C{expr.setResultsName("name")} -
see L{I{__call__}<__call__>}.
"""
newself = self.copy()
newself.resultsName = name
newself.modalResults = not listAllMatches
return newself
def setBreak(self,breakFlag = True):
"""Method to invoke the Python pdb debugger when this element is
about to be parsed. Set C{breakFlag} to True to enable, False to
disable.
"""
if breakFlag:
_parseMethod = self._parse
def breaker(instring, loc, doActions=True, callPreParse=True):
import pdb
pdb.set_trace()
return _parseMethod( instring, loc, doActions, callPreParse )
breaker._originalParseMethod = _parseMethod
self._parse = breaker
else:
if hasattr(self._parse,"_originalParseMethod"):
self._parse = self._parse._originalParseMethod
return self
def _normalizeParseActionArgs( f ):
"""Internal method used to decorate parse actions that take fewer than 3 arguments,
so that all parse actions can be called as C{f(s,l,t)}."""
STAR_ARGS = 4
# special handling for single-argument builtins
if (f in singleArgBuiltins):
numargs = 1
else:
try:
restore = None
if isinstance(f,type):
restore = f
f = f.__init__
if not _PY3K:
codeObj = f.func_code
else:
codeObj = f.code
if codeObj.co_flags & STAR_ARGS:
return f
numargs = codeObj.co_argcount
if not _PY3K:
if hasattr(f,"im_self"):
numargs -= 1
else:
if hasattr(f,"__self__"):
numargs -= 1
if restore:
f = restore
except AttributeError:
try:
if not _PY3K:
call_im_func_code = f.__call__.im_func.func_code
else:
call_im_func_code = f.__code__
# not a function, must be a callable object, get info from the
# im_func binding of its bound __call__ method
if call_im_func_code.co_flags & STAR_ARGS:
return f
numargs = call_im_func_code.co_argcount
if not _PY3K:
if hasattr(f.__call__,"im_self"):
numargs -= 1
else:
if hasattr(f.__call__,"__self__"):
numargs -= 0
except AttributeError:
if not _PY3K:
call_func_code = f.__call__.func_code
else:
call_func_code = f.__call__.__code__
# not a bound method, get info directly from __call__ method
if call_func_code.co_flags & STAR_ARGS:
return f
numargs = call_func_code.co_argcount
if not _PY3K:
if hasattr(f.__call__,"im_self"):
numargs -= 1
else:
if hasattr(f.__call__,"__self__"):
numargs -= 1
#~ print ("adding function %s with %d args" % (f.func_name,numargs))
if numargs == 3:
return f
else:
if numargs > 3:
def tmp(s,l,t):
return f(f.__call__.__self__, s,l,t)
if numargs == 2:
def tmp(s,l,t):
return f(l,t)
elif numargs == 1:
def tmp(s,l,t):
return f(t)
else: #~ numargs == 0:
def tmp(s,l,t):
return f()
try:
tmp.__name__ = f.__name__
except (AttributeError,TypeError):
# no need for special handling if attribute doesnt exist
pass
try:
tmp.__doc__ = f.__doc__
except (AttributeError,TypeError):
# no need for special handling if attribute doesnt exist
pass
try:
tmp.__dict__.update(f.__dict__)
except (AttributeError,TypeError):
# no need for special handling if attribute doesnt exist
pass
return tmp
_normalizeParseActionArgs = staticmethod(_normalizeParseActionArgs)
def setParseAction( self, *fns, **kwargs ):
"""Define action to perform when successfully matching parse element definition.
Parse action fn is a callable method with 0-3 arguments, called as C{fn(s,loc,toks)},
C{fn(loc,toks)}, C{fn(toks)}, or just C{fn()}, where:
- s = the original string being parsed (see note below)
- loc = the location of the matching substring
- toks = a list of the matched tokens, packaged as a ParseResults object
If the functions in fns modify the tokens, they can return them as the return
value from fn, and the modified list of tokens will replace the original.
Otherwise, fn does not need to return any value.
Note: the default parsing behavior is to expand tabs in the input string
before starting the parsing process. See L{I{parseString}<parseString>} for more information
on parsing strings containing <TAB>s, and suggested methods to maintain a
consistent view of the parsed string, the parse location, and line and column
positions within the parsed string.
"""
self.parseAction = list(map(self._normalizeParseActionArgs, list(fns)))
self.callDuringTry = ("callDuringTry" in kwargs and kwargs["callDuringTry"])
return self
def addParseAction( self, *fns, **kwargs ):
"""Add parse action to expression's list of parse actions. See L{I{setParseAction}<setParseAction>}."""
self.parseAction += list(map(self._normalizeParseActionArgs, list(fns)))
self.callDuringTry = self.callDuringTry or ("callDuringTry" in kwargs and kwargs["callDuringTry"])
return self
def setFailAction( self, fn ):
"""Define action to perform if parsing fails at this expression.
Fail acton fn is a callable function that takes the arguments
C{fn(s,loc,expr,err)} where:
- s = string being parsed
- loc = location where expression match was attempted and failed
- expr = the parse expression that failed
- err = the exception thrown
The function returns no value. It may throw C{ParseFatalException}
if it is desired to stop parsing immediately."""
self.failAction = fn
return self
def _skipIgnorables( self, instring, loc ):
exprsFound = True
while exprsFound:
exprsFound = False
for e in self.ignoreExprs:
try:
while 1:
loc,dummy = e._parse( instring, loc )
exprsFound = True
except ParseException:
pass
return loc
def preParse( self, instring, loc ):
if self.ignoreExprs:
loc = self._skipIgnorables( instring, loc )
if self.skipWhitespace:
wt = self.whiteChars
instrlen = len(instring)
while loc < instrlen and instring[loc] in wt:
loc += 1
return loc
def parseImpl( self, instring, loc, doActions=True ):
return loc, []
def postParse( self, instring, loc, tokenlist ):
return tokenlist
#~ @profile
def _parseNoCache( self, instring, loc, doActions=True, callPreParse=True ):
debugging = ( self.debug ) #and doActions )
if debugging or self.failAction:
#~ print ("Match",self,"at loc",loc,"(%d,%d)" % ( lineno(loc,instring), col(loc,instring) ))
if (self.debugActions[0] ):
self.debugActions[0]( instring, loc, self )
if callPreParse and self.callPreparse:
preloc = self.preParse( instring, loc )
else:
preloc = loc
tokensStart = preloc
try:
try:
loc,tokens = self.parseImpl( instring, preloc, doActions )
except IndexError:
raise ParseException( instring, len(instring), self.errmsg, self )
except ParseBaseException:
#~ print ("Exception raised:", err)
err = None
if self.debugActions[2]:
err = sys.exc_info()[1]
self.debugActions[2]( instring, tokensStart, self, err )
if self.failAction:
if err is None:
err = sys.exc_info()[1]
self.failAction( instring, tokensStart, self, err )
raise
else:
if callPreParse and self.callPreparse:
preloc = self.preParse( instring, loc )
else:
preloc = loc
tokensStart = preloc
if self.mayIndexError or loc >= len(instring):
try:
loc,tokens = self.parseImpl( instring, preloc, doActions )
except IndexError:
raise ParseException( instring, len(instring), self.errmsg, self )
else:
loc,tokens = self.parseImpl( instring, preloc, doActions )
tokens = self.postParse( instring, loc, tokens )
retTokens = ParseResults( tokens, self.resultsName, asList=self.saveAsList, modal=self.modalResults )
if self.parseAction and (doActions or self.callDuringTry):
if debugging:
try:
for fn in self.parseAction:
tokens = fn( instring, tokensStart, retTokens )
if tokens is not None:
retTokens = ParseResults( tokens,
self.resultsName,
asList=self.saveAsList and isinstance(tokens,(ParseResults,list)),
modal=self.modalResults )
except ParseBaseException:
#~ print "Exception raised in user parse action:", err
if (self.debugActions[2] ):
err = sys.exc_info()[1]
self.debugActions[2]( instring, tokensStart, self, err )
raise
else:
for fn in self.parseAction:
tokens = fn( instring, tokensStart, retTokens )
if tokens is not None:
retTokens = ParseResults( tokens,
self.resultsName,
asList=self.saveAsList and isinstance(tokens,(ParseResults,list)),
modal=self.modalResults )
if debugging:
#~ print ("Matched",self,"->",retTokens.asList())
if (self.debugActions[1] ):
self.debugActions[1]( instring, tokensStart, loc, self, retTokens )
return loc, retTokens
def tryParse( self, instring, loc ):
try:
return self._parse( instring, loc, doActions=False )[0]
except ParseFatalException:
raise ParseException( instring, loc, self.errmsg, self)
# this method gets repeatedly called during backtracking with the same arguments -
# we can cache these arguments and save ourselves the trouble of re-parsing the contained expression
def _parseCache( self, instring, loc, doActions=True, callPreParse=True ):
lookup = (self,instring,loc,callPreParse,doActions)
if lookup in ParserElement._exprArgCache:
value = ParserElement._exprArgCache[ lookup ]
if isinstance(value,Exception):
raise value
return value
else:
try:
value = self._parseNoCache( instring, loc, doActions, callPreParse )
ParserElement._exprArgCache[ lookup ] = (value[0],value[1].copy())
return value
except ParseBaseException:
pe = sys.exc_info()[1]
ParserElement._exprArgCache[ lookup ] = pe
raise
_parse = _parseNoCache
# argument cache for optimizing repeated calls when backtracking through recursive expressions
_exprArgCache = {}
def resetCache():
ParserElement._exprArgCache.clear()
resetCache = staticmethod(resetCache)
_packratEnabled = False
def enablePackrat():
"""Enables "packrat" parsing, which adds memoizing to the parsing logic.
Repeated parse attempts at the same string location (which happens
often in many complex grammars) can immediately return a cached value,
instead of re-executing parsing/validating code. Memoizing is done of
both valid results and parsing exceptions.
This speedup may break existing programs that use parse actions that
have side-effects. For this reason, packrat parsing is disabled when
you first import pyparsing. To activate the packrat feature, your
program must call the class method C{ParserElement.enablePackrat()}. If
your program uses C{psyco} to "compile as you go", you must call
C{enablePackrat} before calling C{psyco.full()}. If you do not do this,
Python will crash. For best results, call C{enablePackrat()} immediately
after importing pyparsing.
"""
if not ParserElement._packratEnabled:
ParserElement._packratEnabled = True
ParserElement._parse = ParserElement._parseCache
enablePackrat = staticmethod(enablePackrat)
    def parseString( self, instring, parseAll=False ):
        """Execute the parse expression with the given string.
        This is the main interface to the client code, once the complete
        expression has been built.
        If you want the grammar to require that the entire input string be
        successfully parsed, then set C{parseAll} to True (equivalent to ending
        the grammar with C{StringEnd()}).
        Note: C{parseString} implicitly calls C{expandtabs()} on the input string,
        in order to report proper column numbers in parse actions.
        If the input string contains tabs and
        the grammar uses parse actions that use the C{loc} argument to index into the
        string being parsed, you can ensure you have a consistent view of the input
        string by:
        - calling C{parseWithTabs} on your grammar before calling C{parseString}
        (see L{I{parseWithTabs}<parseWithTabs>})
        - define your parse action using the full C{(s,loc,toks)} signature, and
        reference the input string using the parse action's C{s} argument
        - explictly expand the tabs in your input string before calling
        C{parseString}
        """
        # each top-level parse starts with a fresh packrat cache
        ParserElement.resetCache()
        if not self.streamlined:
            self.streamline()
            #~ self.saveAsList = True
        for e in self.ignoreExprs:
            e.streamline()
        if not self.keepTabs:
            # expand tabs so reported column numbers are consistent (see docstring)
            instring = instring.expandtabs()
        try:
            loc, tokens = self._parse( instring, 0 )
            if parseAll:
                #loc = self.preParse( instring, loc )
                # require that nothing but end-of-string remains
                se = StringEnd()
                se._parse( instring, loc )
        except ParseBaseException:
            if ParserElement.verbose_stacktrace:
                raise
            else:
                # catch and re-raise exception from here, clears out pyparsing internal stack trace
                exc = sys.exc_info()[1]
                raise exc
        else:
            return tokens
    def scanString( self, instring, maxMatches=_MAX_INT ):
        """Scan the input string for expression matches. Each match will return the
        matching tokens, start location, and end location. May be called with optional
        C{maxMatches} argument, to clip scanning after 'n' matches are found.
        Note that the start and end locations are reported relative to the string
        being parsed. See L{I{parseString}<parseString>} for more information on parsing
        strings with embedded tabs."""
        if not self.streamlined:
            self.streamline()
        for e in self.ignoreExprs:
            e.streamline()
        if not self.keepTabs:
            instring = _ustr(instring).expandtabs()
        instrlen = len(instring)
        loc = 0
        # hoist the bound methods out of the scan loop; they are called once
        # per candidate position
        preparseFn = self.preParse
        parseFn = self._parse
        ParserElement.resetCache()
        matches = 0
        try:
            while loc <= instrlen and matches < maxMatches:
                try:
                    preloc = preparseFn( instring, loc )
                    nextLoc,tokens = parseFn( instring, preloc, callPreParse=False )
                except ParseException:
                    # no match at this position; resume scanning one char later
                    loc = preloc+1
                else:
                    if nextLoc > loc:
                        matches += 1
                        yield tokens, preloc, nextLoc
                        loc = nextLoc
                    else:
                        # zero-width match: advance anyway to guarantee progress
                        loc = preloc+1
        except ParseBaseException:
            if ParserElement.verbose_stacktrace:
                raise
            else:
                # catch and re-raise exception from here, clears out pyparsing internal stack trace
                exc = sys.exc_info()[1]
                raise exc
    def transformString( self, instring ):
        """Extension to C{scanString}, to modify matching text with modified tokens that may
        be returned from a parse action. To use C{transformString}, define a grammar and
        attach a parse action to it that modifies the returned token list.
        Invoking C{transformString()} on a target string will then scan for matches,
        and replace the matched text patterns according to the logic in the parse
        action. C{transformString()} returns the resulting transformed string."""
        out = []
        lastE = 0
        # force preservation of <TAB>s, to minimize unwanted transformation of string, and to
        # keep string locs straight between transformString and scanString
        self.keepTabs = True
        try:
            for t,s,e in self.scanString( instring ):
                # copy text between matches through unchanged
                out.append( instring[lastE:s] )
                if t:
                    if isinstance(t,ParseResults):
                        out += t.asList()
                    elif isinstance(t,list):
                        out += t
                    else:
                        out.append(t)
                lastE = e
            out.append(instring[lastE:])
            return "".join(map(_ustr,out))
        except ParseBaseException:
            if ParserElement.verbose_stacktrace:
                raise
            else:
                # catch and re-raise exception from here, clears out pyparsing internal stack trace
                exc = sys.exc_info()[1]
                raise exc
def searchString( self, instring, maxMatches=_MAX_INT ):
"""Another extension to C{scanString}, simplifying the access to the tokens found
to match the given parse expression. May be called with optional
C{maxMatches} argument, to clip searching after 'n' matches are found.
"""
try:
return ParseResults([ t for t,s,e in self.scanString( instring, maxMatches ) ])
except ParseBaseException:
if ParserElement.verbose_stacktrace:
raise
else:
# catch and re-raise exception from here, clears out pyparsing internal stack trace
exc = sys.exc_info()[1]
raise exc
def __add__(self, other ):
"""Implementation of + operator - returns And"""
if isinstance( other, basestring ):
other = Literal( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return And( [ self, other ] )
def __radd__(self, other ):
"""Implementation of + operator when left operand is not a C{ParserElement}"""
if isinstance( other, basestring ):
other = Literal( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return other + self
def __sub__(self, other):
"""Implementation of - operator, returns C{And} with error stop"""
if isinstance( other, basestring ):
other = Literal( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return And( [ self, And._ErrorStop(), other ] )
def __rsub__(self, other ):
"""Implementation of - operator when left operand is not a C{ParserElement}"""
if isinstance( other, basestring ):
other = Literal( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return other - self
def __mul__(self,other):
"""Implementation of * operator, allows use of C{expr * 3} in place of
C{expr + expr + expr}. Expressions may also me multiplied by a 2-integer
tuple, similar to C{{min,max}} multipliers in regular expressions. Tuples
may also include C{None} as in:
- C{expr*(n,None)} or C{expr*(n,)} is equivalent
to C{expr*n + ZeroOrMore(expr)}
(read as "at least n instances of C{expr}")
- C{expr*(None,n)} is equivalent to C{expr*(0,n)}
(read as "0 to n instances of C{expr}")
- C{expr*(None,None)} is equivalent to C{ZeroOrMore(expr)}
- C{expr*(1,None)} is equivalent to C{OneOrMore(expr)}
Note that C{expr*(None,n)} does not raise an exception if
more than n exprs exist in the input stream; that is,
C{expr*(None,n)} does not enforce a maximum number of expr
occurrences. If this behavior is desired, then write
C{expr*(None,n) + ~expr}
"""
if isinstance(other,int):
minElements, optElements = other,0
elif isinstance(other,tuple):
other = (other + (None, None))[:2]
if other[0] is None:
other = (0, other[1])
if isinstance(other[0],int) and other[1] is None:
if other[0] == 0:
return ZeroOrMore(self)
if other[0] == 1:
return OneOrMore(self)
else:
return self*other[0] + ZeroOrMore(self)
elif isinstance(other[0],int) and isinstance(other[1],int):
minElements, optElements = other
optElements -= minElements
else:
raise TypeError("cannot multiply 'ParserElement' and ('%s','%s') objects", type(other[0]),type(other[1]))
else:
raise TypeError("cannot multiply 'ParserElement' and '%s' objects", type(other))
if minElements < 0:
raise ValueError("cannot multiply ParserElement by negative value")
if optElements < 0:
raise ValueError("second tuple value must be greater or equal to first tuple value")
if minElements == optElements == 0:
raise ValueError("cannot multiply ParserElement by 0 or (0,0)")
if (optElements):
def makeOptionalList(n):
if n>1:
return Optional(self + makeOptionalList(n-1))
else:
return Optional(self)
if minElements:
if minElements == 1:
ret = self + makeOptionalList(optElements)
else:
ret = And([self]*minElements) + makeOptionalList(optElements)
else:
ret = makeOptionalList(optElements)
else:
if minElements == 1:
ret = self
else:
ret = And([self]*minElements)
return ret
def __rmul__(self, other):
return self.__mul__(other)
def __or__(self, other ):
"""Implementation of | operator - returns C{MatchFirst}"""
if isinstance( other, basestring ):
other = Literal( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return MatchFirst( [ self, other ] )
def __ror__(self, other ):
"""Implementation of | operator when left operand is not a C{ParserElement}"""
if isinstance( other, basestring ):
other = Literal( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return other | self
def __xor__(self, other ):
"""Implementation of ^ operator - returns C{Or}"""
if isinstance( other, basestring ):
other = Literal( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return Or( [ self, other ] )
def __rxor__(self, other ):
"""Implementation of ^ operator when left operand is not a C{ParserElement}"""
if isinstance( other, basestring ):
other = Literal( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return other ^ self
def __and__(self, other ):
"""Implementation of & operator - returns C{Each}"""
if isinstance( other, basestring ):
other = Literal( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return Each( [ self, other ] )
def __rand__(self, other ):
"""Implementation of & operator when left operand is not a C{ParserElement}"""
if isinstance( other, basestring ):
other = Literal( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return other & self
def __invert__( self ):
"""Implementation of ~ operator - returns C{NotAny}"""
return NotAny( self )
def __call__(self, name):
"""Shortcut for C{setResultsName}, with C{listAllMatches=default}::
userdata = Word(alphas).setResultsName("name") + Word(nums+"-").setResultsName("socsecno")
could be written as::
userdata = Word(alphas)("name") + Word(nums+"-")("socsecno")
"""
return self.setResultsName(name)
def suppress( self ):
"""Suppresses the output of this C{ParserElement}; useful to keep punctuation from
cluttering up returned output.
"""
return Suppress( self )
def leaveWhitespace( self ):
"""Disables the skipping of whitespace before matching the characters in the
C{ParserElement}'s defined pattern. This is normally only used internally by
the pyparsing module, but may be needed in some whitespace-sensitive grammars.
"""
self.skipWhitespace = False
return self
def setWhitespaceChars( self, chars ):
"""Overrides the default whitespace chars
"""
self.skipWhitespace = True
self.whiteChars = chars
self.copyDefaultWhiteChars = False
return self
def parseWithTabs( self ):
"""Overrides default behavior to expand <TAB>s to spaces before parsing the input string.
Must be called before C{parseString} when the input grammar contains elements that
match <TAB> characters."""
self.keepTabs = True
return self
def ignore( self, other ):
"""Define expression to be ignored (e.g., comments) while doing pattern
matching; may be called repeatedly, to define multiple comment or other
ignorable patterns.
"""
if isinstance( other, Suppress ):
if other not in self.ignoreExprs:
self.ignoreExprs.append( other.copy() )
else:
self.ignoreExprs.append( Suppress( other.copy() ) )
return self
def setDebugActions( self, startAction, successAction, exceptionAction ):
"""Enable display of debugging messages while doing pattern matching."""
self.debugActions = (startAction or _defaultStartDebugAction,
successAction or _defaultSuccessDebugAction,
exceptionAction or _defaultExceptionDebugAction)
self.debug = True
return self
def setDebug( self, flag=True ):
"""Enable display of debugging messages while doing pattern matching.
Set C{flag} to True to enable, False to disable."""
if flag:
self.setDebugActions( _defaultStartDebugAction, _defaultSuccessDebugAction, _defaultExceptionDebugAction )
else:
self.debug = False
return self
def __str__( self ):
return self.name
def __repr__( self ):
return _ustr(self)
def streamline( self ):
self.streamlined = True
self.strRepr = None
return self
def checkRecursion( self, parseElementList ):
pass
def validate( self, validateTrace=[] ):
"""Check defined expressions for valid structure, check for infinite recursive definitions."""
self.checkRecursion( [] )
def parseFile( self, file_or_filename, parseAll=False ):
"""Execute the parse expression on the given file or filename.
If a filename is specified (instead of a file object),
the entire file is opened, read, and closed before parsing.
"""
try:
file_contents = file_or_filename.read()
except AttributeError:
f = open(file_or_filename, "rb")
file_contents = f.read()
f.close()
try:
return self.parseString(file_contents, parseAll)
except ParseBaseException:
# catch and re-raise exception from here, clears out pyparsing internal stack trace
exc = sys.exc_info()[1]
raise exc
def getException(self):
return ParseException("",0,self.errmsg,self)
def __getattr__(self,aname):
if aname == "myException":
self.myException = ret = self.getException();
return ret;
else:
raise AttributeError("no such attribute " + aname)
def __eq__(self,other):
if isinstance(other, ParserElement):
return self is other or self.__dict__ == other.__dict__
elif isinstance(other, basestring):
try:
self.parseString(_ustr(other), parseAll=True)
return True
except ParseBaseException:
return False
else:
return super(ParserElement,self)==other
def __ne__(self,other):
return not (self == other)
def __hash__(self):
return hash(id(self))
def __req__(self,other):
return self == other
def __rne__(self,other):
return not (self == other)
class Token(ParserElement):
    """Abstract C{ParserElement} subclass, for defining atomic matching patterns."""
    def __init__(self):
        # tokens never accumulate sub-results, so savelist is off
        super(Token, self).__init__(savelist=False)
    def setName(self, name):
        """Set the display name and refresh the cached error message."""
        ret = super(Token, self).setName(name)
        self.errmsg = "Expected " + self.name
        return ret
class Empty(Token):
    """An empty token, will always match."""
    def __init__(self):
        super(Empty, self).__init__()
        # matches zero characters, so it can never index past end of input
        self.mayIndexError = False
        self.mayReturnEmpty = True
        self.name = "Empty"
class NoMatch(Token):
    """A token that will never match."""
    def __init__(self):
        super(NoMatch, self).__init__()
        self.name = "NoMatch"
        self.mayReturnEmpty = True
        self.mayIndexError = False
        self.errmsg = "Unmatchable token"
    def parseImpl(self, instring, loc, doActions=True):
        # unconditionally fail, reusing the memoized exception object
        failure = self.myException
        failure.loc = loc
        failure.pstr = instring
        raise failure
class Literal(Token):
    """Token to exactly match a specified string."""
    def __init__( self, matchString ):
        super(Literal,self).__init__()
        self.match = matchString
        self.matchLen = len(matchString)
        try:
            self.firstMatchChar = matchString[0]
        except IndexError:
            warnings.warn("null string passed to Literal; use Empty() instead",
                            SyntaxWarning, stacklevel=2)
            # degrade an empty Literal into an always-matching Empty token
            self.__class__ = Empty
        self.name = '"%s"' % _ustr(self.match)
        self.errmsg = "Expected " + self.name
        self.mayReturnEmpty = False
        #self.myException.msg = self.errmsg
        self.mayIndexError = False
    # Performance tuning: this routine gets called a *lot*
    # if this is a single character match string and the first character matches,
    # short-circuit as quickly as possible, and avoid calling startswith
    #~ @profile
    def parseImpl( self, instring, loc, doActions=True ):
        # the first-char test is a cheap filter before the costlier startswith call
        if (instring[loc] == self.firstMatchChar and
            (self.matchLen==1 or instring.startswith(self.match,loc)) ):
            return loc+self.matchLen, self.match
        #~ raise ParseException( instring, loc, self.errmsg )
        # reuse the lazily-created exception (see ParserElement.__getattr__)
        # instead of constructing a new one on every failed match
        exc = self.myException
        exc.loc = loc
        exc.pstr = instring
        raise exc
# shorthand alias for Literal
_L = Literal
class Keyword(Token):
    """Token to exactly match a specified string as a keyword, that is, it must be
    immediately followed by a non-keyword character. Compare with C{Literal}::
    Literal("if") will match the leading 'if' in 'ifAndOnlyIf'.
    Keyword("if") will not; it will only match the leading 'if in 'if x=1', or 'if(y==2)'
    Accepts two optional constructor arguments in addition to the keyword string:
    C{identChars} is a string of characters that would be valid identifier characters,
    defaulting to all alphanumerics + "_" and "$"; C{caseless} allows case-insensitive
    matching, default is False.
    """
    # class-level default set of identifier characters; see setDefaultKeywordChars
    DEFAULT_KEYWORD_CHARS = alphanums+"_$"
    def __init__( self, matchString, identChars=DEFAULT_KEYWORD_CHARS, caseless=False ):
        super(Keyword,self).__init__()
        self.match = matchString
        self.matchLen = len(matchString)
        try:
            self.firstMatchChar = matchString[0]
        except IndexError:
            warnings.warn("null string passed to Keyword; use Empty() instead",
                            SyntaxWarning, stacklevel=2)
        self.name = '"%s"' % self.match
        self.errmsg = "Expected " + self.name
        self.mayReturnEmpty = False
        #self.myException.msg = self.errmsg
        self.mayIndexError = False
        self.caseless = caseless
        if caseless:
            # caseless comparison works on uppercased forms of both sides
            self.caselessmatch = matchString.upper()
            identChars = identChars.upper()
        self.identChars = set(identChars)
    def parseImpl( self, instring, loc, doActions=True ):
        # in both branches: match the text, then verify neither neighbor is an
        # identifier character (i.e. the match sits on word boundaries)
        if self.caseless:
            if ( (instring[ loc:loc+self.matchLen ].upper() == self.caselessmatch) and
                 (loc >= len(instring)-self.matchLen or instring[loc+self.matchLen].upper() not in self.identChars) and
                 (loc == 0 or instring[loc-1].upper() not in self.identChars) ):
                return loc+self.matchLen, self.match
        else:
            if (instring[loc] == self.firstMatchChar and
                (self.matchLen==1 or instring.startswith(self.match,loc)) and
                (loc >= len(instring)-self.matchLen or instring[loc+self.matchLen] not in self.identChars) and
                (loc == 0 or instring[loc-1] not in self.identChars) ):
                return loc+self.matchLen, self.match
        #~ raise ParseException( instring, loc, self.errmsg )
        exc = self.myException
        exc.loc = loc
        exc.pstr = instring
        raise exc
    def copy(self):
        # copies revert to the class-level default identifier characters
        c = super(Keyword,self).copy()
        c.identChars = Keyword.DEFAULT_KEYWORD_CHARS
        return c
    def setDefaultKeywordChars( chars ):
        """Overrides the default Keyword chars
        """
        Keyword.DEFAULT_KEYWORD_CHARS = chars
    setDefaultKeywordChars = staticmethod(setDefaultKeywordChars)
class CaselessLiteral(Literal):
    """Token to match a specified string, ignoring case of letters.
    Note: the matched results will always be in the case of the given
    match string, NOT the case of the input text.
    """
    def __init__(self, matchString):
        # the base class stores the uppercased form for comparison; the
        # original spelling is kept for the returned token
        super(CaselessLiteral, self).__init__(matchString.upper())
        self.returnString = matchString
        self.name = "'%s'" % self.returnString
        self.errmsg = "Expected " + self.name
    def parseImpl(self, instring, loc, doActions=True):
        candidate = instring[loc:loc + self.matchLen].upper()
        if candidate == self.match:
            return loc + self.matchLen, self.returnString
        failure = self.myException
        failure.loc = loc
        failure.pstr = instring
        raise failure
class CaselessKeyword(Keyword):
    """Caseless variant of C{Keyword}; the match string (uppercased by
    Keyword.__init__ when caseless=True) is returned on a match."""
    def __init__( self, matchString, identChars=Keyword.DEFAULT_KEYWORD_CHARS ):
        super(CaselessKeyword,self).__init__( matchString, identChars, caseless=True )
    def parseImpl( self, instring, loc, doActions=True ):
        # NOTE(review): unlike Keyword.parseImpl, this omits the leading-boundary
        # test (instring[loc-1] not in identChars) -- confirm whether a match
        # immediately preceded by an identifier character is intended here.
        if ( (instring[ loc:loc+self.matchLen ].upper() == self.caselessmatch) and
             (loc >= len(instring)-self.matchLen or instring[loc+self.matchLen].upper() not in self.identChars) ):
            return loc+self.matchLen, self.match
        #~ raise ParseException( instring, loc, self.errmsg )
        exc = self.myException
        exc.loc = loc
        exc.pstr = instring
        raise exc
class Word(Token):
    """Token for matching words composed of allowed character sets.
    Defined with string containing all allowed initial characters,
    an optional string containing allowed body characters (if omitted,
    defaults to the initial character set), and an optional minimum,
    maximum, and/or exact length. The default value for C{min} is 1 (a
    minimum value < 1 is not valid); the default values for C{max} and C{exact}
    are 0, meaning no maximum or exact length restriction.
    """
    def __init__( self, initChars, bodyChars=None, min=1, max=0, exact=0, asKeyword=False ):
        super(Word,self).__init__()
        self.initCharsOrig = initChars
        self.initChars = set(initChars)
        if bodyChars :
            self.bodyCharsOrig = bodyChars
            self.bodyChars = set(bodyChars)
        else:
            # no separate body set given: body chars default to the initial set
            self.bodyCharsOrig = initChars
            self.bodyChars = set(initChars)
        self.maxSpecified = max > 0
        if min < 1:
            raise ValueError("cannot specify a minimum length < 1; use Optional(Word()) if zero-length word is permitted")
        self.minLen = min
        if max > 0:
            self.maxLen = max
        else:
            self.maxLen = _MAX_INT
        if exact > 0:
            # exact overrides both min and max
            self.maxLen = exact
            self.minLen = exact
        self.name = _ustr(self)
        self.errmsg = "Expected " + self.name
        #self.myException.msg = self.errmsg
        self.mayIndexError = False
        self.asKeyword = asKeyword
        # fast path: for the default-length case with no spaces in the char
        # sets, precompile an equivalent regular expression
        if ' ' not in self.initCharsOrig+self.bodyCharsOrig and (min==1 and max==0 and exact==0):
            if self.bodyCharsOrig == self.initCharsOrig:
                self.reString = "[%s]+" % _escapeRegexRangeChars(self.initCharsOrig)
            elif len(self.bodyCharsOrig) == 1:
                self.reString = "%s[%s]*" % \
                                      (re.escape(self.initCharsOrig),
                                      _escapeRegexRangeChars(self.bodyCharsOrig),)
            else:
                self.reString = "[%s][%s]*" % \
                                      (_escapeRegexRangeChars(self.initCharsOrig),
                                      _escapeRegexRangeChars(self.bodyCharsOrig),)
            if self.asKeyword:
                self.reString = r"\b"+self.reString+r"\b"
            try:
                self.re = re.compile( self.reString )
            except:
                # NOTE(review): bare except -- any compile failure silently
                # falls back to the character-by-character scan in parseImpl
                self.re = None
    def parseImpl( self, instring, loc, doActions=True ):
        if self.re:
            # regex fast path (see __init__)
            result = self.re.match(instring,loc)
            if not result:
                exc = self.myException
                exc.loc = loc
                exc.pstr = instring
                raise exc
            loc = result.end()
            return loc,result.group()
        if not(instring[ loc ] in self.initChars):
            #~ raise ParseException( instring, loc, self.errmsg )
            exc = self.myException
            exc.loc = loc
            exc.pstr = instring
            raise exc
        start = loc
        loc += 1
        instrlen = len(instring)
        bodychars = self.bodyChars
        maxloc = start + self.maxLen
        maxloc = min( maxloc, instrlen )
        # consume body characters up to the maximum allowed length
        while loc < maxloc and instring[loc] in bodychars:
            loc += 1
        throwException = False
        if loc - start < self.minLen:
            throwException = True
        if self.maxSpecified and loc < instrlen and instring[loc] in bodychars:
            # more body chars follow than max allows -> not a clean match
            throwException = True
        if self.asKeyword:
            # keyword mode: must not be adjacent to other word characters
            if (start>0 and instring[start-1] in bodychars) or (loc<instrlen and instring[loc] in bodychars):
                throwException = True
        if throwException:
            #~ raise ParseException( instring, loc, self.errmsg )
            exc = self.myException
            exc.loc = loc
            exc.pstr = instring
            raise exc
        return loc, instring[start:loc]
    def __str__( self ):
        try:
            return super(Word,self).__str__()
        except:
            # no name available yet; fall through to the computed representation
            pass
        if self.strRepr is None:
            def charsAsStr(s):
                # abbreviate long character sets for display
                if len(s)>4:
                    return s[:4]+"..."
                else:
                    return s
            if ( self.initCharsOrig != self.bodyCharsOrig ):
                self.strRepr = "W:(%s,%s)" % ( charsAsStr(self.initCharsOrig), charsAsStr(self.bodyCharsOrig) )
            else:
                self.strRepr = "W:(%s)" % charsAsStr(self.initCharsOrig)
        return self.strRepr
class Regex(Token):
    """Token for matching strings that match a given regular expression.
    Defined with string specifying the regular expression in a form recognized by the inbuilt Python re module.
    """
    # used to recognize already-compiled pattern objects passed to the constructor
    compiledREtype = type(re.compile("[A-Z]"))
    def __init__( self, pattern, flags=0):
        """The parameters pattern and flags are passed to the re.compile() function as-is. See the Python re module for an explanation of the acceptable patterns and flags."""
        super(Regex,self).__init__()
        if isinstance(pattern, basestring):
            if len(pattern) == 0:
                warnings.warn("null string passed to Regex; use Empty() instead",
                        SyntaxWarning, stacklevel=2)
            self.pattern = pattern
            self.flags = flags
            try:
                self.re = re.compile(self.pattern, self.flags)
                self.reString = self.pattern
            except sre_constants.error:
                # warn with context, then let the original compile error propagate
                warnings.warn("invalid pattern (%s) passed to Regex" % pattern,
                    SyntaxWarning, stacklevel=2)
                raise
        elif isinstance(pattern, Regex.compiledREtype):
            # already-compiled pattern: adopt as-is (flags kept for reference only)
            self.re = pattern
            self.pattern = \
                self.reString = str(pattern)
            self.flags = flags
        else:
            raise ValueError("Regex may only be constructed with a string or a compiled RE object")
        self.name = _ustr(self)
        self.errmsg = "Expected " + self.name
        #self.myException.msg = self.errmsg
        self.mayIndexError = False
        self.mayReturnEmpty = True
    def parseImpl( self, instring, loc, doActions=True ):
        result = self.re.match(instring,loc)
        if not result:
            exc = self.myException
            exc.loc = loc
            exc.pstr = instring
            raise exc
        loc = result.end()
        d = result.groupdict()
        ret = ParseResults(result.group())
        if d:
            # expose the pattern's named groups as named results
            for k in d:
                ret[k] = d[k]
        return loc,ret
    def __str__( self ):
        try:
            return super(Regex,self).__str__()
        except:
            # no name set yet; fall through to the computed representation
            pass
        if self.strRepr is None:
            self.strRepr = "Re:(%s)" % repr(self.pattern)
        return self.strRepr
class QuotedString(Token):
    """Token for matching strings that are delimited by quoting characters.
    """
    def __init__( self, quoteChar, escChar=None, escQuote=None, multiline=False, unquoteResults=True, endQuoteChar=None):
        """
        Defined with the following parameters:
        - quoteChar - string of one or more characters defining the quote delimiting string
        - escChar - character to escape quotes, typically backslash (default=None)
        - escQuote - special quote sequence to escape an embedded quote string (such as SQL's "" to escape an embedded ") (default=None)
        - multiline - boolean indicating whether quotes can span multiple lines (default=False)
        - unquoteResults - boolean indicating whether the matched text should be unquoted (default=True)
        - endQuoteChar - string of one or more characters defining the end of the quote delimited string (default=None => same as quoteChar)
        """
        super(QuotedString,self).__init__()
        # remove white space from quote chars - wont work anyway
        quoteChar = quoteChar.strip()
        if len(quoteChar) == 0:
            warnings.warn("quoteChar cannot be the empty string",SyntaxWarning,stacklevel=2)
            raise SyntaxError()
        if endQuoteChar is None:
            endQuoteChar = quoteChar
        else:
            endQuoteChar = endQuoteChar.strip()
            if len(endQuoteChar) == 0:
                warnings.warn("endQuoteChar cannot be the empty string",SyntaxWarning,stacklevel=2)
                raise SyntaxError()
        self.quoteChar = quoteChar
        self.quoteCharLen = len(quoteChar)
        self.firstQuoteChar = quoteChar[0]
        self.endQuoteChar = endQuoteChar
        self.endQuoteCharLen = len(endQuoteChar)
        self.escChar = escChar
        self.escQuote = escQuote
        self.unquoteResults = unquoteResults
        # build a regex of the form <quote>(?: body-alternatives )*<endquote>;
        # the body excludes the end-quote's first char and the escape char
        if multiline:
            self.flags = re.MULTILINE | re.DOTALL
            self.pattern = r'%s(?:[^%s%s]' % \
                ( re.escape(self.quoteChar),
                  _escapeRegexRangeChars(self.endQuoteChar[0]),
                  (escChar is not None and _escapeRegexRangeChars(escChar) or '') )
        else:
            self.flags = 0
            # single-line mode additionally excludes newline characters
            self.pattern = r'%s(?:[^%s\n\r%s]' % \
                ( re.escape(self.quoteChar),
                  _escapeRegexRangeChars(self.endQuoteChar[0]),
                  (escChar is not None and _escapeRegexRangeChars(escChar) or '') )
        if len(self.endQuoteChar) > 1:
            # multi-char end quote: allow any proper prefix of it that is not
            # followed by the next char of the full end-quote string
            self.pattern += (
                '|(?:' + ')|(?:'.join(["%s[^%s]" % (re.escape(self.endQuoteChar[:i]),
                                               _escapeRegexRangeChars(self.endQuoteChar[i]))
                                    for i in range(len(self.endQuoteChar)-1,0,-1)]) + ')'
                )
        if escQuote:
            self.pattern += (r'|(?:%s)' % re.escape(escQuote))
        if escChar:
            self.pattern += (r'|(?:%s.)' % re.escape(escChar))
            # pattern used in parseImpl to strip the escape char from results
            self.escCharReplacePattern = re.escape(self.escChar)+"(.)"
        self.pattern += (r')*%s' % re.escape(self.endQuoteChar))
        try:
            self.re = re.compile(self.pattern, self.flags)
            self.reString = self.pattern
        except sre_constants.error:
            warnings.warn("invalid pattern (%s) passed to Regex" % self.pattern,
                SyntaxWarning, stacklevel=2)
            raise
        self.name = _ustr(self)
        self.errmsg = "Expected " + self.name
        #self.myException.msg = self.errmsg
        self.mayIndexError = False
        self.mayReturnEmpty = True
    def parseImpl( self, instring, loc, doActions=True ):
        # cheap first-char test before running the (costlier) regex
        result = instring[loc] == self.firstQuoteChar and self.re.match(instring,loc) or None
        if not result:
            exc = self.myException
            exc.loc = loc
            exc.pstr = instring
            raise exc
        loc = result.end()
        ret = result.group()
        if self.unquoteResults:
            # strip off quotes
            ret = ret[self.quoteCharLen:-self.endQuoteCharLen]
            if isinstance(ret,basestring):
                # replace escaped characters
                if self.escChar:
                    # NOTE(review): "\g<1>" in a non-raw string -- works today
                    # because \g is not a recognized escape, but r"\g<1>" would
                    # be the robust spelling
                    ret = re.sub(self.escCharReplacePattern,"\g<1>",ret)
                # replace escaped quotes
                if self.escQuote:
                    ret = ret.replace(self.escQuote, self.endQuoteChar)
        return loc, ret
    def __str__( self ):
        try:
            return super(QuotedString,self).__str__()
        except:
            # no name set yet; fall through to the computed representation
            pass
        if self.strRepr is None:
            self.strRepr = "quoted string, starting with %s ending with %s" % (self.quoteChar, self.endQuoteChar)
        return self.strRepr
class CharsNotIn(Token):
    """Token for matching words composed of characters *not* in a given set.
    Defined with string containing all disallowed characters, and an optional
    minimum, maximum, and/or exact length. The default value for C{min} is 1 (a
    minimum value < 1 is not valid); the default values for C{max} and C{exact}
    are 0, meaning no maximum or exact length restriction.
    """
    def __init__( self, notChars, min=1, max=0, exact=0 ):
        super(CharsNotIn,self).__init__()
        # whitespace is significant here -- it may be part of the matched run
        self.skipWhitespace = False
        self.notChars = notChars
        if min < 1:
            raise ValueError("cannot specify a minimum length < 1; use Optional(CharsNotIn()) if zero-length char group is permitted")
        self.minLen = min
        if max > 0:
            self.maxLen = max
        else:
            self.maxLen = _MAX_INT
        if exact > 0:
            # exact overrides both min and max
            self.maxLen = exact
            self.minLen = exact
        self.name = _ustr(self)
        self.errmsg = "Expected " + self.name
        self.mayReturnEmpty = ( self.minLen == 0 )
        #self.myException.msg = self.errmsg
        self.mayIndexError = False
    def parseImpl( self, instring, loc, doActions=True ):
        if instring[loc] in self.notChars:
            #~ raise ParseException( instring, loc, self.errmsg )
            exc = self.myException
            exc.loc = loc
            exc.pstr = instring
            raise exc
        start = loc
        loc += 1
        notchars = self.notChars
        maxlen = min( start+self.maxLen, len(instring) )
        # consume characters not in the disallowed set, up to the max length
        while loc < maxlen and \
                (instring[loc] not in notchars):
            loc += 1
        if loc - start < self.minLen:
            #~ raise ParseException( instring, loc, self.errmsg )
            exc = self.myException
            exc.loc = loc
            exc.pstr = instring
            raise exc
        return loc, instring[start:loc]
    def __str__( self ):
        try:
            return super(CharsNotIn, self).__str__()
        except:
            # no name set yet; fall through to the computed representation
            pass
        if self.strRepr is None:
            if len(self.notChars) > 4:
                # abbreviate long character sets for display
                self.strRepr = "!W:(%s...)" % self.notChars[:4]
            else:
                self.strRepr = "!W:(%s)" % self.notChars
        return self.strRepr
class White(Token):
    """Special matching class for matching whitespace. Normally, whitespace is ignored
    by pyparsing grammars. This class is included when some whitespace structures
    are significant. Define with a string containing the whitespace characters to be
    matched; default is C{" \\t\\r\\n"}. Also takes optional C{min}, C{max}, and C{exact} arguments,
    as defined for the C{Word} class."""
    # display names for each recognized whitespace character
    whiteStrs = {
        " " : "<SPC>",
        "\t": "<TAB>",
        "\n": "<LF>",
        "\r": "<CR>",
        "\f": "<FF>",
        }
    def __init__(self, ws=" \t\r\n", min=1, max=0, exact=0):
        super(White,self).__init__()
        self.matchWhite = ws
        # chars to be matched must not also be skipped as leading whitespace
        self.setWhitespaceChars( "".join([c for c in self.whiteChars if c not in self.matchWhite]) )
        #~ self.leaveWhitespace()
        self.name = ("".join([White.whiteStrs[c] for c in self.matchWhite]))
        self.mayReturnEmpty = True
        self.errmsg = "Expected " + self.name
        #self.myException.msg = self.errmsg
        self.minLen = min
        if max > 0:
            self.maxLen = max
        else:
            self.maxLen = _MAX_INT
        if exact > 0:
            # exact overrides both min and max
            self.maxLen = exact
            self.minLen = exact
    def parseImpl( self, instring, loc, doActions=True ):
        if not(instring[ loc ] in self.matchWhite):
            #~ raise ParseException( instring, loc, self.errmsg )
            exc = self.myException
            exc.loc = loc
            exc.pstr = instring
            raise exc
        start = loc
        loc += 1
        maxloc = start + self.maxLen
        maxloc = min( maxloc, len(instring) )
        # consume matching whitespace up to the maximum allowed length
        while loc < maxloc and instring[loc] in self.matchWhite:
            loc += 1
        if loc - start < self.minLen:
            #~ raise ParseException( instring, loc, self.errmsg )
            exc = self.myException
            exc.loc = loc
            exc.pstr = instring
            raise exc
        return loc, instring[start:loc]
class _PositionToken(Token):
    """Common base for zero-width tokens that match a *position* in the input
    rather than any text (line/word/string boundaries)."""
    def __init__( self ):
        super(_PositionToken, self).__init__()
        # report the concrete subclass name in error messages
        self.name = self.__class__.__name__
        # position tokens consume no input and never index past the end
        self.mayReturnEmpty = True
        self.mayIndexError = False
class GoToColumn(_PositionToken):
    """Token to advance to a specific column of input text; useful for tabular report scraping."""
    def __init__( self, colno ):
        super(GoToColumn,self).__init__()
        # 1-based target column
        self.col = colno
    def preParse( self, instring, loc ):
        # advance over whitespace (and ignorable expressions) until the target
        # column is reached or the input is exhausted
        if col(loc,instring) != self.col:
            instrlen = len(instring)
            if self.ignoreExprs:
                loc = self._skipIgnorables( instring, loc )
            while loc < instrlen and instring[loc].isspace() and col( loc, instring ) != self.col :
                loc += 1
        return loc
    def parseImpl( self, instring, loc, doActions=True ):
        thiscol = col( loc, instring )
        if thiscol > self.col:
            # already past the requested column - we cannot back up
            raise ParseException( instring, loc, "Text not in expected column", self )
        newloc = loc + self.col - thiscol
        # the text skipped over to reach the column is returned as the match
        ret = instring[ loc: newloc ]
        return newloc, ret
class LineStart(_PositionToken):
    """Matches if current position is at the beginning of a line within the parse string"""
    def __init__( self ):
        super(LineStart,self).__init__()
        # skip all default whitespace EXCEPT newlines - those mark the positions we match
        self.setWhitespaceChars( ParserElement.DEFAULT_WHITE_CHARS.replace("\n","") )
        self.errmsg = "Expected start of line"
        #self.myException.msg = self.errmsg
    def preParse( self, instring, loc ):
        preloc = super(LineStart,self).preParse(instring,loc)
        # step past a newline so parsing resumes at the start of the next line
        # NOTE(review): indexes instring[preloc] with no bounds check - presumably
        # never called with preloc at end-of-string; confirm before relying on it
        if instring[preloc] == "\n":
            loc += 1
        return loc
    def parseImpl( self, instring, loc, doActions=True ):
        # match at offset 0, at the first non-skippable position, or just after a newline
        if not( loc==0 or
            (loc == self.preParse( instring, 0 )) or
            (instring[loc-1] == "\n") ): #col(loc, instring) != 1:
            #~ raise ParseException( instring, loc, "Expected start of line" )
            exc = self.myException
            exc.loc = loc
            exc.pstr = instring
            raise exc
        # zero-width match: position unchanged, no tokens produced
        return loc, []
class LineEnd(_PositionToken):
    """Matches if current position is at the end of a line within the parse string"""
    def __init__( self ):
        super(LineEnd,self).__init__()
        # skip all default whitespace EXCEPT newlines - those are what we match
        self.setWhitespaceChars( ParserElement.DEFAULT_WHITE_CHARS.replace("\n","") )
        self.errmsg = "Expected end of line"
        #self.myException.msg = self.errmsg
    def parseImpl( self, instring, loc, doActions=True ):
        if loc<len(instring):
            if instring[loc] == "\n":
                # consume the newline and return it as the matched token
                return loc+1, "\n"
            else:
                #~ raise ParseException( instring, loc, "Expected end of line" )
                exc = self.myException
                exc.loc = loc
                exc.pstr = instring
                raise exc
        elif loc == len(instring):
            # end of input counts as end of line; loc advances past the end,
            # mirroring the newline-consuming branch above
            return loc+1, []
        else:
            # loc beyond the end of input: fail
            exc = self.myException
            exc.loc = loc
            exc.pstr = instring
            raise exc
class StringStart(_PositionToken):
    """Matches if current position is at the beginning of the parse string"""
    def __init__( self ):
        super(StringStart,self).__init__()
        self.errmsg = "Expected start of text"
        #self.myException.msg = self.errmsg
    def parseImpl( self, instring, loc, doActions=True ):
        if loc != 0:
            # see if entire string up to here is just whitespace and ignoreables
            if loc != self.preParse( instring, 0 ):
                #~ raise ParseException( instring, loc, "Expected start of text" )
                exc = self.myException
                exc.loc = loc
                exc.pstr = instring
                raise exc
        # zero-width match: no tokens, position unchanged
        return loc, []
class StringEnd(_PositionToken):
    """Matches if current position is at the end of the parse string"""
    def __init__( self ):
        super(StringEnd,self).__init__()
        self.errmsg = "Expected end of text"
        #self.myException.msg = self.errmsg
    def parseImpl( self, instring, loc, doActions=True ):
        if loc < len(instring):
            #~ raise ParseException( instring, loc, "Expected end of text" )
            exc = self.myException
            exc.loc = loc
            exc.pstr = instring
            raise exc
        elif loc == len(instring):
            # exactly at end of input; advance loc past the end (like LineEnd)
            return loc+1, []
        elif loc > len(instring):
            # already past the end (e.g. after a previous past-the-end match)
            return loc, []
        else:
            # NOTE(review): unreachable - the three branches above cover all integer loc
            exc = self.myException
            exc.loc = loc
            exc.pstr = instring
            raise exc
class WordStart(_PositionToken):
    """Matches if the current position is at the beginning of a Word, and
    is not preceded by any character in a given set of wordChars
    (default=C{printables}). To emulate the C{\b} behavior of regular expressions,
    use C{WordStart(alphanums)}. C{WordStart} will also match at the beginning of
    the string being parsed, or at the beginning of a line.
    """
    def __init__(self, wordChars = printables):
        super(WordStart, self).__init__()
        # membership tests against a set are O(1)
        self.wordChars = set(wordChars)
        self.errmsg = "Not at the start of a word"
    def parseImpl(self, instring, loc, doActions=True ):
        # offset 0 is always a word start
        if loc == 0:
            return loc, []
        # fail when the previous char is a word char, or the current one is not;
        # the short-circuit keeps instring[loc] untouched when the first test fires
        if instring[loc-1] in self.wordChars or instring[loc] not in self.wordChars:
            exc = self.myException
            exc.loc = loc
            exc.pstr = instring
            raise exc
        return loc, []
class WordEnd(_PositionToken):
    """Matches if the current position is at the end of a Word, and
    is not followed by any character in a given set of wordChars
    (default=C{printables}). To emulate the C{\b} behavior of regular expressions,
    use C{WordEnd(alphanums)}. C{WordEnd} will also match at the end of
    the string being parsed, or at the end of a line.
    """
    def __init__(self, wordChars = printables):
        super(WordEnd, self).__init__()
        # membership tests against a set are O(1)
        self.wordChars = set(wordChars)
        # a word end is adjacent to whitespace; do not skip over it
        self.skipWhitespace = False
        self.errmsg = "Not at the end of a word"
    def parseImpl(self, instring, loc, doActions=True ):
        # end of input (or empty input) is always a word end
        if len(instring) == 0 or loc >= len(instring):
            return loc, []
        # fail when the current char is a word char, or the previous one is not;
        # the short-circuit keeps instring[loc-1] untouched when the first test fires
        if instring[loc] in self.wordChars or instring[loc-1] not in self.wordChars:
            #~ raise ParseException( instring, loc, "Expected end of word" )
            exc = self.myException
            exc.loc = loc
            exc.pstr = instring
            raise exc
        return loc, []
class ParseExpression(ParserElement):
    """Abstract subclass of ParserElement, for combining and post-processing parsed tokens."""
    def __init__( self, exprs, savelist = False ):
        super(ParseExpression,self).__init__(savelist)
        # normalize exprs to a list: accept a list, a bare string (wrapped in a
        # Literal), any other iterable, or a single expression
        if isinstance( exprs, list ):
            self.exprs = exprs
        elif isinstance( exprs, basestring ):
            self.exprs = [ Literal( exprs ) ]
        else:
            try:
                self.exprs = list( exprs )
            except TypeError:
                self.exprs = [ exprs ]
        self.callPreparse = False
    def __getitem__( self, i ):
        return self.exprs[i]
    def append( self, other ):
        # invalidate the cached string representation when contents change
        self.exprs.append( other )
        self.strRepr = None
        return self
    def leaveWhitespace( self ):
        """Extends leaveWhitespace defined in base class, and also invokes leaveWhitespace on
        all contained expressions."""
        self.skipWhitespace = False
        # copy the contained expressions first so the originals are not modified
        self.exprs = [ e.copy() for e in self.exprs ]
        for e in self.exprs:
            e.leaveWhitespace()
        return self
    def ignore( self, other ):
        # register the ignorable on self, then propagate the registered copy
        # (self.ignoreExprs[-1]) to each contained expression
        if isinstance( other, Suppress ):
            if other not in self.ignoreExprs:
                super( ParseExpression, self).ignore( other )
                for e in self.exprs:
                    e.ignore( self.ignoreExprs[-1] )
        else:
            super( ParseExpression, self).ignore( other )
            for e in self.exprs:
                e.ignore( self.ignoreExprs[-1] )
        return self
    def __str__( self ):
        try:
            # the base implementation raises when no explicit name was set
            return super(ParseExpression,self).__str__()
        except:
            pass
        if self.strRepr is None:
            self.strRepr = "%s:(%s)" % ( self.__class__.__name__, _ustr(self.exprs) )
        return self.strRepr
    def streamline( self ):
        super(ParseExpression,self).streamline()
        for e in self.exprs:
            e.streamline()
        # collapse nested And's of the form And( And( And( a,b), c), d) to And( a,b,c,d )
        # but only if there are no parse actions or resultsNames on the nested And's
        # (likewise for Or's and MatchFirst's)
        if ( len(self.exprs) == 2 ):
            # try to absorb a same-class first child...
            other = self.exprs[0]
            if ( isinstance( other, self.__class__ ) and
                  not(other.parseAction) and
                  other.resultsName is None and
                  not other.debug ):
                self.exprs = other.exprs[:] + [ self.exprs[1] ]
                self.strRepr = None
                self.mayReturnEmpty |= other.mayReturnEmpty
                self.mayIndexError |= other.mayIndexError
            # ...then a same-class last child
            other = self.exprs[-1]
            if ( isinstance( other, self.__class__ ) and
                  not(other.parseAction) and
                  other.resultsName is None and
                  not other.debug ):
                self.exprs = self.exprs[:-1] + other.exprs[:]
                self.strRepr = None
                self.mayReturnEmpty |= other.mayReturnEmpty
                self.mayIndexError |= other.mayIndexError
        return self
    def setResultsName( self, name, listAllMatches=False ):
        ret = super(ParseExpression,self).setResultsName(name,listAllMatches)
        return ret
    def validate( self, validateTrace=[] ):
        # recursively validate children, then check this node for left recursion
        tmp = validateTrace[:]+[self]
        for e in self.exprs:
            e.validate(tmp)
        self.checkRecursion( [] )
class And(ParseExpression):
    """Requires all given C{ParseExpressions} to be found in the given order.
       Expressions may be separated by whitespace.
       May be constructed using the '+' operator.
    """
    class _ErrorStop(Empty):
        # marker element: everything after it in the And must match, or a
        # non-backtrackable ParseSyntaxException is raised
        def __init__(self, *args, **kwargs):
            # NOTE(review): super(Empty,...) skips Empty.__init__ and calls its
            # parent's - looks intentional here, but confirm before changing
            super(Empty,self).__init__(*args, **kwargs)
            self.leaveWhitespace()
    def __init__( self, exprs, savelist = True ):
        super(And,self).__init__(exprs, savelist)
        # the sequence may return empty only if every element may
        self.mayReturnEmpty = True
        for e in self.exprs:
            if not e.mayReturnEmpty:
                self.mayReturnEmpty = False
                break
        # inherit whitespace handling from the first element
        self.setWhitespaceChars( exprs[0].whiteChars )
        self.skipWhitespace = exprs[0].skipWhitespace
        self.callPreparse = True
    def parseImpl( self, instring, loc, doActions=True ):
        # pass False as last arg to _parse for first element, since we already
        # pre-parsed the string as part of our And pre-parsing
        loc, resultlist = self.exprs[0]._parse( instring, loc, doActions, callPreParse=False )
        errorStop = False
        for e in self.exprs[1:]:
            if isinstance(e, And._ErrorStop):
                errorStop = True
                continue
            if errorStop:
                # past the error stop: convert ordinary parse failures into
                # ParseSyntaxException so alternatives do not backtrack
                try:
                    loc, exprtokens = e._parse( instring, loc, doActions )
                except ParseSyntaxException:
                    raise
                except ParseBaseException:
                    pe = sys.exc_info()[1]
                    raise ParseSyntaxException(pe)
                except IndexError:
                    raise ParseSyntaxException( ParseException(instring, len(instring), self.errmsg, self) )
            else:
                loc, exprtokens = e._parse( instring, loc, doActions )
            # accumulate tokens that carry content or named results
            if exprtokens or exprtokens.keys():
                resultlist += exprtokens
        return loc, resultlist
    def __iadd__(self, other ):
        if isinstance( other, basestring ):
            other = Literal( other )
        return self.append( other ) #And( [ self, other ] )
    def checkRecursion( self, parseElementList ):
        subRecCheckList = parseElementList[:] + [ self ]
        for e in self.exprs:
            e.checkRecursion( subRecCheckList )
            # once an element must consume input, later elements cannot left-recurse
            if not e.mayReturnEmpty:
                break
    def __str__( self ):
        if hasattr(self,"name"):
            return self.name
        if self.strRepr is None:
            self.strRepr = "{" + " ".join( [ _ustr(e) for e in self.exprs ] ) + "}"
        return self.strRepr
class Or(ParseExpression):
    """Requires that at least one C{ParseExpression} is found.
       If two expressions match, the expression that matches the longest string will be used.
       May be constructed using the '^' operator.
    """
    def __init__( self, exprs, savelist = False ):
        super(Or,self).__init__(exprs, savelist)
        # the alternation may return empty if any alternative may
        self.mayReturnEmpty = False
        for e in self.exprs:
            if e.mayReturnEmpty:
                self.mayReturnEmpty = True
                break
    def parseImpl( self, instring, loc, doActions=True ):
        maxExcLoc = -1
        maxMatchLoc = -1
        maxException = None
        # try every alternative, remembering the longest match and, failing
        # that, the failure that got the furthest into the input
        for e in self.exprs:
            try:
                loc2 = e.tryParse( instring, loc )
            except ParseException:
                # sys.exc_info() form is Python 2/3 compatible
                err = sys.exc_info()[1]
                if err.loc > maxExcLoc:
                    maxException = err
                    maxExcLoc = err.loc
            except IndexError:
                if len(instring) > maxExcLoc:
                    maxException = ParseException(instring,len(instring),e.errmsg,self)
                    maxExcLoc = len(instring)
            else:
                if loc2 > maxMatchLoc:
                    maxMatchLoc = loc2
                    maxMatchExp = e
        if maxMatchLoc < 0:
            if maxException is not None:
                raise maxException
            else:
                raise ParseException(instring, loc, "no defined alternatives to match", self)
        # re-parse the winning alternative with actions enabled as requested
        return maxMatchExp._parse( instring, loc, doActions )
    def __ixor__(self, other ):
        if isinstance( other, basestring ):
            other = Literal( other )
        return self.append( other ) #Or( [ self, other ] )
    def __str__( self ):
        if hasattr(self,"name"):
            return self.name
        if self.strRepr is None:
            self.strRepr = "{" + " ^ ".join( [ _ustr(e) for e in self.exprs ] ) + "}"
        return self.strRepr
    def checkRecursion( self, parseElementList ):
        subRecCheckList = parseElementList[:] + [ self ]
        for e in self.exprs:
            e.checkRecursion( subRecCheckList )
class MatchFirst(ParseExpression):
    """Requires that at least one C{ParseExpression} is found.
       If two expressions match, the first one listed is the one that will match.
       May be constructed using the '|' operator.
    """
    def __init__( self, exprs, savelist = False ):
        super(MatchFirst,self).__init__(exprs, savelist)
        if exprs:
            # the alternation may return empty if any alternative may
            self.mayReturnEmpty = False
            for e in self.exprs:
                if e.mayReturnEmpty:
                    self.mayReturnEmpty = True
                    break
        else:
            # an empty MatchFirst trivially "matches" nothing
            self.mayReturnEmpty = True
    def parseImpl( self, instring, loc, doActions=True ):
        maxExcLoc = -1
        maxException = None
        # return on the FIRST alternative that matches; track the failure
        # that reached furthest into the input for error reporting
        for e in self.exprs:
            try:
                ret = e._parse( instring, loc, doActions )
                return ret
            except ParseException as err:
                if err.loc > maxExcLoc:
                    maxException = err
                    maxExcLoc = err.loc
            except IndexError:
                if len(instring) > maxExcLoc:
                    maxException = ParseException(instring,len(instring),e.errmsg,self)
                    maxExcLoc = len(instring)
        # only got here if no expression matched, raise exception for match that made it the furthest
        else:
            if maxException is not None:
                raise maxException
            else:
                raise ParseException(instring, loc, "no defined alternatives to match", self)
    def __ior__(self, other ):
        if isinstance( other, basestring ):
            other = Literal( other )
        return self.append( other ) #MatchFirst( [ self, other ] )
    def __str__( self ):
        if hasattr(self,"name"):
            return self.name
        if self.strRepr is None:
            self.strRepr = "{" + " | ".join( [ _ustr(e) for e in self.exprs ] ) + "}"
        return self.strRepr
    def checkRecursion( self, parseElementList ):
        subRecCheckList = parseElementList[:] + [ self ]
        for e in self.exprs:
            e.checkRecursion( subRecCheckList )
class Each(ParseExpression):
    """Requires all given C{ParseExpressions} to be found, but in any order.
       Expressions may be separated by whitespace.
       May be constructed using the '&' operator.
    """
    def __init__( self, exprs, savelist = True ):
        super(Each,self).__init__(exprs, savelist)
        # the group may return empty only if every element may
        self.mayReturnEmpty = True
        for e in self.exprs:
            if not e.mayReturnEmpty:
                self.mayReturnEmpty = False
                break
        self.skipWhitespace = True
        # defer classifying the elements until the first parse
        self.initExprGroups = True
    def parseImpl( self, instring, loc, doActions=True ):
        if self.initExprGroups:
            # classify elements once: optional, repeatable, and required
            opt1 = [ e.expr for e in self.exprs if isinstance(e,Optional) ]
            opt2 = [ e for e in self.exprs if e.mayReturnEmpty and e not in opt1 ]
            self.optionals = opt1 + opt2
            self.multioptionals = [ e.expr for e in self.exprs if isinstance(e,ZeroOrMore) ]
            self.multirequired = [ e.expr for e in self.exprs if isinstance(e,OneOrMore) ]
            self.required = [ e for e in self.exprs if not isinstance(e,(Optional,ZeroOrMore,OneOrMore)) ]
            self.required += self.multirequired
            self.initExprGroups = False
        tmpLoc = loc
        tmpReqd = self.required[:]
        tmpOpt  = self.optionals[:]
        matchOrder = []
        # greedily match elements in any order until a full pass matches nothing
        keepMatching = True
        while keepMatching:
            tmpExprs = tmpReqd + tmpOpt + self.multioptionals + self.multirequired
            failed = []
            for e in tmpExprs:
                try:
                    tmpLoc = e.tryParse( instring, tmpLoc )
                except ParseException:
                    failed.append(e)
                else:
                    matchOrder.append(e)
                    # one-shot elements are removed once matched
                    if e in tmpReqd:
                        tmpReqd.remove(e)
                    elif e in tmpOpt:
                        tmpOpt.remove(e)
            if len(failed) == len(tmpExprs):
                keepMatching = False
        if tmpReqd:
            missing = ", ".join( [ _ustr(e) for e in tmpReqd ] )
            raise ParseException(instring,loc,"Missing one or more required elements (%s)" % missing )
        # add any unmatched Optionals, in case they have default values defined
        matchOrder += list(e for e in self.exprs if isinstance(e,Optional) and e.expr in tmpOpt)
        # now re-parse with actions, in the order the elements actually matched
        resultlist = []
        for e in matchOrder:
            loc,results = e._parse(instring,loc,doActions)
            resultlist.append(results)
        # merge results, combining values for keys that appear more than once
        finalResults = ParseResults([])
        for r in resultlist:
            dups = {}
            for k in r.keys():
                if k in finalResults.keys():
                    tmp = ParseResults(finalResults[k])
                    tmp += ParseResults(r[k])
                    dups[k] = tmp
            finalResults += ParseResults(r)
            for k,v in dups.items():
                finalResults[k] = v
        return loc, finalResults
    def __str__( self ):
        if hasattr(self,"name"):
            return self.name
        if self.strRepr is None:
            self.strRepr = "{" + " & ".join( [ _ustr(e) for e in self.exprs ] ) + "}"
        return self.strRepr
    def checkRecursion( self, parseElementList ):
        subRecCheckList = parseElementList[:] + [ self ]
        for e in self.exprs:
            e.checkRecursion( subRecCheckList )
class ParseElementEnhance(ParserElement):
    """Abstract subclass of C{ParserElement}, for combining and post-processing parsed tokens."""
    def __init__( self, expr, savelist=False ):
        super(ParseElementEnhance,self).__init__(savelist)
        # bare strings are promoted to Literal matchers
        if isinstance( expr, basestring ):
            expr = Literal(expr)
        self.expr = expr
        self.strRepr = None
        if expr is not None:
            # inherit parsing characteristics from the wrapped expression
            self.mayIndexError = expr.mayIndexError
            self.mayReturnEmpty = expr.mayReturnEmpty
            self.setWhitespaceChars( expr.whiteChars )
            self.skipWhitespace = expr.skipWhitespace
            self.saveAsList = expr.saveAsList
            self.callPreparse = expr.callPreparse
            self.ignoreExprs.extend(expr.ignoreExprs)
    def parseImpl( self, instring, loc, doActions=True ):
        if self.expr is not None:
            return self.expr._parse( instring, loc, doActions, callPreParse=False )
        else:
            # no expression assigned (e.g. an unfilled Forward)
            raise ParseException("",loc,self.errmsg,self)
    def leaveWhitespace( self ):
        self.skipWhitespace = False
        # copy before modifying so the original wrapped expression is untouched
        # NOTE(review): self.expr.copy() runs before the None check below -
        # presumably self.expr is never None here; confirm
        self.expr = self.expr.copy()
        if self.expr is not None:
            self.expr.leaveWhitespace()
        return self
    def ignore( self, other ):
        # register the ignorable on self, then propagate the registered copy
        if isinstance( other, Suppress ):
            if other not in self.ignoreExprs:
                super( ParseElementEnhance, self).ignore( other )
                if self.expr is not None:
                    self.expr.ignore( self.ignoreExprs[-1] )
        else:
            super( ParseElementEnhance, self).ignore( other )
            if self.expr is not None:
                self.expr.ignore( self.ignoreExprs[-1] )
        return self
    def streamline( self ):
        super(ParseElementEnhance,self).streamline()
        if self.expr is not None:
            self.expr.streamline()
        return self
    def checkRecursion( self, parseElementList ):
        # appearing in our own ancestry means the grammar is left-recursive
        if self in parseElementList:
            raise RecursiveGrammarException( parseElementList+[self] )
        subRecCheckList = parseElementList[:] + [ self ]
        if self.expr is not None:
            self.expr.checkRecursion( subRecCheckList )
    def validate( self, validateTrace=[] ):
        tmp = validateTrace[:]+[self]
        if self.expr is not None:
            self.expr.validate(tmp)
        self.checkRecursion( [] )
    def __str__( self ):
        try:
            # the base implementation raises when no explicit name was set
            return super(ParseElementEnhance,self).__str__()
        except:
            pass
        if self.strRepr is None and self.expr is not None:
            self.strRepr = "%s:(%s)" % ( self.__class__.__name__, _ustr(self.expr) )
        return self.strRepr
class FollowedBy(ParseElementEnhance):
    """Positive lookahead for the given parse expression. C{FollowedBy}
    does *not* advance the parsing position within the input string, it only
    verifies that the specified parse expression matches at the current
    position. C{FollowedBy} always returns a null token list."""
    def __init__( self, expr ):
        super(FollowedBy, self).__init__(expr)
        # a lookahead consumes nothing, so it can always "match empty"
        self.mayReturnEmpty = True
    def parseImpl( self, instring, loc, doActions=True ):
        # any parse failure propagates; on success, stay put and emit no tokens
        self.expr.tryParse( instring, loc )
        return loc, []
class NotAny(ParseElementEnhance):
    """Negative lookahead for the given parse expression. C{NotAny}
    does *not* advance the parsing position within the input string, it only
    verifies that the specified parse expression does *not* match at the current
    position. Also, C{NotAny} does *not* skip over leading whitespace. C{NotAny}
    always returns a null token list. May be constructed using the '~' operator."""
    def __init__( self, expr ):
        super(NotAny, self).__init__(expr)
        #~ self.leaveWhitespace()
        # do NOT use self.leaveWhitespace(), don't want to propagate to exprs
        self.skipWhitespace = False
        self.mayReturnEmpty = True
        self.errmsg = "Found unwanted token, "+_ustr(self.expr)
        #self.myException = ParseException("",0,self.errmsg,self)
    def parseImpl( self, instring, loc, doActions=True ):
        try:
            self.expr.tryParse( instring, loc )
        except (ParseException, IndexError):
            # wrapped expression failed to match - exactly what we want
            return loc, []
        # wrapped expression matched: raise via the cached exception object
        #~ raise ParseException(instring, loc, self.errmsg )
        exc = self.myException
        exc.loc = loc
        exc.pstr = instring
        raise exc
    def __str__( self ):
        if hasattr(self, "name"):
            return self.name
        if self.strRepr is None:
            self.strRepr = "~{" + _ustr(self.expr) + "}"
        return self.strRepr
class ZeroOrMore(ParseElementEnhance):
    """Optional repetition of zero or more of the given expression."""
    def __init__( self, expr ):
        super(ZeroOrMore,self).__init__(expr)
        # zero repetitions is always an acceptable match
        self.mayReturnEmpty = True
    def parseImpl( self, instring, loc, doActions=True ):
        tokens = []
        # keep matching until the expression fails; the terminating
        # ParseException/IndexError is the normal loop exit
        try:
            loc, tokens = self.expr._parse( instring, loc, doActions, callPreParse=False )
            hasIgnoreExprs = ( len(self.ignoreExprs) > 0 )
            while 1:
                if hasIgnoreExprs:
                    preloc = self._skipIgnorables( instring, loc )
                else:
                    preloc = loc
                loc, tmptokens = self.expr._parse( instring, preloc, doActions )
                # accumulate tokens that carry content or named results
                if tmptokens or tmptokens.keys():
                    tokens += tmptokens
        except (ParseException,IndexError):
            pass
        return loc, tokens
    def __str__( self ):
        if hasattr(self,"name"):
            return self.name
        if self.strRepr is None:
            self.strRepr = "[" + _ustr(self.expr) + "]..."
        return self.strRepr
    def setResultsName( self, name, listAllMatches=False ):
        ret = super(ZeroOrMore,self).setResultsName(name,listAllMatches)
        # repeated results are always collected as a list
        ret.saveAsList = True
        return ret
class OneOrMore(ParseElementEnhance):
    """Repetition of one or more of the given expression."""
    def parseImpl( self, instring, loc, doActions=True ):
        # must be at least one - the first match is outside the try, so its
        # failure propagates to the caller
        loc, tokens = self.expr._parse( instring, loc, doActions, callPreParse=False )
        try:
            # further repetitions end via ParseException/IndexError
            hasIgnoreExprs = ( len(self.ignoreExprs) > 0 )
            while 1:
                if hasIgnoreExprs:
                    preloc = self._skipIgnorables( instring, loc )
                else:
                    preloc = loc
                loc, tmptokens = self.expr._parse( instring, preloc, doActions )
                # accumulate tokens that carry content or named results
                if tmptokens or tmptokens.keys():
                    tokens += tmptokens
        except (ParseException,IndexError):
            pass
        return loc, tokens
    def __str__( self ):
        if hasattr(self,"name"):
            return self.name
        if self.strRepr is None:
            self.strRepr = "{" + _ustr(self.expr) + "}..."
        return self.strRepr
    def setResultsName( self, name, listAllMatches=False ):
        ret = super(OneOrMore,self).setResultsName(name,listAllMatches)
        # repeated results are always collected as a list
        ret.saveAsList = True
        return ret
class _NullToken(object):
def __bool__(self):
return False
__nonzero__ = __bool__
def __str__(self):
return ""
_optionalNotMatched = _NullToken()
class Optional(ParseElementEnhance):
    """Optional matching of the given expression.
       A default return string can also be specified, if the optional expression
       is not found.
    """
    def __init__( self, exprs, default=_optionalNotMatched ):
        super(Optional,self).__init__( exprs, savelist=False )
        # sentinel distinguishes "no default given" from a default of None/""/0
        self.defaultValue = default
        self.mayReturnEmpty = True
    def parseImpl( self, instring, loc, doActions=True ):
        try:
            loc, tokens = self.expr._parse( instring, loc, doActions, callPreParse=False )
        except (ParseException,IndexError):
            # expression absent: substitute the default (if one was supplied)
            if self.defaultValue is not _optionalNotMatched:
                if self.expr.resultsName:
                    # preserve the results name on the substituted default
                    tokens = ParseResults([ self.defaultValue ])
                    tokens[self.expr.resultsName] = self.defaultValue
                else:
                    tokens = [ self.defaultValue ]
            else:
                tokens = []
        return loc, tokens
    def __str__( self ):
        if hasattr(self,"name"):
            return self.name
        if self.strRepr is None:
            self.strRepr = "[" + _ustr(self.expr) + "]"
        return self.strRepr
class SkipTo(ParseElementEnhance):
    """Token for skipping over all undefined text until the matched expression is found.
       If C{include} is set to true, the matched expression is also parsed (the skipped text
       and matched expression are returned as a 2-element list). The C{ignore}
       argument is used to define grammars (typically quoted strings and comments) that
       might contain false matches.
    """
    def __init__( self, other, include=False, ignore=None, failOn=None ):
        super( SkipTo, self ).__init__( other )
        self.ignoreExpr = ignore
        self.mayReturnEmpty = True
        self.mayIndexError = False
        self.includeMatch = include
        self.asList = False
        # failOn: an expression that, if seen while skipping, aborts the skip
        if failOn is not None and isinstance(failOn, basestring):
            self.failOn = Literal(failOn)
        else:
            self.failOn = failOn
        self.errmsg = "No match found for "+_ustr(self.expr)
        #self.myException = ParseException("",0,self.errmsg,self)
    def parseImpl( self, instring, loc, doActions=True ):
        startLoc = loc
        instrlen = len(instring)
        expr = self.expr
        # failParse distinguishes "failOn fired" (re-raise) from "target not
        # here yet" (advance one character and retry)
        failParse = False
        while loc <= instrlen:
            try:
                if self.failOn:
                    try:
                        self.failOn.tryParse(instring, loc)
                    except ParseBaseException:
                        pass
                    else:
                        failParse = True
                        raise ParseException(instring, loc, "Found expression " + str(self.failOn))
                    failParse = False
                if self.ignoreExpr is not None:
                    # hop over ignorable regions (quoted strings, comments, ...)
                    while 1:
                        try:
                            loc = self.ignoreExpr.tryParse(instring,loc)
                            # print("found ignoreExpr, advance to", loc)
                        except ParseBaseException:
                            break
                # probe for the target expression (no actions during the probe)
                expr._parse( instring, loc, doActions=False, callPreParse=False )
                skipText = instring[startLoc:loc]
                if self.includeMatch:
                    # re-parse for real this time, and append the match tokens
                    loc,mat = expr._parse(instring,loc,doActions,callPreParse=False)
                    if mat:
                        skipRes = ParseResults( skipText )
                        skipRes += mat
                        return loc, [ skipRes ]
                    else:
                        return loc, [ skipText ]
                else:
                    return loc, [ skipText ]
            except (ParseException,IndexError):
                if failParse:
                    raise
                else:
                    loc += 1
        # scanned to end of input without finding the target expression
        exc = self.myException
        exc.loc = loc
        exc.pstr = instring
        raise exc
class Forward(ParseElementEnhance):
    """Forward declaration of an expression to be defined later -
       used for recursive grammars, such as algebraic infix notation.
       When the expression is known, it is assigned to the C{Forward} variable using the '<<' operator.
       Note: take care when assigning to C{Forward} not to overlook precedence of operators.
       Specifically, '|' has a lower precedence than '<<', so that::
          fwdExpr << a | b | c
       will actually be evaluated as::
          (fwdExpr << a) | b | c
       thereby leaving b and c out as parseable alternatives. It is recommended that you
       explicitly group the values inserted into the C{Forward}::
          fwdExpr << (a | b | c)
    """
    def __init__( self, other=None ):
        super(Forward,self).__init__( other, savelist=False )
    def __lshift__( self, other ):
        # bind the deferred expression and adopt its parsing characteristics;
        # returns None deliberately, which also prevents accidental chaining
        if isinstance( other, basestring ):
            other = Literal(other)
        self.expr = other
        self.mayReturnEmpty = other.mayReturnEmpty
        self.strRepr = None
        self.mayIndexError = self.expr.mayIndexError
        self.mayReturnEmpty = self.expr.mayReturnEmpty
        self.setWhitespaceChars( self.expr.whiteChars )
        self.skipWhitespace = self.expr.skipWhitespace
        self.saveAsList = self.expr.saveAsList
        self.ignoreExprs.extend(self.expr.ignoreExprs)
        return None
    def leaveWhitespace( self ):
        # unlike the base class, do NOT propagate into self.expr - the
        # deferred expression may be shared or not yet assigned
        self.skipWhitespace = False
        return self
    def streamline( self ):
        if not self.streamlined:
            self.streamlined = True
            if self.expr is not None:
                self.expr.streamline()
        return self
    def validate( self, validateTrace=[] ):
        # guard against infinite recursion through self-referential grammars
        if self not in validateTrace:
            tmp = validateTrace[:]+[self]
            if self.expr is not None:
                self.expr.validate(tmp)
        self.checkRecursion([])
    def __str__( self ):
        if hasattr(self,"name"):
            return self.name
        # temporarily swap in _ForwardNoRecurse so a self-referential
        # expression prints "..." instead of recursing forever
        self._revertClass = self.__class__
        self.__class__ = _ForwardNoRecurse
        try:
            if self.expr is not None:
                retString = _ustr(self.expr)
            else:
                retString = "None"
        finally:
            self.__class__ = self._revertClass
        return self.__class__.__name__ + ": " + retString
    def copy(self):
        if self.expr is not None:
            return super(Forward,self).copy()
        else:
            # not yet bound: return a new Forward deferring to this one
            ret = Forward()
            ret << self
            return ret
class _ForwardNoRecurse(Forward):
    """Temporary stand-in used by Forward.__str__ to cut off infinite recursion
    when printing self-referential grammars."""
    def __str__( self ):
        return "..."
class TokenConverter(ParseElementEnhance):
    """Abstract base for elements that transform the tokens matched by their
    wrapped expression (see Combine, Group, Dict, Suppress)."""
    def __init__( self, expr, savelist=False ):
        # savelist is accepted for interface compatibility but deliberately not forwarded
        super(TokenConverter, self).__init__( expr )#, savelist )
        self.saveAsList = False
class Upcase(TokenConverter):
    """Converter to upper case all matching tokens.

    Deprecated - use the C{upcaseTokens} parse action instead.
    """
    def __init__(self, *args):
        super(Upcase,self).__init__(*args)
        warnings.warn("Upcase class is deprecated, use upcaseTokens parse action instead",
                       DeprecationWarning,stacklevel=2)
    def postParse( self, instring, loc, tokenlist ):
        # string.upper() was removed from the string module in Python 3;
        # calling the method on each token is equivalent on Python 2 and
        # works on both versions
        return [ tok.upper() for tok in tokenlist ]
class Combine(TokenConverter):
    """Converter to concatenate all matching tokens to a single string.
       By default, the matching patterns must also be contiguous in the input string;
       this can be disabled by specifying C{'adjacent=False'} in the constructor.
    """
    def __init__( self, expr, joinString="", adjacent=True ):
        super(Combine,self).__init__( expr )
        # suppress whitespace-stripping in contained parse expressions, but re-enable it on the Combine itself
        if adjacent:
            self.leaveWhitespace()
        self.adjacent = adjacent
        self.skipWhitespace = True
        self.joinString = joinString
        self.callPreparse = True
    def ignore( self, other ):
        # when adjacent, bypass ParseElementEnhance.ignore so the ignorable is
        # not propagated into the contained expression
        if self.adjacent:
            ParserElement.ignore(self, other)
        else:
            super( Combine, self).ignore( other )
        return self
    def postParse( self, instring, loc, tokenlist ):
        # replace the individual tokens with their joined concatenation,
        # preserving any named results
        retToks = tokenlist.copy()
        del retToks[:]
        retToks += ParseResults([ "".join(tokenlist._asStringList(self.joinString)) ], modal=self.modalResults)
        if self.resultsName and len(retToks.keys())>0:
            # nest so the results name maps to the combined token
            return [ retToks ]
        else:
            return retToks
class Group(TokenConverter):
    """Converter to return the matched tokens as a list - useful for returning tokens of ZeroOrMore and OneOrMore expressions."""
    def __init__( self, expr ):
        super(Group, self).__init__( expr )
        # grouped results stay nested when assigned a results name
        self.saveAsList = True
    def postParse( self, instring, loc, tokenlist ):
        # nest the matched tokens one level deeper
        return [ tokenlist ]
class Dict(TokenConverter):
    """Converter to return a repetitive expression as a list, but also as a dictionary.
       Each element can also be referenced using the first token in the expression as its key.
       Useful for tabular report scraping when the first column can be used as a item key.
    """
    def __init__( self, exprs ):
        super(Dict,self).__init__( exprs )
        self.saveAsList = True
    def postParse( self, instring, loc, tokenlist ):
        # install each sub-list into the results under its first token as key
        for i,tok in enumerate(tokenlist):
            if len(tok) == 0:
                continue
            ikey = tok[0]
            if isinstance(ikey,int):
                # integer keys would be taken as list indices - stringify them
                ikey = _ustr(tok[0]).strip()
            if len(tok)==1:
                # key with no value
                tokenlist[ikey] = _ParseResultsWithOffset("",i)
            elif len(tok)==2 and not isinstance(tok[1],ParseResults):
                # simple key-value pair
                tokenlist[ikey] = _ParseResultsWithOffset(tok[1],i)
            else:
                # multi-token value: store everything after the key
                dictvalue = tok.copy() #ParseResults(i)
                del dictvalue[0]
                if len(dictvalue)!= 1 or (isinstance(dictvalue,ParseResults) and dictvalue.keys()):
                    tokenlist[ikey] = _ParseResultsWithOffset(dictvalue,i)
                else:
                    # single unnamed value - unwrap it
                    tokenlist[ikey] = _ParseResultsWithOffset(dictvalue[0],i)
        if self.resultsName:
            return [ tokenlist ]
        else:
            return tokenlist
class Suppress(TokenConverter):
    """Converter for ignoring the results of a parsed expression."""
    def postParse( self, instring, loc, tokenlist ):
        # discard whatever the wrapped expression matched
        return []
    def suppress( self ):
        # already suppressed - suppressing again is a no-op
        return self
class OnlyOnce(object):
    """Wrapper for parse actions, to ensure they are only called once
    (until reset() is invoked)."""
    def __init__(self, methodCall):
        self.callable = ParserElement._normalizeParseActionArgs(methodCall)
        self.called = False
    def __call__(self, s, l, t):
        # guard clause: a second invocation fails the parse
        if self.called:
            raise ParseException(s, l, "")
        # mark as called only AFTER a successful invocation, so an action
        # that raises may be retried
        results = self.callable(s, l, t)
        self.called = True
        return results
    def reset(self):
        # re-arm the wrapper so the action may fire once more
        self.called = False
def traceParseAction(f):
    """Decorator for debugging parse actions.

    Logs entry (with source line, location, and tokens) and exit (return value
    or exception) of the wrapped parse action to stderr, then delegates to it.
    """
    f = ParserElement._normalizeParseActionArgs(f)
    def z(*paArgs):
        # f.func_name exists only on Python 2; __name__ works on both 2 and 3
        thisFunc = getattr(f, "__name__", "<parse action>")
        s,l,t = paArgs[-3:]
        if len(paArgs)>3:
            # bound method: prefix with the owning class name
            thisFunc = paArgs[0].__class__.__name__ + '.' + thisFunc
        sys.stderr.write( ">>entering %s(line: '%s', %d, %s)\n" % (thisFunc,line(l,s),l,t) )
        try:
            ret = f(*paArgs)
        except Exception:
            exc = sys.exc_info()[1]
            sys.stderr.write( "<<leaving %s (exception: %s)\n" % (thisFunc,exc) )
            raise
        sys.stderr.write( "<<leaving %s (ret: %s)\n" % (thisFunc,ret) )
        return ret
    try:
        z.__name__ = f.__name__
    except AttributeError:
        pass
    return z
#
# global helpers
#
def delimitedList( expr, delim=",", combine=False ):
    """Helper to define a delimited list of expressions - the delimiter defaults to ','.
       By default, the list elements and delimiters can have intervening whitespace, and
       comments, but this can be overridden by passing C{combine=True} in the constructor.
       If C{combine} is set to True, the matching tokens are returned as a single token
       string, with the delimiters included; otherwise, the matching tokens are returned
       as a list of tokens, with the delimiters suppressed.
    """
    dlName = "%s [%s %s]..." % (_ustr(expr), _ustr(delim), _ustr(expr))
    if combine:
        listExpr = Combine( expr + ZeroOrMore( delim + expr ) )
    else:
        # delimiters are matched but dropped from the results
        listExpr = expr + ZeroOrMore( Suppress( delim ) + expr )
    return listExpr.setName(dlName)
def countedArray( expr ):
    """Helper to define a counted list of expressions.
       This helper defines a pattern of the form::
           integer expr expr expr...
       where the leading integer tells how many expr expressions follow.
       The matched tokens returns the array of expr tokens as a list - the leading count token is suppressed.
    """
    # the Forward is rebound at parse time, once the count is known
    arrayExpr = Forward()
    def countFieldParseAction(s,l,t):
        n = int(t[0])
        # n repetitions of expr, or the empty match when n == 0
        arrayExpr << (n and Group(And([expr]*n)) or Group(empty))
        # suppress the count token itself
        return []
    return ( Word(nums).setName("arrayLen").setParseAction(countFieldParseAction, callDuringTry=True) + arrayExpr )
def _flatten(L):
if type(L) is not list: return [L]
if L == []: return L
return _flatten(L[0]) + _flatten(L[1:])
def matchPreviousLiteral(expr):
    """Helper to define an expression that is indirectly defined from
       the tokens matched in a previous expression, that is, it looks
       for a 'repeat' of a previous expression. For example::
           first = Word(nums)
           second = matchPreviousLiteral(first)
           matchExpr = first + ":" + second
       will match C{"1:1"}, but not C{"1:2"}. Because this matches a
       previous literal, will also match the leading C{"1:1"} in C{"1:10"}.
       If this is not desired, use C{matchPreviousExpr}.
       Do *not* use with packrat parsing enabled.
    """
    # the Forward is rebound at parse time with literals built from the
    # tokens that expr actually matched
    rep = Forward()
    def copyTokenToRepeater(s,l,t):
        if t:
            if len(t) == 1:
                rep << t[0]
            else:
                # flatten t tokens
                tflat = _flatten(t.asList())
                rep << And( [ Literal(tt) for tt in tflat ] )
        else:
            # expr matched no tokens - the repeat must match empty too
            rep << Empty()
    expr.addParseAction(copyTokenToRepeater, callDuringTry=True)
    return rep
def matchPreviousExpr(expr):
    """Helper to define an expression that is indirectly defined from
    the tokens matched in a previous expression, that is, it looks
    for a 'repeat' of a previous expression. For example::
        first = Word(nums)
        second = matchPreviousExpr(first)
        matchExpr = first + ":" + second
    will match C{"1:1"}, but not C{"1:2"}. Because this matches by
    expressions, will *not* match the leading C{"1:1"} in C{"1:10"};
    the expressions are evaluated first, and then compared, so
    C{"1"} is compared with C{"10"}.
    Do *not* use with packrat parsing enabled.
    """
    rep = Forward()
    # The repeat uses a copy of the grammar of 'expr', then a parse action
    # installed below checks that its tokens equal the earlier match's tokens.
    e2 = expr.copy()
    rep << e2
    def copyTokenToRepeater(s,l,t):
        matchTokens = _flatten(t.asList())
        def mustMatchTheseTokens(s,l,t):
            theseTokens = _flatten(t.asList())
            if theseTokens != matchTokens:
                # Empty message - the mismatch just makes the repeat fail.
                raise ParseException("",0,"")
        # setParseAction (not add) replaces any check from a previous match.
        rep.setParseAction( mustMatchTheseTokens, callDuringTry=True )
    expr.addParseAction(copyTokenToRepeater, callDuringTry=True)
    return rep
def _escapeRegexRangeChars(s):
    """Backslash-escape characters that are special inside a regex character class."""
    #~ escape these chars: ^-]
    for c in r"\^-]":
        s = s.replace(c,_bslash+c)
    # Literal newline/tab become their escape sequences so the class stays printable.
    s = s.replace("\n",r"\n")
    s = s.replace("\t",r"\t")
    return _ustr(s)
def oneOf( strs, caseless=False, useRegex=True ):
    """Helper to quickly define a set of alternative Literals, and makes sure to do
    longest-first testing when there is a conflict, regardless of the input order,
    but returns a C{MatchFirst} for best performance.
    Parameters:
     - strs - a string of space-delimited literals, or a list of string literals
     - caseless - (default=False) - treat all literals as caseless
     - useRegex - (default=True) - as an optimization, will generate a Regex
       object; otherwise, will generate a C{MatchFirst} object (if C{caseless=True}, or
       if creating a C{Regex} raises an exception)
    """
    # Pick comparison/prefix-test helpers and the element class for this case mode.
    if caseless:
        isequal = ( lambda a,b: a.upper() == b.upper() )
        masks = ( lambda a,b: b.upper().startswith(a.upper()) )
        parseElementClass = CaselessLiteral
    else:
        isequal = ( lambda a,b: a == b )
        masks = ( lambda a,b: b.startswith(a) )
        parseElementClass = Literal
    if isinstance(strs,(list,tuple)):
        symbols = list(strs[:])
    elif isinstance(strs,basestring):
        symbols = strs.split()
    else:
        warnings.warn("Invalid argument to oneOf, expected string or list",
                SyntaxWarning, stacklevel=2)
        # FIX: previously fell through with 'symbols' unbound, raising a
        # confusing NameError below; degrade gracefully to no alternatives.
        symbols = []
    # De-duplicate, and move any symbol that is a prefix of a later symbol
    # after it, so alternatives are tried longest-first.
    i = 0
    while i < len(symbols)-1:
        cur = symbols[i]
        for j,other in enumerate(symbols[i+1:]):
            if ( isequal(other, cur) ):
                del symbols[i+j+1]
                break
            elif ( masks(cur, other) ):
                del symbols[i+j+1]
                symbols.insert(i,other)
                cur = other
                break
        else:
            i += 1
    if not caseless and useRegex:
        #~ print (strs,"->", "|".join( [ _escapeRegexChars(sym) for sym in symbols] ))
        try:
            # If every symbol is a single character, a character class suffices.
            if len(symbols)==len("".join(symbols)):
                return Regex( "[%s]" % "".join( [ _escapeRegexRangeChars(sym) for sym in symbols] ) )
            else:
                return Regex( "|".join( [ re.escape(sym) for sym in symbols] ) )
        except Exception:
            # FIX: was a bare 'except:', which also swallowed KeyboardInterrupt
            # and SystemExit; only genuine errors should trigger the fallback.
            warnings.warn("Exception creating Regex for oneOf, building MatchFirst",
                    SyntaxWarning, stacklevel=2)
    # last resort, just use MatchFirst
    return MatchFirst( [ parseElementClass(sym) for sym in symbols ] )
def dictOf( key, value ):
    """Helper to easily and clearly define a dictionary by specifying the respective patterns
    for the key and value.  Takes care of defining the C{Dict}, C{ZeroOrMore}, and C{Group} tokens
    in the proper order.  The key pattern can include delimiting markers or punctuation,
    as long as they are suppressed, thereby leaving the significant key text.  The value
    pattern can include named results, so that the C{Dict} results can include named token
    fields.
    """
    # Each Group pairs one key with one value; Dict keys the results by the key token.
    return Dict( ZeroOrMore( Group ( key + value ) ) )
def originalTextFor(expr, asString=True):
    """Helper to return the original, untokenized text for a given expression.  Useful to
    restore the parsed fields of an HTML start tag into the raw tag text itself, or to
    revert separate tokens with intervening whitespace back to the original matching
    input text. Simpler to use than the parse action C{keepOriginalText}, and does not
    require the inspect module to chase up the call stack.  By default, returns a
    string containing the original parsed text.
    If the optional C{asString} argument is passed as False, then the return value is a
    C{ParseResults} containing any results names that were originally matched, and a
    single token containing the original matched text from the input string.  So if
    the expression passed to C{originalTextFor} contains expressions with defined
    results names, you must set C{asString} to False if you want to preserve those
    results name values."""
    # Zero-width markers that record the input locations bracketing 'expr'.
    locMarker = Empty().setParseAction(lambda s,loc,t: loc)
    endlocMarker = locMarker.copy()
    # The end marker must not skip leading whitespace, or it would overshoot.
    endlocMarker.callPreparse = False
    matchExpr = locMarker("_original_start") + expr + endlocMarker("_original_end")
    if asString:
        extractText = lambda s,l,t: s[t._original_start:t._original_end]
    else:
        def extractText(s,l,t):
            # Replace all tokens with the raw slice, then drop the marker names.
            del t[:]
            t.insert(0, s[t._original_start:t._original_end])
            del t["_original_start"]
            del t["_original_end"]
    matchExpr.setParseAction(extractText)
    return matchExpr
# convenience constants for positional expressions
empty = Empty().setName("empty")
lineStart = LineStart().setName("lineStart")
lineEnd = LineEnd().setName("lineEnd")
stringStart = StringStart().setName("stringStart")
stringEnd = StringEnd().setName("stringEnd")
# Building blocks for srange(): escaped punctuation, '\0x..' hex and '\0..'
# octal escapes, plain single characters, and 'a-z' style ranges.
_escapedPunc = Word( _bslash, r"\[]-*.$+^?()~ ", exact=2 ).setParseAction(lambda s,l,t:t[0][1])
_printables_less_backslash = "".join([ c for c in printables if c not in r"\]" ])
_escapedHexChar = Combine( Suppress(_bslash + "0x") + Word(hexnums) ).setParseAction(lambda s,l,t:unichr(int(t[0],16)))
_escapedOctChar = Combine( Suppress(_bslash) + Word("0","01234567") ).setParseAction(lambda s,l,t:unichr(int(t[0],8)))
_singleChar = _escapedPunc | _escapedHexChar | _escapedOctChar | Word(_printables_less_backslash,exact=1)
_charRange = Group(_singleChar + Suppress("-") + _singleChar)
# Grammar for a regex-style bracketed set; 'body' collects ranges and atoms.
_reBracketExpr = Literal("[") + Optional("^").setResultsName("negate") + Group( OneOrMore( _charRange | _singleChar ) ).setResultsName("body") + "]"
# Expand a (start,end) range pair into the full run of characters; atoms pass through.
_expanded = lambda p: (isinstance(p,ParseResults) and ''.join([ unichr(c) for c in range(ord(p[0]),ord(p[1])+1) ]) or p)
def srange(s):
    r"""Helper to easily define string ranges for use in Word construction.  Borrows
    syntax from regexp '[]' string range definitions::
        srange("[0-9]")   -> "0123456789"
        srange("[a-z]")   -> "abcdefghijklmnopqrstuvwxyz"
        srange("[a-z$_]") -> "abcdefghijklmnopqrstuvwxyz$_"
    The input string must be enclosed in []'s, and the returned string is the expanded
    character set joined into a single string.
    The values enclosed in the []'s may be::
        a single character
        an escaped character with a leading backslash (such as \- or \])
        an escaped hex character with a leading '\0x' (\0x21, which is a '!' character)
        an escaped octal character with a leading '\0' (\041, which is a '!' character)
        a range of any of the above, separated by a dash ('a-z', etc.)
        any combination of the above ('aeiouy', 'a-zA-Z0-9_$', etc.)
    """
    try:
        return "".join([_expanded(part) for part in _reBracketExpr.parseString(s).body])
    except Exception:
        # FIX: was a bare 'except:', which also swallowed KeyboardInterrupt and
        # SystemExit.  Any genuine parse failure yields an empty result string,
        # preserving the historical best-effort behavior.
        return ""
def matchOnlyAtCol(n):
    """Build a parse action that fails unless the match begins at input column C{n}."""
    def verifyCol(strg,locn,toks):
        actualCol = col(locn,strg)
        if actualCol != n:
            raise ParseException(strg,locn,"matched token not at column %d" % n)
    return verifyCol
def replaceWith(replStr):
    """Build a parse action that ignores the matched tokens and always returns
    the single literal value C{replStr}.  Especially useful with
    C{transformString()}.
    """
    def substitute(*ignored):
        return [replStr]
    return substitute
def removeQuotes(s,l,t):
    """Parse action that strips the enclosing quotation marks from a matched
    quoted string.  To use, attach to a quoted string expression::
        quotedString.setParseAction( removeQuotes )
    """
    quoted = t[0]
    return quoted[1:-1]
def upcaseTokens(s,l,t):
    """Parse action that converts all matched tokens to upper case."""
    return [_ustr(tok).upper() for tok in t]
def downcaseTokens(s,l,t):
    """Parse action that converts all matched tokens to lower case."""
    return [_ustr(tok).lower() for tok in t]
def keepOriginalText(s,startLoc,t):
    """DEPRECATED - use new helper method C{originalTextFor}.
    Helper parse action to preserve original parsed text,
    overriding any nested parse actions."""
    try:
        # Walks the interpreter stack - only valid when invoked as a parse action.
        endloc = getTokensEndLoc()
    except ParseException:
        raise ParseFatalException("incorrect usage of keepOriginalText - may only be called as a parse action")
    # Replace whatever tokens were matched with the raw input slice.
    del t[:]
    t += ParseResults(s[startLoc:endloc])
    return t
def getTokensEndLoc():
    """Method to be called from within a parse action to determine the end
    location of the parsed tokens."""
    import inspect
    fstack = inspect.stack()
    try:
        # search up the stack (through intervening argument normalizers) for correct calling routine
        for f in fstack[2:]:
            if f[3] == "_parseNoCache":
                endloc = f[0].f_locals["loc"]
                return endloc
        else:
            # NOTE: for/else - reached only when no _parseNoCache frame was found,
            # i.e. this was not called from within a parse action.
            raise ParseFatalException("incorrect usage of getTokensEndLoc - may only be called from within a parse action")
    finally:
        # Drop the frame list promptly to break reference cycles it creates.
        del fstack
def _makeTags(tagStr, xml):
    """Internal helper to construct opening and closing tag expressions, given a tag name"""
    if isinstance(tagStr,basestring):
        resname = tagStr
        tagStr = Keyword(tagStr, caseless=not xml)
    else:
        # Caller passed a ready-made expression; use its name for results naming.
        resname = tagStr.name
    tagAttrName = Word(alphas,alphanums+"_-:")
    if (xml):
        # XML: attribute values must be double-quoted; tag name is case-sensitive.
        tagAttrValue = dblQuotedString.copy().setParseAction( removeQuotes )
        openTag = Suppress("<") + tagStr("tag") + \
                Dict(ZeroOrMore(Group( tagAttrName + Suppress("=") + tagAttrValue ))) + \
                Optional("/",default=[False]).setResultsName("empty").setParseAction(lambda s,l,t:t[0]=='/') + Suppress(">")
    else:
        # HTML: values may be quoted or bare; attribute names are lowercased,
        # and the '=value' part is optional (boolean attributes).
        printablesLessRAbrack = "".join( [ c for c in printables if c not in ">" ] )
        tagAttrValue = quotedString.copy().setParseAction( removeQuotes ) | Word(printablesLessRAbrack)
        openTag = Suppress("<") + tagStr + \
                Dict(ZeroOrMore(Group( tagAttrName.setParseAction(downcaseTokens) + \
                Optional( Suppress("=") + tagAttrValue ) ))) + \
                Optional("/",default=[False]).setResultsName("empty").setParseAction(lambda s,l,t:t[0]=='/') + Suppress(">")
    closeTag = Combine(_L("</") + tagStr + ">")
    # Results names like "startDiv"/"endDiv", title-cased from the tag name.
    openTag = openTag.setResultsName("start"+"".join(resname.replace(":"," ").title().split())).setName("<%s>" % tagStr)
    closeTag = closeTag.setResultsName("end"+"".join(resname.replace(":"," ").title().split())).setName("</%s>" % tagStr)
    openTag.tag = resname
    closeTag.tag = resname
    return openTag, closeTag
def makeHTMLTags(tagStr):
    """Helper to construct opening and closing tag expressions for HTML, given a tag name.
    Matching is caseless and attribute values may be unquoted (see C{_makeTags})."""
    return _makeTags( tagStr, False )
def makeXMLTags(tagStr):
    """Helper to construct opening and closing tag expressions for XML, given a tag name.
    Matching is case-sensitive and attribute values must be quoted (see C{_makeTags})."""
    return _makeTags( tagStr, True )
def withAttribute(*args,**attrDict):
    """Helper to create a validating parse action to be used with start tags created
    with makeXMLTags or makeHTMLTags. Use withAttribute to qualify a starting tag
    with a required attribute value, to avoid false matches on common tags such as
    <TD> or <DIV>.
    Call withAttribute with a series of attribute names and values. Specify the list
    of filter attributes names and values as:
     - keyword arguments, as in (class="Customer",align="right"), or
     - a list of name-value tuples, as in ( ("ns1:class", "Customer"), ("ns2:align","right") )
    For attribute names with a namespace prefix, you must use the second form.  Attribute
    names are matched insensitive to upper/lower case.
    To verify that the attribute exists, but without specifying a value, pass
    withAttribute.ANY_VALUE as the value.
    """
    # Positional tuples take precedence over keyword arguments.
    if args:
        attrs = args[:]
    else:
        attrs = attrDict.items()
    attrs = [(k,v) for k,v in attrs]
    def pa(s,l,tokens):
        for attrName,attrValue in attrs:
            if attrName not in tokens:
                raise ParseException(s,l,"no matching attribute " + attrName)
            # ANY_VALUE is a sentinel meaning "present with any value".
            if attrValue != withAttribute.ANY_VALUE and tokens[attrName] != attrValue:
                raise ParseException(s,l,"attribute '%s' has value '%s', must be '%s'" %
                                        (attrName, tokens[attrName], attrValue))
    return pa
# Sentinel: match the attribute regardless of its value.
withAttribute.ANY_VALUE = object()
# Associativity constants for operatorPrecedence(); compared by identity.
opAssoc = _Constants()
opAssoc.LEFT = object()
opAssoc.RIGHT = object()
def operatorPrecedence( baseExpr, opList ):
    """Helper method for constructing grammars of expressions made up of
    operators working in a precedence hierarchy.  Operators may be unary or
    binary, left- or right-associative.  Parse actions can also be attached
    to operator expressions.
    Parameters:
     - baseExpr - expression representing the most basic element for the nested
     - opList - list of tuples, one for each operator precedence level in the
       expression grammar; each tuple is of the form
       (opExpr, numTerms, rightLeftAssoc, parseAction), where:
        - opExpr is the pyparsing expression for the operator;
          may also be a string, which will be converted to a Literal;
          if numTerms is 3, opExpr is a tuple of two expressions, for the
          two operators separating the 3 terms
        - numTerms is the number of terms for this operator (must
          be 1, 2, or 3)
        - rightLeftAssoc is the indicator whether the operator is
          right or left associative, using the pyparsing-defined
          constants opAssoc.RIGHT and opAssoc.LEFT.
        - parseAction is the parse action to be associated with
          expressions matching this operator expression (the
          parse action tuple member may be omitted)
    """
    ret = Forward()
    # Highest precedence: the base element, or a parenthesized full expression.
    lastExpr = baseExpr | ( Suppress('(') + ret + Suppress(')') )
    # Build one grammar layer per precedence level, lowest binding last.
    for i,operDef in enumerate(opList):
        # Pad with None so the 4th (parseAction) member may be omitted.
        opExpr,arity,rightLeftAssoc,pa = (operDef + (None,))[:4]
        if arity == 3:
            if opExpr is None or len(opExpr) != 2:
                raise ValueError("if numterms=3, opExpr must be a tuple or list of two expressions")
            opExpr1, opExpr2 = opExpr
        thisExpr = Forward()#.setName("expr%d" % i)
        if rightLeftAssoc == opAssoc.LEFT:
            # Left-associative: operands at this level are the tighter 'lastExpr'.
            if arity == 1:
                matchExpr = FollowedBy(lastExpr + opExpr) + Group( lastExpr + OneOrMore( opExpr ) )
            elif arity == 2:
                if opExpr is not None:
                    matchExpr = FollowedBy(lastExpr + opExpr + lastExpr) + Group( lastExpr + OneOrMore( opExpr + lastExpr ) )
                else:
                    # opExpr None means juxtaposition (implicit operator).
                    matchExpr = FollowedBy(lastExpr+lastExpr) + Group( lastExpr + OneOrMore(lastExpr) )
            elif arity == 3:
                matchExpr = FollowedBy(lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr) + \
                            Group( lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr )
            else:
                raise ValueError("operator must be unary (1), binary (2), or ternary (3)")
        elif rightLeftAssoc == opAssoc.RIGHT:
            # Right-associative: the right operand recurses at this same level.
            if arity == 1:
                # try to avoid LR with this extra test
                if not isinstance(opExpr, Optional):
                    opExpr = Optional(opExpr)
                matchExpr = FollowedBy(opExpr.expr + thisExpr) + Group( opExpr + thisExpr )
            elif arity == 2:
                if opExpr is not None:
                    matchExpr = FollowedBy(lastExpr + opExpr + thisExpr) + Group( lastExpr + OneOrMore( opExpr + thisExpr ) )
                else:
                    matchExpr = FollowedBy(lastExpr + thisExpr) + Group( lastExpr + OneOrMore( thisExpr ) )
            elif arity == 3:
                matchExpr = FollowedBy(lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr) + \
                            Group( lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr )
            else:
                raise ValueError("operator must be unary (1), binary (2), or ternary (3)")
        else:
            raise ValueError("operator must indicate right or left associativity")
        if pa:
            matchExpr.setParseAction( pa )
        # This level matches its own operators, or falls through to tighter levels.
        thisExpr << ( matchExpr | lastExpr )
        lastExpr = thisExpr
    ret << lastExpr
    return ret
# Ready-made quoted-string expressions.  Each allows doubled-quote and
# backslash escapes, but not raw newlines inside the string.
dblQuotedString = Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\x[0-9a-fA-F]+)|(?:\\.))*"').setName("string enclosed in double quotes")
sglQuotedString = Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\x[0-9a-fA-F]+)|(?:\\.))*'").setName("string enclosed in single quotes")
quotedString = Regex(r'''(?:"(?:[^"\n\r\\]|(?:"")|(?:\\x[0-9a-fA-F]+)|(?:\\.))*")|(?:'(?:[^'\n\r\\]|(?:'')|(?:\\x[0-9a-fA-F]+)|(?:\\.))*')''').setName("quotedString using single or double quotes")
# A quoted string with a leading 'u' prefix (Python-style unicode literal).
unicodeString = Combine(_L('u') + quotedString.copy())
def nestedExpr(opener="(", closer=")", content=None, ignoreExpr=quotedString.copy()):
    """Helper method for defining nested lists enclosed in opening and closing
    delimiters ("(" and ")" are the default).
    Parameters:
     - opener - opening character for a nested list (default="("); can also be a pyparsing expression
     - closer - closing character for a nested list (default=")"); can also be a pyparsing expression
     - content - expression for items within the nested lists (default=None)
     - ignoreExpr - expression for ignoring opening and closing delimiters (default=quotedString)
    If an expression is not provided for the content argument, the nested
    expression will capture all whitespace-delimited content between delimiters
    as a list of separate values.
    Use the ignoreExpr argument to define expressions that may contain
    opening or closing characters that should not be treated as opening
    or closing characters for nesting, such as quotedString or a comment
    expression.  Specify multiple expressions using an Or or MatchFirst.
    The default is quotedString, but if no expressions are to be ignored,
    then pass None for this argument.
    """
    if opener == closer:
        raise ValueError("opening and closing strings cannot be the same")
    if content is None:
        # Synthesize a default content expression: runs of characters that are
        # not delimiters and not whitespace, with quoted/ignored regions skipped.
        if isinstance(opener,basestring) and isinstance(closer,basestring):
            if len(opener) == 1 and len(closer)==1:
                # Single-character delimiters can be excluded via CharsNotIn.
                if ignoreExpr is not None:
                    content = (Combine(OneOrMore(~ignoreExpr +
                                    CharsNotIn(opener+closer+ParserElement.DEFAULT_WHITE_CHARS,exact=1))
                                ).setParseAction(lambda t:t[0].strip()))
                else:
                    content = (empty.copy()+CharsNotIn(opener+closer+ParserElement.DEFAULT_WHITE_CHARS
                                ).setParseAction(lambda t:t[0].strip()))
            else:
                # Multi-character delimiters need explicit negative lookahead.
                if ignoreExpr is not None:
                    content = (Combine(OneOrMore(~ignoreExpr +
                                    ~Literal(opener) + ~Literal(closer) +
                                    CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS,exact=1))
                                ).setParseAction(lambda t:t[0].strip()))
                else:
                    content = (Combine(OneOrMore(~Literal(opener) + ~Literal(closer) +
                                    CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS,exact=1))
                                ).setParseAction(lambda t:t[0].strip()))
        else:
            raise ValueError("opening and closing arguments must be strings if no content expression is given")
    # Recursive definition: a group containing items and/or nested groups.
    ret = Forward()
    if ignoreExpr is not None:
        ret << Group( Suppress(opener) + ZeroOrMore( ignoreExpr | ret | content ) + Suppress(closer) )
    else:
        ret << Group( Suppress(opener) + ZeroOrMore( ret | content ) + Suppress(closer) )
    return ret
def indentedBlock(blockStatementExpr, indentStack, indent=True):
    """Helper method for defining space-delimited indentation blocks, such as
    those used to define block statements in Python source code.
    Parameters:
     - blockStatementExpr - expression defining syntax of statement that
       is repeated within the indented block
     - indentStack - list created by caller to manage indentation stack
       (multiple statementWithIndentedBlock expressions within a single grammar
       should share a common indentStack)
     - indent - boolean indicating whether block must be indented beyond the
       the current level; set to False for block of left-most statements
       (default=True)
    A valid block must contain at least one blockStatement.
    """
    # Parse actions below mutate the caller-supplied indentStack; the top of
    # the stack is the column of the current indentation level.
    def checkPeerIndent(s,l,t):
        if l >= len(s): return
        curCol = col(l,s)
        if curCol != indentStack[-1]:
            if curCol > indentStack[-1]:
                raise ParseFatalException(s,l,"illegal nesting")
            raise ParseException(s,l,"not a peer entry")
    def checkSubIndent(s,l,t):
        curCol = col(l,s)
        if curCol > indentStack[-1]:
            # Deeper than current level: push a new indentation level.
            indentStack.append( curCol )
        else:
            raise ParseException(s,l,"not a subentry")
    def checkUnindent(s,l,t):
        if l >= len(s): return
        curCol = col(l,s)
        # Must dedent to (at most) the enclosing level, then pop this level.
        if not(indentStack and curCol < indentStack[-1] and curCol <= indentStack[-2]):
            raise ParseException(s,l,"not an unindent")
        indentStack.pop()
    NL = OneOrMore(LineEnd().setWhitespaceChars("\t ").suppress())
    INDENT = Empty() + Empty().setParseAction(checkSubIndent)
    PEER = Empty().setParseAction(checkPeerIndent)
    UNDENT = Empty().setParseAction(checkUnindent)
    if indent:
        smExpr = Group( Optional(NL) +
            #~ FollowedBy(blockStatementExpr) +
            INDENT + (OneOrMore( PEER + Group(blockStatementExpr) + Optional(NL) )) + UNDENT)
    else:
        smExpr = Group( Optional(NL) +
            (OneOrMore( PEER + Group(blockStatementExpr) + Optional(NL) )) )
    # Allow backslash-continued lines inside block statements.
    blockStatementExpr.ignore(_bslash + LineEnd())
    return smExpr
# Latin-1 (8-bit) letter and punctuation character sets.
alphas8bit = srange(r"[\0xc0-\0xd6\0xd8-\0xf6\0xf8-\0xff]")
punc8bit = srange(r"[\0xa1-\0xbf\0xd7\0xf7]")
# Generic HTML tag matchers (any tag name) and common entity handling.
anyOpenTag,anyCloseTag = makeHTMLTags(Word(alphas,alphanums+"_:"))
commonHTMLEntity = Combine(_L("&") + oneOf("gt lt amp nbsp quot").setResultsName("entity") +";").streamline()
_htmlEntityMap = dict(zip("gt lt amp nbsp quot".split(),'><& "'))
# Parse action mapping a matched entity back to its literal character.
replaceHTMLEntity = lambda t : t.entity in _htmlEntityMap and _htmlEntityMap[t.entity] or None
# it's easy to get these comment structures wrong - they're very common, so may as well make them available
cStyleComment = Regex(r"/\*(?:[^*]*\*+)+?/").setName("C style comment")
htmlComment = Regex(r"<!--[\s\S]*?-->")
restOfLine = Regex(r".*").leaveWhitespace()
dblSlashComment = Regex(r"\/\/(\\\n|.)*").setName("// comment")
cppStyleComment = Regex(r"/(?:\*(?:[^*]*\*+)+?/|/[^\n]*(?:\n[^\n]*)*?(?:(?<!\\)|\Z))").setName("C++ style comment")
javaStyleComment = cppStyleComment
pythonStyleComment = Regex(r"#.*").setName("Python style comment")
# Comma-separated list: items may be quoted strings or runs of non-comma text.
_noncomma = "".join( [ c for c in printables if c != "," ] )
_commasepitem = Combine(OneOrMore(Word(_noncomma) +
                                  Optional( Word(" \t") +
                                            ~Literal(",") + ~LineEnd() ) ) ).streamline().setName("commaItem")
commaSeparatedList = delimitedList( Optional( quotedString.copy() | _commasepitem, default="") ).setName("commaSeparatedList")
# Self-test: a tiny SELECT-statement grammar exercised against valid and
# deliberately malformed inputs (the failures demonstrate error reporting).
if __name__ == "__main__":
    def test( teststring ):
        # Parse one statement, printing either the token breakdown or a
        # caret-marked error location on failure.
        try:
            tokens = simpleSQL.parseString( teststring )
            tokenlist = tokens.asList()
            print (teststring + "->"   + str(tokenlist))
            print ("tokens = "         + str(tokens))
            print ("tokens.columns = " + str(tokens.columns))
            print ("tokens.tables = "  + str(tokens.tables))
            print (tokens.asXML("SQL",True))
        except ParseBaseException:
            err = sys.exc_info()[1]
            print (teststring + "->")
            print (err.line)
            print (" "*(err.column-1) + "^")
            print (err)
        print()
    selectToken    = CaselessLiteral( "select" )
    fromToken      = CaselessLiteral( "from" )
    ident          = Word( alphas, alphanums + "_$" )
    columnName     = delimitedList( ident, ".", combine=True ).setParseAction( upcaseTokens )
    columnNameList = Group( delimitedList( columnName ) )#.setName("columns")
    tableName      = delimitedList( ident, ".", combine=True ).setParseAction( upcaseTokens )
    tableNameList  = Group( delimitedList( tableName ) )#.setName("tables")
    simpleSQL      = ( selectToken + \
                     ( '*' | columnNameList ).setResultsName( "columns" ) + \
                     fromToken + \
                     tableNameList.setResultsName( "tables" ) )
    test( "SELECT * from XYZZY, ABC" )
    test( "select * from SYS.XYZZY" )
    test( "Select A from Sys.dual" )
    test( "Select AA,BB,CC from Sys.dual" )
    test( "Select A, B, C from Sys.dual" )
    test( "Select A, B, C from Sys.dual" )
    test( "Xelect A, B, C from Sys.dual" )
    test( "Select A, B, C frox Sys.dual" )
    test( "Select" )
    test( "Select ^^^ frox Sys.dual" )
    test( "Select A, B, C from Sys.dual, Table2   " )
# module pyparsing.py
#
# Copyright (c) 2003-2010 Paul T. McGuire
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
#from __future__ import generators
__doc__ = \
"""
pyparsing module - Classes and methods to define and execute parsing grammars
The pyparsing module is an alternative approach to creating and executing simple grammars,
vs. the traditional lex/yacc approach, or the use of regular expressions. With pyparsing, you
don't need to learn a new syntax for defining grammars or matching expressions - the parsing module
provides a library of classes that you use to construct the grammar directly in Python.
Here is a program to parse "Hello, World!" (or any greeting of the form C{"<salutation>, <addressee>!"})::
from pyparsing import Word, alphas
# define grammar of a greeting
greet = Word( alphas ) + "," + Word( alphas ) + "!"
hello = "Hello, World!"
print hello, "->", greet.parseString( hello )
The program outputs the following::
Hello, World! -> ['Hello', ',', 'World', '!']
The Python representation of the grammar is quite readable, owing to the self-explanatory
class names, and the use of '+', '|' and '^' operators.
The parsed results returned from C{parseString()} can be accessed as a nested list, a dictionary, or an
object with named attributes.
The pyparsing module handles some of the problems that are typically vexing when writing text parsers:
- extra or missing whitespace (the above program will also handle "Hello,World!", "Hello , World !", etc.)
- quoted strings
- embedded comments
"""
__version__ = "1.5.5"
__versionTime__ = "12 Aug 2010 03:56"
__author__ = "Paul McGuire <ptmcg@users.sourceforge.net>"
import string
from weakref import ref as wkref
import copy
import sys
import warnings
import re
import sre_constants
#~ sys.stderr.write( "testing pyparsing module, version %s, %s\n" % (__version__,__versionTime__ ) )
# Public API exported by "from pyparsing import *".
__all__ = [
'And', 'CaselessKeyword', 'CaselessLiteral', 'CharsNotIn', 'Combine', 'Dict', 'Each', 'Empty',
'FollowedBy', 'Forward', 'GoToColumn', 'Group', 'Keyword', 'LineEnd', 'LineStart', 'Literal',
'MatchFirst', 'NoMatch', 'NotAny', 'OneOrMore', 'OnlyOnce', 'Optional', 'Or',
'ParseBaseException', 'ParseElementEnhance', 'ParseException', 'ParseExpression', 'ParseFatalException',
'ParseResults', 'ParseSyntaxException', 'ParserElement', 'QuotedString', 'RecursiveGrammarException',
'Regex', 'SkipTo', 'StringEnd', 'StringStart', 'Suppress', 'Token', 'TokenConverter', 'Upcase',
'White', 'Word', 'WordEnd', 'WordStart', 'ZeroOrMore',
'alphanums', 'alphas', 'alphas8bit', 'anyCloseTag', 'anyOpenTag', 'cStyleComment', 'col',
'commaSeparatedList', 'commonHTMLEntity', 'countedArray', 'cppStyleComment', 'dblQuotedString',
'dblSlashComment', 'delimitedList', 'dictOf', 'downcaseTokens', 'empty', 'getTokensEndLoc', 'hexnums',
'htmlComment', 'javaStyleComment', 'keepOriginalText', 'line', 'lineEnd', 'lineStart', 'lineno',
'makeHTMLTags', 'makeXMLTags', 'matchOnlyAtCol', 'matchPreviousExpr', 'matchPreviousLiteral',
'nestedExpr', 'nullDebugAction', 'nums', 'oneOf', 'opAssoc', 'operatorPrecedence', 'printables',
'punc8bit', 'pythonStyleComment', 'quotedString', 'removeQuotes', 'replaceHTMLEntity',
'replaceWith', 'restOfLine', 'sglQuotedString', 'srange', 'stringEnd',
'stringStart', 'traceParseAction', 'unicodeString', 'upcaseTokens', 'withAttribute',
'indentedBlock', 'originalTextFor',
]
"""
Detect if we are running version 3.X and make appropriate changes
Robert A. Clark
"""
_PY3K = sys.version_info[0] > 2
if _PY3K:
    # Python 3: alias the removed 2.x builtins that the rest of the module uses.
    _MAX_INT = sys.maxsize
    basestring = str
    unichr = chr
    _ustr = str
    _str2dict = set
    alphas = string.ascii_lowercase + string.ascii_uppercase
else:
    # Python 2: keep native names, and provide a unicode-tolerant str().
    _MAX_INT = sys.maxint
    range = xrange
    def _ustr(obj):
        """Drop-in replacement for str(obj) that tries to be Unicode friendly. It first tries
           str(obj). If that fails with a UnicodeEncodeError, then it tries unicode(obj). It
           then < returns the unicode object | encodes it with the default encoding | ... >.
        """
        if isinstance(obj,unicode):
            return obj
        try:
            # If this works, then _ustr(obj) has the same behaviour as str(obj), so
            # it won't break any existing code.
            return str(obj)
        except UnicodeEncodeError:
            # The Python docs (http://docs.python.org/ref/customization.html#l2h-182)
            # state that "The return value must be a string object". However, does a
            # unicode object (being a subclass of basestring) count as a "string
            # object"?
            # If so, then return a unicode object:
            return unicode(obj)
            # Else encode it... but how? There are many choices... :)
            # Replace unprintables with escape codes?
            #return unicode(obj).encode(sys.getdefaultencoding(), 'backslashreplace_errors')
            # Replace unprintables with question marks?
            #return unicode(obj).encode(sys.getdefaultencoding(), 'replace')
            # ...
    def _str2dict(strg):
        return dict( [(c,0) for c in strg] )
    alphas = string.lowercase + string.uppercase
# build list of single arg builtins, tolerant of Python version, that can be used as parse actions
# NOTE(review): the bare 'import __builtin__' appears to be at module level here,
# which would fail on Python 3 despite the _PY3K guard above - confirm indentation
# against the released source before changing.
singleArgBuiltins = []
import __builtin__
for fname in "sum len enumerate sorted reversed list tuple set any all".split():
    try:
        singleArgBuiltins.append(getattr(__builtin__,fname))
    except AttributeError:
        continue
def _xml_escape(data):
"""Escape &, <, >, ", ', etc. in a string of data."""
# ampersand must be replaced first
from_symbols = '&><"\''
to_symbols = ['&'+s+';' for s in "amp gt lt quot apos".split()]
for from_,to_ in zip(from_symbols, to_symbols):
data = data.replace(from_, to_)
return data
class _Constants(object):
    """Empty namespace class used as a simple container for constants (see opAssoc)."""
    pass
# Basic character classes used throughout the module.
nums = string.digits
hexnums = nums + "ABCDEFabcdef"
alphanums = alphas + nums
# Backslash as chr(92) avoids escaping headaches in string literals.
_bslash = chr(92)
printables = "".join( [ c for c in string.printable if c not in string.whitespace ] )
class ParseBaseException(Exception):
    """base exception class for all parsing runtime exceptions"""
    # Performance tuning: we construct a *lot* of these, so keep this
    # constructor as small and fast as possible
    def __init__( self, pstr, loc=0, msg=None, elem=None ):
        # loc: character offset into the input; pstr: the input string;
        # msg: description; parserElement: the expression that failed.
        self.loc = loc
        if msg is None:
            # Single-argument form: the first argument is the message.
            self.msg = pstr
            self.pstr = ""
        else:
            self.msg = msg
            self.pstr = pstr
        self.parserElement = elem
    def __getattr__( self, aname ):
        """supported attributes by name are:
            - lineno - returns the line number of the exception text
            - col - returns the column number of the exception text
            - line - returns the line containing the exception text
        """
        # Computed lazily here rather than in __init__ to keep construction cheap.
        if( aname == "lineno" ):
            return lineno( self.loc, self.pstr )
        elif( aname in ("col", "column") ):
            return col( self.loc, self.pstr )
        elif( aname == "line" ):
            return line( self.loc, self.pstr )
        else:
            raise AttributeError(aname)
    def __str__( self ):
        return "%s (at char %d), (line:%d, col:%d)" % \
                ( self.msg, self.loc, self.lineno, self.column )
    def __repr__( self ):
        return _ustr(self)
    def markInputline( self, markerString = ">!<" ):
        """Extracts the exception line from the input string, and marks
           the location of the exception with a special symbol.
        """
        line_str = self.line
        line_column = self.column - 1
        if markerString:
            line_str = "".join( [line_str[:line_column],
                                markerString, line_str[line_column:]])
        return line_str.strip()
    def __dir__(self):
        return "loc msg pstr parserElement lineno col line " \
               "markInputLine __str__ __repr__".split()
class ParseException(ParseBaseException):
    """exception thrown when parse expressions don't match class;
    supported attributes by name are:
     - lineno - returns the line number of the exception text
     - col - returns the column number of the exception text
     - line - returns the line containing the exception text
    """
    # Recoverable: alternation and backtracking catch this internally.
    pass
class ParseFatalException(ParseBaseException):
    """user-throwable exception thrown when inconsistent parse content
       is found; stops all parsing immediately"""
    # Not caught by alternation/backtracking - aborts the whole parse.
    pass
class ParseSyntaxException(ParseFatalException):
    """just like C{ParseFatalException}, but thrown internally when an
       C{ErrorStop} ('-' operator) indicates that parsing is to stop immediately because
       an unbacktrackable syntax error has been found"""
    def __init__(self, pe):
        # Clone the originating exception's state into this fatal wrapper.
        super(ParseSyntaxException, self).__init__(
                                        pe.pstr, pe.loc, pe.msg, pe.parserElement)
#~ class ReparseException(ParseBaseException):
#~ """Experimental class - parse actions can raise this exception to cause
#~ pyparsing to reparse the input string:
#~ - with a modified input string, and/or
#~ - with a modified start location
#~ Set the values of the ReparseException in the constructor, and raise the
#~ exception in a parse action to cause pyparsing to use the new string/location.
#~ Setting the values as None causes no change to be made.
#~ """
#~ def __init_( self, newstring, restartLoc ):
#~ self.newParseText = newstring
#~ self.reparseLoc = restartLoc
class RecursiveGrammarException(Exception):
    """Raised by C{validate()} when the grammar appears to contain an
    improperly recursive cycle; carries the chain of elements that form it."""
    def __init__( self, parseElementList ):
        # remember the chain of parser elements that formed the cycle
        self.parseElementTrace = parseElementList
    def __str__( self ):
        return "RecursiveGrammarException: %s" % self.parseElementTrace
class _ParseResultsWithOffset(object):
    """Lightweight pairing of a parse result with its integer offset;
    behaves like the 2-tuple C{(result, offset)}."""
    def __init__(self, result, offset):
        self.tup = (result, offset)
    def __getitem__(self, key):
        return self.tup[key]
    def __repr__(self):
        return repr(self.tup)
    def setOffset(self, offset):
        # replace only the offset component, keeping the stored result
        self.tup = (self.tup[0], offset)
class ParseResults(object):
    """Structured parse results, to provide multiple means of access to the parsed data:
       - as a list (C{len(results)})
       - by list index (C{results[0], results[1]}, etc.)
       - by attribute (C{results.<resultsName>})
       """
    #~ __slots__ = ( "__toklist", "__tokdict", "__doinit", "__name", "__parent", "__accumNames", "__weakref__" )
    def __new__(cls, toklist, name=None, asList=True, modal=True ):
        # Reuse an existing ParseResults unchanged; __doinit tells __init__
        # whether the instance still needs initialization (a reused instance
        # must not be re-initialized).
        if isinstance(toklist, cls):
            return toklist
        retobj = object.__new__(cls)
        retobj.__doinit = True
        return retobj
    # Performance tuning: we construct a *lot* of these, so keep this
    # constructor as small and fast as possible
    def __init__( self, toklist, name=None, asList=True, modal=True, isinstance=isinstance ):
        # 'isinstance=isinstance' binds the builtin as a fast local name.
        if self.__doinit:
            self.__doinit = False
            self.__name = None
            self.__parent = None
            self.__accumNames = {}
            if isinstance(toklist, list):
                self.__toklist = toklist[:]
            else:
                self.__toklist = [toklist]
            self.__tokdict = dict()
        # attach the results name, if one was given
        if name is not None and name:
            if not modal:
                # non-modal names accumulate every match instead of keeping only the last
                self.__accumNames[name] = 0
            if isinstance(name,int):
                name = _ustr(name) # will always return a str, but use _ustr for consistency
            self.__name = name
            if not toklist in (None,'',[]):
                if isinstance(toklist,basestring):
                    toklist = [ toklist ]
                if asList:
                    if isinstance(toklist,ParseResults):
                        self[name] = _ParseResultsWithOffset(toklist.copy(),0)
                    else:
                        self[name] = _ParseResultsWithOffset(ParseResults(toklist[0]),0)
                    self[name].__name = name
                else:
                    try:
                        self[name] = toklist[0]
                    except (KeyError,TypeError,IndexError):
                        self[name] = toklist
    def __getitem__( self, i ):
        # int/slice -> positional token access; anything else -> named-result lookup
        if isinstance( i, (int,slice) ):
            return self.__toklist[i]
        else:
            if i not in self.__accumNames:
                # modal name: return only the most recent match
                return self.__tokdict[i][-1][0]
            else:
                # accumulating (listAllMatches) name: return every match
                return ParseResults([ v[0] for v in self.__tokdict[i] ])
    def __setitem__( self, k, v, isinstance=isinstance ):
        if isinstance(v,_ParseResultsWithOffset):
            self.__tokdict[k] = self.__tokdict.get(k,list()) + [v]
            sub = v[0]
        elif isinstance(k,int):
            self.__toklist[k] = v
            sub = v
        else:
            self.__tokdict[k] = self.__tokdict.get(k,list()) + [_ParseResultsWithOffset(v,0)]
            sub = v
        if isinstance(sub,ParseResults):
            # keep a weak back-reference so getName() can find the owning results
            sub.__parent = wkref(self)
    def __delitem__( self, i ):
        if isinstance(i,(int,slice)):
            mylen = len( self.__toklist )
            del self.__toklist[i]
            # convert int to slice
            if isinstance(i, int):
                if i < 0:
                    i += mylen
                i = slice(i, i+1)
            # get removed indices
            removed = list(range(*i.indices(mylen)))
            removed.reverse()
            # fixup indices in token dictionary
            for name in self.__tokdict:
                occurrences = self.__tokdict[name]
                for j in removed:
                    for k, (value, position) in enumerate(occurrences):
                        occurrences[k] = _ParseResultsWithOffset(value, position - (position > j))
        else:
            del self.__tokdict[i]
    def __contains__( self, k ):
        # membership tests named results only, not positional tokens
        return k in self.__tokdict
    def __len__( self ): return len( self.__toklist )
    def __bool__(self): return len( self.__toklist ) > 0
    __nonzero__ = __bool__  # Python 2 truth-value protocol
    def __iter__( self ): return iter( self.__toklist )
    def __reversed__( self ): return iter( self.__toklist[::-1] )
    def keys( self ):
        """Returns all named result keys."""
        return self.__tokdict.keys()
    def pop( self, index=-1 ):
        """Removes and returns item at specified index (default=last).
           Will work with either numeric indices or dict-key indicies."""
        ret = self[index]
        del self[index]
        return ret
    def get(self, key, defaultValue=None):
        """Returns named result matching the given key, or if there is no
           such name, then returns the given C{defaultValue} or C{None} if no
           C{defaultValue} is specified."""
        if key in self:
            return self[key]
        else:
            return defaultValue
    def insert( self, index, insStr ):
        """Inserts new element at location index in the list of parsed tokens."""
        self.__toklist.insert(index, insStr)
        # fixup indices in token dictionary
        for name in self.__tokdict:
            occurrences = self.__tokdict[name]
            for k, (value, position) in enumerate(occurrences):
                occurrences[k] = _ParseResultsWithOffset(value, position + (position > index))
    def items( self ):
        """Returns all named result keys and values as a list of tuples."""
        return [(k,self[k]) for k in self.__tokdict]
    def values( self ):
        """Returns all named result values."""
        return [ v[-1][0] for v in self.__tokdict.values() ]
    def __getattr__( self, name ):
        # Attribute access falls back to named-result lookup; unknown names
        # return "" rather than raising AttributeError.
        if True: #name not in self.__slots__:
            if name in self.__tokdict:
                if name not in self.__accumNames:
                    return self.__tokdict[name][-1][0]
                else:
                    return ParseResults([ v[0] for v in self.__tokdict[name] ])
            else:
                return ""
        return None
    def __add__( self, other ):
        # non-destructive concatenation, implemented via copy plus in-place add
        ret = self.copy()
        ret += other
        return ret
    def __iadd__( self, other ):
        if other.__tokdict:
            offset = len(self.__toklist)
            # shift the other results' offsets past this result's tokens;
            # negative (end-relative) offsets are mapped to the join point
            addoffset = ( lambda a: (a<0 and offset) or (a+offset) )
            otheritems = other.__tokdict.items()
            otherdictitems = [(k, _ParseResultsWithOffset(v[0],addoffset(v[1])) )
                                for (k,vlist) in otheritems for v in vlist]
            for k,v in otherdictitems:
                self[k] = v
                if isinstance(v[0],ParseResults):
                    v[0].__parent = wkref(self)
        self.__toklist += other.__toklist
        self.__accumNames.update( other.__accumNames )
        return self
    def __radd__(self, other):
        # supports sum(list_of_results) with its default integer 0 start value
        if isinstance(other,int) and other == 0:
            return self.copy()
    def __repr__( self ):
        return "(%s, %s)" % ( repr( self.__toklist ), repr( self.__tokdict ) )
    def __str__( self ):
        # list-like rendering; nested ParseResults use their own __str__
        out = "["
        sep = ""
        for i in self.__toklist:
            if isinstance(i, ParseResults):
                out += sep + _ustr(i)
            else:
                out += sep + repr(i)
            sep = ", "
        out += "]"
        return out
    def _asStringList( self, sep='' ):
        # flatten all tokens to a flat list of strings, optionally interleaving 'sep'
        out = []
        for item in self.__toklist:
            if out and sep:
                out.append(sep)
            if isinstance( item, ParseResults ):
                out += item._asStringList()
            else:
                out.append( _ustr(item) )
        return out
    def asList( self ):
        """Returns the parse results as a nested list of matching tokens, all converted to strings."""
        out = []
        for res in self.__toklist:
            if isinstance(res,ParseResults):
                out.append( res.asList() )
            else:
                out.append( res )
        return out
    def asDict( self ):
        """Returns the named parse results as dictionary."""
        return dict( self.items() )
    def copy( self ):
        """Returns a new copy of a C{ParseResults} object."""
        ret = ParseResults( self.__toklist )
        ret.__tokdict = self.__tokdict.copy()
        ret.__parent = self.__parent
        ret.__accumNames.update( self.__accumNames )
        ret.__name = self.__name
        return ret
    def asXML( self, doctag=None, namedItemsOnly=False, indent="", formatted=True ):
        """Returns the parse results as XML. Tags are created for tokens and lists that have defined results names."""
        nl = "\n"
        out = []
        # map token offset -> results name, so positional tokens can be tagged
        namedItems = dict( [ (v[1],k) for (k,vlist) in self.__tokdict.items()
                                                            for v in vlist ] )
        nextLevelIndent = indent + " "
        # collapse out indents if formatting is not desired
        if not formatted:
            indent = ""
            nextLevelIndent = ""
            nl = ""
        selfTag = None
        if doctag is not None:
            selfTag = doctag
        else:
            if self.__name:
                selfTag = self.__name
        if not selfTag:
            if namedItemsOnly:
                return ""
            else:
                selfTag = "ITEM"
        out += [ nl, indent, "<", selfTag, ">" ]
        worklist = self.__toklist
        for i,res in enumerate(worklist):
            if isinstance(res,ParseResults):
                if i in namedItems:
                    out += [ res.asXML(namedItems[i],
                                        namedItemsOnly and doctag is None,
                                        nextLevelIndent,
                                        formatted)]
                else:
                    out += [ res.asXML(None,
                                        namedItemsOnly and doctag is None,
                                        nextLevelIndent,
                                        formatted)]
            else:
                # individual token, see if there is a name for it
                resTag = None
                if i in namedItems:
                    resTag = namedItems[i]
                if not resTag:
                    if namedItemsOnly:
                        continue
                    else:
                        resTag = "ITEM"
                xmlBodyText = _xml_escape(_ustr(res))
                out += [ nl, nextLevelIndent, "<", resTag, ">",
                                                xmlBodyText,
                                                "</", resTag, ">" ]
        out += [ nl, indent, "</", selfTag, ">" ]
        return "".join(out)
    def __lookup(self,sub):
        # reverse lookup: find the results name under which 'sub' is stored
        for k,vlist in self.__tokdict.items():
            for v,loc in vlist:
                if sub is v:
                    return k
        return None
    def getName(self):
        """Returns the results name for this token expression."""
        if self.__name:
            return self.__name
        elif self.__parent:
            # ask the (weakly referenced) parent which name we were stored under
            par = self.__parent()
            if par:
                return par.__lookup(self)
            else:
                return None
        elif (len(self) == 1 and
               len(self.__tokdict) == 1 and
               self.__tokdict.values()[0][0][1] in (0,-1)):
            # NOTE(review): indexing dict.values()/keys() assumes they return
            # lists (Python 2 semantics) -- confirm if Python 3 support is needed
            return self.__tokdict.keys()[0]
        else:
            return None
    def dump(self,indent='',depth=0):
        """Diagnostic method for listing out the contents of a C{ParseResults}.
           Accepts an optional C{indent} argument so that this string can be embedded
           in a nested display of other data."""
        out = []
        out.append( indent+_ustr(self.asList()) )
        keys = self.items()
        keys.sort()
        for k,v in keys:
            if out:
                out.append('\n')
            out.append( "%s%s- %s: " % (indent,(' '*depth), k) )
            if isinstance(v,ParseResults):
                if v.keys():
                    # nested results with names of their own: recurse one level deeper
                    out.append( v.dump(indent,depth+1) )
                else:
                    out.append(_ustr(v))
            else:
                out.append(_ustr(v))
        return "".join(out)
    # add support for pickle protocol
    def __getstate__(self):
        # the weak parent reference is dereferenced (or None) for pickling
        return ( self.__toklist,
                 ( self.__tokdict.copy(),
                   self.__parent is not None and self.__parent() or None,
                   self.__accumNames,
                   self.__name ) )
    def __setstate__(self,state):
        self.__toklist = state[0]
        self.__tokdict, \
        par, \
        inAccumNames, \
        self.__name = state[1]
        self.__accumNames = {}
        self.__accumNames.update(inAccumNames)
        if par is not None:
            # restore the parent link as a weak reference
            self.__parent = wkref(par)
        else:
            self.__parent = None
    def __dir__(self):
        # expose the named result keys alongside the normal attributes
        return dir(super(ParseResults,self)) + self.keys()
def col (loc,strg):
    """Returns current column within a string, counting newlines as line separators.
   The first column is number 1.

   Note: the default parsing behavior is to expand tabs in the input string
   before starting the parsing process.  See L{I{ParserElement.parseString}<ParserElement.parseString>} for more information
   on parsing strings containing <TAB>s, and suggested methods to maintain a
   consistent view of the parsed string, the parse location, and line and column
   positions within the parsed string.
   """
    # a newline character is considered column 1 of the following line
    if loc < len(strg) and strg[loc] == '\n':
        return 1
    return loc - strg.rfind("\n", 0, loc)
def lineno(loc,strg):
    """Returns current line number within a string, counting newlines as line separators.
   The first line is number 1.

   Note: the default parsing behavior is to expand tabs in the input string
   before starting the parsing process.  See L{I{ParserElement.parseString}<ParserElement.parseString>} for more information
   on parsing strings containing <TAB>s, and suggested methods to maintain a
   consistent view of the parsed string, the parse location, and line and column
   positions within the parsed string.
   """
    # one more than the number of newlines preceding loc
    return 1 + strg.count("\n", 0, loc)
def line( loc, strg ):
    """Returns the line of text containing loc within a string, counting newlines as line separators.
       """
    start = strg.rfind("\n", 0, loc) + 1
    end = strg.find("\n", loc)
    if end < 0:
        # loc is on the final line; take everything to the end of the string
        return strg[start:]
    return strg[start:end]
def _defaultStartDebugAction( instring, loc, expr ):
    """Default debug hook fired when an expression begins a match attempt."""
    position = "(%d,%d)" % ( lineno(loc,instring), col(loc,instring) )
    print ("Match " + _ustr(expr) + " at loc " + _ustr(loc) + position)
def _defaultSuccessDebugAction( instring, startloc, endloc, expr, toks ):
    """Default debug hook fired when an expression matches successfully."""
    print ("Matched %s -> %s" % (_ustr(expr), str(toks.asList())))
def _defaultExceptionDebugAction( instring, loc, expr, exc ):
    """Default debug hook fired when a match attempt raises an exception."""
    print ("Exception raised:%s" % _ustr(exc))
def nullDebugAction(*args):
    """Debug action that accepts any arguments and does nothing, used to
    suppress debugging output during parsing."""
    pass
class ParserElement(object):
    """Abstract base level parser element class."""
    DEFAULT_WHITE_CHARS = " \n\t\r"  # whitespace skipped by default before each match attempt
    verbose_stacktrace = False  # when True, re-raise parse errors with the full internal stack trace
def setDefaultWhitespaceChars( chars ):
"""Overrides the default whitespace chars
"""
ParserElement.DEFAULT_WHITE_CHARS = chars
setDefaultWhitespaceChars = staticmethod(setDefaultWhitespaceChars)
    def __init__( self, savelist=False ):
        """Initialize shared parser-element state; C{savelist} controls whether
        matched tokens are kept as a sub-list in the results."""
        self.parseAction = list()  # parse actions run on each successful match
        self.failAction = None  # optional callback invoked when this element fails
        #~ self.name = "<unknown>"  # don't define self.name, let subclasses try/except upcall
        self.strRepr = None  # cached string representation, built lazily
        self.resultsName = None  # name under which matched tokens are stored, if any
        self.saveAsList = savelist
        self.skipWhitespace = True  # skip leading whitespace before matching
        self.whiteChars = ParserElement.DEFAULT_WHITE_CHARS
        self.copyDefaultWhiteChars = True  # copy() re-reads the class default whitespace chars
        self.mayReturnEmpty = False # used when checking for left-recursion
        self.keepTabs = False  # when True, input tabs are not expanded before parsing
        self.ignoreExprs = list()  # expressions (e.g. comments) to skip over while matching
        self.debug = False
        self.streamlined = False  # set once streamline() has optimized this element
        self.mayIndexError = True # used to optimize exception handling for subclasses that don't advance parse index
        self.errmsg = ""  # message used when raising ParseException for this element
        self.modalResults = True # used to mark results names as modal (report only last) or cumulative (list all)
        self.debugActions = ( None, None, None ) #custom debug actions
        self.re = None  # compiled regular expression, for regex-backed subclasses
        self.callPreparse = True # used to avoid redundant calls to preParse
        self.callDuringTry = False  # run parse actions even during lookahead/try parses
def copy( self ):
"""Make a copy of this C{ParserElement}. Useful for defining different parse actions
for the same parsing pattern, using copies of the original parse element."""
cpy = copy.copy( self )
cpy.parseAction = self.parseAction[:]
cpy.ignoreExprs = self.ignoreExprs[:]
if self.copyDefaultWhiteChars:
cpy.whiteChars = ParserElement.DEFAULT_WHITE_CHARS
return cpy
def setName( self, name ):
"""Define name for this expression, for use in debugging."""
self.name = name
self.errmsg = "Expected " + self.name
if hasattr(self,"exception"):
self.exception.msg = self.errmsg
return self
def setResultsName( self, name, listAllMatches=False ):
"""Define name for referencing matching tokens as a nested attribute
of the returned parse results.
NOTE: this returns a *copy* of the original C{ParserElement} object;
this is so that the client can define a basic element, such as an
integer, and reference it in multiple places with different names.
You can also set results names using the abbreviated syntax,
C{expr("name")} in place of C{expr.setResultsName("name")} -
see L{I{__call__}<__call__>}.
"""
newself = self.copy()
newself.resultsName = name
newself.modalResults = not listAllMatches
return newself
    def setBreak(self,breakFlag = True):
        """Method to invoke the Python pdb debugger when this element is
           about to be parsed. Set C{breakFlag} to True to enable, False to
           disable.
        """
        if breakFlag:
            # wrap the current _parse in a closure that drops into pdb first
            _parseMethod = self._parse
            def breaker(instring, loc, doActions=True, callPreParse=True):
                import pdb
                pdb.set_trace()
                return _parseMethod( instring, loc, doActions, callPreParse )
            # remember the wrapped method so it can be restored later
            breaker._originalParseMethod = _parseMethod
            self._parse = breaker
        else:
            # restore the original method if a breaker wrapper is installed
            if hasattr(self._parse,"_originalParseMethod"):
                self._parse = self._parse._originalParseMethod
        return self
    def _normalizeParseActionArgs( f ):
        """Internal method used to decorate parse actions that take fewer than 3 arguments,
           so that all parse actions can be called as C{f(s,l,t)}."""
        STAR_ARGS = 4  # CO_VARARGS bit of co_flags: *args callables accept any arity
        # special handling for single-argument builtins
        if (f in singleArgBuiltins):
            numargs = 1
        else:
            try:
                restore = None
                if isinstance(f,type):
                    # for a class, count the arguments of its __init__ instead
                    restore = f
                    f = f.__init__
                if not _PY3K:
                    codeObj = f.func_code
                else:
                    # NOTE(review): py3 functions expose __code__, not .code; the
                    # resulting AttributeError is caught below and the function is
                    # handled via f.__code__ in the next handler -- confirm intended
                    codeObj = f.code
                if codeObj.co_flags & STAR_ARGS:
                    return f
                numargs = codeObj.co_argcount
                if not _PY3K:
                    if hasattr(f,"im_self"):
                        numargs -= 1  # bound method: co_argcount counts 'self'
                else:
                    if hasattr(f,"__self__"):
                        numargs -= 1
                if restore:
                    f = restore
            except AttributeError:
                try:
                    if not _PY3K:
                        call_im_func_code = f.__call__.im_func.func_code
                    else:
                        call_im_func_code = f.__code__
                    # not a function, must be a callable object, get info from the
                    # im_func binding of its bound __call__ method
                    if call_im_func_code.co_flags & STAR_ARGS:
                        return f
                    numargs = call_im_func_code.co_argcount
                    if not _PY3K:
                        if hasattr(f.__call__,"im_self"):
                            numargs -= 1
                    else:
                        if hasattr(f.__call__,"__self__"):
                            # NOTE(review): deliberately no decrement here? Under py3
                            # a plain function reaches this branch (via the f.code
                            # AttributeError above) and f.__code__.co_argcount already
                            # excludes 'self', while the function's method-wrapper
                            # __call__ always has __self__ -- confirm before changing.
                            numargs -= 0
                except AttributeError:
                    if not _PY3K:
                        call_func_code = f.__call__.func_code
                    else:
                        call_func_code = f.__call__.__code__
                    # not a bound method, get info directly from __call__ method
                    if call_func_code.co_flags & STAR_ARGS:
                        return f
                    numargs = call_func_code.co_argcount
                    if not _PY3K:
                        if hasattr(f.__call__,"im_self"):
                            numargs -= 1
                    else:
                        if hasattr(f.__call__,"__self__"):
                            numargs -= 1
        #~ print ("adding function %s with %d args" % (f.func_name,numargs))
        if numargs == 3:
            return f
        else:
            # wrap f in a 3-argument adapter matching the (s,l,t) signature
            if numargs > 3:
                def tmp(s,l,t):
                    # NOTE(review): >3 args implies a callable object counted with
                    # 'self'; the instance is passed explicitly here, but the
                    # unconditional if/elif/else chain below can rebind tmp -- confirm
                    return f(f.__call__.__self__, s,l,t)
            if numargs == 2:
                def tmp(s,l,t):
                    return f(l,t)
            elif numargs == 1:
                def tmp(s,l,t):
                    return f(t)
            else: #~ numargs == 0:
                def tmp(s,l,t):
                    return f()
            # propagate identity/metadata from the wrapped callable where possible
            try:
                tmp.__name__ = f.__name__
            except (AttributeError,TypeError):
                # no need for special handling if attribute doesnt exist
                pass
            try:
                tmp.__doc__ = f.__doc__
            except (AttributeError,TypeError):
                # no need for special handling if attribute doesnt exist
                pass
            try:
                tmp.__dict__.update(f.__dict__)
            except (AttributeError,TypeError):
                # no need for special handling if attribute doesnt exist
                pass
            return tmp
    _normalizeParseActionArgs = staticmethod(_normalizeParseActionArgs)
def setParseAction( self, *fns, **kwargs ):
"""Define action to perform when successfully matching parse element definition.
Parse action fn is a callable method with 0-3 arguments, called as C{fn(s,loc,toks)},
C{fn(loc,toks)}, C{fn(toks)}, or just C{fn()}, where:
- s = the original string being parsed (see note below)
- loc = the location of the matching substring
- toks = a list of the matched tokens, packaged as a ParseResults object
If the functions in fns modify the tokens, they can return them as the return
value from fn, and the modified list of tokens will replace the original.
Otherwise, fn does not need to return any value.
Note: the default parsing behavior is to expand tabs in the input string
before starting the parsing process. See L{I{parseString}<parseString>} for more information
on parsing strings containing <TAB>s, and suggested methods to maintain a
consistent view of the parsed string, the parse location, and line and column
positions within the parsed string.
"""
self.parseAction = list(map(self._normalizeParseActionArgs, list(fns)))
self.callDuringTry = ("callDuringTry" in kwargs and kwargs["callDuringTry"])
return self
def addParseAction( self, *fns, **kwargs ):
"""Add parse action to expression's list of parse actions. See L{I{setParseAction}<setParseAction>}."""
self.parseAction += list(map(self._normalizeParseActionArgs, list(fns)))
self.callDuringTry = self.callDuringTry or ("callDuringTry" in kwargs and kwargs["callDuringTry"])
return self
def setFailAction( self, fn ):
"""Define action to perform if parsing fails at this expression.
Fail acton fn is a callable function that takes the arguments
C{fn(s,loc,expr,err)} where:
- s = string being parsed
- loc = location where expression match was attempted and failed
- expr = the parse expression that failed
- err = the exception thrown
The function returns no value. It may throw C{ParseFatalException}
if it is desired to stop parsing immediately."""
self.failAction = fn
return self
def _skipIgnorables( self, instring, loc ):
exprsFound = True
while exprsFound:
exprsFound = False
for e in self.ignoreExprs:
try:
while 1:
loc,dummy = e._parse( instring, loc )
exprsFound = True
except ParseException:
pass
return loc
def preParse( self, instring, loc ):
if self.ignoreExprs:
loc = self._skipIgnorables( instring, loc )
if self.skipWhitespace:
wt = self.whiteChars
instrlen = len(instring)
while loc < instrlen and instring[loc] in wt:
loc += 1
return loc
def parseImpl( self, instring, loc, doActions=True ):
return loc, []
def postParse( self, instring, loc, tokenlist ):
return tokenlist
    #~ @profile
    def _parseNoCache( self, instring, loc, doActions=True, callPreParse=True ):
        """Match this element at C{loc} without memoization; returns
        C{(new loc, ParseResults)}.  Runs pre-parse skipping, parseImpl,
        post-parse, parse actions, and any configured debug/fail actions."""
        debugging = ( self.debug ) #and doActions )

        if debugging or self.failAction:
            # slow path: debug/fail hooks are fired around the match attempt
            #~ print ("Match",self,"at loc",loc,"(%d,%d)" % ( lineno(loc,instring), col(loc,instring) ))
            if (self.debugActions[0] ):
                self.debugActions[0]( instring, loc, self )
            if callPreParse and self.callPreparse:
                preloc = self.preParse( instring, loc )
            else:
                preloc = loc
            tokensStart = preloc
            try:
                try:
                    loc,tokens = self.parseImpl( instring, preloc, doActions )
                except IndexError:
                    # ran off the end of the input; report as a normal parse failure
                    raise ParseException( instring, len(instring), self.errmsg, self )
            except ParseBaseException:
                #~ print ("Exception raised:", err)
                err = None
                if self.debugActions[2]:
                    err = sys.exc_info()[1]
                    self.debugActions[2]( instring, tokensStart, self, err )
                if self.failAction:
                    if err is None:
                        err = sys.exc_info()[1]
                    self.failAction( instring, tokensStart, self, err )
                raise
        else:
            # fast path: no debug hooks, optionally skip the IndexError guard
            if callPreParse and self.callPreparse:
                preloc = self.preParse( instring, loc )
            else:
                preloc = loc
            tokensStart = preloc
            if self.mayIndexError or loc >= len(instring):
                try:
                    loc,tokens = self.parseImpl( instring, preloc, doActions )
                except IndexError:
                    raise ParseException( instring, len(instring), self.errmsg, self )
            else:
                loc,tokens = self.parseImpl( instring, preloc, doActions )

        tokens = self.postParse( instring, loc, tokens )

        retTokens = ParseResults( tokens, self.resultsName, asList=self.saveAsList, modal=self.modalResults )
        if self.parseAction and (doActions or self.callDuringTry):
            if debugging:
                try:
                    for fn in self.parseAction:
                        tokens = fn( instring, tokensStart, retTokens )
                        if tokens is not None:
                            # a parse action may return replacement tokens
                            retTokens = ParseResults( tokens,
                                                      self.resultsName,
                                                      asList=self.saveAsList and isinstance(tokens,(ParseResults,list)),
                                                      modal=self.modalResults )
                except ParseBaseException:
                    #~ print "Exception raised in user parse action:", err
                    if (self.debugActions[2] ):
                        err = sys.exc_info()[1]
                        self.debugActions[2]( instring, tokensStart, self, err )
                    raise
            else:
                for fn in self.parseAction:
                    tokens = fn( instring, tokensStart, retTokens )
                    if tokens is not None:
                        retTokens = ParseResults( tokens,
                                                  self.resultsName,
                                                  asList=self.saveAsList and isinstance(tokens,(ParseResults,list)),
                                                  modal=self.modalResults )

        if debugging:
            #~ print ("Matched",self,"->",retTokens.asList())
            if (self.debugActions[1] ):
                self.debugActions[1]( instring, tokensStart, loc, self, retTokens )

        return loc, retTokens
def tryParse( self, instring, loc ):
try:
return self._parse( instring, loc, doActions=False )[0]
except ParseFatalException:
raise ParseException( instring, loc, self.errmsg, self)
    # this method gets repeatedly called during backtracking with the same arguments -
    # we can cache these arguments and save ourselves the trouble of re-parsing the contained expression
    def _parseCache( self, instring, loc, doActions=True, callPreParse=True ):
        """Packrat (memoizing) wrapper around C{_parseNoCache}; caches both
        successful results and raised parse exceptions per argument tuple."""
        lookup = (self,instring,loc,callPreParse,doActions)
        if lookup in ParserElement._exprArgCache:
            value = ParserElement._exprArgCache[ lookup ]
            if isinstance(value,Exception):
                # a cached failure: re-raise the same exception instance
                raise value
            return value
        else:
            try:
                value = self._parseNoCache( instring, loc, doActions, callPreParse )
                # cache a copy of the results so later callers cannot mutate the cached value
                ParserElement._exprArgCache[ lookup ] = (value[0],value[1].copy())
                return value
            except ParseBaseException:
                pe = sys.exc_info()[1]
                ParserElement._exprArgCache[ lookup ] = pe
                raise
_parse = _parseNoCache
# argument cache for optimizing repeated calls when backtracking through recursive expressions
_exprArgCache = {}
def resetCache():
ParserElement._exprArgCache.clear()
resetCache = staticmethod(resetCache)
_packratEnabled = False
    def enablePackrat():
        """Enables "packrat" parsing, which adds memoizing to the parsing logic.
           Repeated parse attempts at the same string location (which happens
           often in many complex grammars) can immediately return a cached value,
           instead of re-executing parsing/validating code.  Memoizing is done of
           both valid results and parsing exceptions.

           This speedup may break existing programs that use parse actions that
           have side-effects.  For this reason, packrat parsing is disabled when
           you first import pyparsing.  To activate the packrat feature, your
           program must call the class method C{ParserElement.enablePackrat()}.  If
           your program uses C{psyco} to "compile as you go", you must call
           C{enablePackrat} before calling C{psyco.full()}.  If you do not do this,
           Python will crash.  For best results, call C{enablePackrat()} immediately
           after importing pyparsing.
        """
        if not ParserElement._packratEnabled:
            ParserElement._packratEnabled = True
            # rebind the shared _parse entry point to the memoizing version
            ParserElement._parse = ParserElement._parseCache
    enablePackrat = staticmethod(enablePackrat)
    def parseString( self, instring, parseAll=False ):
        """Execute the parse expression with the given string.
           This is the main interface to the client code, once the complete
           expression has been built.

           If you want the grammar to require that the entire input string be
           successfully parsed, then set C{parseAll} to True (equivalent to ending
           the grammar with C{StringEnd()}).

           Note: C{parseString} implicitly calls C{expandtabs()} on the input string,
           in order to report proper column numbers in parse actions.
           If the input string contains tabs and
           the grammar uses parse actions that use the C{loc} argument to index into the
           string being parsed, you can ensure you have a consistent view of the input
           string by:
           - calling C{parseWithTabs} on your grammar before calling C{parseString}
             (see L{I{parseWithTabs}<parseWithTabs>})
           - define your parse action using the full C{(s,loc,toks)} signature, and
             reference the input string using the parse action's C{s} argument
           - explictly expand the tabs in your input string before calling
             C{parseString}
        """
        ParserElement.resetCache()
        if not self.streamlined:
            self.streamline()
            #~ self.saveAsList = True
        for e in self.ignoreExprs:
            e.streamline()
        if not self.keepTabs:
            # tabs are expanded by default so reported columns stay consistent
            instring = instring.expandtabs()
        try:
            loc, tokens = self._parse( instring, 0 )
            if parseAll:
                # require the remainder of the input to be empty
                #loc = self.preParse( instring, loc )
                se = StringEnd()
                se._parse( instring, loc )
        except ParseBaseException:
            if ParserElement.verbose_stacktrace:
                raise
            else:
                # catch and re-raise exception from here, clears out pyparsing internal stack trace
                exc = sys.exc_info()[1]
                raise exc
        else:
            return tokens
    def scanString( self, instring, maxMatches=_MAX_INT ):
        """Scan the input string for expression matches.  Each match will return the
           matching tokens, start location, and end location.  May be called with optional
           C{maxMatches} argument, to clip scanning after 'n' matches are found.

           Note that the start and end locations are reported relative to the string
           being parsed.  See L{I{parseString}<parseString>} for more information on parsing
           strings with embedded tabs."""
        if not self.streamlined:
            self.streamline()
        for e in self.ignoreExprs:
            e.streamline()

        if not self.keepTabs:
            instring = _ustr(instring).expandtabs()
        instrlen = len(instring)
        loc = 0
        # hoist bound methods out of the scan loop for speed
        preparseFn = self.preParse
        parseFn = self._parse
        ParserElement.resetCache()
        matches = 0
        try:
            while loc <= instrlen and matches < maxMatches:
                try:
                    preloc = preparseFn( instring, loc )
                    nextLoc,tokens = parseFn( instring, preloc, callPreParse=False )
                except ParseException:
                    # no match at this position; step past the pre-parsed location
                    # NOTE(review): if preparseFn itself raised, 'preloc' still holds
                    # the previous iteration's value (or is unbound on the very first
                    # pass) -- confirm preParse cannot raise ParseException here
                    loc = preloc+1
                else:
                    if nextLoc > loc:
                        matches += 1
                        yield tokens, preloc, nextLoc
                        loc = nextLoc
                    else:
                        # zero-width match: advance one position to avoid an infinite loop
                        loc = preloc+1
        except ParseBaseException:
            if ParserElement.verbose_stacktrace:
                raise
            else:
                # catch and re-raise exception from here, clears out pyparsing internal stack trace
                exc = sys.exc_info()[1]
                raise exc
    def transformString( self, instring ):
        """Extension to C{scanString}, to modify matching text with modified tokens that may
           be returned from a parse action.  To use C{transformString}, define a grammar and
           attach a parse action to it that modifies the returned token list.
           Invoking C{transformString()} on a target string will then scan for matches,
           and replace the matched text patterns according to the logic in the parse
           action.  C{transformString()} returns the resulting transformed string."""
        out = []
        lastE = 0
        # force preservation of <TAB>s, to minimize unwanted transformation of string, and to
        # keep string locs straight between transformString and scanString
        self.keepTabs = True
        try:
            for t,s,e in self.scanString( instring ):
                # copy the unmatched text preceding this match
                out.append( instring[lastE:s] )
                if t:
                    # append the (possibly action-modified) replacement tokens
                    if isinstance(t,ParseResults):
                        out += t.asList()
                    elif isinstance(t,list):
                        out += t
                    else:
                        out.append(t)
                lastE = e
            out.append(instring[lastE:])
            return "".join(map(_ustr,_flatten(out)))
        except ParseBaseException:
            if ParserElement.verbose_stacktrace:
                raise
            else:
                # catch and re-raise exception from here, clears out pyparsing internal stack trace
                exc = sys.exc_info()[1]
                raise exc
def searchString( self, instring, maxMatches=_MAX_INT ):
"""Another extension to C{scanString}, simplifying the access to the tokens found
to match the given parse expression. May be called with optional
C{maxMatches} argument, to clip searching after 'n' matches are found.
"""
try:
return ParseResults([ t for t,s,e in self.scanString( instring, maxMatches ) ])
except ParseBaseException:
if ParserElement.verbose_stacktrace:
raise
else:
# catch and re-raise exception from here, clears out pyparsing internal stack trace
exc = sys.exc_info()[1]
raise exc
def __add__(self, other ):
"""Implementation of + operator - returns And"""
if isinstance( other, basestring ):
other = Literal( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return And( [ self, other ] )
def __radd__(self, other ):
"""Implementation of + operator when left operand is not a C{ParserElement}"""
if isinstance( other, basestring ):
other = Literal( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return other + self
def __sub__(self, other):
"""Implementation of - operator, returns C{And} with error stop"""
if isinstance( other, basestring ):
other = Literal( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return And( [ self, And._ErrorStop(), other ] )
def __rsub__(self, other ):
"""Implementation of - operator when left operand is not a C{ParserElement}"""
if isinstance( other, basestring ):
other = Literal( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return other - self
    def __mul__(self,other):
        """Implementation of * operator, allows use of C{expr * 3} in place of
           C{expr + expr + expr}.  Expressions may also me multiplied by a 2-integer
           tuple, similar to C{{min,max}} multipliers in regular expressions.  Tuples
           may also include C{None} as in:
            - C{expr*(n,None)} or C{expr*(n,)} is equivalent
              to C{expr*n + ZeroOrMore(expr)}
              (read as "at least n instances of C{expr}")
            - C{expr*(None,n)} is equivalent to C{expr*(0,n)}
              (read as "0 to n instances of C{expr}")
            - C{expr*(None,None)} is equivalent to C{ZeroOrMore(expr)}
            - C{expr*(1,None)} is equivalent to C{OneOrMore(expr)}

           Note that C{expr*(None,n)} does not raise an exception if
           more than n exprs exist in the input stream; that is,
           C{expr*(None,n)} does not enforce a maximum number of expr
           occurrences.  If this behavior is desired, then write
           C{expr*(None,n) + ~expr}
        """
        if isinstance(other,int):
            minElements, optElements = other,0
        elif isinstance(other,tuple):
            # normalize to a full 2-tuple, padding missing entries with None
            other = (other + (None, None))[:2]
            if other[0] is None:
                other = (0, other[1])
            if isinstance(other[0],int) and other[1] is None:
                # open-ended repetition: (n, None)
                if other[0] == 0:
                    return ZeroOrMore(self)
                if other[0] == 1:
                    return OneOrMore(self)
                else:
                    return self*other[0] + ZeroOrMore(self)
            elif isinstance(other[0],int) and isinstance(other[1],int):
                # bounded repetition: (min, max) -> min required + (max-min) optional
                minElements, optElements = other
                optElements -= minElements
            else:
                raise TypeError("cannot multiply 'ParserElement' and ('%s','%s') objects", type(other[0]),type(other[1]))
        else:
            raise TypeError("cannot multiply 'ParserElement' and '%s' objects", type(other))

        if minElements < 0:
            raise ValueError("cannot multiply ParserElement by negative value")
        if optElements < 0:
            raise ValueError("second tuple value must be greater or equal to first tuple value")
        if minElements == optElements == 0:
            raise ValueError("cannot multiply ParserElement by 0 or (0,0)")

        if (optElements):
            # build the optional tail as nested Optionals: Optional(e + Optional(e + ...))
            def makeOptionalList(n):
                if n>1:
                    return Optional(self + makeOptionalList(n-1))
                else:
                    return Optional(self)
            if minElements:
                if minElements == 1:
                    ret = self + makeOptionalList(optElements)
                else:
                    ret = And([self]*minElements) + makeOptionalList(optElements)
            else:
                ret = makeOptionalList(optElements)
        else:
            if minElements == 1:
                ret = self
            else:
                ret = And([self]*minElements)
        return ret
def __rmul__(self, other):
return self.__mul__(other)
def __or__(self, other ):
"""Implementation of | operator - returns C{MatchFirst}"""
if isinstance( other, basestring ):
other = Literal( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return MatchFirst( [ self, other ] )
def __ror__(self, other ):
"""Implementation of | operator when left operand is not a C{ParserElement}"""
if isinstance( other, basestring ):
other = Literal( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return other | self
def __xor__(self, other ):
"""Implementation of ^ operator - returns C{Or}"""
if isinstance( other, basestring ):
other = Literal( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return Or( [ self, other ] )
def __rxor__(self, other ):
"""Implementation of ^ operator when left operand is not a C{ParserElement}"""
if isinstance( other, basestring ):
other = Literal( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return other ^ self
def __and__(self, other ):
"""Implementation of & operator - returns C{Each}"""
if isinstance( other, basestring ):
other = Literal( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return Each( [ self, other ] )
def __rand__(self, other ):
"""Implementation of & operator when left operand is not a C{ParserElement}"""
if isinstance( other, basestring ):
other = Literal( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return other & self
def __invert__( self ):
"""Implementation of ~ operator - returns C{NotAny}"""
return NotAny( self )
def __call__(self, name):
"""Shortcut for C{setResultsName}, with C{listAllMatches=default}::
userdata = Word(alphas).setResultsName("name") + Word(nums+"-").setResultsName("socsecno")
could be written as::
userdata = Word(alphas)("name") + Word(nums+"-")("socsecno")
"""
return self.setResultsName(name)
def suppress( self ):
"""Suppresses the output of this C{ParserElement}; useful to keep punctuation from
cluttering up returned output.
"""
return Suppress( self )
def leaveWhitespace( self ):
"""Disables the skipping of whitespace before matching the characters in the
C{ParserElement}'s defined pattern. This is normally only used internally by
the pyparsing module, but may be needed in some whitespace-sensitive grammars.
"""
self.skipWhitespace = False
return self
def setWhitespaceChars( self, chars ):
"""Overrides the default whitespace chars
"""
self.skipWhitespace = True
self.whiteChars = chars
self.copyDefaultWhiteChars = False
return self
def parseWithTabs( self ):
"""Overrides default behavior to expand <TAB>s to spaces before parsing the input string.
Must be called before C{parseString} when the input grammar contains elements that
match <TAB> characters."""
self.keepTabs = True
return self
def ignore( self, other ):
"""Define expression to be ignored (e.g., comments) while doing pattern
matching; may be called repeatedly, to define multiple comment or other
ignorable patterns.
"""
if isinstance( other, Suppress ):
if other not in self.ignoreExprs:
self.ignoreExprs.append( other.copy() )
else:
self.ignoreExprs.append( Suppress( other.copy() ) )
return self
def setDebugActions( self, startAction, successAction, exceptionAction ):
"""Enable display of debugging messages while doing pattern matching."""
self.debugActions = (startAction or _defaultStartDebugAction,
successAction or _defaultSuccessDebugAction,
exceptionAction or _defaultExceptionDebugAction)
self.debug = True
return self
def setDebug( self, flag=True ):
"""Enable display of debugging messages while doing pattern matching.
Set C{flag} to True to enable, False to disable."""
if flag:
self.setDebugActions( _defaultStartDebugAction, _defaultSuccessDebugAction, _defaultExceptionDebugAction )
else:
self.debug = False
return self
def __str__( self ):
return self.name
def __repr__( self ):
return _ustr(self)
def streamline( self ):
self.streamlined = True
self.strRepr = None
return self
def checkRecursion( self, parseElementList ):
pass
def validate( self, validateTrace=[] ):
"""Check defined expressions for valid structure, check for infinite recursive definitions."""
self.checkRecursion( [] )
def parseFile( self, file_or_filename, parseAll=False ):
"""Execute the parse expression on the given file or filename.
If a filename is specified (instead of a file object),
the entire file is opened, read, and closed before parsing.
"""
try:
file_contents = file_or_filename.read()
except AttributeError:
f = open(file_or_filename, "rb")
file_contents = f.read()
f.close()
try:
return self.parseString(file_contents, parseAll)
except ParseBaseException:
# catch and re-raise exception from here, clears out pyparsing internal stack trace
exc = sys.exc_info()[1]
raise exc
def getException(self):
return ParseException("",0,self.errmsg,self)
def __getattr__(self,aname):
if aname == "myException":
self.myException = ret = self.getException();
return ret;
else:
raise AttributeError("no such attribute " + aname)
    def __eq__(self,other):
        # Two parser elements are equal if identical or if their full state matches.
        if isinstance(other, ParserElement):
            return self is other or self.__dict__ == other.__dict__
        elif isinstance(other, basestring):
            # Comparing against a string asks: "does this expression fully match it?"
            try:
                self.parseString(_ustr(other), parseAll=True)
                return True
            except ParseBaseException:
                return False
        else:
            # NOTE(review): this compares the super() proxy object itself with
            # `other` (default object equality), not a superclass __eq__ result;
            # it is effectively always False -- confirm whether this is intended.
            return super(ParserElement,self)==other
def __ne__(self,other):
return not (self == other)
def __hash__(self):
return hash(id(self))
def __req__(self,other):
return self == other
def __rne__(self,other):
return not (self == other)
class Token(ParserElement):
    """Abstract C{ParserElement} subclass, for defining atomic matching patterns."""
    def __init__( self ):
        # Tokens never accumulate sub-results, so savelist is always off.
        super(Token,self).__init__( savelist=False )
    def setName(self, name):
        """Assign a name to this token and refresh its derived error message."""
        s = super(Token,self).setName(name)
        self.errmsg = "Expected " + self.name
        return s
class Empty(Token):
    """An empty token; matches at any position without consuming input."""
    def __init__( self ):
        super(Empty,self).__init__()
        self.name = "Empty"
        # Zero-width by definition: always succeeds, never indexes past the end.
        self.mayReturnEmpty = True
        self.mayIndexError = False
class NoMatch(Token):
    """A token that will never match."""
    def __init__( self ):
        super(NoMatch,self).__init__()
        self.name = "NoMatch"
        self.mayReturnEmpty = True
        self.mayIndexError = False
        self.errmsg = "Unmatchable token"
    def parseImpl( self, instring, loc, doActions=True ):
        # Unconditional failure: reuse the cached exception with fresh context.
        exc = self.myException
        exc.loc = loc
        exc.pstr = instring
        raise exc
class Literal(Token):
    """Token to exactly match a specified string."""
    def __init__( self, matchString ):
        super(Literal,self).__init__()
        self.match = matchString
        self.matchLen = len(matchString)
        try:
            # cached for the single-character fast path in parseImpl
            self.firstMatchChar = matchString[0]
        except IndexError:
            # Empty match string: degrade this instance into an Empty token
            # (note the in-place __class__ reassignment).
            warnings.warn("null string passed to Literal; use Empty() instead",
                            SyntaxWarning, stacklevel=2)
            self.__class__ = Empty
        self.name = '"%s"' % _ustr(self.match)
        self.errmsg = "Expected " + self.name
        self.mayReturnEmpty = False
        #self.myException.msg = self.errmsg
        self.mayIndexError = False
    # Performance tuning: this routine gets called a *lot*
    # if this is a single character match string and the first character matches,
    # short-circuit as quickly as possible, and avoid calling startswith
    #~ @profile
    def parseImpl( self, instring, loc, doActions=True ):
        # First-char comparison is the cheap pre-filter; startswith only runs
        # for multi-character literals.
        if (instring[loc] == self.firstMatchChar and
            (self.matchLen==1 or instring.startswith(self.match,loc)) ):
            return loc+self.matchLen, self.match
        #~ raise ParseException( instring, loc, self.errmsg )
        # Reuse the cached exception object (hot path) rather than building one.
        exc = self.myException
        exc.loc = loc
        exc.pstr = instring
        raise exc
_L = Literal  # short internal alias for Literal used elsewhere in the module
class Keyword(Token):
    """Token to exactly match a specified string as a keyword, that is, it must be
    immediately followed by a non-keyword character. Compare with C{Literal}::
    Literal("if") will match the leading 'if' in 'ifAndOnlyIf'.
    Keyword("if") will not; it will only match the leading 'if in 'if x=1', or 'if(y==2)'
    Accepts two optional constructor arguments in addition to the keyword string:
    C{identChars} is a string of characters that would be valid identifier characters,
    defaulting to all alphanumerics + "_" and "$"; C{caseless} allows case-insensitive
    matching, default is False.
    """
    DEFAULT_KEYWORD_CHARS = alphanums+"_$"
    def __init__( self, matchString, identChars=DEFAULT_KEYWORD_CHARS, caseless=False ):
        super(Keyword,self).__init__()
        self.match = matchString
        self.matchLen = len(matchString)
        try:
            # cached first character for the fast pre-filter in parseImpl
            self.firstMatchChar = matchString[0]
        except IndexError:
            # NOTE(review): unlike Literal, the empty-string case only warns and
            # leaves firstMatchChar unset -- parseImpl would then raise
            # AttributeError; confirm this is acceptable.
            warnings.warn("null string passed to Keyword; use Empty() instead",
                            SyntaxWarning, stacklevel=2)
        self.name = '"%s"' % self.match
        self.errmsg = "Expected " + self.name
        self.mayReturnEmpty = False
        #self.myException.msg = self.errmsg
        self.mayIndexError = False
        self.caseless = caseless
        if caseless:
            # normalize both the keyword and the identifier charset to upper case
            self.caselessmatch = matchString.upper()
            identChars = identChars.upper()
        # dict for O(1) membership tests
        self.identChars = _str2dict(identChars)
    def parseImpl( self, instring, loc, doActions=True ):
        # A keyword matches only when the text matches AND both neighbors
        # (the char before loc and the char after the match) are non-identifier
        # characters or string boundaries.
        if self.caseless:
            if ( (instring[ loc:loc+self.matchLen ].upper() == self.caselessmatch) and
                 (loc >= len(instring)-self.matchLen or instring[loc+self.matchLen].upper() not in self.identChars) and
                 (loc == 0 or instring[loc-1].upper() not in self.identChars) ):
                return loc+self.matchLen, self.match
        else:
            if (instring[loc] == self.firstMatchChar and
                (self.matchLen==1 or instring.startswith(self.match,loc)) and
                (loc >= len(instring)-self.matchLen or instring[loc+self.matchLen] not in self.identChars) and
                (loc == 0 or instring[loc-1] not in self.identChars) ):
                return loc+self.matchLen, self.match
        #~ raise ParseException( instring, loc, self.errmsg )
        # cached-exception hot path, as in Literal.parseImpl
        exc = self.myException
        exc.loc = loc
        exc.pstr = instring
        raise exc
    def copy(self):
        # Copies revert to the class-level default identifier charset.
        c = super(Keyword,self).copy()
        c.identChars = Keyword.DEFAULT_KEYWORD_CHARS
        return c
    def setDefaultKeywordChars( chars ):
        """Overrides the default Keyword chars
        """
        Keyword.DEFAULT_KEYWORD_CHARS = chars
    setDefaultKeywordChars = staticmethod(setDefaultKeywordChars)
class CaselessLiteral(Literal):
    """Token to match a specified string, ignoring case of letters.
    Note: the matched results will always be in the case of the given
    match string, NOT the case of the input text.
    """
    def __init__( self, matchString ):
        # Compare against the upper-cased form, but report the original spelling.
        super(CaselessLiteral,self).__init__( matchString.upper() )
        self.returnString = matchString
        self.name = "'%s'" % self.returnString
        self.errmsg = "Expected " + self.name
    def parseImpl( self, instring, loc, doActions=True ):
        # Upper-case the candidate slice once, then compare with the stored match.
        candidate = instring[ loc:loc+self.matchLen ].upper()
        if candidate == self.match:
            return loc+self.matchLen, self.returnString
        exc = self.myException
        exc.loc = loc
        exc.pstr = instring
        raise exc
class CaselessKeyword(Keyword):
    """Case-insensitive variant of C{Keyword}; reports the match string as given."""
    def __init__( self, matchString, identChars=Keyword.DEFAULT_KEYWORD_CHARS ):
        super(CaselessKeyword,self).__init__( matchString, identChars, caseless=True )
    def parseImpl( self, instring, loc, doActions=True ):
        # NOTE(review): unlike the caseless branch of Keyword.parseImpl, this
        # does not verify that the character *before* loc is a non-identifier
        # character -- confirm whether the missing left-boundary check is intended.
        if ( (instring[ loc:loc+self.matchLen ].upper() == self.caselessmatch) and
             (loc >= len(instring)-self.matchLen or instring[loc+self.matchLen].upper() not in self.identChars) ):
            return loc+self.matchLen, self.match
        #~ raise ParseException( instring, loc, self.errmsg )
        # cached-exception hot path
        exc = self.myException
        exc.loc = loc
        exc.pstr = instring
        raise exc
class Word(Token):
    """Token for matching words composed of allowed character sets.
    Defined with string containing all allowed initial characters,
    an optional string containing allowed body characters (if omitted,
    defaults to the initial character set), and an optional minimum,
    maximum, and/or exact length. The default value for C{min} is 1 (a
    minimum value < 1 is not valid); the default values for C{max} and C{exact}
    are 0, meaning no maximum or exact length restriction.
    """
    def __init__( self, initChars, bodyChars=None, min=1, max=0, exact=0, asKeyword=False ):
        super(Word,self).__init__()
        self.initCharsOrig = initChars
        self.initChars = _str2dict(initChars)
        if bodyChars :
            self.bodyCharsOrig = bodyChars
            self.bodyChars = _str2dict(bodyChars)
        else:
            # body charset defaults to the initial charset
            self.bodyCharsOrig = initChars
            self.bodyChars = _str2dict(initChars)
        self.maxSpecified = max > 0
        if min < 1:
            raise ValueError("cannot specify a minimum length < 1; use Optional(Word()) if zero-length word is permitted")
        self.minLen = min
        if max > 0:
            self.maxLen = max
        else:
            self.maxLen = _MAX_INT
        if exact > 0:
            # exact length overrides both min and max
            self.maxLen = exact
            self.minLen = exact
        self.name = _ustr(self)
        self.errmsg = "Expected " + self.name
        self.mayIndexError = False
        self.asKeyword = asKeyword
        # Precompile a regex fast path when no length limits are given and no
        # space characters are involved.
        if ' ' not in self.initCharsOrig+self.bodyCharsOrig and (min==1 and max==0 and exact==0):
            if self.bodyCharsOrig == self.initCharsOrig:
                self.reString = "[%s]+" % _escapeRegexRangeChars(self.initCharsOrig)
            elif len(self.bodyCharsOrig) == 1:
                self.reString = "%s[%s]*" % \
                                      (re.escape(self.initCharsOrig),
                                      _escapeRegexRangeChars(self.bodyCharsOrig),)
            else:
                self.reString = "[%s][%s]*" % \
                                      (_escapeRegexRangeChars(self.initCharsOrig),
                                      _escapeRegexRangeChars(self.bodyCharsOrig),)
            if self.asKeyword:
                self.reString = r"\b"+self.reString+r"\b"
            try:
                self.re = re.compile( self.reString )
            except Exception:
                # FIX: was a bare "except:", which would also swallow
                # KeyboardInterrupt/SystemExit; fall back to the char-scan path.
                self.re = None
    def parseImpl( self, instring, loc, doActions=True ):
        # Fast path: use the precompiled regex when available.
        if self.re:
            result = self.re.match(instring,loc)
            if not result:
                exc = self.myException
                exc.loc = loc
                exc.pstr = instring
                raise exc
            loc = result.end()
            return loc,result.group()
        if not(instring[ loc ] in self.initChars):
            exc = self.myException
            exc.loc = loc
            exc.pstr = instring
            raise exc
        start = loc
        loc += 1
        instrlen = len(instring)
        bodychars = self.bodyChars
        maxloc = start + self.maxLen
        maxloc = min( maxloc, instrlen )
        while loc < maxloc and instring[loc] in bodychars:
            loc += 1
        # Validate the accumulated run against min/max/keyword constraints.
        throwException = False
        if loc - start < self.minLen:
            throwException = True
        if self.maxSpecified and loc < instrlen and instring[loc] in bodychars:
            # more body chars follow a max-limited run -> not a clean word boundary
            throwException = True
        if self.asKeyword:
            if (start>0 and instring[start-1] in bodychars) or (loc<instrlen and instring[loc] in bodychars):
                throwException = True
        if throwException:
            exc = self.myException
            exc.loc = loc
            exc.pstr = instring
            raise exc
        return loc, instring[start:loc]
    def __str__( self ):
        try:
            return super(Word,self).__str__()
        except Exception:
            # FIX: was a bare "except:"; a missing .name falls through to the
            # computed representation below.
            pass
        if self.strRepr is None:
            def charsAsStr(s):
                # abbreviate long character sets for readability
                if len(s)>4:
                    return s[:4]+"..."
                else:
                    return s
            if ( self.initCharsOrig != self.bodyCharsOrig ):
                self.strRepr = "W:(%s,%s)" % ( charsAsStr(self.initCharsOrig), charsAsStr(self.bodyCharsOrig) )
            else:
                self.strRepr = "W:(%s)" % charsAsStr(self.initCharsOrig)
        return self.strRepr
class Regex(Token):
    """Token for matching strings that match a given regular expression.
    Defined with string specifying the regular expression in a form recognized by the inbuilt Python re module.
    """
    # used to recognize already-compiled pattern objects passed to __init__
    compiledREtype = type(re.compile("[A-Z]"))
    def __init__( self, pattern, flags=0):
        """The parameters pattern and flags are passed to the re.compile() function as-is. See the Python re module for an explanation of the acceptable patterns and flags."""
        super(Regex,self).__init__()
        if isinstance(pattern, basestring):
            if len(pattern) == 0:
                warnings.warn("null string passed to Regex; use Empty() instead",
                        SyntaxWarning, stacklevel=2)
            self.pattern = pattern
            self.flags = flags
            try:
                self.re = re.compile(self.pattern, self.flags)
                self.reString = self.pattern
            except sre_constants.error:
                warnings.warn("invalid pattern (%s) passed to Regex" % pattern,
                    SyntaxWarning, stacklevel=2)
                raise
        elif isinstance(pattern, Regex.compiledREtype):
            # pre-compiled pattern: adopt it directly (flags are informational only)
            self.re = pattern
            self.pattern = \
                self.reString = str(pattern)
            self.flags = flags
        else:
            raise ValueError("Regex may only be constructed with a string or a compiled RE object")
        self.name = _ustr(self)
        self.errmsg = "Expected " + self.name
        self.mayIndexError = False
        self.mayReturnEmpty = True
    def parseImpl( self, instring, loc, doActions=True ):
        result = self.re.match(instring,loc)
        if not result:
            exc = self.myException
            exc.loc = loc
            exc.pstr = instring
            raise exc
        loc = result.end()
        # expose named groups as named results on the returned ParseResults
        d = result.groupdict()
        ret = ParseResults(result.group())
        if d:
            for k in d:
                ret[k] = d[k]
        return loc,ret
    def __str__( self ):
        try:
            return super(Regex,self).__str__()
        except Exception:
            # FIX: was a bare "except:"; a missing .name falls through to the
            # computed representation below.
            pass
        if self.strRepr is None:
            self.strRepr = "Re:(%s)" % repr(self.pattern)
        return self.strRepr
class QuotedString(Token):
    """Token for matching strings that are delimited by quoting characters.
    """
    def __init__( self, quoteChar, escChar=None, escQuote=None, multiline=False, unquoteResults=True, endQuoteChar=None):
        """
        Defined with the following parameters:
        - quoteChar - string of one or more characters defining the quote delimiting string
        - escChar - character to escape quotes, typically backslash (default=None)
        - escQuote - special quote sequence to escape an embedded quote string (such as SQL's "" to escape an embedded ") (default=None)
        - multiline - boolean indicating whether quotes can span multiple lines (default=False)
        - unquoteResults - boolean indicating whether the matched text should be unquoted (default=True)
        - endQuoteChar - string of one or more characters defining the end of the quote delimited string (default=None => same as quoteChar)
        """
        super(QuotedString,self).__init__()
        # remove white space from quote chars - wont work anyway
        quoteChar = quoteChar.strip()
        if len(quoteChar) == 0:
            warnings.warn("quoteChar cannot be the empty string",SyntaxWarning,stacklevel=2)
            raise SyntaxError()
        if endQuoteChar is None:
            endQuoteChar = quoteChar
        else:
            endQuoteChar = endQuoteChar.strip()
            if len(endQuoteChar) == 0:
                warnings.warn("endQuoteChar cannot be the empty string",SyntaxWarning,stacklevel=2)
                raise SyntaxError()
        self.quoteChar = quoteChar
        self.quoteCharLen = len(quoteChar)
        self.firstQuoteChar = quoteChar[0]
        self.endQuoteChar = endQuoteChar
        self.endQuoteCharLen = len(endQuoteChar)
        self.escChar = escChar
        self.escQuote = escQuote
        self.unquoteResults = unquoteResults
        # Build a single regex matching the whole quoted string; the body
        # alternation excludes the end quote (and, single-line, newlines).
        if multiline:
            self.flags = re.MULTILINE | re.DOTALL
            self.pattern = r'%s(?:[^%s%s]' % \
                ( re.escape(self.quoteChar),
                  _escapeRegexRangeChars(self.endQuoteChar[0]),
                  (escChar is not None and _escapeRegexRangeChars(escChar) or '') )
        else:
            self.flags = 0
            self.pattern = r'%s(?:[^%s\n\r%s]' % \
                ( re.escape(self.quoteChar),
                  _escapeRegexRangeChars(self.endQuoteChar[0]),
                  (escChar is not None and _escapeRegexRangeChars(escChar) or '') )
        if len(self.endQuoteChar) > 1:
            # allow prefixes of a multi-char end quote inside the body, as long
            # as they are not followed by the rest of the end quote
            self.pattern += (
                '|(?:' + ')|(?:'.join(["%s[^%s]" % (re.escape(self.endQuoteChar[:i]),
                                               _escapeRegexRangeChars(self.endQuoteChar[i]))
                                    for i in range(len(self.endQuoteChar)-1,0,-1)]) + ')'
                )
        if escQuote:
            self.pattern += (r'|(?:%s)' % re.escape(escQuote))
        if escChar:
            self.pattern += (r'|(?:%s.)' % re.escape(escChar))
            self.escCharReplacePattern = re.escape(self.escChar)+"(.)"
        self.pattern += (r')*%s' % re.escape(self.endQuoteChar))
        try:
            self.re = re.compile(self.pattern, self.flags)
            self.reString = self.pattern
        except sre_constants.error:
            warnings.warn("invalid pattern (%s) passed to Regex" % self.pattern,
                SyntaxWarning, stacklevel=2)
            raise
        self.name = _ustr(self)
        self.errmsg = "Expected " + self.name
        self.mayIndexError = False
        self.mayReturnEmpty = True
    def parseImpl( self, instring, loc, doActions=True ):
        # cheap first-char pre-filter before running the regex
        result = instring[loc] == self.firstQuoteChar and self.re.match(instring,loc) or None
        if not result:
            exc = self.myException
            exc.loc = loc
            exc.pstr = instring
            raise exc
        loc = result.end()
        ret = result.group()
        if self.unquoteResults:
            # strip off quotes
            ret = ret[self.quoteCharLen:-self.endQuoteCharLen]
            if isinstance(ret,basestring):
                # replace escaped characters
                if self.escChar:
                    # FIX: use a raw string for the \g<1> group reference
                    # (non-raw "\g" is an invalid string escape in Python 3)
                    ret = re.sub(self.escCharReplacePattern, r"\g<1>", ret)
                # replace escaped quotes
                if self.escQuote:
                    ret = ret.replace(self.escQuote, self.endQuoteChar)
        return loc, ret
    def __str__( self ):
        try:
            return super(QuotedString,self).__str__()
        except Exception:
            # FIX: was a bare "except:"; a missing .name falls through to the
            # computed representation below.
            pass
        if self.strRepr is None:
            self.strRepr = "quoted string, starting with %s ending with %s" % (self.quoteChar, self.endQuoteChar)
        return self.strRepr
class CharsNotIn(Token):
    """Token for matching words composed of characters *not* in a given set.
    Defined with string containing all disallowed characters, and an optional
    minimum, maximum, and/or exact length. The default value for C{min} is 1 (a
    minimum value < 1 is not valid); the default values for C{max} and C{exact}
    are 0, meaning no maximum or exact length restriction.
    """
    def __init__( self, notChars, min=1, max=0, exact=0 ):
        super(CharsNotIn,self).__init__()
        self.skipWhitespace = False
        self.notChars = notChars
        if min < 1:
            raise ValueError("cannot specify a minimum length < 1; use Optional(CharsNotIn()) if zero-length char group is permitted")
        self.minLen = min
        if max > 0:
            self.maxLen = max
        else:
            self.maxLen = _MAX_INT
        if exact > 0:
            # exact length overrides both min and max
            self.maxLen = exact
            self.minLen = exact
        self.name = _ustr(self)
        self.errmsg = "Expected " + self.name
        self.mayReturnEmpty = ( self.minLen == 0 )
        self.mayIndexError = False
    def parseImpl( self, instring, loc, doActions=True ):
        if instring[loc] in self.notChars:
            exc = self.myException
            exc.loc = loc
            exc.pstr = instring
            raise exc
        start = loc
        loc += 1
        notchars = self.notChars
        maxlen = min( start+self.maxLen, len(instring) )
        # consume characters until a disallowed one or the length cap is hit
        while loc < maxlen and \
                (instring[loc] not in notchars):
            loc += 1
        if loc - start < self.minLen:
            exc = self.myException
            exc.loc = loc
            exc.pstr = instring
            raise exc
        return loc, instring[start:loc]
    def __str__( self ):
        try:
            return super(CharsNotIn, self).__str__()
        except Exception:
            # FIX: was a bare "except:"; a missing .name falls through to the
            # computed representation below.
            pass
        if self.strRepr is None:
            if len(self.notChars) > 4:
                self.strRepr = "!W:(%s...)" % self.notChars[:4]
            else:
                self.strRepr = "!W:(%s)" % self.notChars
        return self.strRepr
class White(Token):
    """Special matching class for matching whitespace. Normally, whitespace is ignored
    by pyparsing grammars. This class is included when some whitespace structures
    are significant. Define with a string containing the whitespace characters to be
    matched; default is C{" \\t\\r\\n"}. Also takes optional C{min}, C{max}, and C{exact} arguments,
    as defined for the C{Word} class."""
    # display names for the printable representation of each whitespace char
    whiteStrs = {
        " " : "<SPC>",
        "\t": "<TAB>",
        "\n": "<LF>",
        "\r": "<CR>",
        "\f": "<FF>",
        }
    def __init__(self, ws=" \t\r\n", min=1, max=0, exact=0):
        super(White,self).__init__()
        self.matchWhite = ws
        # whatever whitespace we intend to match must not also be auto-skipped
        self.setWhitespaceChars( "".join([c for c in self.whiteChars if c not in self.matchWhite]) )
        self.name = ("".join([White.whiteStrs[c] for c in self.matchWhite]))
        self.mayReturnEmpty = True
        self.errmsg = "Expected " + self.name
        self.minLen = min
        if max > 0:
            self.maxLen = max
        else:
            self.maxLen = _MAX_INT
        if exact > 0:
            # exact length overrides both min and max
            self.maxLen = exact
            self.minLen = exact
    def parseImpl( self, instring, loc, doActions=True ):
        if instring[loc] not in self.matchWhite:
            exc = self.myException
            exc.loc = loc
            exc.pstr = instring
            raise exc
        start = loc
        loc += 1
        # consume whitespace chars up to the allowed maximum (or end of input)
        maxloc = min( start + self.maxLen, len(instring) )
        while loc < maxloc and instring[loc] in self.matchWhite:
            loc += 1
        if loc - start < self.minLen:
            exc = self.myException
            exc.loc = loc
            exc.pstr = instring
            raise exc
        return loc, instring[start:loc]
class _PositionToken(Token):
    """Common base for zero-width tokens that assert a position in the input."""
    def __init__( self ):
        super(_PositionToken,self).__init__()
        self.name = self.__class__.__name__
        # position assertions consume nothing and never index past the end
        self.mayReturnEmpty = True
        self.mayIndexError = False
class GoToColumn(_PositionToken):
    """Token to advance to a specific column of input text; useful for tabular report scraping."""
    def __init__( self, colno ):
        super(GoToColumn,self).__init__()
        self.col = colno  # target column (as reported by the module's col() helper)
    def preParse( self, instring, loc ):
        # Advance over ignorables and whitespace until reaching the target
        # column or a non-space character, whichever comes first.
        if col(loc,instring) != self.col:
            instrlen = len(instring)
            if self.ignoreExprs:
                loc = self._skipIgnorables( instring, loc )
            while loc < instrlen and instring[loc].isspace() and col( loc, instring ) != self.col :
                loc += 1
        return loc
    def parseImpl( self, instring, loc, doActions=True ):
        thiscol = col( loc, instring )
        # Already past the target column -> cannot back up, so fail.
        if thiscol > self.col:
            raise ParseException( instring, loc, "Text not in expected column", self )
        # Consume (and return) the text between here and the target column.
        newloc = loc + self.col - thiscol
        ret = instring[ loc: newloc ]
        return newloc, ret
class LineStart(_PositionToken):
    """Matches if current position is at the beginning of a line within the parse string"""
    def __init__( self ):
        super(LineStart,self).__init__()
        # newline must not be auto-skipped, or line starts would be skipped over
        self.setWhitespaceChars( ParserElement.DEFAULT_WHITE_CHARS.replace("\n","") )
        self.errmsg = "Expected start of line"
        #self.myException.msg = self.errmsg
    def preParse( self, instring, loc ):
        preloc = super(LineStart,self).preParse(instring,loc)
        # step over a newline so the match position is the first char of the line
        if instring[preloc] == "\n":
            loc += 1
        return loc
    def parseImpl( self, instring, loc, doActions=True ):
        # Succeed at the very start of the string, at a position reachable by
        # whitespace-only skipping from the start, or just after a newline.
        if not( loc==0 or
            (loc == self.preParse( instring, 0 )) or
            (instring[loc-1] == "\n") ): #col(loc, instring) != 1:
            #~ raise ParseException( instring, loc, "Expected start of line" )
            exc = self.myException
            exc.loc = loc
            exc.pstr = instring
            raise exc
        # zero-width match: position unchanged, no tokens produced
        return loc, []
class LineEnd(_PositionToken):
    """Matches if current position is at the end of a line within the parse string"""
    def __init__( self ):
        super(LineEnd,self).__init__()
        # newline must not be auto-skipped, or line ends would be skipped over
        self.setWhitespaceChars( ParserElement.DEFAULT_WHITE_CHARS.replace("\n","") )
        self.errmsg = "Expected end of line"
        #self.myException.msg = self.errmsg
    def parseImpl( self, instring, loc, doActions=True ):
        if loc<len(instring):
            if instring[loc] == "\n":
                # consume the newline and return it as the matched token
                return loc+1, "\n"
            else:
                #~ raise ParseException( instring, loc, "Expected end of line" )
                exc = self.myException
                exc.loc = loc
                exc.pstr = instring
                raise exc
        elif loc == len(instring):
            # end of input also counts as end of line; note the returned
            # position is one past the end (consistent with StringEnd)
            return loc+1, []
        else:
            # loc beyond the end of the string (can happen after the loc+1
            # above): fail with the cached exception
            exc = self.myException
            exc.loc = loc
            exc.pstr = instring
            raise exc
class StringStart(_PositionToken):
    """Matches if current position is at the beginning of the parse string"""
    def __init__( self ):
        super(StringStart,self).__init__()
        self.errmsg = "Expected start of text"
    def parseImpl( self, instring, loc, doActions=True ):
        # Accept position 0 outright; otherwise accept only if everything
        # before the current position is skippable whitespace/ignorables.
        if loc != 0 and loc != self.preParse( instring, 0 ):
            exc = self.myException
            exc.loc = loc
            exc.pstr = instring
            raise exc
        # zero-width match
        return loc, []
class StringEnd(_PositionToken):
    """Matches if current position is at the end of the parse string"""
    def __init__( self ):
        super(StringEnd,self).__init__()
        self.errmsg = "Expected end of text"
        #self.myException.msg = self.errmsg
    def parseImpl( self, instring, loc, doActions=True ):
        if loc < len(instring):
            #~ raise ParseException( instring, loc, "Expected end of text" )
            exc = self.myException
            exc.loc = loc
            exc.pstr = instring
            raise exc
        elif loc == len(instring):
            # note: deliberately returns a position one past the end
            # (consistent with LineEnd's end-of-input behavior)
            return loc+1, []
        elif loc > len(instring):
            # already past the end (e.g. after a previous loc+1): still a match
            return loc, []
        else:
            # unreachable given the three cases above; kept defensively
            exc = self.myException
            exc.loc = loc
            exc.pstr = instring
            raise exc
class WordStart(_PositionToken):
    """Matches if the current position is at the beginning of a Word, and
    is not preceded by any character in a given set of wordChars
    (default=C{printables}). To emulate the C{\\b} behavior of regular expressions,
    use C{WordStart(alphanums)}. C{WordStart} will also match at the beginning of
    the string being parsed, or at the beginning of a line.
    """
    def __init__(self, wordChars = printables):
        super(WordStart,self).__init__()
        self.wordChars = _str2dict(wordChars)
        self.errmsg = "Not at the start of a word"
    def parseImpl(self, instring, loc, doActions=True ):
        # Position 0 always qualifies as a word start; otherwise the previous
        # char must be a non-word char and the current char a word char.
        # (order of the two tests matters: short-circuiting avoids indexing
        # instring[loc] when the previous char already disqualifies the match)
        if loc != 0:
            if instring[loc-1] in self.wordChars or instring[loc] not in self.wordChars:
                exc = self.myException
                exc.loc = loc
                exc.pstr = instring
                raise exc
        # zero-width match
        return loc, []
class WordEnd(_PositionToken):
    """Matches if the current position is at the end of a Word, and
    is not followed by any character in a given set of wordChars
    (default=C{printables}). To emulate the C{\\b} behavior of regular expressions,
    use C{WordEnd(alphanums)}. C{WordEnd} will also match at the end of
    the string being parsed, or at the end of a line.
    """
    def __init__(self, wordChars = printables):
        super(WordEnd,self).__init__()
        self.wordChars = _str2dict(wordChars)
        # whitespace after the word must remain visible for the boundary test
        self.skipWhitespace = False
        self.errmsg = "Not at the end of a word"
    def parseImpl(self, instring, loc, doActions=True ):
        instrlen = len(instring)
        # End of input always qualifies; otherwise the current char must be a
        # non-word char and the preceding char a word char.
        if instrlen>0 and loc<instrlen:
            if instring[loc] in self.wordChars or instring[loc-1] not in self.wordChars:
                exc = self.myException
                exc.loc = loc
                exc.pstr = instring
                raise exc
        # zero-width match
        return loc, []
class ParseExpression(ParserElement):
    """Abstract subclass of ParserElement, for combining and post-processing parsed tokens."""
    def __init__( self, exprs, savelist = False ):
        super(ParseExpression,self).__init__(savelist)
        # Normalize the constructor argument into a list of sub-expressions:
        # a list is taken as-is, a string becomes a single Literal, and any
        # other iterable is converted (falling back to a one-element list).
        if isinstance( exprs, list ):
            self.exprs = exprs
        elif isinstance( exprs, basestring ):
            self.exprs = [ Literal( exprs ) ]
        else:
            try:
                self.exprs = list( exprs )
            except TypeError:
                self.exprs = [ exprs ]
        self.callPreparse = False
    def __getitem__( self, i ):
        # indexing a compound expression returns its i-th sub-expression
        return self.exprs[i]
    def append( self, other ):
        # adding a sub-expression invalidates any cached string representation
        self.exprs.append( other )
        self.strRepr = None
        return self
    def leaveWhitespace( self ):
        """Extends leaveWhitespace defined in base class, and also invokes leaveWhitespace on
        all contained expressions."""
        self.skipWhitespace = False
        # copy sub-expressions first so shared elements elsewhere are unaffected
        self.exprs = [ e.copy() for e in self.exprs ]
        for e in self.exprs:
            e.leaveWhitespace()
        return self
    def ignore( self, other ):
        # NOTE(review): both branches execute identical code after the inner
        # guard; the only difference is the duplicate-suppression check for
        # Suppress instances (mirrors ParserElement.ignore).
        if isinstance( other, Suppress ):
            if other not in self.ignoreExprs:
                super( ParseExpression, self).ignore( other )
                for e in self.exprs:
                    # propagate the just-registered ignorable to all children
                    e.ignore( self.ignoreExprs[-1] )
        else:
            super( ParseExpression, self).ignore( other )
            for e in self.exprs:
                e.ignore( self.ignoreExprs[-1] )
        return self
    def __str__( self ):
        try:
            return super(ParseExpression,self).__str__()
        except:
            pass
        if self.strRepr is None:
            self.strRepr = "%s:(%s)" % ( self.__class__.__name__, _ustr(self.exprs) )
        return self.strRepr
    def streamline( self ):
        super(ParseExpression,self).streamline()
        for e in self.exprs:
            e.streamline()
        # collapse nested And's of the form And( And( And( a,b), c), d) to And( a,b,c,d )
        # but only if there are no parse actions or resultsNames on the nested And's
        # (likewise for Or's and MatchFirst's)
        if ( len(self.exprs) == 2 ):
            # left child of the same type, with no actions/names/debug: inline it
            other = self.exprs[0]
            if ( isinstance( other, self.__class__ ) and
                  not(other.parseAction) and
                  other.resultsName is None and
                  not other.debug ):
                self.exprs = other.exprs[:] + [ self.exprs[1] ]
                self.strRepr = None
                self.mayReturnEmpty |= other.mayReturnEmpty
                self.mayIndexError  |= other.mayIndexError
            # same inlining for the right child
            other = self.exprs[-1]
            if ( isinstance( other, self.__class__ ) and
                  not(other.parseAction) and
                  other.resultsName is None and
                  not other.debug ):
                self.exprs = self.exprs[:-1] + other.exprs[:]
                self.strRepr = None
                self.mayReturnEmpty |= other.mayReturnEmpty
                self.mayIndexError  |= other.mayIndexError
        return self
    def setResultsName( self, name, listAllMatches=False ):
        ret = super(ParseExpression,self).setResultsName(name,listAllMatches)
        return ret
    def validate( self, validateTrace=[] ):
        # extend the trace with self and validate all children, then check
        # this node for recursive definitions
        tmp = validateTrace[:]+[self]
        for e in self.exprs:
            e.validate(tmp)
        self.checkRecursion( [] )
class And(ParseExpression):
    """Requires all given C{ParseExpressions} to be found in the given order.
    Expressions may be separated by whitespace.
    May be constructed using the '+' operator.
    """
    class _ErrorStop(Empty):
        # Sentinel element: once seen in an And, later sub-expression failures
        # are raised as non-backtracking ParseSyntaxExceptions.
        def __init__(self, *args, **kwargs):
            # NOTE(review): super(Empty,self) skips Empty.__init__ and calls the
            # grandparent (Token) initializer directly -- confirm intent.
            super(Empty,self).__init__(*args, **kwargs)
            self.leaveWhitespace()
    def __init__( self, exprs, savelist = True ):
        super(And,self).__init__(exprs, savelist)
        # an And can only match empty input if every child can
        self.mayReturnEmpty = True
        for e in self.exprs:
            if not e.mayReturnEmpty:
                self.mayReturnEmpty = False
                break
        # adopt the leading child's whitespace-skipping configuration
        self.setWhitespaceChars( exprs[0].whiteChars )
        self.skipWhitespace = exprs[0].skipWhitespace
        self.callPreparse = True
    def parseImpl( self, instring, loc, doActions=True ):
        # pass False as last arg to _parse for first element, since we already
        # pre-parsed the string as part of our And pre-parsing
        loc, resultlist = self.exprs[0]._parse( instring, loc, doActions, callPreParse=False )
        errorStop = False
        for e in self.exprs[1:]:
            if isinstance(e, And._ErrorStop):
                errorStop = True
                continue
            if errorStop:
                # past the error stop: ordinary parse failures become fatal
                # syntax errors instead of triggering backtracking
                try:
                    loc, exprtokens = e._parse( instring, loc, doActions )
                except ParseSyntaxException:
                    raise
                except ParseBaseException:
                    pe = sys.exc_info()[1]
                    raise ParseSyntaxException(pe)
                except IndexError:
                    raise ParseSyntaxException( ParseException(instring, len(instring), self.errmsg, self) )
            else:
                loc, exprtokens = e._parse( instring, loc, doActions )
            # only merge tokens that carry content or named results
            if exprtokens or exprtokens.keys():
                resultlist += exprtokens
        return loc, resultlist
    def __iadd__(self, other ):
        # expr += "tok" appends in place (strings promoted to Literal)
        if isinstance( other, basestring ):
            other = Literal( other )
        return self.append( other ) #And( [ self, other ] )
    def checkRecursion( self, parseElementList ):
        subRecCheckList = parseElementList[:] + [ self ]
        for e in self.exprs:
            e.checkRecursion( subRecCheckList )
            # once a child must consume input, later children cannot recurse
            # back to this position
            if not e.mayReturnEmpty:
                break
    def __str__( self ):
        if hasattr(self,"name"):
            return self.name
        if self.strRepr is None:
            self.strRepr = "{" + " ".join( [ _ustr(e) for e in self.exprs ] ) + "}"
        return self.strRepr
class Or(ParseExpression):
"""Requires that at least one C{ParseExpression} is found.
If two expressions match, the expression that matches the longest string will be used.
May be constructed using the '^' operator.
"""
def __init__( self, exprs, savelist = False ):
super(Or,self).__init__(exprs, savelist)
self.mayReturnEmpty = False
for e in self.exprs:
if e.mayReturnEmpty:
self.mayReturnEmpty = True
break
def parseImpl( self, instring, loc, doActions=True ):
maxExcLoc = -1
maxMatchLoc = -1
maxException = None
for e in self.exprs:
try:
loc2 = e.tryParse( instring, loc )
except ParseException:
err = sys.exc_info()[1]
if err.loc > maxExcLoc:
maxException = err
maxExcLoc = err.loc
except IndexError:
if len(instring) > maxExcLoc:
maxException = ParseException(instring,len(instring),e.errmsg,self)
maxExcLoc = len(instring)
else:
if loc2 > maxMatchLoc:
maxMatchLoc = loc2
maxMatchExp = e
if maxMatchLoc < 0:
if maxException is not None:
raise maxException
else:
raise ParseException(instring, loc, "no defined alternatives to match", self)
return maxMatchExp._parse( instring, loc, doActions )
def __ixor__(self, other ):
if isinstance( other, basestring ):
other = Literal( other )
return self.append( other ) #Or( [ self, other ] )
def __str__( self ):
if hasattr(self,"name"):
return self.name
if self.strRepr is None:
self.strRepr = "{" + " ^ ".join( [ _ustr(e) for e in self.exprs ] ) + "}"
return self.strRepr
def checkRecursion( self, parseElementList ):
subRecCheckList = parseElementList[:] + [ self ]
for e in self.exprs:
e.checkRecursion( subRecCheckList )
class MatchFirst(ParseExpression):
"""Requires that at least one C{ParseExpression} is found.
If two expressions match, the first one listed is the one that will match.
May be constructed using the '|' operator.
"""
def __init__( self, exprs, savelist = False ):
super(MatchFirst,self).__init__(exprs, savelist)
if exprs:
self.mayReturnEmpty = False
for e in self.exprs:
if e.mayReturnEmpty:
self.mayReturnEmpty = True
break
else:
self.mayReturnEmpty = True
def parseImpl( self, instring, loc, doActions=True ):
maxExcLoc = -1
maxException = None
for e in self.exprs:
try:
ret = e._parse( instring, loc, doActions )
return ret
except ParseException, err:
if err.loc > maxExcLoc:
maxException = err
maxExcLoc = err.loc
except IndexError:
if len(instring) > maxExcLoc:
maxException = ParseException(instring,len(instring),e.errmsg,self)
maxExcLoc = len(instring)
# only got here if no expression matched, raise exception for match that made it the furthest
else:
if maxException is not None:
raise maxException
else:
raise ParseException(instring, loc, "no defined alternatives to match", self)
def __ior__(self, other ):
if isinstance( other, basestring ):
other = Literal( other )
return self.append( other ) #MatchFirst( [ self, other ] )
def __str__( self ):
if hasattr(self,"name"):
return self.name
if self.strRepr is None:
self.strRepr = "{" + " | ".join( [ _ustr(e) for e in self.exprs ] ) + "}"
return self.strRepr
def checkRecursion( self, parseElementList ):
subRecCheckList = parseElementList[:] + [ self ]
for e in self.exprs:
e.checkRecursion( subRecCheckList )
class Each(ParseExpression):
"""Requires all given C{ParseExpressions} to be found, but in any order.
Expressions may be separated by whitespace.
May be constructed using the '&' operator.
"""
def __init__( self, exprs, savelist = True ):
super(Each,self).__init__(exprs, savelist)
self.mayReturnEmpty = True
for e in self.exprs:
if not e.mayReturnEmpty:
self.mayReturnEmpty = False
break
self.skipWhitespace = True
self.initExprGroups = True
def parseImpl( self, instring, loc, doActions=True ):
if self.initExprGroups:
opt1 = [ e.expr for e in self.exprs if isinstance(e,Optional) ]
opt2 = [ e for e in self.exprs if e.mayReturnEmpty and e not in opt1 ]
self.optionals = opt1 + opt2
self.multioptionals = [ e.expr for e in self.exprs if isinstance(e,ZeroOrMore) ]
self.multirequired = [ e.expr for e in self.exprs if isinstance(e,OneOrMore) ]
self.required = [ e for e in self.exprs if not isinstance(e,(Optional,ZeroOrMore,OneOrMore)) ]
self.required += self.multirequired
self.initExprGroups = False
tmpLoc = loc
tmpReqd = self.required[:]
tmpOpt = self.optionals[:]
matchOrder = []
keepMatching = True
while keepMatching:
tmpExprs = tmpReqd + tmpOpt + self.multioptionals + self.multirequired
failed = []
for e in tmpExprs:
try:
tmpLoc = e.tryParse( instring, tmpLoc )
except ParseException:
failed.append(e)
else:
matchOrder.append(e)
if e in tmpReqd:
tmpReqd.remove(e)
elif e in tmpOpt:
tmpOpt.remove(e)
if len(failed) == len(tmpExprs):
keepMatching = False
if tmpReqd:
missing = ", ".join( [ _ustr(e) for e in tmpReqd ] )
raise ParseException(instring,loc,"Missing one or more required elements (%s)" % missing )
# add any unmatched Optionals, in case they have default values defined
matchOrder += [e for e in self.exprs if isinstance(e,Optional) and e.expr in tmpOpt]
resultlist = []
for e in matchOrder:
loc,results = e._parse(instring,loc,doActions)
resultlist.append(results)
finalResults = ParseResults([])
for r in resultlist:
dups = {}
for k in r.keys():
if k in finalResults.keys():
tmp = ParseResults(finalResults[k])
tmp += ParseResults(r[k])
dups[k] = tmp
finalResults += ParseResults(r)
for k,v in dups.items():
finalResults[k] = v
return loc, finalResults
def __str__( self ):
if hasattr(self,"name"):
return self.name
if self.strRepr is None:
self.strRepr = "{" + " & ".join( [ _ustr(e) for e in self.exprs ] ) + "}"
return self.strRepr
def checkRecursion( self, parseElementList ):
subRecCheckList = parseElementList[:] + [ self ]
for e in self.exprs:
e.checkRecursion( subRecCheckList )
class ParseElementEnhance(ParserElement):
"""Abstract subclass of C{ParserElement}, for combining and post-processing parsed tokens."""
def __init__( self, expr, savelist=False ):
super(ParseElementEnhance,self).__init__(savelist)
if isinstance( expr, basestring ):
expr = Literal(expr)
self.expr = expr
self.strRepr = None
if expr is not None:
self.mayIndexError = expr.mayIndexError
self.mayReturnEmpty = expr.mayReturnEmpty
self.setWhitespaceChars( expr.whiteChars )
self.skipWhitespace = expr.skipWhitespace
self.saveAsList = expr.saveAsList
self.callPreparse = expr.callPreparse
self.ignoreExprs.extend(expr.ignoreExprs)
def parseImpl( self, instring, loc, doActions=True ):
if self.expr is not None:
return self.expr._parse( instring, loc, doActions, callPreParse=False )
else:
raise ParseException("",loc,self.errmsg,self)
def leaveWhitespace( self ):
self.skipWhitespace = False
self.expr = self.expr.copy()
if self.expr is not None:
self.expr.leaveWhitespace()
return self
def ignore( self, other ):
if isinstance( other, Suppress ):
if other not in self.ignoreExprs:
super( ParseElementEnhance, self).ignore( other )
if self.expr is not None:
self.expr.ignore( self.ignoreExprs[-1] )
else:
super( ParseElementEnhance, self).ignore( other )
if self.expr is not None:
self.expr.ignore( self.ignoreExprs[-1] )
return self
def streamline( self ):
super(ParseElementEnhance,self).streamline()
if self.expr is not None:
self.expr.streamline()
return self
def checkRecursion( self, parseElementList ):
if self in parseElementList:
raise RecursiveGrammarException( parseElementList+[self] )
subRecCheckList = parseElementList[:] + [ self ]
if self.expr is not None:
self.expr.checkRecursion( subRecCheckList )
def validate( self, validateTrace=[] ):
tmp = validateTrace[:]+[self]
if self.expr is not None:
self.expr.validate(tmp)
self.checkRecursion( [] )
def __str__( self ):
try:
return super(ParseElementEnhance,self).__str__()
except:
pass
if self.strRepr is None and self.expr is not None:
self.strRepr = "%s:(%s)" % ( self.__class__.__name__, _ustr(self.expr) )
return self.strRepr
class FollowedBy(ParseElementEnhance):
"""Lookahead matching of the given parse expression. C{FollowedBy}
does *not* advance the parsing position within the input string, it only
verifies that the specified parse expression matches at the current
position. C{FollowedBy} always returns a null token list."""
def __init__( self, expr ):
super(FollowedBy,self).__init__(expr)
self.mayReturnEmpty = True
def parseImpl( self, instring, loc, doActions=True ):
self.expr.tryParse( instring, loc )
return loc, []
class NotAny(ParseElementEnhance):
"""Lookahead to disallow matching with the given parse expression. C{NotAny}
does *not* advance the parsing position within the input string, it only
verifies that the specified parse expression does *not* match at the current
position. Also, C{NotAny} does *not* skip over leading whitespace. C{NotAny}
always returns a null token list. May be constructed using the '~' operator."""
def __init__( self, expr ):
super(NotAny,self).__init__(expr)
#~ self.leaveWhitespace()
self.skipWhitespace = False # do NOT use self.leaveWhitespace(), don't want to propagate to exprs
self.mayReturnEmpty = True
self.errmsg = "Found unwanted token, "+_ustr(self.expr)
#self.myException = ParseException("",0,self.errmsg,self)
def parseImpl( self, instring, loc, doActions=True ):
try:
self.expr.tryParse( instring, loc )
except (ParseException,IndexError):
pass
else:
#~ raise ParseException(instring, loc, self.errmsg )
exc = self.myException
exc.loc = loc
exc.pstr = instring
raise exc
return loc, []
def __str__( self ):
if hasattr(self,"name"):
return self.name
if self.strRepr is None:
self.strRepr = "~{" + _ustr(self.expr) + "}"
return self.strRepr
class ZeroOrMore(ParseElementEnhance):
"""Optional repetition of zero or more of the given expression."""
def __init__( self, expr ):
super(ZeroOrMore,self).__init__(expr)
self.mayReturnEmpty = True
def parseImpl( self, instring, loc, doActions=True ):
tokens = []
try:
loc, tokens = self.expr._parse( instring, loc, doActions, callPreParse=False )
hasIgnoreExprs = ( len(self.ignoreExprs) > 0 )
while 1:
if hasIgnoreExprs:
preloc = self._skipIgnorables( instring, loc )
else:
preloc = loc
loc, tmptokens = self.expr._parse( instring, preloc, doActions )
if tmptokens or tmptokens.keys():
tokens += tmptokens
except (ParseException,IndexError):
pass
return loc, tokens
def __str__( self ):
if hasattr(self,"name"):
return self.name
if self.strRepr is None:
self.strRepr = "[" + _ustr(self.expr) + "]..."
return self.strRepr
def setResultsName( self, name, listAllMatches=False ):
ret = super(ZeroOrMore,self).setResultsName(name,listAllMatches)
ret.saveAsList = True
return ret
class OneOrMore(ParseElementEnhance):
"""Repetition of one or more of the given expression."""
def parseImpl( self, instring, loc, doActions=True ):
# must be at least one
loc, tokens = self.expr._parse( instring, loc, doActions, callPreParse=False )
try:
hasIgnoreExprs = ( len(self.ignoreExprs) > 0 )
while 1:
if hasIgnoreExprs:
preloc = self._skipIgnorables( instring, loc )
else:
preloc = loc
loc, tmptokens = self.expr._parse( instring, preloc, doActions )
if tmptokens or tmptokens.keys():
tokens += tmptokens
except (ParseException,IndexError):
pass
return loc, tokens
def __str__( self ):
if hasattr(self,"name"):
return self.name
if self.strRepr is None:
self.strRepr = "{" + _ustr(self.expr) + "}..."
return self.strRepr
def setResultsName( self, name, listAllMatches=False ):
ret = super(OneOrMore,self).setResultsName(name,listAllMatches)
ret.saveAsList = True
return ret
class _NullToken(object):
def __bool__(self):
return False
__nonzero__ = __bool__
def __str__(self):
return ""
_optionalNotMatched = _NullToken()
class Optional(ParseElementEnhance):
"""Optional matching of the given expression.
A default return string can also be specified, if the optional expression
is not found.
"""
def __init__( self, exprs, default=_optionalNotMatched ):
super(Optional,self).__init__( exprs, savelist=False )
self.defaultValue = default
self.mayReturnEmpty = True
def parseImpl( self, instring, loc, doActions=True ):
try:
loc, tokens = self.expr._parse( instring, loc, doActions, callPreParse=False )
except (ParseException,IndexError):
if self.defaultValue is not _optionalNotMatched:
if self.expr.resultsName:
tokens = ParseResults([ self.defaultValue ])
tokens[self.expr.resultsName] = self.defaultValue
else:
tokens = [ self.defaultValue ]
else:
tokens = []
return loc, tokens
def __str__( self ):
if hasattr(self,"name"):
return self.name
if self.strRepr is None:
self.strRepr = "[" + _ustr(self.expr) + "]"
return self.strRepr
class SkipTo(ParseElementEnhance):
"""Token for skipping over all undefined text until the matched expression is found.
If C{include} is set to true, the matched expression is also parsed (the skipped text
and matched expression are returned as a 2-element list). The C{ignore}
argument is used to define grammars (typically quoted strings and comments) that
might contain false matches.
"""
def __init__( self, other, include=False, ignore=None, failOn=None ):
super( SkipTo, self ).__init__( other )
self.ignoreExpr = ignore
self.mayReturnEmpty = True
self.mayIndexError = False
self.includeMatch = include
self.asList = False
if failOn is not None and isinstance(failOn, basestring):
self.failOn = Literal(failOn)
else:
self.failOn = failOn
self.errmsg = "No match found for "+_ustr(self.expr)
#self.myException = ParseException("",0,self.errmsg,self)
def parseImpl( self, instring, loc, doActions=True ):
startLoc = loc
instrlen = len(instring)
expr = self.expr
failParse = False
while loc <= instrlen:
try:
if self.failOn:
try:
self.failOn.tryParse(instring, loc)
except ParseBaseException:
pass
else:
failParse = True
raise ParseException(instring, loc, "Found expression " + str(self.failOn))
failParse = False
if self.ignoreExpr is not None:
while 1:
try:
loc = self.ignoreExpr.tryParse(instring,loc)
# print "found ignoreExpr, advance to", loc
except ParseBaseException:
break
expr._parse( instring, loc, doActions=False, callPreParse=False )
skipText = instring[startLoc:loc]
if self.includeMatch:
loc,mat = expr._parse(instring,loc,doActions,callPreParse=False)
if mat:
skipRes = ParseResults( skipText )
skipRes += mat
return loc, [ skipRes ]
else:
return loc, [ skipText ]
else:
return loc, [ skipText ]
except (ParseException,IndexError):
if failParse:
raise
else:
loc += 1
exc = self.myException
exc.loc = loc
exc.pstr = instring
raise exc
class Forward(ParseElementEnhance):
"""Forward declaration of an expression to be defined later -
used for recursive grammars, such as algebraic infix notation.
When the expression is known, it is assigned to the C{Forward} variable using the '<<' operator.
Note: take care when assigning to C{Forward} not to overlook precedence of operators.
Specifically, '|' has a lower precedence than '<<', so that::
fwdExpr << a | b | c
will actually be evaluated as::
(fwdExpr << a) | b | c
thereby leaving b and c out as parseable alternatives. It is recommended that you
explicitly group the values inserted into the C{Forward}::
fwdExpr << (a | b | c)
"""
def __init__( self, other=None ):
super(Forward,self).__init__( other, savelist=False )
def __lshift__( self, other ):
if isinstance( other, basestring ):
other = Literal(other)
self.expr = other
self.mayReturnEmpty = other.mayReturnEmpty
self.strRepr = None
self.mayIndexError = self.expr.mayIndexError
self.mayReturnEmpty = self.expr.mayReturnEmpty
self.setWhitespaceChars( self.expr.whiteChars )
self.skipWhitespace = self.expr.skipWhitespace
self.saveAsList = self.expr.saveAsList
self.ignoreExprs.extend(self.expr.ignoreExprs)
return None
def leaveWhitespace( self ):
self.skipWhitespace = False
return self
def streamline( self ):
if not self.streamlined:
self.streamlined = True
if self.expr is not None:
self.expr.streamline()
return self
def validate( self, validateTrace=[] ):
if self not in validateTrace:
tmp = validateTrace[:]+[self]
if self.expr is not None:
self.expr.validate(tmp)
self.checkRecursion([])
def __str__( self ):
if hasattr(self,"name"):
return self.name
self._revertClass = self.__class__
self.__class__ = _ForwardNoRecurse
try:
if self.expr is not None:
retString = _ustr(self.expr)
else:
retString = "None"
finally:
self.__class__ = self._revertClass
return self.__class__.__name__ + ": " + retString
def copy(self):
if self.expr is not None:
return super(Forward,self).copy()
else:
ret = Forward()
ret << self
return ret
class _ForwardNoRecurse(Forward):
def __str__( self ):
return "..."
class TokenConverter(ParseElementEnhance):
"""Abstract subclass of ParseExpression, for converting parsed results."""
def __init__( self, expr, savelist=False ):
super(TokenConverter,self).__init__( expr )#, savelist )
self.saveAsList = False
class Upcase(TokenConverter):
"""Converter to upper case all matching tokens."""
def __init__(self, *args):
super(Upcase,self).__init__(*args)
warnings.warn("Upcase class is deprecated, use upcaseTokens parse action instead",
DeprecationWarning,stacklevel=2)
def postParse( self, instring, loc, tokenlist ):
return list(map( string.upper, tokenlist ))
class Combine(TokenConverter):
"""Converter to concatenate all matching tokens to a single string.
By default, the matching patterns must also be contiguous in the input string;
this can be disabled by specifying C{'adjacent=False'} in the constructor.
"""
def __init__( self, expr, joinString="", adjacent=True ):
super(Combine,self).__init__( expr )
# suppress whitespace-stripping in contained parse expressions, but re-enable it on the Combine itself
if adjacent:
self.leaveWhitespace()
self.adjacent = adjacent
self.skipWhitespace = True
self.joinString = joinString
self.callPreparse = True
def ignore( self, other ):
if self.adjacent:
ParserElement.ignore(self, other)
else:
super( Combine, self).ignore( other )
return self
def postParse( self, instring, loc, tokenlist ):
retToks = tokenlist.copy()
del retToks[:]
retToks += ParseResults([ "".join(tokenlist._asStringList(self.joinString)) ], modal=self.modalResults)
if self.resultsName and len(retToks.keys())>0:
return [ retToks ]
else:
return retToks
class Group(TokenConverter):
"""Converter to return the matched tokens as a list - useful for returning tokens of ZeroOrMore and OneOrMore expressions."""
def __init__( self, expr ):
super(Group,self).__init__( expr )
self.saveAsList = True
def postParse( self, instring, loc, tokenlist ):
return [ tokenlist ]
class Dict(TokenConverter):
"""Converter to return a repetitive expression as a list, but also as a dictionary.
Each element can also be referenced using the first token in the expression as its key.
Useful for tabular report scraping when the first column can be used as a item key.
"""
def __init__( self, exprs ):
super(Dict,self).__init__( exprs )
self.saveAsList = True
def postParse( self, instring, loc, tokenlist ):
for i,tok in enumerate(tokenlist):
if len(tok) == 0:
continue
ikey = tok[0]
if isinstance(ikey,int):
ikey = _ustr(tok[0]).strip()
if len(tok)==1:
tokenlist[ikey] = _ParseResultsWithOffset("",i)
elif len(tok)==2 and not isinstance(tok[1],ParseResults):
tokenlist[ikey] = _ParseResultsWithOffset(tok[1],i)
else:
dictvalue = tok.copy() #ParseResults(i)
del dictvalue[0]
if len(dictvalue)!= 1 or (isinstance(dictvalue,ParseResults) and dictvalue.keys()):
tokenlist[ikey] = _ParseResultsWithOffset(dictvalue,i)
else:
tokenlist[ikey] = _ParseResultsWithOffset(dictvalue[0],i)
if self.resultsName:
return [ tokenlist ]
else:
return tokenlist
class Suppress(TokenConverter):
"""Converter for ignoring the results of a parsed expression."""
def postParse( self, instring, loc, tokenlist ):
return []
def suppress( self ):
return self
class OnlyOnce(object):
"""Wrapper for parse actions, to ensure they are only called once."""
def __init__(self, methodCall):
self.callable = ParserElement._normalizeParseActionArgs(methodCall)
self.called = False
def __call__(self,s,l,t):
if not self.called:
results = self.callable(s,l,t)
self.called = True
return results
raise ParseException(s,l,"")
def reset(self):
self.called = False
def traceParseAction(f):
"""Decorator for debugging parse actions."""
f = ParserElement._normalizeParseActionArgs(f)
def z(*paArgs):
thisFunc = f.func_name
s,l,t = paArgs[-3:]
if len(paArgs)>3:
thisFunc = paArgs[0].__class__.__name__ + '.' + thisFunc
sys.stderr.write( ">>entering %s(line: '%s', %d, %s)\n" % (thisFunc,line(l,s),l,t) )
try:
ret = f(*paArgs)
except Exception:
exc = sys.exc_info()[1]
sys.stderr.write( "<<leaving %s (exception: %s)\n" % (thisFunc,exc) )
raise
sys.stderr.write( "<<leaving %s (ret: %s)\n" % (thisFunc,ret) )
return ret
try:
z.__name__ = f.__name__
except AttributeError:
pass
return z
#
# global helpers
#
def delimitedList( expr, delim=",", combine=False ):
"""Helper to define a delimited list of expressions - the delimiter defaults to ','.
By default, the list elements and delimiters can have intervening whitespace, and
comments, but this can be overridden by passing C{combine=True} in the constructor.
If C{combine} is set to True, the matching tokens are returned as a single token
string, with the delimiters included; otherwise, the matching tokens are returned
as a list of tokens, with the delimiters suppressed.
"""
dlName = _ustr(expr)+" ["+_ustr(delim)+" "+_ustr(expr)+"]..."
if combine:
return Combine( expr + ZeroOrMore( delim + expr ) ).setName(dlName)
else:
return ( expr + ZeroOrMore( Suppress( delim ) + expr ) ).setName(dlName)
def countedArray( expr ):
"""Helper to define a counted list of expressions.
This helper defines a pattern of the form::
integer expr expr expr...
where the leading integer tells how many expr expressions follow.
The matched tokens returns the array of expr tokens as a list - the leading count token is suppressed.
"""
arrayExpr = Forward()
def countFieldParseAction(s,l,t):
n = int(t[0])
arrayExpr << (n and Group(And([expr]*n)) or Group(empty))
return []
return ( Word(nums).setName("arrayLen").setParseAction(countFieldParseAction, callDuringTry=True) + arrayExpr )
def _flatten(L):
if type(L) is not list: return [L]
if L == []: return L
return _flatten(L[0]) + _flatten(L[1:])
def matchPreviousLiteral(expr):
"""Helper to define an expression that is indirectly defined from
the tokens matched in a previous expression, that is, it looks
for a 'repeat' of a previous expression. For example::
first = Word(nums)
second = matchPreviousLiteral(first)
matchExpr = first + ":" + second
will match C{"1:1"}, but not C{"1:2"}. Because this matches a
previous literal, will also match the leading C{"1:1"} in C{"1:10"}.
If this is not desired, use C{matchPreviousExpr}.
Do *not* use with packrat parsing enabled.
"""
rep = Forward()
def copyTokenToRepeater(s,l,t):
if t:
if len(t) == 1:
rep << t[0]
else:
# flatten t tokens
tflat = _flatten(t.asList())
rep << And( [ Literal(tt) for tt in tflat ] )
else:
rep << Empty()
expr.addParseAction(copyTokenToRepeater, callDuringTry=True)
return rep
def matchPreviousExpr(expr):
"""Helper to define an expression that is indirectly defined from
the tokens matched in a previous expression, that is, it looks
for a 'repeat' of a previous expression. For example::
first = Word(nums)
second = matchPreviousExpr(first)
matchExpr = first + ":" + second
will match C{"1:1"}, but not C{"1:2"}. Because this matches by
expressions, will *not* match the leading C{"1:1"} in C{"1:10"};
the expressions are evaluated first, and then compared, so
C{"1"} is compared with C{"10"}.
Do *not* use with packrat parsing enabled.
"""
rep = Forward()
e2 = expr.copy()
rep << e2
def copyTokenToRepeater(s,l,t):
matchTokens = _flatten(t.asList())
def mustMatchTheseTokens(s,l,t):
theseTokens = _flatten(t.asList())
if theseTokens != matchTokens:
raise ParseException("",0,"")
rep.setParseAction( mustMatchTheseTokens, callDuringTry=True )
expr.addParseAction(copyTokenToRepeater, callDuringTry=True)
return rep
def _escapeRegexRangeChars(s):
#~ escape these chars: ^-]
for c in r"\^-]":
s = s.replace(c,_bslash+c)
s = s.replace("\n",r"\n")
s = s.replace("\t",r"\t")
return _ustr(s)
def oneOf( strs, caseless=False, useRegex=True ):
"""Helper to quickly define a set of alternative Literals, and makes sure to do
longest-first testing when there is a conflict, regardless of the input order,
but returns a C{MatchFirst} for best performance.
Parameters:
- strs - a string of space-delimited literals, or a list of string literals
- caseless - (default=False) - treat all literals as caseless
- useRegex - (default=True) - as an optimization, will generate a Regex
object; otherwise, will generate a C{MatchFirst} object (if C{caseless=True}, or
if creating a C{Regex} raises an exception)
"""
if caseless:
isequal = ( lambda a,b: a.upper() == b.upper() )
masks = ( lambda a,b: b.upper().startswith(a.upper()) )
parseElementClass = CaselessLiteral
else:
isequal = ( lambda a,b: a == b )
masks = ( lambda a,b: b.startswith(a) )
parseElementClass = Literal
if isinstance(strs,(list,tuple)):
symbols = list(strs[:])
elif isinstance(strs,basestring):
symbols = strs.split()
else:
warnings.warn("Invalid argument to oneOf, expected string or list",
SyntaxWarning, stacklevel=2)
i = 0
while i < len(symbols)-1:
cur = symbols[i]
for j,other in enumerate(symbols[i+1:]):
if ( isequal(other, cur) ):
del symbols[i+j+1]
break
elif ( masks(cur, other) ):
del symbols[i+j+1]
symbols.insert(i,other)
cur = other
break
else:
i += 1
if not caseless and useRegex:
#~ print (strs,"->", "|".join( [ _escapeRegexChars(sym) for sym in symbols] ))
try:
if len(symbols)==len("".join(symbols)):
return Regex( "[%s]" % "".join( [ _escapeRegexRangeChars(sym) for sym in symbols] ) )
else:
return Regex( "|".join( [ re.escape(sym) for sym in symbols] ) )
except:
warnings.warn("Exception creating Regex for oneOf, building MatchFirst",
SyntaxWarning, stacklevel=2)
# last resort, just use MatchFirst
return MatchFirst( [ parseElementClass(sym) for sym in symbols ] )
def dictOf( key, value ):
"""Helper to easily and clearly define a dictionary by specifying the respective patterns
for the key and value. Takes care of defining the C{Dict}, C{ZeroOrMore}, and C{Group} tokens
in the proper order. The key pattern can include delimiting markers or punctuation,
as long as they are suppressed, thereby leaving the significant key text. The value
pattern can include named results, so that the C{Dict} results can include named token
fields.
"""
return Dict( ZeroOrMore( Group ( key + value ) ) )
def originalTextFor(expr, asString=True):
"""Helper to return the original, untokenized text for a given expression. Useful to
restore the parsed fields of an HTML start tag into the raw tag text itself, or to
revert separate tokens with intervening whitespace back to the original matching
input text. Simpler to use than the parse action C{keepOriginalText}, and does not
require the inspect module to chase up the call stack. By default, returns a
string containing the original parsed text.
If the optional C{asString} argument is passed as False, then the return value is a
C{ParseResults} containing any results names that were originally matched, and a
single token containing the original matched text from the input string. So if
the expression passed to C{originalTextFor} contains expressions with defined
results names, you must set C{asString} to False if you want to preserve those
results name values."""
locMarker = Empty().setParseAction(lambda s,loc,t: loc)
endlocMarker = locMarker.copy()
endlocMarker.callPreparse = False
matchExpr = locMarker("_original_start") + expr + endlocMarker("_original_end")
if asString:
extractText = lambda s,l,t: s[t._original_start:t._original_end]
else:
def extractText(s,l,t):
del t[:]
t.insert(0, s[t._original_start:t._original_end])
del t["_original_start"]
del t["_original_end"]
matchExpr.setParseAction(extractText)
return matchExpr
# convenience constants for positional expressions
empty = Empty().setName("empty")
lineStart = LineStart().setName("lineStart")
lineEnd = LineEnd().setName("lineEnd")
stringStart = StringStart().setName("stringStart")
stringEnd = StringEnd().setName("stringEnd")
_escapedPunc = Word( _bslash, r"\[]-*.$+^?()~ ", exact=2 ).setParseAction(lambda s,l,t:t[0][1])
_printables_less_backslash = "".join([ c for c in printables if c not in r"\]" ])
_escapedHexChar = Combine( Suppress(_bslash + "0x") + Word(hexnums) ).setParseAction(lambda s,l,t:unichr(int(t[0],16)))
_escapedOctChar = Combine( Suppress(_bslash) + Word("0","01234567") ).setParseAction(lambda s,l,t:unichr(int(t[0],8)))
_singleChar = _escapedPunc | _escapedHexChar | _escapedOctChar | Word(_printables_less_backslash,exact=1)
_charRange = Group(_singleChar + Suppress("-") + _singleChar)
_reBracketExpr = Literal("[") + Optional("^").setResultsName("negate") + Group( OneOrMore( _charRange | _singleChar ) ).setResultsName("body") + "]"
_expanded = lambda p: (isinstance(p,ParseResults) and ''.join([ unichr(c) for c in range(ord(p[0]),ord(p[1])+1) ]) or p)
def srange(s):
r"""Helper to easily define string ranges for use in Word construction. Borrows
syntax from regexp '[]' string range definitions::
srange("[0-9]") -> "0123456789"
srange("[a-z]") -> "abcdefghijklmnopqrstuvwxyz"
srange("[a-z$_]") -> "abcdefghijklmnopqrstuvwxyz$_"
The input string must be enclosed in []'s, and the returned string is the expanded
character set joined into a single string.
The values enclosed in the []'s may be::
a single character
an escaped character with a leading backslash (such as \- or \])
an escaped hex character with a leading '\0x' (\0x21, which is a '!' character)
an escaped octal character with a leading '\0' (\041, which is a '!' character)
a range of any of the above, separated by a dash ('a-z', etc.)
any combination of the above ('aeiouy', 'a-zA-Z0-9_$', etc.)
"""
try:
return "".join([_expanded(part) for part in _reBracketExpr.parseString(s).body])
except:
return ""
def matchOnlyAtCol(n):
"""Helper method for defining parse actions that require matching at a specific
column in the input text.
"""
def verifyCol(strg,locn,toks):
if col(locn,strg) != n:
raise ParseException(strg,locn,"matched token not at column %d" % n)
return verifyCol
def replaceWith(replStr):
"""Helper method for common parse actions that simply return a literal value. Especially
useful when used with C{transformString()}.
"""
def _replFunc(*args):
return [replStr]
return _replFunc
def removeQuotes(s,l,t):
"""Helper parse action for removing quotation marks from parsed quoted strings.
To use, add this parse action to quoted string using::
quotedString.setParseAction( removeQuotes )
"""
return t[0][1:-1]
def upcaseTokens(s, l, t):
    """Parse action converting every matched token to upper case."""
    return [_ustr(tok).upper() for tok in t]
def downcaseTokens(s, l, t):
    """Parse action converting every matched token to lower case."""
    return [_ustr(tok).lower() for tok in t]
def keepOriginalText(s,startLoc,t):
    """DEPRECATED - use new helper method C{originalTextFor}.
    Helper parse action to preserve original parsed text,
    overriding any nested parse actions."""
    try:
        # getTokensEndLoc() inspects the interpreter stack; it only succeeds
        # when this function is actually running as a parse action.
        endloc = getTokensEndLoc()
    except ParseException:
        raise ParseFatalException("incorrect usage of keepOriginalText - may only be called as a parse action")
    # Discard whatever nested parse actions produced and substitute the raw
    # input slice between the match start and end locations.
    del t[:]
    t += ParseResults(s[startLoc:endloc])
    return t
def getTokensEndLoc():
    """Method to be called from within a parse action to determine the end
    location of the parsed tokens."""
    import inspect
    fstack = inspect.stack()
    try:
        # search up the stack (through intervening argument normalizers) for correct calling routine
        for f in fstack[2:]:
            # frame record layout: f[0] is the frame object, f[3] the function name
            if f[3] == "_parseNoCache":
                endloc = f[0].f_locals["loc"]
                return endloc
        else:
            # for/else: reached only when no _parseNoCache frame was found,
            # i.e. we were not called from inside a parse action.
            raise ParseFatalException("incorrect usage of getTokensEndLoc - may only be called from within a parse action")
    finally:
        # drop the stack snapshot promptly to avoid frame reference cycles
        del fstack
def _makeTags(tagStr, xml):
    """Internal helper to construct opening and closing tag expressions, given a tag name

    *tagStr* may be a string (matched case-insensitively for HTML) or an
    existing pyparsing expression; *xml* selects strict XML attribute syntax
    (double-quoted values, '=' required) versus lenient HTML syntax.
    Returns the (openTag, closeTag) expression pair."""
    if isinstance(tagStr,basestring):
        resname = tagStr
        # HTML tag names are case-insensitive; XML names are not
        tagStr = Keyword(tagStr, caseless=not xml)
    else:
        resname = tagStr.name
    tagAttrName = Word(alphas,alphanums+"_-:")
    if (xml):
        # XML: attribute values must be double-quoted
        tagAttrValue = dblQuotedString.copy().setParseAction( removeQuotes )
        openTag = Suppress("<") + tagStr("tag") + \
                Dict(ZeroOrMore(Group( tagAttrName + Suppress("=") + tagAttrValue ))) + \
                Optional("/",default=[False]).setResultsName("empty").setParseAction(lambda s,l,t:t[0]=='/') + Suppress(">")
    else:
        # HTML: values may be quoted or bare; names are lowercased; '=value' is optional
        printablesLessRAbrack = "".join( [ c for c in printables if c not in ">" ] )
        tagAttrValue = quotedString.copy().setParseAction( removeQuotes ) | Word(printablesLessRAbrack)
        openTag = Suppress("<") + tagStr + \
                Dict(ZeroOrMore(Group( tagAttrName.setParseAction(downcaseTokens) + \
                Optional( Suppress("=") + tagAttrValue ) ))) + \
                Optional("/",default=[False]).setResultsName("empty").setParseAction(lambda s,l,t:t[0]=='/') + Suppress(">")
    closeTag = Combine(_L("</") + tagStr + ">")
    # results names like "startDiv"/"endDiv"; ':' in namespaced tags becomes CamelCase
    openTag = openTag.setResultsName("start"+"".join(resname.replace(":"," ").title().split())).setName("<%s>" % tagStr)
    closeTag = closeTag.setResultsName("end"+"".join(resname.replace(":"," ").title().split())).setName("</%s>" % tagStr)
    openTag.tag = resname
    closeTag.tag = resname
    return openTag, closeTag
def makeHTMLTags(tagStr):
    """Helper to construct opening and closing tag expressions for HTML, given a tag name"""
    return _makeTags(tagStr, xml=False)
def makeXMLTags(tagStr):
    """Helper to construct opening and closing tag expressions for XML, given a tag name"""
    return _makeTags(tagStr, xml=True)
def withAttribute(*args, **attrDict):
    """Create a validating parse action for start tags built with
    makeXMLTags/makeHTMLTags, requiring the given attribute values — avoids
    false matches on common tags such as <TD> or <DIV>.
    Filter attributes may be given either as keyword arguments, e.g.
    (class="Customer", align="right"), or — required for namespaced names —
    as (name, value) tuples, e.g. (("ns1:class", "Customer"),).
    Pass withAttribute.ANY_VALUE as the value to require only that the
    attribute be present.
    """
    attrs = list(args) if args else list(attrDict.items())
    def pa(s, l, tokens):
        for attrName, attrValue in attrs:
            if attrName not in tokens:
                raise ParseException(s, l, "no matching attribute " + attrName)
            if attrValue != withAttribute.ANY_VALUE and tokens[attrName] != attrValue:
                raise ParseException(s, l, "attribute '%s' has value '%s', must be '%s'" %
                                    (attrName, tokens[attrName], attrValue))
    return pa
withAttribute.ANY_VALUE = object()
# Associativity constants for operatorPrecedence(); each is a distinct
# sentinel object compared by identity.
opAssoc = _Constants()
opAssoc.LEFT = object()
opAssoc.RIGHT = object()
def operatorPrecedence( baseExpr, opList ):
    """Helper method for constructing grammars of expressions made up of
       operators working in a precedence hierarchy.  Operators may be unary or
       binary, left- or right-associative.  Parse actions can also be attached
       to operator expressions.

       Parameters:
        - baseExpr - expression representing the most basic element for the nested
        - opList - list of tuples, one for each operator precedence level in the
          expression grammar; each tuple is of the form
          (opExpr, numTerms, rightLeftAssoc, parseAction), where:
           - opExpr is the pyparsing expression for the operator;
              may also be a string, which will be converted to a Literal;
              if numTerms is 3, opExpr is a tuple of two expressions, for the
              two operators separating the 3 terms
           - numTerms is the number of terms for this operator (must
              be 1, 2, or 3)
           - rightLeftAssoc is the indicator whether the operator is
              right or left associative, using the pyparsing-defined
              constants opAssoc.RIGHT and opAssoc.LEFT.
           - parseAction is the parse action to be associated with
              expressions matching this operator expression (the
              parse action tuple member may be omitted)
    """
    ret = Forward()
    # atom: the base expression or a parenthesized full expression
    lastExpr = baseExpr | ( Suppress('(') + ret + Suppress(')') )
    # build one grammar layer per precedence level, from tightest to loosest
    for i,operDef in enumerate(opList):
        # pad with None so the 4th (parseAction) member may be omitted
        opExpr,arity,rightLeftAssoc,pa = (operDef + (None,))[:4]
        if arity == 3:
            if opExpr is None or len(opExpr) != 2:
                raise ValueError("if numterms=3, opExpr must be a tuple or list of two expressions")
            opExpr1, opExpr2 = opExpr
        thisExpr = Forward()#.setName("expr%d" % i)
        if rightLeftAssoc == opAssoc.LEFT:
            # left associativity: repeat the operator against lastExpr
            if arity == 1:
                matchExpr = FollowedBy(lastExpr + opExpr) + Group( lastExpr + OneOrMore( opExpr ) )
            elif arity == 2:
                if opExpr is not None:
                    matchExpr = FollowedBy(lastExpr + opExpr + lastExpr) + Group( lastExpr + OneOrMore( opExpr + lastExpr ) )
                else:
                    # opExpr None -> juxtaposition (implicit operator)
                    matchExpr = FollowedBy(lastExpr+lastExpr) + Group( lastExpr + OneOrMore(lastExpr) )
            elif arity == 3:
                matchExpr = FollowedBy(lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr) + \
                            Group( lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr )
            else:
                raise ValueError("operator must be unary (1), binary (2), or ternary (3)")
        elif rightLeftAssoc == opAssoc.RIGHT:
            # right associativity: recurse into thisExpr on the right-hand side
            if arity == 1:
                # try to avoid LR with this extra test
                if not isinstance(opExpr, Optional):
                    opExpr = Optional(opExpr)
                matchExpr = FollowedBy(opExpr.expr + thisExpr) + Group( opExpr + thisExpr )
            elif arity == 2:
                if opExpr is not None:
                    matchExpr = FollowedBy(lastExpr + opExpr + thisExpr) + Group( lastExpr + OneOrMore( opExpr + thisExpr ) )
                else:
                    matchExpr = FollowedBy(lastExpr + thisExpr) + Group( lastExpr + OneOrMore( thisExpr ) )
            elif arity == 3:
                matchExpr = FollowedBy(lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr) + \
                            Group( lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr )
            else:
                raise ValueError("operator must be unary (1), binary (2), or ternary (3)")
        else:
            raise ValueError("operator must indicate right or left associativity")
        if pa:
            matchExpr.setParseAction( pa )
        # a term at this level is either an operator expression or anything tighter
        thisExpr << ( matchExpr | lastExpr )
        lastExpr = thisExpr
    ret << lastExpr
    return ret
# Pre-built quoted-string expressions.  Each allows doubled-quote and
# backslash escapes (incl. \xNN hex escapes) but no embedded newlines.
dblQuotedString = Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\x[0-9a-fA-F]+)|(?:\\.))*"').setName("string enclosed in double quotes")
sglQuotedString = Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\x[0-9a-fA-F]+)|(?:\\.))*'").setName("string enclosed in single quotes")
quotedString = Regex(r'''(?:"(?:[^"\n\r\\]|(?:"")|(?:\\x[0-9a-fA-F]+)|(?:\\.))*")|(?:'(?:[^'\n\r\\]|(?:'')|(?:\\x[0-9a-fA-F]+)|(?:\\.))*')''').setName("quotedString using single or double quotes")
# Python-style unicode string literal: a 'u' prefix followed by a quoted string
unicodeString = Combine(_L('u') + quotedString.copy())
def nestedExpr(opener="(", closer=")", content=None, ignoreExpr=quotedString.copy()):
    """Helper method for defining nested lists enclosed in opening and closing
       delimiters ("(" and ")" are the default).

       Parameters:
        - opener - opening character for a nested list (default="("); can also be a pyparsing expression
        - closer - closing character for a nested list (default=")"); can also be a pyparsing expression
        - content - expression for items within the nested lists (default=None)
        - ignoreExpr - expression for ignoring opening and closing delimiters (default=quotedString)

       If an expression is not provided for the content argument, the nested
       expression will capture all whitespace-delimited content between delimiters
       as a list of separate values.
       Use the ignoreExpr argument to define expressions that may contain
       opening or closing characters that should not be treated as opening
       or closing characters for nesting, such as quotedString or a comment
       expression.  Specify multiple expressions using an Or or MatchFirst.
       The default is quotedString, but if no expressions are to be ignored,
       then pass None for this argument.

       NOTE(review): the ignoreExpr default is evaluated once at definition
       time, so all callers relying on the default share one expression
       object — presumably intentional here, since ParserElements are not
       mutated by matching.
    """
    if opener == closer:
        raise ValueError("opening and closing strings cannot be the same")
    if content is None:
        # build a default content expression from the delimiter characters
        if isinstance(opener,basestring) and isinstance(closer,basestring):
            if len(opener) == 1 and len(closer)==1:
                # single-character delimiters: exclude them (and whitespace)
                # character-by-character from content
                if ignoreExpr is not None:
                    content = (Combine(OneOrMore(~ignoreExpr +
                                    CharsNotIn(opener+closer+ParserElement.DEFAULT_WHITE_CHARS,exact=1))
                                ).setParseAction(lambda t:t[0].strip()))
                else:
                    content = (empty.copy()+CharsNotIn(opener+closer+ParserElement.DEFAULT_WHITE_CHARS
                                ).setParseAction(lambda t:t[0].strip()))
            else:
                # multi-character delimiters: guard with negative lookahead
                if ignoreExpr is not None:
                    content = (Combine(OneOrMore(~ignoreExpr +
                                    ~Literal(opener) + ~Literal(closer) +
                                    CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS,exact=1))
                                ).setParseAction(lambda t:t[0].strip()))
                else:
                    content = (Combine(OneOrMore(~Literal(opener) + ~Literal(closer) +
                                    CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS,exact=1))
                                ).setParseAction(lambda t:t[0].strip()))
        else:
            raise ValueError("opening and closing arguments must be strings if no content expression is given")
    # recursive definition: a group of (ignored | nested | content) items
    ret = Forward()
    if ignoreExpr is not None:
        ret << Group( Suppress(opener) + ZeroOrMore( ignoreExpr | ret | content ) + Suppress(closer) )
    else:
        ret << Group( Suppress(opener) + ZeroOrMore( ret | content )  + Suppress(closer) )
    return ret
def indentedBlock(blockStatementExpr, indentStack, indent=True):
    """Helper method for defining space-delimited indentation blocks, such as
       those used to define block statements in Python source code.

       Parameters:
        - blockStatementExpr - expression defining syntax of statement that
            is repeated within the indented block
        - indentStack - list created by caller to manage indentation stack
            (multiple statementWithIndentedBlock expressions within a single grammar
            should share a common indentStack)
        - indent - boolean indicating whether block must be indented beyond the
            the current level; set to False for block of left-most statements
            (default=True)

       A valid block must contain at least one blockStatement.
    """
    def checkPeerIndent(s,l,t):
        # at end of input there is nothing left to indent-check
        if l >= len(s): return
        curCol = col(l,s)
        if curCol != indentStack[-1]:
            if curCol > indentStack[-1]:
                raise ParseFatalException(s,l,"illegal nesting")
            raise ParseException(s,l,"not a peer entry")
    def checkSubIndent(s,l,t):
        curCol = col(l,s)
        if curCol > indentStack[-1]:
            # entering a deeper block: push the new indent level
            indentStack.append( curCol )
        else:
            raise ParseException(s,l,"not a subentry")
    def checkUnindent(s,l,t):
        if l >= len(s): return
        curCol = col(l,s)
        # must return to some enclosing level (NOTE(review): indexes
        # indentStack[-2], so the stack is assumed to hold >= 2 entries here)
        if not(indentStack and curCol < indentStack[-1] and curCol <= indentStack[-2]):
            raise ParseException(s,l,"not an unindent")
        indentStack.pop()
    # NL absorbs blank/indented line endings between statements
    NL = OneOrMore(LineEnd().setWhitespaceChars("\t ").suppress())
    INDENT = Empty() + Empty().setParseAction(checkSubIndent)
    PEER   = Empty().setParseAction(checkPeerIndent)
    UNDENT = Empty().setParseAction(checkUnindent)
    if indent:
        smExpr = Group( Optional(NL) +
            #~ FollowedBy(blockStatementExpr) +
            INDENT + (OneOrMore( PEER + Group(blockStatementExpr) + Optional(NL) )) + UNDENT)
    else:
        smExpr = Group( Optional(NL) +
            (OneOrMore( PEER + Group(blockStatementExpr) + Optional(NL) )) )
    # allow backslash-continued lines inside the block
    blockStatementExpr.ignore(_bslash + LineEnd())
    return smExpr
# Latin-1 accented letters and punctuation, expanded via srange()
alphas8bit = srange(r"[\0xc0-\0xd6\0xd8-\0xf6\0xf8-\0xff]")
punc8bit = srange(r"[\0xa1-\0xbf\0xd7\0xf7]")
# generic open/close tag matchers for any HTML tag name
anyOpenTag,anyCloseTag = makeHTMLTags(Word(alphas,alphanums+"_:"))
# the five common named HTML entities, with their replacement characters
commonHTMLEntity = Combine(_L("&") + oneOf("gt lt amp nbsp quot").setResultsName("entity") +";").streamline()
_htmlEntityMap = dict(zip("gt lt amp nbsp quot".split(),'><& "'))
replaceHTMLEntity = lambda t : t.entity in _htmlEntityMap and _htmlEntityMap[t.entity] or None
# it's easy to get these comment structures wrong - they're very common, so may as well make them available
cStyleComment = Regex(r"/\*(?:[^*]*\*+)+?/").setName("C style comment")
htmlComment = Regex(r"<!--[\s\S]*?-->")
restOfLine = Regex(r".*").leaveWhitespace()
dblSlashComment = Regex(r"\/\/(\\\n|.)*").setName("// comment")
cppStyleComment = Regex(r"/(?:\*(?:[^*]*\*+)+?/|/[^\n]*(?:\n[^\n]*)*?(?:(?<!\\)|\Z))").setName("C++ style comment")
javaStyleComment = cppStyleComment
pythonStyleComment = Regex(r"#.*").setName("Python style comment")
# comma-separated list items: any run of non-comma words, not crossing lines
_noncomma = "".join( [ c for c in printables if c != "," ] )
_commasepitem = Combine(OneOrMore(Word(_noncomma) +
                                  Optional( Word(" \t") +
                                            ~Literal(",") + ~LineEnd() ) ) ).streamline().setName("commaItem")
commaSeparatedList = delimitedList( Optional( quotedString.copy() | _commasepitem, default="") ).setName("commaSeparatedList")
if __name__ == "__main__":
    # Self-test: a toy SQL SELECT grammar exercised against valid and
    # deliberately malformed inputs.
    def test( teststring ):
        # Parse one statement and dump the tokens; on failure, print the
        # offending line with a caret under the error column.
        try:
            tokens = simpleSQL.parseString( teststring )
            tokenlist = tokens.asList()
            print (teststring + "->"   + str(tokenlist))
            print ("tokens = "         + str(tokens))
            print ("tokens.columns = " + str(tokens.columns))
            print ("tokens.tables = "  + str(tokens.tables))
            print (tokens.asXML("SQL",True))
        except ParseBaseException:
            err = sys.exc_info()[1]
            print (teststring + "->")
            print (err.line)
            print (" "*(err.column-1) + "^")
            print (err)
        print()

    selectToken    = CaselessLiteral( "select" )
    fromToken      = CaselessLiteral( "from" )

    ident          = Word( alphas, alphanums + "_$" )
    # dotted names (e.g. SYS.XYZZY) are combined and upper-cased
    columnName     = delimitedList( ident, ".", combine=True ).setParseAction( upcaseTokens )
    columnNameList = Group( delimitedList( columnName ) )#.setName("columns")
    tableName      = delimitedList( ident, ".", combine=True ).setParseAction( upcaseTokens )
    tableNameList  = Group( delimitedList( tableName ) )#.setName("tables")
    simpleSQL      = ( selectToken + \
                     ( '*' | columnNameList ).setResultsName( "columns" ) + \
                     fromToken + \
                     tableNameList.setResultsName( "tables" ) )

    # the last four inputs are intentionally invalid
    test( "SELECT * from XYZZY, ABC" )
    test( "select * from SYS.XYZZY" )
    test( "Select A from Sys.dual" )
    test( "Select AA,BB,CC from Sys.dual" )
    test( "Select A, B, C from Sys.dual" )
    test( "Select A, B, C from Sys.dual" )
    test( "Xelect A, B, C from Sys.dual" )
    test( "Select A, B, C frox Sys.dual" )
    test( "Select" )
    test( "Select ^^^ frox Sys.dual" )
    test( "Select A, B, C from Sys.dual, Table2   " )
| Python |
#!/usr/bin/python
'''Usage: %s [OPTIONS] <input file(s)>
Generate test source file for CxxTest.
-v, --version Write CxxTest version
-o, --output=NAME Write output to file NAME
--runner=CLASS Create a main() function that runs CxxTest::CLASS
--gui=CLASS Like --runner, with GUI component
--error-printer Same as --runner=ErrorPrinter
--abort-on-fail Abort tests on failed asserts (like xUnit)
--have-std Use standard library (even if not found in tests)
--no-std Don\'t use standard library (even if found in tests)
--have-eh Use exception handling (even if not found in tests)
--no-eh Don\'t use exception handling (even if found in tests)
--longlong=[TYPE] Use TYPE (default: long long) as long long
--template=TEMPLATE Use TEMPLATE file to generate the test runner
--include=HEADER Include HEADER in test runner before other headers
--root Write CxxTest globals
--part Don\'t write CxxTest globals
--no-static-init Don\'t rely on static initialization
'''
import re
import sys
import getopt
import glob
import string
# Global variables
# -- scanner state --
suites = []        # completed test suites collected from the input files
suite = None       # suite currently being scanned (None when outside a suite)
inBlock = 0        # truthy while inside a CXXTEST_CODE() block of a generated suite
# -- command-line options --
outputFileName = None    # --output target; None means stdout
runner = None            # --runner / --error-printer class name
gui = None               # --gui class name
root = None              # --root: emit CxxTest globals
part = None              # --part: omit CxxTest globals
noStaticInit = None      # --no-static-init
templateFileName = None  # --template file
headers = []             # extra --include headers for the generated runner
# -- feature flags, set by options or auto-detected from the sources --
haveExceptionHandling = 0
noExceptionHandling = 0
haveStandardLibrary = 0
noStandardLibrary = 0
abortOnFail = 0
factor = 0
longlong = 0             # 0, or the type string to use for long long
def main():
    '''Entry point: parse the options, scan the sources, emit the runner.'''
    inputFiles = parseCommandline()
    scanInputFiles( inputFiles )
    writeOutput()
def usage(problem=None):
    '''Print usage info; exit 0 when called plainly, abort(2) when a
    problem message is supplied.'''
    if problem is not None:
        sys.stderr.write(usageString())
        abort(problem)
    sys.stdout.write(usageString() + '\n')
    sys.exit(0)
def usageString():
    '''Build the usage text by substituting the program name into the
    module docstring.'''
    return __doc__ % (sys.argv[0],)
def abort(problem):
    '''Report a fatal problem on stderr and exit with status 2.'''
    sys.stderr.write('\n' + problem + '\n\n')
    sys.exit(2)
def parseCommandline():
    '''Analyze command line arguments.

    Returns the list of input files; option values are stored in the
    module globals via setOptions().'''
    try:
        options, patterns = getopt.getopt( sys.argv[1:], 'o:r:',
                                           ['version', 'output=', 'runner=', 'gui=',
                                            'error-printer', 'abort-on-fail', 'have-std', 'no-std',
                                            'have-eh', 'no-eh', 'template=', 'include=',
                                            'root', 'part', 'no-static-init', 'factor', 'longlong='] )
    except getopt.error as problem:
        # 'except E as v' replaces the Python-2-only 'except E, v' syntax;
        # it is valid on Python 2.6+ and required on Python 3.
        usage( problem )
    setOptions( options )
    return setFiles( patterns )
def setOptions( options ):
    '''Set options specified on command line

    *options* is the (flag, argument) list produced by getopt; every
    recognized flag stores its value in a module-level global.'''
    global outputFileName, templateFileName, runner, gui, haveStandardLibrary, factor, longlong
    global haveExceptionHandling, noExceptionHandling, abortOnFail, headers, root, part, noStaticInit
    for o, a in options:
        if o in ('-v', '--version'):
            # printVersion() exits the process
            printVersion()
        elif o in ('-o', '--output'):
            outputFileName = a
        elif o == '--template':
            templateFileName = a
        elif o == '--runner':
            runner = a
        elif o == '--gui':
            gui = a
        elif o == '--include':
            # wrap bare header names in quotes; leave <...> / "..." untouched
            if not re.match( r'^["<].*[>"]$', a ):
                a = ('"%s"' % a)
            headers.append( a )
        elif o == '--error-printer':
            # shorthand for --runner=ErrorPrinter --have-std
            runner = 'ErrorPrinter'
            haveStandardLibrary = 1
        elif o == '--abort-on-fail':
            abortOnFail = 1
        elif o == '--have-std':
            haveStandardLibrary = 1
        elif o == '--no-std':
            noStandardLibrary = 1
        elif o == '--have-eh':
            haveExceptionHandling = 1
        elif o == '--no-eh':
            noExceptionHandling = 1
        elif o == '--root':
            root = 1
        elif o == '--part':
            part = 1
        elif o == '--no-static-init':
            noStaticInit = 1
        elif o == '--factor':
            factor = 1
        elif o == '--longlong':
            # optional argument: default to the standard 'long long' type
            if a:
                longlong = a
            else:
                longlong = 'long long'

    if noStaticInit and (root or part):
        abort( '--no-static-init cannot be used with --root/--part' )

    if gui and not runner:
        # a GUI needs an underlying printer to wrap
        runner = 'StdioPrinter'
def printVersion():
    '''Print CxxTest version and exit'''
    sys.stdout.write( "This is CxxTest version 3.10.1.\n" )
    # exits successfully; never returns
    sys.exit(0)
def setFiles( patterns ):
    '''Expand the command-line patterns into input file names.

    Aborts with a usage message when nothing matched, unless --root was
    given (a root-only run legitimately has no inputs).'''
    files = expandWildcards( patterns )
    # was 'len(files) is 0': identity comparison of ints only works by
    # CPython small-int caching and warns on modern interpreters.
    if len(files) == 0 and not root:
        usage( "No input files found" )
    return files
def expandWildcards( patterns ):
    '''Glob every pattern and return the matching file names,
    slash-normalized.'''
    return [ fixBackslashes( name )
             for pattern in patterns
             for name in glob.glob( pattern ) ]

def fixBackslashes( fileName ):
    '''Convert backslashes to forward slashes in a file name.'''
    return fileName.replace( '\\', '/' )
def scanInputFiles(files):
    '''Scan all input files for test suites; abort if none were defined.'''
    # renamed loop variable: the original shadowed the 'file' builtin
    for fileName in files:
        scanInputFile(fileName)
    # was 'len(suites) is 0' (int identity comparison) plus a redundant
    # 'global suites' declaration for a read-only access.
    if not suites and not root:
        abort( 'No tests defined' )
def scanInputFile(fileName):
    '''Scan a single input file, line by line, for test suites.

    Feeds every line to scanInputLine() with a 1-based line number, then
    closes any suite still open at end of file.'''
    # 'with' guarantees the handle is closed even if scanning raises; the
    # original shadowed the 'file' builtin and used a while/readline loop
    # with a manual counter and explicit close().
    with open(fileName) as source:
        for lineNo, line in enumerate(source, 1):
            scanInputLine( fileName, lineNo, line )
    closeSuite()
def scanInputLine( fileName, lineNo, line ):
    '''Scan single input line for interesting stuff

    Always probes for std-library/exception usage and suite starts; only
    when a suite is currently open is the line scanned for its contents.
    Order matters: scanLineForSuiteStart may open the suite this very
    line then belongs to.'''
    scanLineForExceptionHandling( line )
    scanLineForStandardLibrary( line )

    scanLineForSuiteStart( fileName, lineNo, line )

    global suite
    if suite:
        scanLineInsideSuite( suite, lineNo, line )
def scanLineInsideSuite( suite, lineNo, line ):
    '''Analyze a line which is part of the current suite: look for test
    methods and createSuite()/destroySuite() declarations.'''
    # The original declared 'global inBlock' here but never assigned it in
    # this function (lineBelongsToSuite owns that state) — dead declaration
    # removed.
    if lineBelongsToSuite( suite, lineNo, line ):
        scanLineForTest( suite, lineNo, line )
        scanLineForCreate( suite, lineNo, line )
        scanLineForDestroy( suite, lineNo, line )
def lineBelongsToSuite( suite, lineNo, line ):
    '''Returns whether current line is part of the current suite.
    This can be false when we are in a generated suite outside of CXXTEST_CODE() blocks
    If the suite is generated, adds the line to the list of lines'''
    # ordinary (non-generated) suites own every line until the suite ends
    if not suite['generated']:
        return 1

    global inBlock
    if not inBlock:
        # look for the start of a CXXTEST_CODE() block
        inBlock = lineStartsBlock( line )
    if inBlock:
        # addLineToBlock() returns false once the closing '}}' is seen,
        # which also ends the block for subsequent lines
        inBlock = addLineToBlock( suite, lineNo, line )
    return inBlock
std_re = re.compile( r"\b(std\s*::|CXXTEST_STD|using\s+namespace\s+std\b|^\s*\#\s*include\s+<[a-z0-9]+>)" )
def scanLineForStandardLibrary( line ):
    '''Latch haveStandardLibrary when the line shows C++ standard-library
    usage, unless --no-std suppressed detection.'''
    global haveStandardLibrary, noStandardLibrary
    if haveStandardLibrary or not std_re.search(line):
        return
    if not noStandardLibrary:
        haveStandardLibrary = 1
exception_re = re.compile( r"\b(throw|try|catch|TSM?_ASSERT_THROWS[A-Z_]*)\b" )
def scanLineForExceptionHandling( line ):
    '''Latch haveExceptionHandling when the line uses exceptions, unless
    --no-eh suppressed detection.'''
    global haveExceptionHandling, noExceptionHandling
    if haveExceptionHandling or not exception_re.search(line):
        return
    if not noExceptionHandling:
        haveExceptionHandling = 1
# hand-written suite: 'class X : public CxxTest::TestSuite'
suite_re = re.compile( r'\bclass\s+(\w+)\s*:\s*public\s+((::)?\s*CxxTest\s*::\s*)?TestSuite\b' )
# deprecated inline suite: 'CXXTEST_SUITE(Name)'
generatedSuite_re = re.compile( r'\bCXXTEST_SUITE\s*\(\s*(\w*)\s*\)' )
def scanLineForSuiteStart( fileName, lineNo, line ):
    '''Check if current line starts a new test suite

    Both patterns are probed independently; a match opens a new suite via
    startSuite() (generated=1 for the CXXTEST_SUITE form, with a
    deprecation warning).'''
    m = suite_re.search( line )
    if m:
        startSuite( m.group(1), fileName, lineNo, 0 )
    m = generatedSuite_re.search( line )
    if m:
        sys.stdout.write( "%s:%s: Warning: Inline test suites are deprecated.\n" % (fileName, lineNo) )
        startSuite( m.group(1), fileName, lineNo, 1 )
def startSuite( name, file, line, generated ):
    '''Finish any open suite and begin scanning a new one, pre-computing
    the C identifiers the writer phase will emit for it.'''
    global suite
    closeSuite()
    suite = {
        'name'      : name,
        'file'      : file,
        'cfile'     : cstr(file),
        'line'      : line,
        'generated' : generated,
        'object'    : 'suite_%s' % name,
        'dobject'   : 'suiteDescription_%s' % name,
        'tlist'     : 'Tests_%s' % name,
        'tests'     : [],
        'lines'     : [],
    }
def lineStartsBlock( line ):
    '''Return True when the line opens a CXXTEST_CODE() block.'''
    return bool( re.search( r'\bCXXTEST_CODE\s*\(', line ) )
# 'void testSomething()' (or Test...), ignoring lines starting a // comment
test_re = re.compile( r'^([^/]|/[^/])*\bvoid\s+([Tt]est\w+)\s*\(\s*(void)?\s*\)' )
def scanLineForTest( suite, lineNo, line ):
    '''Register a test when the line declares a void [Tt]est* method.'''
    match = test_re.search( line )
    if match:
        addTest( suite, match.group(2), lineNo )
def addTest( suite, name, line ):
    '''Append a test-function record (with its generated C++ identifier
    names) to the given suite.'''
    qualified = '%s_%s' % (suite['name'], name)
    suite['tests'].append( {
        'name'   : name,
        'suite'  : suite,
        'class'  : 'TestDescription_' + qualified,
        'object' : 'testDescription_' + qualified,
        'line'   : line,
    } )
def addLineToBlock( suite, lineNo, line ):
    '''Append the line to the current CXXTEST_CODE() block

    Strips the '{{' opener and anything before it, truncates at the '}}'
    closer, and returns whether the block continues on the next line.'''
    line = fixBlockLine( suite, lineNo, line )

    # remove the block opener (and the CXXTEST_CODE( prefix before it)
    line = re.sub( r'^.*\{\{', '', line )
    
    # truncate at the '}}' terminator, if present on this line
    e = re.search( r'\}\}', line )
    if e:
        line = line[:e.start()]
    suite['lines'].append( line )
    # still inside the block only if no terminator was found
    return e is None
def fixBlockLine( suite, lineNo, line ):
    '''Rewrite every [E]TS_ macro on the line as its _[E]TS_ form carrying
    the suite's file name and this line number.'''
    replacement = r'_\1(%s,%s,' % (suite['cfile'], lineNo)
    return re.sub( r'\b(E?TSM?_(ASSERT[A-Z_]*|FAIL))\s*\(', replacement, line, 0 )
create_re = re.compile( r'\bstatic\s+\w+\s*\*\s*createSuite\s*\(\s*(void)?\s*\)' )
def scanLineForCreate( suite, lineNo, line ):
    '''Record a createSuite() factory when the line declares one.'''
    if create_re.search( line ) is not None:
        addSuiteCreateDestroy( suite, 'create', lineNo )

destroy_re = re.compile( r'\bstatic\s+void\s+destroySuite\s*\(\s*\w+\s*\*\s*\w*\s*\)' )
def scanLineForDestroy( suite, lineNo, line ):
    '''Record a destroySuite() destructor when the line declares one.'''
    if destroy_re.search( line ) is not None:
        addSuiteCreateDestroy( suite, 'destroy', lineNo )
def cstr( s ):
    '''Return the C string literal for *s*: backslashes doubled and the
    whole wrapped in double quotes.

    The original shadowed the builtin ``str`` with its parameter name and
    used the long-deprecated ``string.replace`` module function; behavior
    is unchanged.'''
    return '"' + s.replace( '\\', '\\\\' ) + '"'
def addSuiteCreateDestroy( suite, which, line ):
    '''Record the createSuite()/destroySuite() declaration line for the
    current suite; aborts on a duplicate declaration.'''
    # dict.has_key() is deprecated and removed in Python 3; 'in' is the
    # exact equivalent.
    if which in suite:
        abort( '%s:%s: %sSuite() already declared' % ( suite['file'], str(line), which ) )
    suite[which] = line
def closeSuite():
    '''Close current suite and add it to the list if valid

    A suite with no tests is silently dropped.'''
    global suite
    if suite is not None:
        # was "len(suite['tests']) is not 0" — identity comparison of int
        # objects; plain truthiness is the correct, equivalent test.
        if suite['tests']:
            verifySuite(suite)
            rememberSuite(suite)
        suite = None
def verifySuite(suite):
    '''Verify current suite is legal: createSuite() and destroySuite()
    must either both be declared or both be absent.'''
    # dict.has_key() is deprecated (removed in Python 3); membership tests
    # are the exact equivalent.
    if 'create' in suite and 'destroy' not in suite:
        abort( '%s:%s: Suite %s has createSuite() but no destroySuite()' %
               (suite['file'], suite['create'], suite['name']) )
    if 'destroy' in suite and 'create' not in suite:
        abort( '%s:%s: Suite %s has destroySuite() but no createSuite()' %
               (suite['file'], suite['destroy'], suite['name']) )

def rememberSuite(suite):
    '''Append the finished suite to the global list.'''
    global suites
    suites.append( suite )
def writeOutput():
    '''Create the output file, via the template when one was given.'''
    writer = writeTemplateOutput if templateFileName else writeSimpleOutput
    writer()
def writeSimpleOutput():
    '''Emit the runner without a template: preamble, main(), then the
    world definitions.'''
    output = startOutputFile()
    for emit in (writePreamble, writeMain, writeWorld):
        emit( output )
    output.close()
# markers recognized inside a --template file
include_re = re.compile( r"\s*\#\s*include\s+<cxxtest/" )
preamble_re = re.compile( r"^\s*<CxxTest\s+preamble>\s*$" )
world_re = re.compile( r"^\s*<CxxTest\s+world>\s*$" )

def writeTemplateOutput():
    '''Create output based on template file

    Copies the template through verbatim, expanding the <CxxTest preamble>
    and <CxxTest world> marker lines and injecting the preamble ahead of
    the first cxxtest #include.'''
    template = open(templateFileName)
    output = startOutputFile()
    # Iterating the file object replaces the original while/readline loop
    # (whose 'break;' also carried a stray C-style semicolon).
    for line in template:
        if include_re.search( line ):
            # make sure our defines precede any cxxtest header
            writePreamble( output )
            output.write( line )
        elif preamble_re.search( line ):
            writePreamble( output )
        elif world_re.search( line ):
            writeWorld( output )
        else:
            output.write( line )
    template.close()
    output.close()
def startOutputFile():
    '''Open the output target (--output file, else stdout) and emit the
    generated-file banner.'''
    output = open( outputFileName, 'w' ) if outputFileName is not None else sys.stdout
    output.write( "/* Generated file, do not edit */\n\n" )
    return output
# guard so the preamble is emitted at most once per run
wrotePreamble = 0
def writePreamble( output ):
    '''Write the CxxTest header (#includes and #defines)

    Emits feature #defines reflecting the scan results/options, then the
    --include headers and the core cxxtest headers.  Idempotent.'''
    global wrotePreamble, headers, longlong
    if wrotePreamble: return
    output.write( "#ifndef CXXTEST_RUNNING\n" )
    output.write( "#define CXXTEST_RUNNING\n" )
    output.write( "#endif\n" )
    output.write( "\n" )
    if haveStandardLibrary:
        output.write( "#define _CXXTEST_HAVE_STD\n" )
    if haveExceptionHandling:
        output.write( "#define _CXXTEST_HAVE_EH\n" )
    if abortOnFail:
        output.write( "#define _CXXTEST_ABORT_TEST_ON_FAIL\n" )
    if longlong:
        output.write( "#define _CXXTEST_LONGLONG %s\n" % longlong )
    if factor:
        output.write( "#define _CXXTEST_FACTOR\n" )
    # user-supplied --include headers go before the cxxtest headers
    for header in headers:
        output.write( "#include %s\n" % header )
    output.write( "#include <cxxtest/TestListener.h>\n" )
    output.write( "#include <cxxtest/TestTracker.h>\n" )
    output.write( "#include <cxxtest/TestRunner.h>\n" )
    output.write( "#include <cxxtest/RealDescriptions.h>\n" )
    if runner:
        output.write( "#include <cxxtest/%s.h>\n" % runner )
    if gui:
        output.write( "#include <cxxtest/%s.h>\n" % gui )
    output.write( "\n" )
    wrotePreamble = 1
def writeMain( output ):
    '''Write the main() function for the test runner

    GUI runners take argc/argv; plain runners do not.  With --no-static-init
    an explicit CxxTest::initialize() call is emitted first.  Writes nothing
    when neither --gui nor --runner was given.'''
    if gui:
        output.write( 'int main( int argc, char *argv[] ) {\n' )
        if noStaticInit:
            output.write( ' CxxTest::initialize();\n' )
        output.write( ' return CxxTest::GuiTuiRunner<CxxTest::%s, CxxTest::%s>( argc, argv ).run();\n' % (gui, runner) )
        output.write( '}\n' )
    elif runner:
        output.write( 'int main() {\n' )
        if noStaticInit:
            output.write( ' CxxTest::initialize();\n' )
        output.write( ' return CxxTest::%s().run();\n' % runner )
        output.write( '}\n' )
# guard so the world definitions are emitted at most once per run
wroteWorld = 0
def writeWorld( output ):
    '''Write the world definitions

    Ensures the preamble exists, then emits all suite/test descriptions,
    the CxxTest globals (unless suppressed by --part), and the
    initialize() replacement when --no-static-init was given.  Idempotent.'''
    global wroteWorld, part
    if wroteWorld: return
    writePreamble( output )
    writeSuites( output )
    if root or not part:
        writeRoot( output )
    if noStaticInit:
        writeInitialize( output )
    wroteWorld = 1
def writeSuites(output):
    '''Write all TestDescriptions and SuiteDescriptions

    For each scanned suite: include its source file, emit the class body
    for CXXTEST_SUITE-generated suites, then the suite object (a pointer
    for dynamic suites), its test list, and the per-test descriptions.'''
    for suite in suites:
        writeInclude( output, suite['file'] )
        if isGenerated(suite):
            generateSuite( output, suite )
        if isDynamic(suite):
            writeSuitePointer( output, suite )
        else:
            writeSuiteObject( output, suite )
        writeTestList( output, suite )
        writeSuiteDescription( output, suite )
        writeTestDescriptions( output, suite )
def isGenerated(suite):
    '''Checks whether a suite class should be created (it was declared
    with CXXTEST_SUITE()).'''
    return suite['generated']

def isDynamic(suite):
    '''Checks whether a suite is dynamic, i.e. declares a createSuite()
    factory.'''
    # dict.has_key() is deprecated and removed in Python 3; 'in' is the
    # exact equivalent.
    return 'create' in suite
# last file emitted, so consecutive suites from one file include it once
lastIncluded = ''
def writeInclude(output, file):
    '''Emit #include "file", skipping consecutive duplicates.'''
    global lastIncluded
    if file != lastIncluded:
        output.write( '#include "%s"\n\n' % file )
        lastIncluded = file
def generateSuite( output, suite ):
    '''Emit the class definition for a suite declared with
    CXXTEST_SUITE(), replaying the captured CXXTEST_CODE() lines.'''
    output.write( 'class %s : public CxxTest::TestSuite {\n' % suite['name'] )
    output.write( 'public:\n' )
    output.writelines( suite['lines'] )
    output.write( '};\n\n' )
def writeSuitePointer( output, suite ):
    '''Emit the static suite pointer used by dynamically created suites
    (zero-initialized explicitly unless --no-static-init).'''
    template = 'static %s *%s;\n\n' if noStaticInit else 'static %s *%s = 0;\n\n'
    output.write( template % (suite['name'], suite['object']) )
def writeSuiteObject( output, suite ):
    '''Emit the static suite instance for a non-dynamic suite.'''
    output.write( 'static %s %s;\n\n' % (suite['name'], suite['object']) )
def writeTestList( output, suite ):
    '''Emit the head node of the suite's linked list of tests
    (zero-initialized explicitly unless --no-static-init).'''
    initializer = '' if noStaticInit else ' = { 0, 0 }'
    output.write( 'static CxxTest::List %s%s;\n' % (suite['tlist'], initializer) )
def writeTestDescriptions( output, suite ):
    '''Emit a TestDescription object for every test in the suite.'''
    for testCase in suite['tests']:
        writeTestDescription( output, suite, testCase )
def writeTestDescription( output, suite, test ):
    '''Write test description object

    Emits an anonymous RealTestDescription subclass plus its static
    instance; the registering constructor is skipped with --no-static-init
    (writeInitialize() registers instead).'''
    output.write( 'static class %s : public CxxTest::RealTestDescription {\n' % test['class'] )
    output.write( 'public:\n' )
    if not noStaticInit:
        output.write( ' %s() : CxxTest::RealTestDescription( %s, %s, %s, "%s" ) {}\n' %
                      (test['class'], suite['tlist'], suite['dobject'], test['line'], test['name']) )
    # runTest() dispatches differently for static vs dynamic suites
    output.write( ' void runTest() { %s }\n' % runBody( suite, test ) )
    output.write( '} %s;\n\n' % test['object'] )
def runBody( suite, test ):
    '''Return the C++ statement used as the body of TestDescription::run().'''
    if isDynamic(suite):
        return dynamicRun( suite, test )
    return staticRun( suite, test )

def dynamicRun( suite, test ):
    '''run() body for a test owned by a dynamic (pointer-held) suite.'''
    obj = suite['object']
    return 'if ( %s ) %s->%s();' % (obj, obj, test['name'])

def staticRun( suite, test ):
    '''run() body for a test owned by a statically allocated suite.'''
    return '%s.%s();' % (suite['object'], test['name'])
def writeSuiteDescription( output, suite ):
    '''Emit the SuiteDescription object, dynamic or static as appropriate.'''
    emit = writeDynamicDescription if isDynamic( suite ) else writeStaticDescription
    emit( output, suite )
def writeDynamicDescription( output, suite ):
    '''Emit the DynamicSuiteDescription object for a dynamic suite; its
    constructor arguments are omitted under --no-static-init.'''
    output.write( 'CxxTest::DynamicSuiteDescription<%s> %s' % (suite['name'], suite['dobject']) )
    if not noStaticInit:
        ctorArgs = (suite['cfile'], suite['line'], suite['name'], suite['tlist'],
                    suite['object'], suite['create'], suite['destroy'])
        output.write( '( %s, %s, "%s", %s, %s, %s, %s )' % ctorArgs )
    output.write( ';\n\n' )
def writeStaticDescription( output, suite ):
    '''Emit the StaticSuiteDescription object for a static suite; its
    constructor arguments are omitted under --no-static-init.'''
    output.write( 'CxxTest::StaticSuiteDescription %s' % suite['dobject'] )
    if not noStaticInit:
        ctorArgs = (suite['cfile'], suite['line'], suite['name'], suite['object'], suite['tlist'])
        output.write( '( %s, %s, "%s", %s, %s )' % ctorArgs )
    output.write( ';\n\n' )
def writeRoot(output):
'''Write static members of CxxTest classes'''
output.write( '#include <cxxtest/Root.cpp>\n' )
def writeInitialize(output):
    '''Write CxxTest::initialize(), which replaces static initialization'''
    output.write( 'namespace CxxTest {\n' )
    output.write( ' void initialize()\n' )
    output.write( ' {\n' )
    # For every suite: reset its test list, initialize its description object,
    # then initialize each test's description.
    for suite in suites:
        output.write( ' %s.initialize();\n' % suite['tlist'] )
        if isDynamic(suite):
            # Dynamic suites start with a null instance pointer.
            output.write( ' %s = 0;\n' % suite['object'] )
            output.write( ' %s.initialize( %s, %s, "%s", %s, %s, %s, %s );\n' %
                          (suite['dobject'], suite['cfile'], suite['line'], suite['name'],
                           suite['tlist'], suite['object'], suite['create'], suite['destroy']) )
        else:
            output.write( ' %s.initialize( %s, %s, "%s", %s, %s );\n' %
                          (suite['dobject'], suite['cfile'], suite['line'], suite['name'],
                           suite['object'], suite['tlist']) )
        for test in suite['tests']:
            output.write( ' %s.initialize( %s, %s, %s, "%s" );\n' %
                          (test['object'], suite['tlist'], suite['dobject'], test['line'], test['name']) )
    output.write( ' }\n' )
    output.write( '}\n' )
main()
| Python |
#!/home/cheshire/install/bin/python -i
# depth first web crawler
import sys, os, re
import urllib
import urlparse
from lxml import etree
import StringIO
import hashlib
from foresite import *
from foresite import conneg
from rdflib import URIRef, Literal
# Shared parser and module-level crawl state.
parser = etree.HTMLParser()
# Links using these schemes are skipped outright.
nonHttpRe = re.compile("^(mailto|ftp|telnet):(.*)", re.I)
# Extensions treated as non-HTML: aggregated/counted but never fetched for links.
nonHtmlRe = re.compile("\.(pdf|doc|ppt|odp|jpg|png|gif|zip|gz|tgz|bz2|ps|mpg|java|py|c|h|txt|num)$", re.I)
contentTypes = {}   # mime type -> count of non-HTML resources seen
md5Hash = {}        # page digest -> page id (redirect/duplicate detection)
pageHash = {}       # url -> numeric page id
starts = []
webGraphs = [{}]    # current link graph: page id -> list of linked page ids
# Crawl root; only URLs matching every template below are followed.
start = "http://www.openarchives.org/ore/1.0/"
restrictTemplates = [re.compile("http://www\.openarchives\.org/ore/1\.0.*")]
stack = [(start, -1)]   # work queue of (url, source page id); -1 marks the root
srlz = RdfLibSerializer(format='pretty-xml')
aggr = Aggregation(start + '#aggregation')
def crawl(uri, src):
    '''Fetch uri, record it in the link graph and the aggregation, and queue
    its outgoing links. src is the numeric id of the linking page (-1 for the
    crawl root).'''
    # Assign (or look up) a stable numeric id for this URL.
    if not pageHash.has_key(uri):
        pid = len(pageHash)
        pageHash[uri] = pid
    else:
        pid = pageHash[uri]
    linkHash = webGraphs[-1]
    # A page already present in the link graph has been processed; stop here.
    if not linkHash.has_key(pid):
        linkHash[pid] = []
    else:
        return
    print "processing %s->%s: %s" % (src, pid, uri)
    if src != -1:
        linkHash[src].append(pid)
    #fetch, find links, record, crawl
    try:
        fh = urllib.urlopen(uri)
    except:
        print "... BROKEN"
        return
    ar = AggregatedResource(uri)
    ct = fh.headers['content-type']
    # Optional HTTP headers become dc/dcterms metadata when present.
    try:
        cl = fh.headers['content-length']
        ar._dc.extent = Literal(cl)
    except:
        pass
    try:
        lm = fh.headers['last-modified']
        ar._dcterms.modified = Literal(lm)
    except:
        pass
    # Normalize the content type via the content-negotiation parser.
    mt = conneg.parse(ct)
    if mt:
        ct = mt[0].mimetype1 + '/' + mt[0].mimetype2
    ar._dc.format = Literal(ct)
    # Non-HTML resources are aggregated and counted, but never parsed for links.
    if ct != 'text/html':
        aggr.add_resource(ar)
        try:
            contentTypes[ct] += 1
        except KeyError:
            contentTypes[ct] = 1
        return
    data = fh.read()
    fh.close()
    # hash page for redirects/duplicates etc
    md5 = hashlib.new('md5')
    md5.update(data)
    hd = md5.hexdigest()
    if md5Hash.has_key(hd):
        print "%s == %s" % (pid, md5Hash[hd])
        return
    else:
        md5Hash[hd] = pid
    # only add it here
    aggr.add_resource(ar)
    try:
        dom = etree.parse(StringIO.StringIO(data), parser)
    except:
        print " --- failed to parse"
        return
    title = dom.xpath('//title/text()')
    if title:
        ar._dc.title = Literal(title[0])
    # Collect candidate links: anchors, frames, images and stylesheets.
    links = dom.xpath('//a/@href')
    frames = dom.xpath('//frame/@src')
    links.extend(frames)
    imgs = dom.xpath('//img/@src')
    links.extend(imgs)
    css = dom.xpath('//link/@href')
    links.extend(css)
    for l in links:
        l = l.strip()
        # Drop any fragment; an empty remainder was a same-page anchor.
        if l.find('#') > -1:
            l = l[:l.find('#')]
        if not l:
            # was just a hash URL
            continue
        if l[0] == "/":
            l = urlparse.urljoin(uri, l)
        elif l[:7].lower() != "http://" and l[:8].lower() != "https://":
            # check other protocols
            if nonHttpRe.search(l):
                continue
            # put in current directory
            l = urlparse.urljoin(uri, l)
        # check if we really want to crawl...
        if nonHtmlRe.search(l):
            # ignore common stuff
            # print "Skipping: %s" % chk
            pass
        elif pageHash.has_key(l):
            # ignore already done
            # print "Skipping: %s" % chk
            pass
        else:
            # Follow only URLs matched by every restriction template.
            match = 1
            for t in restrictTemplates:
                if not t.match(l):
                    match = 0
                    break
            if match:
                stack.append((l, pid))
# Drain the work queue. NOTE(review): pop(0) makes this first-in-first-out,
# i.e. breadth-first, despite the file's "depth first" header comment.
while stack:
    (l, pid) = stack.pop(0)
    crawl(l, pid)
# Serialize everything collected into a Resource Map and print it.
rem = aggr.register_serialization(srlz, '#rem')
rd = rem.get_serialization()
print rd.data
| Python |
#!/usr/bin/env python
# Packaging script for the foresite library (setuptools).
from setuptools import setup, find_packages
# Single-sourced release version.
version = '0.9'
setup(name='foresite',
      version=version,
      description='Library for constructing, parsing, manipulating and serializing OAI-ORE Resource Maps',
      long_description="""\
""",
      classifiers=[],
      author='Rob Sanderson',
      author_email='azaroth@liv.ac.uk',
      url='http://code.google.com/p/foresite-toolkit/',
      license='BSD',
      packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),
      include_package_data=True,
      zip_safe=False,
      # NOTE(review): 'rdflib<3a' pins to the pre-3.0 rdflib API -- confirm still wanted.
      install_requires=['rdflib<3a', 'lxml'],
      test_suite='foresite.tests.test_suite'
      )
| Python |
#!/home/cheshire/install/bin/python -i
from foresite import *
import urllib2
import os, sys
import getopt
# given an initial starting point, crawl nested and linked ORE aggregations
# download aggregated resources
# content negotiation for prefered ReM format
def usage():
    '''Print command-line help and exit.'''
    # NOTE(review): the long options actually require two dashes
    # (--remDir/--resDir), not the single dash shown in this text.
    print """Usage:
%s [-r] [-d DEPTH] [-f ReM-Format] [-remDir ReM-Directory]
%s [-resDir Resource-Directory] URI
ReM-Format is one of: xml, atom, rdfa, nt, n3, turtle""" % (sys.argv[0], ' ' * len(sys.argv[0]))
    sys.exit(0)
# Command-line option configuration.
# Short options: -r (also fetch aggregated resources), -d DEPTH, -f FORMAT.
optstr = "rd:f:"
# Long options: --remDir DIR and --resDir DIR.
# Fixed: this previously declared 'arDir=' while both the option handler and
# the usage text expect '--resDir', so --resDir raised GetoptError and
# --arDir fell through to "Unknown option".
longs = ['remDir=', 'resDir=']
# User-facing format name -> MIME type sent in the Accept header.
mimeHash = {'atom' : 'application/atom+xml',
            'rdfa' : 'application/xhtml+xml',
            'xml' : 'application/rdf+xml',
            'nt' : 'text/plain',
            'n3' : 'text/rdf+n3',
            'turtle' : 'application/x-turtle'}
# Exactly one positional argument (the starting URI) is required.
optlist, args = getopt.getopt(sys.argv[1:], optstr, longs)
if len(args) != 1:
    usage()
else:
    uri = args[0]
# Defaults: unlimited depth, do not fetch aggregated resources, standard
# output directories, server-driven content negotiation.
maxDepth = -1
fetchAR = 0
remDirectory = 'rems'
arDirectory = 'resources'
accept_header = ''
for o in optlist:
    if o[0] == '-d':
        try:
            maxDepth = int(o[1])
        except:
            print "DEPTH must be an integer"
            usage()
    elif o[0] == '-r':
        fetchAR = 1
    elif o[0] == '--remDir':
        remDirectory = o[1]
    elif o[0] == '--resDir':
        arDirectory = o[1]
    elif o[0] == '-f':
        if not mimeHash.has_key(o[1]):
            print "Unknown format '%s'" % o[1]
            usage()
        else:
            # pass through accept_header
            accept_header = '%s;q=1.0' % mimeHash[o[1]]
    else:
        print "Unknown option: %s" % o[0]
        usage()
done = {}     # ReM URIs already visited
doneAr = {}   # aggregated-resource URIs already downloaded
stack = {}    # pending ReM URI -> depth at which it was discovered
p = RdfLibParser()
ap = AtomParser()
rdfap = RdfAParser()
if not os.path.exists(remDirectory):
    os.mkdir(remDirectory)
if not os.path.exists(arDirectory):
    os.mkdir(arDirectory)
stack[uri] = 0
while stack:
    # NB unordered pop
    (next, depth) = stack.popitem()
    done[next] = 1
    if maxDepth > -1 and depth > maxDepth:
        continue
    print "Fetching %s..." % next
    rd = ReMDocument(next, accept=accept_header)
    # Derive a flat filename from the (possibly redirected) document URI.
    fn = rd.uri.replace('http://', '')
    fn = fn.replace('/', '_')
    fn = fn.replace('\\', '_')
    fn = os.path.join(remDirectory, fn)
    fh = open(fn, 'w')
    fh.write(rd.data)
    fh.close()
    # Pick the parser matching the negotiated format.
    try:
        if rd.format == 'atom':
            rem = ap.parse(rd)
        elif rd.format == 'rdfa':
            rem = rdfap.parse(rd)
        else:
            rem = p.parse(rd)
    except:
        # unparsable
        print 'URI %s is unparsable' % next
        raise
    # XXX Maybe write in alternative formats?
    # find refs to all other aggregations
    oas = rem.aggregation.do_sparql('SELECT ?a WHERE {?a a ore:Aggregation }')
    for oa in oas:
        oa = str(oa[0])
        if not done.has_key(oa) and not stack.has_key(oa):
            stack[oa] = depth + 1
    oas = rem.aggregation.do_sparql('SELECT ?a WHERE {?b ore:isAggregatedBy ?a }')
    for oa in oas:
        oa = str(oa[0])
        if not done.has_key(oa) and not stack.has_key(oa):
            stack[oa] = depth + 1
    if fetchAR:
        # find aggregated resources
        ars = rem.aggregation.do_sparql('SELECT ?a WHERE {?b ore:aggregates ?a }')
        for ar in ars:
            ar = str(ar[0])
            if not done.has_key(ar) and not stack.has_key(ar) and not doneAr.has_key(ar):
                print "Fetching Aggregated Resource: %s..." % ar
                req = urllib2.Request(ar)
                fh = urllib2.urlopen(req)
                data = fh.read()
                fh.close()
                fn = ar.replace('http://', '')
                fn = fn.replace('/', '_')
                fn = fn.replace('\\', '_')
                fn = os.path.join(arDirectory, fn)
                fh = open(fn, 'w')
                fh.write(data)
                fh.close()
                doneAr[ar] = 1
| Python |
#!/usr/bin/env python
# Packaging script for the foresite library (setuptools).
from setuptools import setup, find_packages
# Single-sourced release version.
version = '0.9'
setup(name='foresite',
      version=version,
      description='Library for constructing, parsing, manipulating and serializing OAI-ORE Resource Maps',
      long_description="""\
""",
      classifiers=[],
      author='Rob Sanderson',
      author_email='azaroth@liv.ac.uk',
      url='http://code.google.com/p/foresite-toolkit/',
      license='BSD',
      packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),
      include_package_data=True,
      zip_safe=False,
      # NOTE(review): 'rdflib<3a' pins to the pre-3.0 rdflib API -- confirm still wanted.
      install_requires=['rdflib<3a', 'lxml'],
      test_suite='foresite.tests.test_suite'
      )
| Python |
#!/home/cheshire/install/bin/python -i
# depth first web crawler
import sys, os, re
import urllib
import urlparse
from lxml import etree
import StringIO
import hashlib
from foresite import *
from foresite import conneg
from rdflib import URIRef, Literal
# Shared parser and module-level crawl state.
parser = etree.HTMLParser()
# Links using these schemes are skipped outright.
nonHttpRe = re.compile("^(mailto|ftp|telnet):(.*)", re.I)
# Extensions treated as non-HTML: aggregated/counted but never fetched for links.
nonHtmlRe = re.compile("\.(pdf|doc|ppt|odp|jpg|png|gif|zip|gz|tgz|bz2|ps|mpg|java|py|c|h|txt|num)$", re.I)
contentTypes = {}   # mime type -> count of non-HTML resources seen
md5Hash = {}        # page digest -> page id (redirect/duplicate detection)
pageHash = {}       # url -> numeric page id
starts = []
webGraphs = [{}]    # current link graph: page id -> list of linked page ids
# Crawl root; only URLs matching every template below are followed.
start = "http://www.openarchives.org/ore/1.0/"
restrictTemplates = [re.compile("http://www\.openarchives\.org/ore/1\.0.*")]
stack = [(start, -1)]   # work queue of (url, source page id); -1 marks the root
srlz = RdfLibSerializer(format='pretty-xml')
aggr = Aggregation(start + '#aggregation')
def crawl(uri, src):
    '''Fetch uri, record it in the link graph and the aggregation, and queue
    its outgoing links. src is the numeric id of the linking page (-1 for the
    crawl root).'''
    # Assign (or look up) a stable numeric id for this URL.
    if not pageHash.has_key(uri):
        pid = len(pageHash)
        pageHash[uri] = pid
    else:
        pid = pageHash[uri]
    linkHash = webGraphs[-1]
    # A page already present in the link graph has been processed; stop here.
    if not linkHash.has_key(pid):
        linkHash[pid] = []
    else:
        return
    print "processing %s->%s: %s" % (src, pid, uri)
    if src != -1:
        linkHash[src].append(pid)
    #fetch, find links, record, crawl
    try:
        fh = urllib.urlopen(uri)
    except:
        print "... BROKEN"
        return
    ar = AggregatedResource(uri)
    ct = fh.headers['content-type']
    # Optional HTTP headers become dc/dcterms metadata when present.
    try:
        cl = fh.headers['content-length']
        ar._dc.extent = Literal(cl)
    except:
        pass
    try:
        lm = fh.headers['last-modified']
        ar._dcterms.modified = Literal(lm)
    except:
        pass
    # Normalize the content type via the content-negotiation parser.
    mt = conneg.parse(ct)
    if mt:
        ct = mt[0].mimetype1 + '/' + mt[0].mimetype2
    ar._dc.format = Literal(ct)
    # Non-HTML resources are aggregated and counted, but never parsed for links.
    if ct != 'text/html':
        aggr.add_resource(ar)
        try:
            contentTypes[ct] += 1
        except KeyError:
            contentTypes[ct] = 1
        return
    data = fh.read()
    fh.close()
    # hash page for redirects/duplicates etc
    md5 = hashlib.new('md5')
    md5.update(data)
    hd = md5.hexdigest()
    if md5Hash.has_key(hd):
        print "%s == %s" % (pid, md5Hash[hd])
        return
    else:
        md5Hash[hd] = pid
    # only add it here
    aggr.add_resource(ar)
    try:
        dom = etree.parse(StringIO.StringIO(data), parser)
    except:
        print " --- failed to parse"
        return
    title = dom.xpath('//title/text()')
    if title:
        ar._dc.title = Literal(title[0])
    # Collect candidate links: anchors, frames, images and stylesheets.
    links = dom.xpath('//a/@href')
    frames = dom.xpath('//frame/@src')
    links.extend(frames)
    imgs = dom.xpath('//img/@src')
    links.extend(imgs)
    css = dom.xpath('//link/@href')
    links.extend(css)
    for l in links:
        l = l.strip()
        # Drop any fragment; an empty remainder was a same-page anchor.
        if l.find('#') > -1:
            l = l[:l.find('#')]
        if not l:
            # was just a hash URL
            continue
        if l[0] == "/":
            l = urlparse.urljoin(uri, l)
        elif l[:7].lower() != "http://" and l[:8].lower() != "https://":
            # check other protocols
            if nonHttpRe.search(l):
                continue
            # put in current directory
            l = urlparse.urljoin(uri, l)
        # check if we really want to crawl...
        if nonHtmlRe.search(l):
            # ignore common stuff
            # print "Skipping: %s" % chk
            pass
        elif pageHash.has_key(l):
            # ignore already done
            # print "Skipping: %s" % chk
            pass
        else:
            # Follow only URLs matched by every restriction template.
            match = 1
            for t in restrictTemplates:
                if not t.match(l):
                    match = 0
                    break
            if match:
                stack.append((l, pid))
# Drain the work queue. NOTE(review): pop(0) makes this first-in-first-out,
# i.e. breadth-first, despite the file's "depth first" header comment.
while stack:
    (l, pid) = stack.pop(0)
    crawl(l, pid)
# Serialize everything collected into a Resource Map and print it.
rem = aggr.register_serialization(srlz, '#rem')
rd = rem.get_serialization()
print rd.data
| Python |
#!/home/cheshire/install/bin/python -i
from foresite import *
import urllib2
import os, sys
import getopt
# given an initial starting point, crawl nested and linked ORE aggregations
# download aggregated resources
# content negotiation for prefered ReM format
def usage():
    '''Print command-line help and exit.'''
    # NOTE(review): the long options actually require two dashes
    # (--remDir/--resDir), not the single dash shown in this text.
    print """Usage:
%s [-r] [-d DEPTH] [-f ReM-Format] [-remDir ReM-Directory]
%s [-resDir Resource-Directory] URI
ReM-Format is one of: xml, atom, rdfa, nt, n3, turtle""" % (sys.argv[0], ' ' * len(sys.argv[0]))
    sys.exit(0)
# Command-line option configuration.
# Short options: -r (also fetch aggregated resources), -d DEPTH, -f FORMAT.
optstr = "rd:f:"
# Long options: --remDir DIR and --resDir DIR.
# Fixed: this previously declared 'arDir=' while both the option handler and
# the usage text expect '--resDir', so --resDir raised GetoptError and
# --arDir fell through to "Unknown option".
longs = ['remDir=', 'resDir=']
# User-facing format name -> MIME type sent in the Accept header.
mimeHash = {'atom' : 'application/atom+xml',
            'rdfa' : 'application/xhtml+xml',
            'xml' : 'application/rdf+xml',
            'nt' : 'text/plain',
            'n3' : 'text/rdf+n3',
            'turtle' : 'application/x-turtle'}
# Exactly one positional argument (the starting URI) is required.
optlist, args = getopt.getopt(sys.argv[1:], optstr, longs)
if len(args) != 1:
    usage()
else:
    uri = args[0]
# Defaults: unlimited depth, do not fetch aggregated resources, standard
# output directories, server-driven content negotiation.
maxDepth = -1
fetchAR = 0
remDirectory = 'rems'
arDirectory = 'resources'
accept_header = ''
for o in optlist:
    if o[0] == '-d':
        try:
            maxDepth = int(o[1])
        except:
            print "DEPTH must be an integer"
            usage()
    elif o[0] == '-r':
        fetchAR = 1
    elif o[0] == '--remDir':
        remDirectory = o[1]
    elif o[0] == '--resDir':
        arDirectory = o[1]
    elif o[0] == '-f':
        if not mimeHash.has_key(o[1]):
            print "Unknown format '%s'" % o[1]
            usage()
        else:
            # pass through accept_header
            accept_header = '%s;q=1.0' % mimeHash[o[1]]
    else:
        print "Unknown option: %s" % o[0]
        usage()
done = {}     # ReM URIs already visited
doneAr = {}   # aggregated-resource URIs already downloaded
stack = {}    # pending ReM URI -> depth at which it was discovered
p = RdfLibParser()
ap = AtomParser()
rdfap = RdfAParser()
if not os.path.exists(remDirectory):
    os.mkdir(remDirectory)
if not os.path.exists(arDirectory):
    os.mkdir(arDirectory)
stack[uri] = 0
while stack:
    # NB unordered pop
    (next, depth) = stack.popitem()
    done[next] = 1
    if maxDepth > -1 and depth > maxDepth:
        continue
    print "Fetching %s..." % next
    rd = ReMDocument(next, accept=accept_header)
    # Derive a flat filename from the (possibly redirected) document URI.
    fn = rd.uri.replace('http://', '')
    fn = fn.replace('/', '_')
    fn = fn.replace('\\', '_')
    fn = os.path.join(remDirectory, fn)
    fh = open(fn, 'w')
    fh.write(rd.data)
    fh.close()
    # Pick the parser matching the negotiated format.
    try:
        if rd.format == 'atom':
            rem = ap.parse(rd)
        elif rd.format == 'rdfa':
            rem = rdfap.parse(rd)
        else:
            rem = p.parse(rd)
    except:
        # unparsable
        print 'URI %s is unparsable' % next
        raise
    # XXX Maybe write in alternative formats?
    # find refs to all other aggregations
    oas = rem.aggregation.do_sparql('SELECT ?a WHERE {?a a ore:Aggregation }')
    for oa in oas:
        oa = str(oa[0])
        if not done.has_key(oa) and not stack.has_key(oa):
            stack[oa] = depth + 1
    oas = rem.aggregation.do_sparql('SELECT ?a WHERE {?b ore:isAggregatedBy ?a }')
    for oa in oas:
        oa = str(oa[0])
        if not done.has_key(oa) and not stack.has_key(oa):
            stack[oa] = depth + 1
    if fetchAR:
        # find aggregated resources
        ars = rem.aggregation.do_sparql('SELECT ?a WHERE {?b ore:aggregates ?a }')
        for ar in ars:
            ar = str(ar[0])
            if not done.has_key(ar) and not stack.has_key(ar) and not doneAr.has_key(ar):
                print "Fetching Aggregated Resource: %s..." % ar
                req = urllib2.Request(ar)
                fh = urllib2.urlopen(req)
                data = fh.read()
                fh.close()
                fn = ar.replace('http://', '')
                fn = fn.replace('/', '_')
                fn = fn.replace('\\', '_')
                fn = os.path.join(arDirectory, fn)
                fh = open(fn, 'w')
                fh.write(data)
                fh.close()
                doneAr[ar] = 1
| Python |
import os
import urllib, urllib2
from rdflib import ConjunctiveGraph, URIRef, BNode, Literal
from utils import *
from StringIO import StringIO
from utils import unconnectedAction
from foresite import libraryName, libraryUri, libraryEmail
from foresite import conneg
# --- Object Class Definitions ---
class Graph(ConjunctiveGraph):
    """rdflib graph pre-bound with the toolkit's namespace prefixes."""
    def __init__(self, store=None, id=None):
        # id *should* be aggregation URI
        if store != None and id != None:
            ConjunctiveGraph.__init__(self, store, id)
        else:
            ConjunctiveGraph.__init__(self)
        # Bind every known prefix so serializations emit readable qnames.
        for (key,val) in namespaces.iteritems():
            self.bind(key, val)
    def find_namespace(self, name):
        # find best namespace
        # Walk the preferred namespace order and return the first namespace
        # whose element list contains this property name; '' if none claims it.
        for k in namespaceSearchOrder:
            v = elements[k]
            if name in v:
                return namespaces[k]
        return ''
    def split_uri(self, uri):
        # given namespaced uri, find base property name
        # Returns (namespace-part, local-name); the local name is whatever
        # follows the last '/' or '#'.
        slsplit = uri.split('/')
        hsplit = slsplit[-1].split('#')
        return (uri[:0-len(hsplit[-1])], hsplit[-1])
class OREResource(object):
    """Base class for all ORE entities.
    Attribute access is routed into an rdflib graph: a plain attribute maps to
    a predicate in a guessed namespace, the _ns.prop form (e.g. res._dc.title)
    selects an explicit namespace for a single access, and the _name_ form
    bypasses the magic to touch the real instance attribute."""
    # Class-level defaults; per-instance state is installed in __init__ via
    # the _name_ escape hatch of __setattr__.
    graph = None
    uri = ""
    currNs = ""
    agents = {}
    triples = {}
    aggregations = []
    def __init__(self, uri):
        graph = Graph()
        self._graph_ = graph
        if isinstance(uri, URIRef) or isinstance(uri, BNode):
            self._uri_ = uri
        else:
            self._uri_ = URIRef(uri)
        self._currNs_ = ''
        self._agents_ = {}
        self._triples_ = {}
        self._aggregations_ = []
    def __str__(self):
        return str(self.uri)
    def __getattr__(self, name):
        # fetch value from graph
        cns = self.currNs
        if name[0] == "_" and name[-1] == "_":
            # _name_ -> the real attribute, no graph involvement
            return getattr(self, name[1:-1])
        elif name[0] == "_" and namespaces.has_key(name[1:]):
            # we're looking for self.namespace.property
            self._currNs_ = name[1:]
            return self
        elif cns:
            val = self.get_value(name, cns)
            self._currNs_ = ''
        else:
            val = self.get_value(name)
        return val
    def __setattr__(self, name, value):
        if name[0] == "_" and name[-1] == "_":
            return object.__setattr__(self, name[1:-1], value)
        elif name[0] == "_" and namespaces.has_key(name[1:]):
            # we're looking for self.namespace.property
            object.__setattr__(self, 'currNs', name[1:])
            return self
        elif self.currNs:
            val = self.set_value(name, value, self.currNs)
        else:
            val = self.set_value(name, value)
        object.__setattr__(self, 'currNs', '')
        return val
    def set_value(self, name, value, ns=None):
        """Add (self, ns:name, value) to the graph; assigning [] removes all values."""
        if ns:
            nsobj = namespaces[ns]
        else:
            nsobj = self.graph.find_namespace(name)
        if value == []:
            # Empty list clears every existing value of the property.
            for val in self.graph.objects(self.uri, nsobj[name]):
                self.graph.remove((self.uri, nsobj[name], val))
        else:
            # Non-node values are coerced to RDF literals.
            if not isinstance(value, URIRef) and not isinstance(value, BNode):
                value = Literal(value)
            self.graph.add((self.uri, nsobj[name], value))
        return 1
    def get_value(self, name, ns=None):
        """Return all graph values of ns:name for this resource (may be empty)."""
        if ns:
            nsobj = namespaces[ns]
        else:
            nsobj = self.graph.find_namespace(name)
        l = []
        for obj in self.graph.objects(self.uri, nsobj[name]):
            l.append(obj)
        return l
    def add_triple(self, trip):
        # Attach an ArbitraryResource whose graph is merged at serialization time.
        self._triples_[trip._uri_] = trip
    def remove_triple(self, trip):
        del self._triples_[trip._uri_]
    def predicates(self):
        return list(self.graph.predicates())
    def add_agent(self, who, type):
        """Attach an Agent under the given relationship name (e.g. 'creator')."""
        self._agents_[who._uri_] = who
        setattr(self, type, who._uri_)
    def remove_agent(self, who, type):
        del self._agents_[who._uri_]
        ns = self.graph.find_namespace(type)
        self._graph_.remove((self._uri_, ns[type], who._uri_))
    def on_add(self, aggr, proxy):
        # Callback from Aggregation.add_resource.
        self._aggregations_.append((aggr, proxy))
    def on_remove(self, aggr, proxy):
        self._aggregations_.remove((aggr, proxy))
    def get_proxy(self, aggr=None):
        """Return this resource's Proxy within aggr, or its first proxy when aggr is None."""
        if aggr:
            for (a,p) in self._aggregations_:
                if a == aggr:
                    return p
            return None
        elif self._aggregations_:
            return self._aggregations_[0][1]
        else:
            return None
class ResourceMap(OREResource):
    """An ore:ResourceMap -- a named, serializable description of one Aggregation."""
    # Class-level defaults; instances shadow them in __init__ (the _name_
    # assignment form strips the underscores and sets the plain attribute).
    aggregation = None
    serializer = None
    def __init__(self, uri):
        OREResource.__init__(self, uri)
        self._aggregation_ = None
        self._serializer_ = None
        self.type = namespaces['ore']['ResourceMap']
        self.add_triple(rem_type)
    def register_serializer(self, serializer):
        # Deprecated
        self.register_serialization(serializer)
    def register_serialization(self, serializer):
        """Attach the (single) serializer and record its MIME type as dc:format."""
        if self.serializer:
            raise OreException("ResourceMap already has serializer")
        if not serializer.mimeType in self._dc.format:
            self.format = serializer.mimeType
        self._serializer_ = serializer
    def get_serialization(self, page=-1):
        """Serialize this map; page > 0 selects one page of the aggregation.
        Fixed: page was previously dropped, so paged serialization never worked."""
        return self._serializer_.serialize(self, page)
    def set_aggregation(self, agg):
        """Bind the aggregation this map describes (ore:describes); one only."""
        if self.aggregation:
            raise OreException("ResourceMap already has an aggregation set")
        self._aggregation_ = agg
        self.describes = agg.uri
        agg.on_describe(self)
class Aggregation(OREResource):
    """An ore:Aggregation -- a set of aggregated resources together with the
    ResourceMaps that describe it. Supports list- and set-like access."""
    # Class-level defaults; __init__ shadows them per instance.
    resourceMaps = []
    resources = []
    fullGraph = None
    def __init__(self, uri):
        OREResource.__init__(self, uri)
        self._resources_ = []       # list of (resource, proxy-or-None) pairs
        self._resourceMaps_ = []    # ResourceMaps registered via on_describe
        self._fullGraph_ = None
        self._generateProxies_ = False  # when True, add_resource mints Proxies
        self.type = namespaces['ore']['Aggregation']
        self.add_triple(aggr_type)
    def __iter__(self):
        l = [x[0] for x in self._resources_]
        return l.__iter__()
    def __len__(self):
        return len(self._resources_)
    def __contains__(self, what):
        """True if what is an aggregated resource/proxy or the string of either's URI."""
        for x in self._resources_:
            if what in x or what == str(x[0].uri):
                return True
            # Fixed: the proxy slot may be None, and None.uri previously
            # raised AttributeError here.
            if x[1] is not None and what == str(x[1].uri):
                return True
        return False
    def __getitem__(self, x):
        if isinstance(x, int):
            return self.resources[x][0]
        if isinstance(x, str):
            x = URIRef(x)
        for r in self.resources:
            if x == r[0].uri:
                return r[0]
        raise KeyError(x)
    def on_describe(self, rem):
        # Callback from ResourceMap.set_aggregation.
        self._resourceMaps_.append(rem)
    def add_resource(self, res, proxy=None):
        """Aggregate res; returns its Proxy (minted on demand when enabled) or
        None. Raises KeyError if res is already aggregated."""
        for x in self._resources_:
            if x[0] == res:
                raise KeyError('Aggregation %s already aggregates %s' % (self.uri, res.uri))
        self.aggregates = res.uri
        if proxy or self.generateProxies:
            if not proxy:
                uri = gen_proxy_uri(res, self)
                proxy = Proxy(uri)
            proxy.set_forIn(res, self)
        else:
            proxy = None
        self._resources_.append((res, proxy))
        res.on_add(self, proxy)
        return proxy
    # List API
    def append(self, res):
        self.add_resource(res)
    # Set API
    def add(self, res):
        self.add_resource(res)
    def remove_resource(self, res):
        """Stop aggregating res (no-op if it is not aggregated)."""
        tup = None
        for x in self._resources_:
            if x[0] == res:
                tup = x
                break
        if tup:
            self._resources_.remove(tup)
            res.on_remove(self, tup[1])
            # Fixed: this used to end with `del tup[1]`, which always raised
            # TypeError (tuples are immutable); removing the pair from the
            # list is sufficient.
            # TODO(review): the ore:aggregates triple added by add_resource is
            # not removed here -- confirm whether that is intentional.
    # List, Set API
    def remove(self, res):
        self.remove_resource(res)
    # Set API
    def discard(self, res):
        self.remove_resource(res)
    def get_authoritative(self):
        """ResourceMaps claiming (orex:isAuthoritativeFor) this aggregation."""
        rems = []
        for rem in self.resourceMaps:
            if self.uri in rem._orex.isAuthoritativeFor:
                rems.append(rem)
        return rems
    def _merge_all_graphs(self, public=1, top=1):
        # Only used for sparql query across everything, not serialization
        g = Graph()
        for rem in self.resourceMaps:
            g += rem._graph_
            for at in rem._triples_.values():
                g += at._graph_
            for c in rem._agents_.values():
                g += c._graph_
            if not rem.created:
                g.add((rem._uri_, namespaces['dcterms']['created'], Literal(now())))
            g.add((rem._uri_, namespaces['dcterms']['modified'], Literal(now())))
        aggr = self
        g += aggr._graph_
        for at in aggr._triples_.values():
            g += at._graph_
        for c in aggr._agents_.values():
            g += c._graph_
        for (res, proxy) in aggr._resources_:
            g += res._graph_
            if proxy:
                g += proxy._graph_
            for at in res._triples_.values():
                g += at._graph_
            for c in res._agents_.values():
                g += c._graph_
            if isinstance(res, Aggregation):
                # include nestings recursively
                g += res._merge_all_graphs(public, top=0)
        if not g.connected() and unconnectedAction != 'ignore':
            raise OreException("Must have connected graph")
        if public:
            # Remove internal methods
            for p in internalPredicates:
                for (s,o) in g.subject_objects(p):
                    g.remove((s,p,o))
        if top:
            # Fixed: subject/predicate were previously passed as a single tuple
            # and the resulting (always-truthy) generator made this check dead
            # code; it now actually verifies something is aggregated.
            empty = True
            for _o in g.objects(aggr._uri_, namespaces['ore']['aggregates']):
                empty = False
                break
            if empty:
                raise OreException("Aggregation must aggregate something")
        return g
    def do_sparql(self, sparql):
        """Run a SPARQL query over the merged graph of everything reachable."""
        # first merge graphs
        g = self._merge_all_graphs()
        # now do sparql query on merged graph
        return g.query(sparql, initNs=namespaces)
    def register_serialization(self, serializer, uri='', **kw):
        """Create a ResourceMap for this aggregation using serializer. Extra
        keywords become properties of the new map (Agents are attached as
        agents, ArbitraryResources as linked triples)."""
        # Create ReM etc.
        if not uri:
            if self.uri.find('#') > -1:
                uri = self.uri + "_ResourceMap"
            else:
                uri = self.uri + "#ResourceMap"
        rem = ResourceMap(uri)
        rem.set_aggregation(self)
        rem.register_serializer(serializer)
        for (k,v) in kw.iteritems():
            if isinstance(v, Agent):
                rem.add_agent(v, k)
            elif isinstance(v, ArbitraryResource):
                setattr(rem, k, v.uri)
                rem.add_triple(v)
            else:
                setattr(rem, k, v)
        return rem
    def get_serialization(self, uri='', page=-1):
        """Serialize via the ResourceMap with the given URI (default: first one)."""
        if not uri:
            rem = self.resourceMaps[0]
        else:
            rem = None
            for r in self.resourceMaps:
                if str(r.uri) == uri:
                    rem = r
                    break
            if not rem:
                raise OreException("Unknown Resource Map: %s" % uri)
        # Fixed: forward page so paged serialization actually pages.
        return rem.get_serialization(page)
class Proxy(OREResource):
    """ore:Proxy -- stands for a resource in the context of one aggregation."""
    # Class-level defaults; instances shadow them in __init__.
    resource = None
    aggregation = None
    def __init__(self, uri):
        OREResource.__init__(self, uri)
        self.type = namespaces['ore']['Proxy']
        self._resource_ = None
        self._aggregation_ = None
    def set_forIn(self, res, aggr):
        # Record both ends of the relationship (ore:proxyFor / ore:proxyIn).
        self.proxyFor = res.uri
        self._resource_ = res
        self.proxyIn = aggr.uri
        self._aggregation_ = aggr
class Agent(OREResource):
    """A person, organisation or software agent. Without an explicit URI it is
    identified by a fresh urn:uuid or a blank node."""
    def __init__(self, uri=''):
        if not uri:
            # assumes assignAgentUri is a module setting from utils -- TODO confirm
            if assignAgentUri:
                uri = "urn:uuid:%s" % gen_uuid()
            else:
                uri = BNode()
        OREResource.__init__(self, uri)
class AggregatedResource(OREResource):
    """A resource aggregated within an Aggregation; adds no behaviour of its own."""
    # Convenience class for OREResource
    pass
class ArbitraryResource(OREResource):
    """Holder for arbitrary triples whose subject is not one of the major
    ORE classes."""
    def __init__(self, uri=None):
        # A missing URI means the subject is modelled as an anonymous node.
        OREResource.__init__(self, BNode() if uri is None else uri)
class ReMDocument(StringIO):
    # Serialisation of objects
    """A serialized Resource Map plus its metadata (URI, MIME type, rdflib
    format name). Construct from explicit data, a local filename, or -- when
    neither is given -- by dereferencing uri over HTTP."""
    uri = ""
    mimeType = ""
    data = ""
    format = "" # rdflib name for format
    def __init__(self, uri, data='', filename='', mimeType='', format ='', accept=''):
        self.uri = uri
        if data:
            self.data = data
        elif filename:
            # NOTE(review): a missing file silently leaves data empty -- confirm.
            if os.path.exists(filename):
                fh = file(filename)
                self.data = fh.read()
                fh.close()
        else:
            # try to fetch uri
            try:
                req = urllib2.Request(uri)
                if accept:
                    # add custom accept header
                    req.add_header('Accept', accept)
                else:
                    # otherwise add default
                    req.add_header('Accept', accept_header)
                fh = urllib2.urlopen(req)
                self.data = fh.read()
                self.info = fh.info()
                mimeType = self.info.dict.get('content-type', mimeType)
                # Keep the final URL after any redirects.
                self.uri = fh.geturl()
                fh.close()
            except:
                # NOTE(review): any network failure is masked by this message.
                raise OreException('ReMDocument must either have data or filename')
        if not format:
            # Infer the rdflib format name from the (normalized) MIME type.
            try:
                mt = conneg.parse(mimeType)
                if mt:
                    mimeType = mt[0].mimetype1 + '/' + mt[0].mimetype2
            except:
                pass
            mimeHash = {'application/atom+xml' : 'atom',
                        'application/xhtml+xml' : 'rdfa',
                        'application/rdf+xml' : 'xml',
                        'text/plain' : 'nt', # yes, really
                        'text/rdf+n3' : 'n3',
                        'application/x-turtle' : 'turtle',
                        'application/rdf+nt' : 'nt'}
            format = mimeHash.get(mimeType, '')
        self.mimeType = mimeType
        self.format = format
        StringIO.__init__(self, self.data)
# Shared ArbitraryResources describing the ore:ResourceMap and ore:Aggregation
# classes themselves; attached to every new ResourceMap/Aggregation instance.
rem_type = ArbitraryResource(namespaces['ore']['ResourceMap'])
rem_type.label = "ResourceMap"
rem_type.isDefinedBy = namespaces['ore']
aggr_type = ArbitraryResource(namespaces['ore']['Aggregation'])
aggr_type.label = "Aggregation"
aggr_type.isDefinedBy = namespaces['ore']
# Agent describing this library itself (used as a default dcterms:creator).
foresiteAgent = Agent(libraryUri)
foresiteAgent._foaf.name = libraryName
foresiteAgent._foaf.mbox = libraryEmail
| Python |
import doctest
import unittest
import glob
import os
# Doctest options shared by the suite: report only the first failure per file,
# ignore whitespace differences, and allow '...' ellipsis in expected output.
optionflags = (doctest.REPORT_ONLY_FIRST_FAILURE |
               doctest.NORMALIZE_WHITESPACE |
               doctest.ELLIPSIS)
def open_file(filename, mode='r'):
    """Helper function to open files from within the tests package."""
    package_dir = os.path.dirname(__file__)
    return open(os.path.join(package_dir, filename), mode)
def setUp(test):
    """Expose shared helpers inside the doctest's global namespace."""
    test.globs.update({'open_file': open_file})
def test_suite():
    """Build the package's test suite: the doctests embedded in README.txt."""
    return unittest.TestSuite([
        doctest.DocFileSuite(
            'README.txt',
            package='foresite',
            optionflags=optionflags,
            setUp=setUp
        )])
# Allow running this module directly as a test script.
if __name__ == "__main__":
    runner = unittest.TextTestRunner(verbosity=1)
    runner.run(test_suite())
| Python |
import re
from ore import *
from ore import foresiteAgent
from foresite import libraryName, libraryUri, libraryVersion
from utils import namespaces, OreException, unconnectedAction, pageSize
from utils import gen_uuid, build_html_atom_content
from rdflib import URIRef, BNode, Literal, plugin, syntax, RDF
from rdflib.util import uniq
from lxml import etree
from lxml.etree import Element, SubElement
# Register foresite's extra output formats with rdflib's plugin system so that
# Graph.serialize(format=...) can resolve them by name.
plugin.register('rdfa', syntax.serializers.Serializer, 'foresite.RDFaSerializer', 'RDFaSerializer')
plugin.register('json', syntax.serializers.Serializer, 'foresite.JsonSerializer', 'JsonSerializer')
plugin.register('pretty-json', syntax.serializers.Serializer, 'foresite.JsonSerializer', 'PrettyJsonSerializer')
class ORESerializer(object):
    # Take objects and produce data
    """Base serializer: flattens a ResourceMap and its Aggregation, resources,
    proxies, agents and attached triples into one rdflib graph for rendering."""
    mimeType = ""
    format = ""
    public = 1
    def __init__(self, format, public=1):
        # rdflib format name -> MIME type of the produced document.
        mimetypes = {'atom' : 'application/atom+xml',
                     'rdfa' : 'application/xhtml+xml',
                     'xml' : 'application/rdf+xml',
                     'nt' : 'text/plain',
                     'n3' : 'text/rdf+n3',
                     'turtle' : 'application/x-turtle',
                     'pretty-xml' : 'application/rdf+xml'
                     }
        # Suggested filename extension per format.
        self.extensions = {'atom': 'atom',
                           'rdfa' : 'xhtml',
                           'xml' : 'xml',
                           'nt' : 'nt',
                           'n3' : 'n3',
                           'turtle' : 'ttl',
                           'pretty-xml' : 'pretty.xml'
                           }
        self.format = format
        self.public = public
        self.mimeType = mimetypes.get(format, '')
    def merge_graphs(self, rem, page=-1):
        """Merge the graphs of rem and everything it reaches; page > 0 limits
        the aggregated resources to one pageSize-sized slice."""
        g = Graph()
        # Put in some sort of recognition of library?
        n = now()
        if not rem.created:
            rem._dcterms.created = n
        rem._dcterms.modified = n
        if not rem._dcterms.creator:
            rem.add_agent(foresiteAgent, 'creator')
        aggr = rem.aggregation
        stack = [rem, aggr]
        if page != -1:
            # first is 1, 2, 3 ...
            start = (page-1) * pageSize
            tosrlz = aggr._resources_[start:start+pageSize]
        else:
            tosrlz = aggr._resources_
        remove = []
        for (r, p) in tosrlz:
            if isinstance(r, Aggregation):
                # Nested aggregations contribute their description but their
                # own ore:aggregates statements are stripped afterwards.
                for a in r._ore.aggregates:
                    remove.append((r._uri_, namespaces['ore']['aggregates'], a))
            stack.extend([r, p])
        done = []
        # Walk the object tree, merging each node's graph exactly once and
        # queueing its attached triples and agents.
        while stack:
            what = stack.pop(0)
            if what == None or what in done:
                continue
            done.append(what)
            g += what._graph_
            for at in what._triples_.values():
                stack.append(at)
            for who in what._agents_.values():
                stack.append(who)
        if self.public:
            # Remove internal methods
            for p in internalPredicates:
                for (s,o) in g.subject_objects(p):
                    g.remove((s,p,o))
        for trip in remove:
            g.remove(trip)
        if not aggr._resources_:
            raise OreException("Aggregation must aggregate something")
        g = self.connected_graph(g, aggr._uri_)
        return g
    def connected_graph(self, graph, uri):
        """Return the subgraph reachable from uri (in either edge direction);
        behaviour on disconnected graphs follows the unconnectedAction setting."""
        if unconnectedAction == 'ignore':
            return graph
        g = Graph()
        all_nodes = list(graph.all_nodes())
        all_nodes = filter(lambda y: not isinstance(y, Literal), all_nodes)
        discovered = {}
        visiting = [uri]
        while visiting:
            x = visiting.pop()
            if not discovered.has_key(x):
                discovered[x] = 1
            # Copy edges in both directions, following them to unseen nodes.
            for (p, new_x) in graph.predicate_objects(subject=x):
                g.add((x,p,new_x))
                if (isinstance(new_x, URIRef) or isinstance(new_x, BNode)) and not discovered.has_key(new_x) and not new_x in visiting:
                    visiting.append(new_x)
            for (new_x, p) in graph.subject_predicates(object=x):
                g.add((new_x,p,x))
                if (isinstance(new_x, URIRef) or isinstance(new_x, BNode)) and not discovered.has_key(new_x) and not new_x in visiting:
                    visiting.append(new_x)
        if len(discovered) != len(all_nodes):
            if unconnectedAction == 'warn':
                print "Warning: Graph is unconnected, some nodes being dropped"
            elif unconnectedAction == 'raise':
                raise OreException('Graph to be serialized is unconnected')
            elif unconnectedAction != 'drop':
                raise ValueError('Unknown unconnectedAction setting: %s' % unconnectedAction)
        return g
class RdfLibSerializer(ORESerializer):
    """Serializer delegating to rdflib's built-in writers (xml, pretty-xml, nt, n3, ...)."""
    def serialize(self, rem, page=-1):
        """Render rem (optionally one page of its aggregation) as a ReMDocument."""
        merged = self.merge_graphs(rem, page)
        payload = merged.serialize(format=self.format)
        remUri = str(rem._uri_)
        return ReMDocument(remUri, payload, format=self.format, mimeType=self.mimeType)
class AtomSerializer(ORESerializer):
    def __init__(self, format="atom", public=1):
        # NOTE(review): public is accepted but not forwarded to ORESerializer -- confirm intent.
        ORESerializer.__init__(self, format)
        # Collapses runs of spaces between adjacent tags when tidying output.
        self.spacesub = re.compile('(?<=>)[ ]+(?=<)')
        # Triples already rendered as Atom elements; excluded from the RDF tail.
        self.done_triples = []
def generate_rdf(self, parent, sg):
# remove already done, then serialize to rdf/xml
for t in self.done_triples:
sg.remove(t)
data = sg.serialize(format='xml')
root = etree.fromstring(data)
for child in root:
parent.append(child)
    def make_agent(self, parent, agent):
        """Render an Agent as atom:name / atom:email / atom:uri children of
        parent, marking the corresponding foaf triples as already serialized."""
        n = SubElement(parent, '{%s}name' % namespaces['atom'])
        try:
            name = agent._foaf.name[0]
            n.text = str(name)
            self.done_triples.append((agent._uri_, namespaces['foaf']['name'], name))
        except:
            pass
        if agent._foaf.mbox:
            n = SubElement(parent, '{%s}email' % namespaces['atom'])
            mb = agent._foaf.mbox[0]
            self.done_triples.append((agent._uri_, namespaces['foaf']['mbox'], mb))
            mb = str(mb)
            # Atom wants a bare address, not a mailto: URI.
            if mb[:7] == "mailto:":
                mb = mb[7:]
            n.text = mb
        # There's currently nowhere for URI to go!
        #if not isinstance(agent._uri_, BNode):
        #    n = SubElement(parent, 'uri')
        #    n.text = str(agent._uri_)
        # Silly, but it's what the spec says...
        if agent._foaf.page:
            n = SubElement(parent, '{%s}uri' % namespaces['atom'])
            fp = agent._foaf.page[0]
            self.done_triples.append((agent._uri_, namespaces['foaf']['page'], fp))
            n.text = fp
def make_link(self, parent, rel, t, g):
iana = str(namespaces['iana'])
if rel.startswith(iana):
rel = rel[len(iana):]
e = SubElement(parent, '{%s}link' % namespaces['atom'], rel=rel, href=str(t))
fmts = list(g.objects(t, namespaces['dc']['format']))
if fmts:
f = fmts[0]
e.set('type', str(f))
self.done_triples.append((t, namespaces['dc']['format'], f))
langs = list(g.objects(t, namespaces['dc']['language']))
if langs:
l = langs[0]
e.set('hreflang', str(langs[0]))
self.done_triples.append((t, namespaces['dc']['language'], l))
exts = list(g.objects(t, namespaces['dc']['extent']))
if exts:
l = exts[0]
e.set('length', str(l))
self.done_triples.append((t, namespaces['dc']['extent'], l))
titls = list(g.objects(t, namespaces['dc']['title']))
if titls:
l = titls[0]
e.set('title', str(l))
self.done_triples.append((t, namespaces['dc']['title'], l))
def serialize(self, rem, page=-1):
aggr = rem._aggregation_
g = self.merge_graphs(rem)
# make nsmap better
nm = g.namespace_manager
nsmap = {'atom' : str(namespaces['atom'])}
poss = uniq(g.predicates()) + uniq(g.objects(None, RDF.type))
for pred in poss:
pf,ns,l = nm.compute_qname(pred)
nsmap[pf] = ns
root = Element("{%s}entry" % namespaces['atom'], nsmap=nsmap)
# entry/id == tag for entry == ReM dc:identifier
# if not exist, generate Yet Another uuid
e = SubElement(root, '{%s}id' % namespaces['atom'])
if rem._dc.identifier:
dcid = rem._dc.identifier[0]
e.text = str(dcid)
self.done_triples.append((rem._uri_, namespaces['dc']['identifier'], dcid))
else:
e.text = "urn:uuid:%s" % gen_uuid()
# entry/title == Aggr's dc:title
title = aggr._dc.title
tns = 'dc'
if not title:
title = aggr._dcterms.title
tns = 'dcterms'
if not title:
raise OreException("Atom Serialisation requires title on aggregation")
else:
e = SubElement(root, '{%s}title' % namespaces['atom'])
dctit = title[0]
e.text = str(dctit)
self.done_triples.append((aggr._uri_, namespaces[tns]['title'], dctit))
# entry/author == Aggr's dcterms:creator
for who in aggr._dcterms.creator:
e = SubElement(root, '{%s}author' % namespaces['atom'])
agent = aggr._agents_[who]
self.make_agent(e, agent)
self.done_triples.append((aggr._uri_, namespaces['dcterms']['creator'], agent._uri_))
# entry/contributor == Aggr's dcterms:contributor
for bn in aggr._dcterms.contributor:
e = SubElement(root, '{%s}contributor' % namespaces['atom'])
agent = aggr._agents_[who]
self.make_agent(e, agent)
self.done_triples.append((aggr._uri_, namespaces['dcterms']['contributor'], agent._uri_))
# entry/category[@scheme="(magic)"][@term="(datetime)"]
for t in aggr._dcterms.created:
t = t.strip()
e = SubElement(root, '{%s}category' % namespaces['atom'], term=str(t),
scheme="http://www.openarchives.org/ore/terms/datetime/created")
for t in aggr._dcterms.modified:
t = t.strip()
e = SubElement(root, '{%s}category' % namespaces['atom'], term=str(t),
scheme="http://www.openarchives.org/ore/terms/datetime/modified")
# entry/category == Aggr's rdf:type
for t in aggr._rdf.type:
e = SubElement(root, '{%s}category' % namespaces['atom'], term=str(t))
try:
scheme = list(g.objects(t, namespaces['rdfs']['isDefinedBy']))[0]
e.set('scheme', str(scheme))
self.done_triples.append((t, namespaces['rdfs']['isDefinedBy'], scheme))
except:
pass
try:
label = list(g.objects(t, namespaces['rdfs']['label']))[0]
e.set('label', str(label))
self.done_triples.append((t, namespaces['rdfs']['label'], label))
except:
pass
self.done_triples.append((aggr._uri_, namespaces['rdf']['type'], t))
# entry/summary
if aggr._dc.description:
e = SubElement(root, '{%s}summary' % namespaces['atom'])
desc = aggr._dc.description[0]
e.text = str(desc)
self.done_triples.append((aggr._uri_, namespaces['dc']['description'], desc))
# All aggr links:
done = [namespaces['rdf']['type'],
namespaces['ore']['aggregates'],
namespaces['dcterms']['creator'],
namespaces['dcterms']['contributor'],
namespaces['dc']['title'],
namespaces['dc']['description']
]
for (p, o) in g.predicate_objects(aggr.uri):
if not p in done:
if isinstance(o, URIRef):
self.make_link(root, p, o, g)
self.done_triples.append((aggr._uri_, p, o))
# entry/content // link[@rel="alternate"]
# Do we have a splash page?
altDone = 0
atypes = aggr._rdf._type
possAlts = []
for (r, p) in aggr.resources:
mytypes = r._rdf.type
if namespaces['eurepo']['humanStartPage'] in mytypes:
altDone = 1
self.make_link(root, 'alternate', r.uri, g)
break
# check if share non Aggregation type
# eg aggr == article and aggres == article, likely
# to be good alternate
for m in mytypes:
if m != namespaces['ore']['Aggregation'] and \
m in atypes:
possAlt.append(r.uri)
if not altDone and possAlts:
# XXX more intelligent algorithm here
self.make_link(root, '{%s}alternate' % namespaces['atom'], possAlts[0], g)
altDone = 1
if not altDone and build_html_atom_content:
e = SubElement(root, '{%s}content' % namespaces['atom'])
e.set('type', 'html')
# make some representative html
# this can get VERY LONG so default to not doing this
html = ['<ul>']
for (r, p) in aggr.resources:
html.append('<li><a href="%s">%s</a></li>' % (r.uri, r.title[0]))
html.append('</ul>')
e.text = '\n'.join(html)
else:
e = SubElement(root, '{%s}content' % namespaces['atom'])
e.set('type', 'html')
e.text = "No Content"
# entry/link[@rel='self'] == URI-R
self.make_link(root, 'self', rem._uri_, g)
# entry/link[@rel='ore:describes'] == URI-A
self.make_link(root, namespaces['ore']['describes'], aggr._uri_, g)
### These are generated automatically in merge_graphs
# entry/published == ReM's dcterms:created
if rem._dcterms.created:
e = SubElement(root, '{%s}published' % namespaces['atom'])
c = rem._dcterms.created[0]
md = str(c)
if md.find('Z') == -1:
# append Z
md += "Z"
e.text = md
self.done_triples.append((rem._uri_, namespaces['dcterms']['created'], c))
# entry/updated == ReM's dcterms:modified
e = SubElement(root, '{%s}updated' % namespaces['atom'])
if rem._dcterms.modified:
c = rem._dcterms.modified[0]
md = str(c)
if md.find('Z') == -1:
# append Z
md += "Z"
e.text = str(md)
self.done_triples.append((rem._uri_, namespaces['dcterms']['modified'], c))
else:
e.text = now()
# entry/rights == ReM's dc:rights
if rem._dc.rights:
e = SubElement(root, '{%s}rights' % namespaces['atom'])
r = rem._dc.rights[0]
e.text = str(r)
self.done_triples.append((rem._uri_, namespaces['dc']['rights'], r))
# entry/source/author == ReM's dcterms:creator
if rem._dcterms.creator:
# Should at least be our generator! (right?)
src = SubElement(root, '{%s}source' % namespaces['atom'])
for who in rem._dcterms.creator:
e = SubElement(src, '{%s}author' % namespaces['atom'])
agent = rem._agents_[who]
self.make_agent(e, agent)
self.done_triples.append((rem._uri_, namespaces['dcterms']['creator'], agent._uri_))
for who in rem._dcterms.contributor:
e = SubElement(src, '{%s}contributor' % namespaces['atom'])
agent = rem._agents_[who]
self.make_agent(e, agent)
self.done_triples.append((rem._uri_, namespaces['dcterms']['contributor'], agent._uri_))
e = SubElement(src, '{%s}generator' % namespaces['atom'], uri=str(libraryUri), version=str(libraryVersion))
e.text = str(libraryName)
# Remove aggregation, resource map props already done
# All of agg res needs to be done
for (r, p) in aggr.resources:
self.make_link(root, namespaces['ore']['aggregates'], r.uri, g)
self.done_triples.append((aggr._uri_, namespaces['ore']['aggregates'], r._uri_))
# Now create ore:triples
# and populate with rdf/xml
trips = SubElement(root, '{%s}triples' % namespaces['ore'])
self.generate_rdf(trips, g)
data = etree.tostring(root, pretty_print=True)
#data = data.replace('\n', '')
#data = self.spacesub.sub('', data)
uri = str(rem._uri_)
self.done_triples = []
return ReMDocument(uri, data, format='atom', mimeType=self.mimeType)
class OldAtomSerializer(ORESerializer):
    """Serializer for the obsolete Atom *feed* profile of ORE (0.9).

    The aggregation becomes the atom:feed, each aggregated resource an
    atom:entry, and triples not expressible as Atom constructs are emitted
    as embedded RDF/XML payloads via generate_rdf().  ``self.done_triples``
    tracks what has already been serialized so it is not repeated.
    """
    def __init__(self, format="atom0.9", public=1):
        ORESerializer.__init__(self, format)
        # collapses inter-element whitespace in the final document
        self.spacesub = re.compile('(?<=>)[ ]+(?=<)')
        self.done_triples = []
    def remove_link_attrs(self, sg, a):
        """Strip from ``sg`` the dc properties of ``a`` that were already
        rendered as link attributes (type/hreflang/length/title)."""
        # only remove first from each list
        for ns in (namespaces['dc']['format'], namespaces['dc']['title'], namespaces['dc']['language'], namespaces['dc']['extent']):
            objs = list(sg.objects(a, ns))
            if objs:
                sg.remove((a, ns, objs[0]))
    def generate_rdf(self, parent, what):
        """Append the RDF/XML rendering of ``what``'s leftover triples
        (those not already expressed as Atom markup) under ``parent``."""
        # extract not processed parts of graph
        # serialise with rdflib
        # parse with lxml and add to parent element
        sg = Graph()
        sg += what.graph
        for at in what.triples.values():
            sg += at.graph
        for a in what.agents.values():
            sg += a.graph
        # rdf:type plus its scheme/label were rendered as atom:category
        for a in what.type:
            for b in sg.objects(a, namespaces['rdfs']['isDefinedBy']):
                sg.remove((a, namespaces['rdfs']['isDefinedBy'], b))
            for b in sg.objects(a, namespaces['rdfs']['label']):
                sg.remove((a, namespaces['rdfs']['label'], b))
            sg.remove((what.uri, namespaces['rdf']['type'], a))
        for t in self.done_triples:
            sg.remove(t)
        if isinstance(what, Aggregation) or isinstance(what, AggregatedResource):
            # remove atom srlzd bits
            self.remove_link_attrs(sg, what.uri)
            try:
                sg.remove((what.uri, namespaces['dc']['description'], what.description[0]))
            except:
                pass
            for a in what.creator:
                sg.remove((what.uri, namespaces['dcterms']['creator'], a))
            for a in what.contributor:
                sg.remove((what.uri, namespaces['dcterms']['contributor'], a))
            for a in what._ore.similarTo:
                self.remove_link_attrs(sg, a)
                sg.remove((what.uri, namespaces['ore']['similarTo'], a))
            for a in what._ore.aggregates:
                sg.remove((what.uri, namespaces['ore']['aggregates'], a))
            try:
                # aggregation uses dcterms rights, as it's a URI
                for a in what._dcterms.rights:
                    self.remove_link_attrs(sg, a)
                    sg.remove((what.uri, namespaces['dcterms']['rights'], a))
            except:
                pass
            try:
                sg.remove((what.uri, namespaces['foaf']['logo'], what._foaf.logo))
            except:
                pass
            if isinstance(what, Aggregation):
                for a in sg.objects(what.uri, namespaces['ore']['isDescribedBy']):
                    self.remove_link_attrs(sg, a)
                    sg.remove((what.uri, namespaces['ore']['isDescribedBy'], a))
                # remember what went out at feed level so entries skip it
                self.done_triples.extend(list(sg))
            else:
                # remove isAggregatedBy == rel=related
                for a in what._ore.isAggregatedBy:
                    sg.remove((what.uri, namespaces['ore']['isAggregatedBy'], a))
                self.done_triples = []
                # and add in proxy info
                proxy = what._currProxy_
                if proxy:
                    sg += proxy.graph
                    for a in proxy._agents_.values():
                        sg += a.graph
                    # remove proxyFor, proxyIn
                    for a in proxy._ore.proxyFor:
                        sg.remove((proxy.uri, namespaces['ore']['proxyFor'], a))
                    for a in proxy._ore.proxyIn:
                        sg.remove((proxy.uri, namespaces['ore']['proxyIn'], a))
                    for a in proxy.type:
                        for b in sg.objects(a, namespaces['rdfs']['isDefinedBy']):
                            sg.remove((a, namespaces['rdfs']['isDefinedBy'], b))
                        for b in sg.objects(a, namespaces['rdfs']['label']):
                            sg.remove((a, namespaces['rdfs']['label'], b))
                        sg.remove((proxy.uri, namespaces['rdf']['type'], a))
        elif isinstance(what, ResourceMap):
            self.remove_link_attrs(sg, what.uri)
            for a in what.describes:
                sg.remove((what.uri, namespaces['ore']['describes'], a))
            for a in what.creator:
                sg.remove((what.uri, namespaces['dcterms']['creator'], a))
            try:
                # ReM uses dc rights, as it's a string
                sg.remove((what.uri, namespaces['dc']['rights'], what._dc.rights[0]))
            except:
                pass
            try:
                sg.remove((what.uri, namespaces['dcterms']['modified'], what._dcterms.modified[0]))
            except:
                pass
            try:
                sg.remove((what.uri, namespaces['foaf']['logo'], what._foaf.logo[0]))
            except:
                pass
            try:
                sg.remove((what.uri, namespaces['ore']['describes'], what._ore.describes[0]))
            except:
                pass
            self.done_triples = []
        data = sg.serialize(format='xml')
        root = etree.fromstring(data)
        for child in root:
            parent.append(child)
    def make_agent(self, parent, agent):
        """Render a foaf Agent as name/email/uri children of ``parent``."""
        n = SubElement(parent, 'name')
        try:
            name = agent._foaf.name[0]
            n.text = str(name)
            self.done_triples.append((agent._uri_, namespaces['foaf']['name'], name))
        except:
            # allow blank names where unknown
            pass
        if agent._foaf.mbox:
            n = SubElement(parent, 'email')
            mb = agent._foaf.mbox[0]
            self.done_triples.append((agent._uri_, namespaces['foaf']['mbox'], mb))
            mb = str(mb)
            # Strip mailto: (eg not a URI any more)
            if mb[:7] == "mailto:":
                mb = mb[7:]
            n.text = mb
        if not isinstance(agent._uri_, BNode):
            n = SubElement(parent, 'uri')
            n.text = str(agent._uri_)
    def make_link(self, parent, rel, t, g):
        """Emit a <link rel=... href=t>, decorating it with the target's
        dc format/language/extent/title when present in ``g``."""
        e = SubElement(parent, 'link', rel=rel, href=str(t))
        # look for format, language, extent of t
        fmts = list(g.objects(t, namespaces['dc']['format']))
        if fmts:
            e.set('type', str(fmts[0]))
        langs = list(g.objects(t, namespaces['dc']['language']))
        if langs:
            e.set('hreflang', str(langs[0]))
        exts = list(g.objects(t, namespaces['dc']['extent']))
        if exts:
            e.set('length', str(exts[0]))
        titls = list(g.objects(t, namespaces['dc']['title']))
        if titls:
            e.set('title', str(titls[0]))
    def serialize(self, rem, page=-1):
        """Serialize resource map ``rem`` as an atom:feed ReMDocument,
        one entry per aggregated resource."""
        aggr = rem._aggregation_
        # Check entire graph is connected
        g = self.merge_graphs(rem)
        # NOTE(review): mutates the module-level namespaces dict in place
        if namespaces.has_key(''):
            del namespaces[u'']
        root = Element("feed", nsmap=namespaces)
        #namespaces[''] = myNamespace
        ## Aggregation Info
        e = SubElement(root, 'id')
        e.text = str(aggr.uri)
        if not aggr._dc.title:
            raise OreException("Atom Serialisation requires title on aggregation")
        else:
            e = SubElement(root, 'title')
            e.text = str(aggr._dc.title[0])
        if aggr._dc.description:
            e = SubElement(root, 'subtitle')
            e.text = str(aggr._dc.description[0])
        for who in aggr._dcterms.creator:
            e = SubElement(root, 'author')
            agent = aggr._agents_[who]
            self.make_agent(e, agent)
        for bn in aggr._dcterms.contributor:
            e = SubElement(root, 'contributor')
            agent = aggr._agents_[bn]
            self.make_agent(e, agent)
        for t in aggr._ore.similarTo:
            self.make_link(root, 'related', t, g)
        for t in aggr._dcterms.rights:
            self.make_link(root, 'license', t, g)
        # rdf:type -> atom:category with optional scheme/label
        for t in aggr._rdf.type:
            e = SubElement(root, 'category', term=str(t))
            try:
                scheme = list(g.objects(t, namespaces['rdfs']['isDefinedBy']))[0]
                e.set('scheme', str(scheme))
            except:
                pass
            try:
                label = list(g.objects(t, namespaces['rdfs']['label']))[0]
                e.set('label', str(label))
            except:
                pass
        # other resource maps describing this aggregation -> rel=alternate
        orms = []
        for orm in aggr._resourceMaps_:
            if orm != rem:
                self.make_link(root, 'alternate', orm.uri, g)
                orms.append(orm.uri)
        for t in aggr._ore.isDescribedBy:
            # check not in orms
            if not t in orms:
                self.make_link(root, 'alternate', t, g)
        self.generate_rdf(root, aggr)
        ## ReM Info
        self.make_link(root, 'self', rem.uri, g)
        e = SubElement(root, 'updated')
        e.text = now()
        # ReM Author
        if rem._dcterms.creator:
            uri = rem._dcterms.creator[0]
            e = SubElement(root, 'generator', uri=str(uri))
            agent = rem._agents_[uri]
            n = agent._foaf.name[0]
            e.text = str(n)
            self.done_triples.append((uri, namespaces['foaf']['name'], n))
        # if no logo, put in nice ORE icon
        e = SubElement(root, 'icon')
        if aggr._foaf.logo:
            e.text = str(aggr._foaf.logo[0])
        elif rem._foaf.logo:
            e.text = str(rem._foaf.logo[0])
        else:
            e.text = "http://www.openarchives.org/ore/logos/ore_icon.png"
        if rem._dc.rights:
            e = SubElement(root, 'rights')
            e.text = rem._dc.rights[0]
        self.generate_rdf(root, rem)
        ## Process Entries
        for (res, proxy) in aggr._resources_:
            entry = SubElement(root, 'entry')
            e = SubElement(entry, 'id')
            if proxy:
                e.text = str(proxy.uri)
            else:
                e.text = "urn:uuid:%s" % gen_uuid()
            e = SubElement(entry, 'link', rel="alternate", href=str(res.uri))
            # type = dc:format
            fmt = list(g.objects(res.uri, namespaces['dc']['format']))
            if fmt:
                e.set('type', str(fmt[0]))
            if not res._dc.title:
                raise ValueError("All entries must have a title for ATOM serialisation")
            else:
                e = SubElement(entry, 'title')
                e.text = str(res._dc.title[0])
            for t in res._rdf.type:
                e = SubElement(entry, 'category', term=str(t))
                try:
                    scheme = list(g.objects(t, namespaces['rdfs']['isDefinedBy']))[0]
                    e.set('scheme', str(scheme))
                except:
                    pass
                try:
                    label = list(g.objects(t, namespaces['rdfs']['label']))[0]
                    e.set('label', str(label))
                except:
                    pass
            for a in res._dcterms.creator:
                e = SubElement(entry, 'author')
                agent = res._agents_[a]
                self.make_agent(e, agent)
            for a in res._dcterms.contributor:
                e = SubElement(entry, 'contributor')
                agent = res._agents_[a]
                self.make_agent(e, agent)
            if res._dcterms.abstract:
                e = SubElement(entry, 'summary')
                e.text = str(res._dcterms.abstract[0])
            # Not sure about this at object level?
            for oa in res._ore.isAggregatedBy:
                if oa != aggr._uri_:
                    e = SubElement(entry, 'link', rel="related", href=str(oa))
            e = SubElement(entry, 'updated')
            e.text = now()
            if proxy and proxy._ore.lineage:
                e = SubElement(entry, 'link', rel="via", href=str(proxy._ore.lineage[0]))
            # stash the proxy so generate_rdf can fold its triples in
            res._currProxy_ = proxy
            self.generate_rdf(entry, res)
            res._currProxy_ = None
        data = etree.tostring(root)
        data = data.replace('\n', '')
        data = self.spacesub.sub('', data)
        uri = str(rem._uri_)
        self.done_triples = []
        return ReMDocument(uri, data)
| Python |
try:
import json
except ImportError:
import simplejson as json
from rdflib.syntax.parsers import Parser
from rdflib import URIRef, BNode, Literal
class JsonParser(Parser):
    """Parse the RDF/JSON produced by JsonSerializer/PrettyJsonSerializer.

    The format maps subject -> {predicate: [{'type','value',...}, ...]}.
    The 'pretty' variant abbreviates predicates as prefix$localName (or
    prefix:localName), with the prefixes declared under xmlns$/xmlns: keys.
    Triples are added to ``sink``; returns None.
    """
    def __init__(self):
        pass
    def parse(self, source, sink, **args):
        data = source.getByteStream().read()
        objs = json.loads(data)
        # First pass: collect namespace bindings; their presence marks
        # the pretty (qname-abbreviated) variant.
        keys = objs.keys()
        pretty = 0
        bindings = {}
        for k in keys:
            if k.startswith('xmlns$') or k.startswith('xmlns:'):
                pretty = 1
                bindings[k[6:]] = objs[k]
        for k in keys:
            if not k.startswith('xmlns$') and not k.startswith('xmlns:'):
                if k[0] == "_" and k[1] in [':', '$']:
                    # bnode
                    s = BNode(k[2:])
                else:
                    # uri
                    s = URIRef(k)
                # predicates
                preds = objs[k]
                for (p, v) in preds.items():
                    if pretty:
                        dpidx = p.find('$')
                        if dpidx == -1:
                            dpidx = p.find(':')
                        if dpidx > -1:
                            pfx = p[:dpidx]
                            dmn = bindings.get(pfx, '')
                            if dmn:
                                # BUGFIX: was URIRef(dmn + p), which left the
                                # 'pfx$' text inside the URI; only the local
                                # name goes after the namespace
                                pred = URIRef(dmn + p[dpidx+1:])
                            else:
                                raise ValueError("Unassigned Prefix: %s" % pfx)
                        else:
                            pred = URIRef(p)
                    else:
                        pred = URIRef(p)
                    for vh in v:
                        value = vh['value']
                        vt = vh['type']
                        if vt == 'literal':
                            # renamed from 'args' to stop shadowing **args
                            litargs = {}
                            lang = vh.get('lang', '')
                            if lang:
                                litargs['lang'] = lang
                            datatype = vh.get('datatype', '')
                            if datatype:
                                litargs['datatype'] = datatype
                            val = Literal(value, **litargs)
                        elif vt == 'uri':
                            val = URIRef(value)
                        elif vt == 'bnode':
                            # BUGFIX: was BNode(val[2:]) -- read the stale
                            # val from a previous iteration (or NameError on
                            # the first); value is '_:name'
                            val = BNode(value[2:])
                        sink.add((s, pred, val))
        # returns None
| Python |
def skipws(next):
    """Decorator: wrap tokenizer method ``next`` so it transparently skips
    one whitespace token.

    MiniLex coalesces a run of whitespace into a single token, so skipping
    once is sufficient.  (The original carried a dead `skip = 1` toggle and
    an unreachable pass-through branch; both removed.)
    """
    def foo(*args):
        tok = next(*args)
        if tok.isspace():
            tok = next(*args)
        return tok
    return foo
class ParseError(Exception):
    """Raised when an Accept-style header cannot be parsed."""
    pass
class MiniLex(object):
    """Minimal character-class tokenizer for Accept-style headers.

    Tokens are: runs of ordinary characters, runs of whitespace (skipped by
    the @skipws wrapper on next()), single separator characters, and whole
    quoted strings (quotes included, backslash-escape aware).  Iteration
    stops at end of data or at an eof character.

    State codes: 0 = neutral, 1 = in whitespace, 2 = in quoted text,
    3 = in an ordinary token.
    """
    def __init__(self, data,
                 whitespace= " \t",
                 sep="[](){}<>\\/@:;,?=",
                 quotes="\"",
                 eof="\n\r"):
        self.data = data
        self.whitespace=whitespace
        self.separators=sep
        self.quotes=quotes
        self.eof=eof
        self.state = 0
        # characters accumulated for the token in progress
        self.token = []
        # the quote character we are inside, or '' / 0 when not quoted
        self.quoted = ''
        self.pos = 0
    def __iter__(self):
        return self
    @skipws
    def next(self):
        """Return the next token; raises StopIteration when exhausted."""
        while True:
            if self.pos == len(self.data):
                # end of input: flush any pending token, then stop
                if self.token:
                    tok= ''.join(self.token)
                    self.token = []
                    return tok
                else:
                    raise StopIteration
            char = self.data[self.pos]
            tok = ''
            if self.quoted and not char in self.quotes:
                # inside a quoted string: accumulate verbatim
                self.token.append(char)
                self.pos +=1
            elif char in self.quotes:
                if char == self.quoted:
                    # we're in quoted text
                    if self.data[self.pos-1] == "\\":
                        # escaped closing quote: keep it, stay quoted
                        self.token.append(char)
                        self.pos += 1
                    else:
                        # closing quote ends the token (quote included)
                        self.token.append(char)
                        tok = ''.join(self.token)
                        self.token = []
                        self.pos += 1
                        self.quoted=0
                        self.state=0
                        return tok
                elif self.quoted:
                    # other quotes
                    self.token.append(char)
                    self.pos += 1
                else:
                    # begin quoted text
                    if self.token:
                        tok = ''.join(self.token)
                    self.quoted=char
                    self.token = [char]
                    self.pos += 1
                    self.state = 2
                    if tok:
                        return tok
            elif char in self.whitespace:
                if self.state == 1:
                    self.token.append(char)
                else:
                    # whitespace starts: emit any pending token first
                    if self.token:
                        tok = ''.join(self.token)
                    self.state = 1
                    self.token = [char]
                self.pos += 1
                if tok:
                    return tok
            elif char in self.separators:
                # can't join seps (currently)
                if self.token:
                    # emit pending token now; the separator is re-read on
                    # the next call because pos does not advance here
                    tok = ''.join(self.token)
                else:
                    tok = char
                    self.pos += 1
                self.token = []
                self.state = 0
                return tok
            elif char in self.eof:
                if self.token:
                    return ''.join(self.token)
                else:
                    raise StopIteration
            else:
                if self.state == 3:
                    self.token.append(char)
                else:
                    # ordinary token starts: emit any pending token first
                    if self.token:
                        tok = ''.join(self.token)
                    self.token = [char]
                    self.state=3
                self.pos += 1
                if tok:
                    return tok
class MimeType(object):
    """One media-range from an Accept header: type/subtype, parameters,
    and the client preference weight (qval, default 1.0)."""

    def __init__(self):
        self.mimetype1 = ""
        self.mimetype2 = ""
        self.params = {}
        self.qval = 1.0

    def __str__(self):
        # The qval is deliberately not serialized back out.
        base = self.mimetype1 + "/" + self.mimetype2
        if self.params.items():
            suffix = ";".join(["%s=%s" % pair for pair in self.params.items()])
            return base + ";" + suffix
        return base

    def __repr__(self):
        return "<MimeType: %s>" % self

    def sort2(self):
        """Secondary sort key: specificity.  */* < type/* < type/subtype,
        with each parameter adding one more point."""
        if self.mimetype1 == "*":
            return 0
        if self.mimetype2 == "*":
            return 1
        # 2 for a fully specified type, plus one per parameter
        return 2 + len(self.params)

    def matches(self, other):
        """True when the two ranges are compatible: '*' matches anything
        at either level, but parameters must be identical."""
        type_ok = (other.mimetype1 == self.mimetype1
                   or other.mimetype1 == '*' or self.mimetype1 == '*')
        if type_ok:
            sub_ok = (other.mimetype2 == self.mimetype2
                      or other.mimetype2 == '*' or self.mimetype2 == '*')
            if sub_ok and other.params == self.params:
                return True
        return False
class Parser(object):
    """Parse a token stream (from MiniLex) into a list of MimeTypes."""
    def __init__(self, ml):
        # ml: a MiniLex (anything with a .next() that raises StopIteration)
        self.ml = ml
    def process(self):
        """Return all media-ranges in the header, in order of appearance."""
        mts = []
        mt = self.top()
        while mt:
            if mt.mimetype1 == "*" and mt.mimetype2 == "*" and mt.qval == 1.0:
                # downgrade anything to the lowest, otherwise behaviour is
                # non deterministic. See apache conneg rules.
                mt.qval = 0.001
            mts.append(mt)
            mt = self.top()
        return mts
    def top(self):
        """Parse one 'type/subtype[;key=value...]' media-range.

        Returns None at end of input; raises ParseError on malformed input.
        """
        mt = MimeType()
        try:
            tok = self.ml.next() # text
        except StopIteration:
            return None
        mt.mimetype1 = tok
        sl = self.ml.next() # /
        if sl != "/":
            raise ParseError("Expected /, got: " + sl)
        tok2 = self.ml.next() # html
        mt.mimetype2 = tok2
        while True:
            try:
                tok = self.ml.next()
            except StopIteration:
                return mt
            if tok == ',':
                return mt
            elif tok == ';':
                (key, val) = self.param()
                if key == "q":
                    # q is the preference weight, not a media parameter
                    mt.qval = float(val)
                else:
                    mt.params[key] = val
            else:
                raise ParseError("Expected , or ; got: %r" % tok)
    def param(self):
        """Parse one 'key=value' parameter pair."""
        key = self.ml.next()
        eq = self.ml.next()
        if eq != "=":
            # BUGFIX: was ("Expected =, got: " + sl) -- `sl` is undefined
            # here, so malformed input raised NameError, not ParseError
            raise ParseError("Expected =, got: " + eq)
        val = self.ml.next()
        return (key, val)
def best(client, server):
    """Return the client's most-preferred MimeType that the server can
    satisfy, or None.

    ``client`` must already be sorted best-first by qval; ``server`` order
    is irrelevant.  Per the matching rules, a client range carrying
    parameters only matches a server type with identical parameters, so
    even */*;params constrains on those params.
    """
    for wanted in client:
        # first client entry with any server match wins
        if any(available.matches(wanted) for available in server):
            return wanted
    return None
def parse(data):
    """Tokenize and parse an Accept-style header, returning its MimeTypes
    ordered best-first (primary key qval, secondary key specificity)."""
    ranges = Parser(MiniLex(data)).process()
    # Two stable sorts: apply the secondary key first, the primary key last.
    ranges.sort(key=lambda mt: mt.sort2(), reverse=True)
    ranges.sort(key=lambda mt: mt.qval, reverse=True)
    return ranges
if __name__ == '__main__':
    # Self-test: negotiate the classic RFC 2616 sample Accept header
    # against a server offering and print the chosen type.
    ml = MiniLex("text/*;q=0.3, text/html;q=0.7, text/html;level=1, text/html;level=2;q=0.4, */*;q=0.2")
    p = Parser(ml)
    mts = p.process()
    mts.sort(key=lambda x: x.sort2(), reverse=True)
    mts.sort(key=lambda x: x.qval, reverse=True)
    ml2 = MiniLex("text/xhtml+xml, text/xml, application/atom+xml, text/html;level=2")
    p2 = Parser(ml2)
    mts2 = p2.process()
    b = best(mts, mts2)
    # parenthesized so this line is valid under both Python 2 and 3
    # (was the py2-only statement form `print b`)
    print(b)
| Python |
import urllib
import time
import re
from rdflib import Namespace
### Configuration Options
### Assign a UUID URI or Blank Node for autogenerating agent URIs
### if not present in data
assignAgentUri = False
#assignAgentUri = True
### Use UUID or oreproxy.org for autogenerating proxy URIs if
### not present in data (see proxyTypeHash below for valid keys)
proxyType = 'proxy'
# proxyType = 'UUID'
### What to do when encountering an unconnected graph:
unconnectedAction = 'ignore' # produce unconnected graph
#unconnectedAction = 'drop' # drop any unconnected triples silently
#unconnectedAction = 'warn' # print a warning
#unconnectedAction = 'raise' # raise an Exception
# Number of resources per page to serialise
pageSize = 10
# XSLT server to create alternate representation from Atom Entry
atomXsltUri = ""
# atomXsltUri = "http://www.oreproxy.org/alt?what=%s"
# Whether to embed a generated HTML resource listing in atom:content
# (can get very long, so off by default)
build_html_atom_content = False
# Default Accept header used when dereferencing remote resource maps
accept_header = 'application/rdf+xml;q=1.0, application/atom+xml;q=0.9, text/rdf+n3;q=0.8'
# Matches URIs whose scheme is treated as protocol-resolvable
protocolUriRe = re.compile("^([s]?http[s]?://|[t]?ftp:/|z39.50r:|gopher:|imap://|news:|nfs:|nntp:|rtsp:)")
def gen_proxy_uuid(res, aggr):
    """Mint a urn:uuid: proxy URI.  Both arguments are unused but keep
    the common gen_proxy_* signature for proxyTypeHash dispatch."""
    return "urn:uuid:%s" % gen_uuid()
def gen_proxy_oreproxy(res, aggr):
    """Mint a proxy URI via the oreproxy.org resolver, embedding the
    URL-escaped resource (what) and aggregation (where) URIs."""
    where = urllib.quote(str(aggr.uri))
    what = urllib.quote(str(res.uri))
    return "http://oreproxy.org/r?what=%s&where=%s" % (what, where)
# Hash must come after function definitions
# Define your own function, set proxyType, and add to hash
# Maps the proxyType config setting to a generator function taking
# (res, aggr) -- dispatched by gen_proxy_uri()
proxyTypeHash = {'UUID' : gen_proxy_uuid,
                 'proxy' : gen_proxy_oreproxy
                 }
### Namespace Definitions
### If you need a new namespace you MUST add it into this hash
namespaces = {'ore' : Namespace('http://www.openarchives.org/ore/terms/'),
              'orex' : Namespace('http://foresite.cheshire3.org/orex/terms/'),
              'dc' : Namespace('http://purl.org/dc/elements/1.1/'),
              'mesur' : Namespace('http://www.mesur.org/schemas/2007-01/mesur#'),
              'dcterms' : Namespace('http://purl.org/dc/terms/'),
              'swap' : Namespace('http://purl.org/eprint/type/'),
              'rdf' : Namespace('http://www.w3.org/1999/02/22-rdf-syntax-ns#'),
              'foaf' : Namespace('http://xmlns.com/foaf/0.1/'),
              # BUGFIX: the RDF Schema namespace is .../2000/01/... -- the
              # previous value used 2001/01, which is not the spec URI
              'rdfs' : Namespace('http://www.w3.org/2000/01/rdf-schema#'),
              'dcmitype' : Namespace('http://purl.org/dc/dcmitype/'),
              'atom' : Namespace('http://www.w3.org/2005/Atom'),
              'owl' : Namespace('http://www.w3.org/2002/07/owl#'),
              # BUGFIX: XML Schema namespace needs the trailing '#' so that
              # e.g. namespaces['xsd']['int'] resolves to ...XMLSchema#int
              'xsd' : Namespace('http://www.w3.org/2001/XMLSchema#'),
              'xhtml' : Namespace('http://www.w3.org/1999/xhtml'),
              'grddl' : Namespace('http://www.w3.org/2003/g/data-view#'),
              'swetodblp' : Namespace('http://lsdis.cs.uga.edu/projects/semdis/opus#'),
              'skos' : Namespace('http://www.w3.org/2004/02/skos/core#'),
              'eurepo' : Namespace('info:eu-repo/semantics/'),
              'at' : Namespace('http://purl.org/syndication/atomtriples/1/'),
              'iana' : Namespace('http://www.iana.org/assignments/relation/'),
              'bibo' : Namespace('http://purl.org/ontology/bibo/'),
              'prism' : Namespace('http://prismstandard.org/namespaces/1.2/basic/'),
              'vcard' : Namespace('http://nwalsh.com/rdf/vCard#'),
              'zotero' : Namespace('http://www.zotero.org/namespaces/exprt#')
              }
### Elements commonly used in ORE
### If an element is in this list, you can do object.predicate,
### rather than object._namespace.predicate
# (Not complete for most namespaces, just common terms)
elements = {
    'ore' : ['describes', 'isDescribedBy', 'aggregates', 'isAggregatedBy', 'similarTo', 'proxyFor', 'proxyIn', 'lineage'],
    'orex' : ['isAuthoritativeFor', 'AnonymousAgent', 'page', 'follows', 'firstPage', 'lastPage'],
    'dc' : ['coverage', 'date', 'description', 'format', 'identifier', 'language', 'publisher', 'relation', 'rights', 'source', 'subject', 'title'], # no creator, contributor
    'dcterms': ['abstract', 'accessRights', 'accrualMethod', 'accrualPeriodicity', 'accrualPolicy', 'alternative', 'audience', 'available', 'bibliographicCitation', 'conformsTo', 'contributor', 'created', 'creator', 'dateAccepted', 'dateCopyrighted', 'dateSubmitted', 'educationLevel', 'extent', 'hasFormat', 'hasPart', 'hasVersion', 'instructionalMethod', 'isFormatOf', 'isPartOf', 'isReferencedBy', 'isReplacedBy', 'isRequiredBy', 'issued', 'isVersionOf', 'license', 'mediator', 'medium', 'modified', 'provenance', 'references', 'replaces', 'requires', 'rights', 'rightsHolder', 'spatial', 'tableOfContents', 'temporal', 'valid'], # also rights
    'foaf' : ['accountName', 'aimChatID', 'birthday', 'depiction', 'depicts', 'family_name', 'firstName', 'gender', 'givenname', 'homepage', 'icqChatID', 'img', 'interest', 'jabberID', 'knows', 'logo', 'made', 'maker', 'mbox', 'member', 'msnChatID', 'name', 'nick', 'openid', 'page', 'phone', 'surname', 'thumbnail', 'weblog', 'yahooChatID'],
    'owl' : ['sameAs'],
    'rdf' : ['type'],
    'rdfs' : ['seeAlso', 'label', 'isDefinedBy'],
    'mesur' : ['hasAccess', 'hasAffiliation', 'hasIssue', 'hasVolume', 'used', 'usedBy'],
    'skos' : ['prefLabel', 'inScheme', 'broader', 'narrower', 'related', 'Concept', 'ConceptScheme', 'changeNote', 'editorialNote'],
    'iana' : ['alternate', 'current' ,'enclosure', 'edit', 'edit-media', 'first', 'last', 'next', 'next-archive', 'previous', 'payment', 'prev-archive', 'related', 'replies', 'service', 'via'], # -self, -license
    'bibo' : ['Article', 'Issue', 'Journal', 'pageStart', 'pageEnd', 'volume']
}
### The order in which to search the above hash
namespaceSearchOrder = ['ore', 'dc', 'dcterms', 'foaf', 'rdf', 'rdfs', 'orex', 'owl', 'mesur', 'skos', 'iana']
# Predicates used internally for bookkeeping; not emitted in serializations
internalPredicates = [namespaces['orex']['isAuthoritativeFor'],
                      namespaces['orex']['page'],
                      ]
# Splits a Clark-notation name '{namespace}localName' into its two parts
namespaceElemRe = re.compile('^\{(.+)\}(.+)$')
# UUID generator
# Define gen_uuid() with the best mechanism available, falling back in
# order: stdlib uuid -> 4Suite -> the unix uuidgen command -> pure random.
try:
    # only in Python 2.5+
    import uuid
    def gen_uuid():
        return str(uuid.uuid4())
except:
    # Try 4Suite if installed
    try:
        from Ft.Lib.Uuid import GenerateUuid, UuidAsString
        def gen_uuid():
            return UuidAsString(GenerateUuid())
    except:
        # No luck, try to generate using unix command
        import commands
        def gen_uuid():
            return commands.getoutput('uuidgen')
        uuidre = re.compile("[0-9a-fA-F-]{36}")
        # NOTE(review): rebinds the name 'uuid' (the failed module import)
        # to one sample value, purely to sanity-check the command output
        uuid = gen_uuid()
        if not uuidre.match(uuid):
            # probably sh: command not found or other similar
            # weakest version: just build random token
            import random
            chrs = ['0','1','2','3','4','5','6','7','8','9','a','b','c','d','e','f']
            def gen_uuid():
                # assemble the 8-4-4-4-12 hex-digit grouping by hand
                uuidl = []
                for y in [8,4,4,4,12]:
                    for x in range(y):
                        uuidl.append(random.choice(chrs))
                    uuidl.append('-')
                uuidl.pop(-1) # strip trailing -
                return ''.join(uuidl)
def now():
    """Return the current time as an ISO-8601 UTC timestamp,
    YYYY-MM-DDTHH:MM:SSZ.

    BUGFIX: formats gmtime explicitly.  The previous version formatted
    localtime but still appended the literal 'Z' (which asserts UTC),
    producing wrong timestamps on any machine not running in UTC.
    """
    return time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime())
def gen_proxy_uri(res, aggr):
    """Generate a Proxy URI for resource ``res`` within aggregation
    ``aggr``, dispatching on the module-level proxyType setting.

    Raises KeyError for an unknown proxyType.
    """
    # Allow for easier expansion via adding fn to proxyTypeHash
    # (uses the `in` operator: dict.has_key is Python-2-only)
    if proxyType in proxyTypeHash:
        return proxyTypeHash[proxyType](res, aggr)
    else:
        raise KeyError("Unknown proxyType setting: %s" % proxyType)
class OreException(Exception):
    """Base exception for ORE/Foresite serialization errors."""
    pass
| Python |
# Dependencies: rdflib
# lxml
# Library identity, advertised in generated resource maps (atom:generator)
libraryName= "Foresite Toolkit (Python)"
libraryUri = "http://foresite-toolkit.googlecode.com/#pythonAgent"
libraryVersion = "1.1"
libraryEmail = "foresite@googlegroups.com"
# Public API re-exported from the package submodules below
__all__ = ['ore', 'utils','parser', 'serializer', 'tripleStore', 'Aggregation', 'ResourceMap', 'AggregatedResource', 'Agent', 'ArbitraryResource', 'Proxy', 'ReMDocument', 'AtomSerializer', 'AtomParser', 'RdfLibSerializer', 'RdfLibParser', 'RdfAParser', 'RDFaSerializer', 'SQLiteTripleStore', 'MySQLTripleStore', 'BdbTripleStore']
from ore import *
from utils import *
from parser import *
from serializer import *
from tripleStore import *
from RDFaSerializer import *
| Python |
from __future__ import generators
from rdflib.syntax.serializers import Serializer
from rdflib.URIRef import URIRef
from rdflib.Literal import Literal
from rdflib.BNode import BNode
from rdflib.util import uniq
from rdflib.exceptions import Error
from rdflib.syntax.xml_names import split_uri
from xml.sax.saxutils import quoteattr, escape
class RDFaSerializer(Serializer):
    """Serialize a graph as minimal 'invisible' RDFa inside one div:

        <div about="subject">
          <a rel="predicate" href="object"></a>
          <span property="predicate" content="literal"></span>
        </div>
    """
    def __init__(self, store):
        super(RDFaSerializer, self).__init__(store)
    def __bindings(self):
        # Yield (prefix, namespace) for every predicate in the store,
        # guaranteeing the conventional rdf prefix is present.
        store = self.store
        nm = store.namespace_manager
        bindings = {}
        for predicate in uniq(store.predicates()):
            prefix, namespace, name = nm.compute_qname(predicate)
            bindings[prefix] = URIRef(namespace)
        RDFNS = URIRef("http://www.w3.org/1999/02/22-rdf-syntax-ns#")
        if "rdf" in bindings:
            assert bindings["rdf"]==RDFNS
        else:
            bindings["rdf"] = RDFNS
        for prefix, namespace in bindings.iteritems():
            yield prefix, namespace
    def serialize(self, stream, base=None, encoding=None, **args):
        """Write the whole store to ``stream`` as RDFa inside one div."""
        self.base = base
        self.__stream = stream
        self.__serialized = {}
        encoding = self.encoding
        self.write = lambda uni: stream.write(uni.encode(encoding, 'replace'))
        # Basic invisible RDFa
        # <div about="subject">
        # <a rel="predicate" href="object"></a>
        # <span property="predicate" content="literal"></span>
        xmlns = []
        for b in self.__bindings():
            xmlns.append('xmlns:%s=\"%s\"' % b)
        self.write('<div id="ore:ResourceMap" %s>\n' % ' '.join(xmlns))
        for subject in self.store.subjects():
            self.subject(subject, 1)
        self.write('</div>')
        del self.__serialized
    def subject(self, subject, depth=1):
        """Emit one <div about=...> with all predicate/object children,
        skipping subjects already written."""
        if not subject in self.__serialized:
            self.__serialized[subject] = 1
            indent = " " * depth
            if isinstance(subject, URIRef):
                uri = quoteattr(self.relativize(subject))
            else:
                # Blank Node
                uri = '"[%s]"' % subject.n3()
            self.write('%s<div about=%s>\n' % (indent, uri))
            for predicate, object in self.store.predicate_objects(subject):
                self.predicate(predicate, object, depth+1)
            self.write("%s</div>\n" % (indent))
    def predicate(self, predicate, object, depth=1):
        """Emit one predicate/object as <span property> (literal) or
        <a rel> (resource)."""
        indent = " " * depth
        qname = self.store.namespace_manager.qname(predicate)
        if isinstance(object, Literal):
            attributes = ""
            if object.language:
                attributes += ' xml:lang="%s"'%object.language
            #if object.datatype:
            #    attributes += ' rdf:datatype="%s"'%object.datatype
            # BUGFIX: double quotes in the literal must become &quot; --
            # the value is embedded in a double-quoted content="..."
            # attribute (the mapping was previously the identity '"'->'"',
            # which produced broken markup for literals containing quotes)
            self.write('%s<span property="%s" content="%s"%s></span>\n' %
                       (indent, qname, escape(object, {'"':'&quot;'}), attributes))
        else:
            if isinstance(object, URIRef):
                href = quoteattr(self.relativize(object))
            else:
                # BNode
                href= '"[%s]"' % object.n3()
            self.write('%s<a rel="%s" href=%s></a>\n' % (indent, qname, href))
| Python |
from __future__ import generators
from rdflib.syntax.serializers import Serializer
from rdflib.URIRef import URIRef
from rdflib.Literal import Literal
from rdflib.BNode import BNode
from rdflib.util import uniq
from rdflib.exceptions import Error
from rdflib.syntax.xml_names import split_uri
from xml.sax.saxutils import quoteattr, escape
try:
import json
except ImportError:
import simplejson as json
class JsonSerializer(Serializer):
    """Serialize an rdflib store as a JSON object keyed by subject URI.

    Each subject maps to {predicate: [value-descriptor, ...]}, where a
    value descriptor is a small dict with 'type' ('literal'|'uri'|'bnode'),
    'value', and optional 'lang'/'datatype' entries.
    """

    gdataColon = 0

    def __init__(self, store):
        super(JsonSerializer, self).__init__(store)
        # When set, ':' in subject/predicate names is rewritten to '$'
        # (GData convention).
        self.gdataColon = 0
        # When set, predicates are rendered as prefix:local qnames.
        self.prettyPredName = 0

    def serialize(self, stream, base=None, encoding=None, **args):
        """Write the whole store to *stream* as indented JSON."""
        self.base = base
        self.__stream = stream
        self.__serialized = {}
        # NOTE(review): encodes with self.encoding from the base class; the
        # `encoding` argument is accepted but unused -- kept for compatibility.
        self.write = lambda text: stream.write(text.encode(self.encoding, 'replace'))
        self.jsonObj = {}
        self.initObj()
        for subj in self.store.subjects():
            self.subject(subj)
        self.write(json.dumps(self.jsonObj, indent=2))
        del self.__serialized

    def initObj(self):
        # Hook for subclasses to seed self.jsonObj (e.g. namespace bindings).
        pass

    def subject(self, subject):
        """Record one subject and all of its predicate/object pairs."""
        if subject in self.__serialized:
            return
        self.__serialized[subject] = 1
        if isinstance(subject, URIRef):
            key = self.relativize(subject)
        else:
            # Blank Node
            key = '%s' % subject.n3()
        if self.gdataColon:
            key = key.replace(':', '$')
        entry = {}
        for pred, objt in self.store.predicate_objects(subject):
            if self.prettyPredName:
                pname = self.store.namespace_manager.qname(pred)
            else:
                pname = self.relativize(pred)
            if self.gdataColon:
                pname = pname.replace(':', '$')
            entry.setdefault(pname, []).append(self.value(objt))
        self.jsonObj[key] = entry

    def value(self, objt):
        """Describe a single RDF object as a typed dict."""
        data = {}
        if isinstance(objt, Literal):
            data['type'] = 'literal'
            if objt.language:
                data['lang'] = objt.language
            if objt.datatype:
                data['datatype'] = objt.datatype
            data['value'] = objt
        else:
            if isinstance(objt, URIRef):
                ref = self.relativize(objt)
                data['type'] = 'uri'
            else:
                # BNode
                ref = '%s' % objt.n3()
                if self.gdataColon:
                    ref = ref.replace(':', '$')
                data['type'] = 'bnode'
            data['value'] = ref
        return data
class PrettyJsonSerializer(JsonSerializer):
    """JsonSerializer variant: qname predicates and GData-style '$' names,
    with xmlns$prefix namespace declarations seeded into the output."""

    def __init__(self, store):
        super(PrettyJsonSerializer, self).__init__(store)
        self.gdataColon = 1
        self.prettyPredName = 1

    def __bindings(self):
        # Yield (prefix, namespace) for every namespace used by a predicate
        # in the store, always including the rdf namespace.
        nm = self.store.namespace_manager
        bindings = {}
        for pred in uniq(self.store.predicates()):
            prefix, namespace, name = nm.compute_qname(pred)
            bindings[prefix] = URIRef(namespace)
        RDFNS = URIRef("http://www.w3.org/1999/02/22-rdf-syntax-ns#")
        if "rdf" in bindings:
            assert bindings["rdf"] == RDFNS
        else:
            bindings["rdf"] = RDFNS
        for pair in bindings.items():
            yield pair

    def initObj(self):
        # Seed the JSON object with xmlns$prefix -> namespace entries.
        for prefix, namespace in self.__bindings():
            self.jsonObj['xmlns$%s' % prefix] = '%s' % namespace
| Python |
from ore import *
from utils import namespaces, OreException, unconnectedAction, protocolUriRe
from lxml import etree
from xml.dom import minidom
from rdflib import StringInputSource, URIRef, plugin, syntax
# Register foresite's JSON parser with rdflib's plugin registry under the
# name 'json', so it can be obtained like the built-in parsers.
plugin.register('json', syntax.parsers.Parser, 'foresite.JsonParser', 'JsonParser')
class OREParser(object):
    """Base class for parsers that turn serialized data into ORE
    objects and an rdflib graph."""

    def __init__(self):
        # Non-strict by default: validation of mandatory triples is skipped
        # unless a caller opts in.
        self.strict = False
class RdfLibParser(OREParser):
def set_fields(self, what, graph):
for (pred, obj) in graph.predicate_objects(what.uri):
# assert to what's graph
what.graph.add((what.uri, pred, obj))
def process_graph(self, graph):
# take graph and find objects, split up stuff into graph
# Find ReM/Aggr
lres = list(graph.query("PREFIX ore: <%s> SELECT ?a ?b WHERE {?a ore:describes ?b .}" % namespaces['ore']))
try:
uri_r = lres[0][0]
uri_a = lres[0][1]
except IndexError:
raise OreException("Graph does not have mandatory ore:describes triple")
if self.strict and len(lres) != 1:
raise OreException("Graph must contain exactly one ore:describes triple")
if self.strict and not protocolUriRe.match(uri_r):
raise OreException("Resource Map URI must be protocol-based URI: %s" % uri_r)
if self.strict and not protocolUriRe.match(uri_a):
raise OreException("Aggregation URI must be protocol-based URI: %s" % uri_a)
remc = list(graph.query("PREFIX dcterms: <%s> SELECT ?a WHERE { <%s> dcterms:creator ?a .}" % (namespaces['dcterms'], uri_r)))
if self.strict and not remc:
raise OreException("Graph does not have mandatory 'ResourceMap dcterms:creator ?x' triple")
remc = list(graph.query("PREFIX dcterms: <%s> SELECT ?a WHERE { <%s> dcterms:modified ?a .}" % (namespaces['dcterms'], uri_r)))
if self.strict and not remc:
raise OreException("Graph does not have mandatory 'ResourceMap dcterms:modified timestamp' triple")
rem = ResourceMap(uri_r)
aggr = Aggregation(uri_a)
rem.set_aggregation(aggr)
self.set_fields(rem, graph)
self.set_fields(aggr, graph)
things = {uri_r : rem, uri_a : aggr}
res2 = graph.query("PREFIX ore: <http://www.openarchives.org/ore/terms/> SELECT ?b WHERE {<%s> ore:aggregates ?b .}" % uri_a )
for uri_ar in res2:
uri_ar = uri_ar[0]
if self.strict and not protocolUriRe.match(uri_ar):
raise OreException("Aggregated Resource URI must be protocol-based URI: %s" % uri_ar)
res = AggregatedResource(uri_ar)
things[uri_ar] = res
proxy = list(graph.query("PREFIX ore: <http://www.openarchives.org/ore/terms/> SELECT ?a WHERE {?a ore:proxyFor <%s> .}" % uri_ar ))
try:
uri_p = proxy[0][0]
p = Proxy(uri_p)
p.set_forIn(res, aggr)
things[uri_p] = p
aggr.add_resource(res, p)
self.set_fields(res, graph)
self.set_fields(p, graph)
except IndexError:
aggr.add_resource(res, None)
self.set_fields(res, graph)
allThings = things.copy()
agents = list(graph.query("PREFIX foaf: <%s> PREFIX dcterms: <%s> SELECT ?a WHERE { { ?a foaf:name ?b } UNION { ?a foaf:mbox ?b } UNION { ?b dcterms:creator ?a } UNION { ?b dcterms:contributor ?a } }" % (namespaces['foaf'], namespaces['dcterms'])))
for a_uri in agents:
a_uri = a_uri[0]
a = Agent(a_uri)
allThings[a_uri] = a
self.set_fields(a, graph)
for (subj, pred) in graph.subject_predicates(URIRef(a_uri)):
if things.has_key(subj):
# direct manipulation, as will have already added predicate in set_fields
what = things[subj]
what._agents_[a_uri] = a
# rem and aggr will have default rdf:type triples already
allThings.update(rem.triples)
allThings.update(aggr.triples)
for subj in graph.subjects():
if not allThings.has_key(subj):
# triple needed
ar = ArbitraryResource(subj)
allThings[subj] = ar
# find our graph
for (pred, obj) in graph.predicate_objects(subj):
ar.graph.add((subj, pred, obj))
# find shortest distance to main object to link to main graph
# Breadth First Search
found = 0
checked = {}
tocheck = list(graph.subject_predicates(subj))
while tocheck:
subsubj = tocheck.pop(0)[0]
checked[subsubj] = 1
if things.has_key(subsubj):
things[subsubj]._triples_[ar.uri] = ar
found = 1
break
else:
extd = list(graph.subject_predicates(subsubj))
if extd:
for e in extd[0]:
if not checked.has_key(e):
tocheck.append(e)
if not found:
if unconnectedAction == 'ignore':
# Input graph is not connected!
rem._triples_[ar.uri] = ar
elif unconnectedAction == 'warn':
print "Input Graph Not Connected at: %s" % subj
elif unconnectedAction == 'raise':
raise OreException("Input Graph Not Connected at: %s" % subj)
return rem
def parse(self, doc):
# parse to find graph
graph = Graph()
data = StringInputSource(doc.data)
if doc.format:
graph.parse(data, format=doc.format)
else:
graph.parse(data)
return self.process_graph(graph)
# Prefer the standalone pyRdfa parser when installed; fall back to a plain
# RdfLibParser subclass otherwise, so RdfAParser always exists.
try:
    # Try to use more featureful pyRDFa parser
    from pyRdfa import parseRDFa, Options
    rdfaOptions = Options(warnings=False)
    rdfaOptions.warning_graph = None
    class RdfAParser(RdfLibParser):
        def parse(self, doc):
            # pyRdfa consumes a DOM tree and returns an rdflib graph,
            # which process_graph() handles as usual.
            root = minidom.parse(doc)
            graph = parseRDFa(root, doc.uri, options=rdfaOptions)
            return self.process_graph(graph)
except ImportError:
    # No pyRdfa lib, default to using rdflib's parser
    class RdfAParser(RdfLibParser):
        pass
class AtomParser(OREParser):
    # 1.0's entry style atom ReM
    # Parses an ORE 1.0 Resource Map serialized as a single Atom entry.

    def handle_person(self, elem, what, type):
        # Build an Agent from an atom:author/atom:contributor element and
        # attach it to `what` with the given relationship ('creator'/...).
        name = elem.xpath('atom:name/text()', namespaces=namespaces)
        mbox = elem.xpath('atom:email/text()', namespaces=namespaces)
        uri = elem.xpath('atom:uri/text()', namespaces=namespaces)
        if uri:
            agent = Agent(uri[0])
        else:
            agent = Agent()
        self.all_objects[agent._uri_] = agent
        if name:
            agent.name = name[0]
        if mbox:
            mb = mbox[0]
            # normalize bare addresses to mailto: URIs
            if mb[:7] != "mailto:":
                mb = "mailto:%s" % mb
            agent.mbox = mb
        what.add_agent(agent, type)

    def handle_category(self, elem, what):
        # Map an atom:category to an rdf:type; optional scheme/label are
        # recorded on an ArbitraryResource for the type URI.
        uri = elem.attrib['term']
        scheme = elem.attrib.get('scheme', '')
        label = elem.attrib.get('label', '')
        if scheme[:47] == "http://www.openarchives.org/ore/terms/datetime/":
            # magic, ignore
            return
        what._rdf.type = URIRef(uri)
        if scheme or label:
            t = ArbitraryResource(uri)
            if label:
                t._rdfs.label = label
            if scheme:
                t._rdfs.isDefinedBy = scheme
            what.add_triple(t)
            self.all_objects[t._uri_] = t

    def handle_link(self, elem, what):
        # Translate an atom:link into either an AggregatedResource
        # (rel == ore:aggregates) or a direct triple on `what`.
        type = elem.attrib['rel']
        if type in ['self', 'license']:
            # already handled
            return
        uri = elem.attrib['href']
        format = elem.attrib.get('type', '')
        lang = elem.attrib.get('hreflang', '')
        title = elem.attrib.get('title', '')
        extent = elem.attrib.get('length', '')
        # links only apply to aggregations now
        # and can be anything
        t = None
        if type == str(namespaces['ore']['aggregates']):
            # Build Aggregated Resource
            t = AggregatedResource(uri)
            what.aggregates = t._uri_
            what._resources_.append((t, None))
            t._aggregations_.append((what, None))
            # in RDF, if proxy check for AggRes
        else:
            if type in elements['iana']:
                pred = namespaces['iana'][type]
            else:
                pred = URIRef(type)
            # direct graph manipulation rather than try to split
            what.graph.add((what._uri_, pred, URIRef(uri)))
            if format or lang or title or extent:
                t = ArbitraryResource(uri)
        # Attach any link metadata (type/hreflang/title/length) to t,
        # whichever kind of resource it became.
        if format or lang or title or extent:
            self.all_objects[t._uri_] = t
            if format:
                t._dc.format = format
            if lang:
                t._dc.language = lang
            if title:
                t._dc.title = title
            if extent:
                t._dc.extent = extent
        if isinstance(t, ArbitraryResource):
            what.add_triple(t)

    def handle_rdf(self, elem, what):
        # Fold an rdf:Description block into the object it describes,
        # creating a Proxy or ArbitraryResource when it describes a new node.
        # Create AT for @about
        try:
            uri_at = elem.attrib['{%s}about' % namespaces['rdf']]
        except:
            uri_at = elem.attrib['{%s}nodeID' % namespaces['rdf']]
        if uri_at == str(what.uri):
            at = what
        elif elem.xpath('ore:proxyFor', namespaces=namespaces):
            # proxy
            at = Proxy(uri_at)
        else:
            at = ArbitraryResource(uri_at)
            what.add_triple(at)
        self.all_objects[at._uri_] = at
        for kid in elem:
            # set attribute on at from kid
            full = kid.tag # {ns}elem
            match = namespaceElemRe.search(full)
            if match:
                name = match.groups()[1]
            else:
                name = full
            val = kid.text
            if not val:
                # look in @rdf:resource
                try:
                    val = kid.attrib['{%s}resource' % namespaces['rdf']]
                    val = URIRef(val)
                except:
                    # could be a ref to a blank node
                    try:
                        val = kid.attrib['{%s}nodeID' % namespaces['rdf']]
                        val = URIRef(val)
                    except:
                        continue
            try:
                setattr(at, name, val)
            except:
                # Probably failed to resolve attribute name -> ns
                pass
        if isinstance(at, Proxy):
            # try to update proxyIn and proxyFor
            try:
                aggr = self.all_objects[at._ore.proxyIn[0]]
                res = self.all_objects[at._ore.proxyFor[0]]
                aggr._resources_.remove((res, None))
                aggr._resources_.append((res, at))
                res._aggregations_.remove((aggr, None))
                res._aggregations_.append((aggr, at))
                at._resource_ = res
                at._aggregation_ = aggr
            except KeyError:
                # third party proxy
                pass

    def parse(self, doc):
        # Parse an ORE 1.0 Atom-entry ReMDocument; returns the ResourceMap.
        root = etree.fromstring(doc.data)
        self.curr_root = root
        graph = Graph()
        # first construct aggr and rem
        self.all_objects = {}
        uri_a = root.xpath("/atom:entry/atom:link[@rel='http://www.openarchives.org/ore/terms/describes']/@href", namespaces=namespaces)
        uri_r = root.xpath("/atom:entry/atom:link[@rel='self']/@href", namespaces=namespaces)
        rem = ResourceMap(uri_r[0])
        aggr = Aggregation(uri_a[0])
        rem.set_aggregation(aggr)
        self.all_objects[rem._uri_] = rem
        self.all_objects[aggr._uri_] = aggr
        # Aggregation Info
        title = root.xpath("/atom:entry/atom:title/text()", namespaces=namespaces)
        aggr._dc.title = title[0]
        for auth in root.xpath('/atom:entry/atom:author', namespaces=namespaces):
            self.handle_person(auth, aggr, 'creator')
        for auth in root.xpath('/atom:entry/atom:contributor', namespaces=namespaces):
            self.handle_person(auth, aggr, 'contributor')
        for cat in root.xpath('/atom:entry/atom:category', namespaces=namespaces):
            self.handle_category(cat, aggr)
        for link in root.xpath('/atom:entry/atom:link', namespaces=namespaces):
            self.handle_link(link, aggr)
        summary = root.xpath("/atom:entry/atom:summary/text()", namespaces=namespaces)
        if summary:
            aggr._dc.description = summary[0]
        # Resource Map Info
        aid = root.xpath("/atom:entry/atom:id/text()", namespaces=namespaces)
        at = ArbitraryResource(aid[0])
        at._dcterms.hasVersion = rem._uri_
        rem.add_triple(at)
        self.all_objects[at._uri_] = at
        updated = root.xpath("/atom:entry/atom:updated/text()", namespaces=namespaces)
        if updated:
            rem._dcterms.modified = updated[0]
        elif self.strict:
            raise OreException("Graph does not have mandatory 'ResourceMap dcterms:modified timestamp' triple")
        published = root.xpath("/atom:entry/atom:published/text()", namespaces=namespaces)
        if published:
            rem._dcterms.created = published[0]
        rights = root.xpath("/atom:entry/atom:rights/text()", namespaces=namespaces)
        if rights:
            rem._dc.rights = rights[0]
        lic = root.xpath("/atom:entry/atom:link[@rel='license']/@href", namespaces=namespaces)
        if lic:
            rem._dcterms.rights = URIRef(lic[0])
        for rauth in root.xpath('/atom:entry/atom:source/atom:author', namespaces=namespaces):
            self.handle_person(rauth, rem, 'creator')
        for rauth in root.xpath('/atom:entry/atom:source/atom:contributor', namespaces=namespaces):
            self.handle_person(rauth, rem, 'contributor')
        # Loose rdf:Description blocks inside ore:triples
        for rdf in root.xpath('/atom:entry/ore:triples/rdf:Description', namespaces=namespaces):
            try:
                about = URIRef(rdf.attrib['{%s}about' % namespaces['rdf']])
            except:
                # probably a blank node
                try:
                    about = BNode(rdf.attrib['{%s}nodeID' % namespaces['rdf']])
                except:
                    raise
            if about in self.all_objects:
                self.handle_rdf(rdf, self.all_objects[about])
            else:
                self.handle_rdf(rdf, aggr)
        self.all_objects = {}
        return rem
class OldAtomParser(AtomParser):
    # 0.9's feed style atom ReM
    # NOTE(review): these class-level dicts are immediately shadowed by the
    # instance attributes assigned in __init__; they only act as defaults.
    remMap = {}
    aggrMap = {}
    entryMap = {}
    aggrRels = {}
    entryRels = {}

    def __init__(self):
        # NOTE(review): does not call AtomParser/OREParser.__init__, so
        # self.strict is never initialized here -- callers set it
        # explicitly. Confirm before relying on self.strict in this class.
        # XPath -> {'p': predicate, 'ns': namespace, 'type': cast} maps.
        self.remMap = {'/atom:feed/atom:updated/text()' :
                           {'p' : 'modified',
                            'ns' : 'dcterms'},
                       '/atom:feed/atom:rights/text()' :
                           {'p' : 'rights',
                            'ns' : 'dc'},
                       "atom:link[@rel='self']/@type" :
                           {'p' : 'format'},
                       "atom:link[@rel='self']/@hreflang" :
                           {'p' : 'language'},
                       "atom:link[@rel='self']/@title" :
                           {'p' : 'title'},
                       "atom:link[@rel='self']/@length" :
                           {'p' : 'extent'}
                       }
        self.aggrMap = {'/atom:feed/atom:title/text()' :
                            {'p' : 'title'},
                        '/atom:feed/atom:icon/text()' :
                            {'p' : 'logo', 'type' : URIRef},
                        '/atom:feed/atom:logo/text()' :
                            {'p' : 'logo', 'type' : URIRef},
                        '/atom:feed/atom:subtitle/text()' :
                            {'p' : 'description'}
                        }
        # about aggregated resource
        self.entryMap = {'atom:title/text()' : {'p' : 'title'},
                         'atom:summary/text()' : {'p' : 'abstract', 'ns' : 'dcterms'},
                         "atom:link[@rel='alternate']/@type" :
                             {'p' : 'format'},
                         "atom:link[@rel='alternate']/@hreflang" :
                             {'p' : 'language'},
                         "atom:link[@rel='alternate']/@title" :
                             {'p' : 'title'},
                         "atom:link[@rel='alternate']/@length" :
                             {'p' : 'extent'}
                         }
        self.aggrRels = {'related' : {'p' : 'similarTo'},
                         'alternate' : {'p' : 'isDescribedBy'},
                         'license' : {'p' : 'rights', 'ns' : 'dcterms'}
                         }
        # self = no map, alternate = URI-AR, via = Proxy
        self.entryRels = {'related' : {'p' : 'isAggregatedBy'},
                          'license' : {'p' : 'rights', 'ns' : 'dcterms'}
                          }

    def handle_person(self, elem, what, type):
        # As AtomParser.handle_person, but mints a urn:uuid URI when the
        # element carries none, and does not record into all_objects.
        name = elem.xpath('atom:name/text()', namespaces=namespaces)
        mbox = elem.xpath('atom:email/text()', namespaces=namespaces)
        uri = elem.xpath('atom:uri/text()', namespaces=namespaces)
        if not uri:
            uri = ["urn:uuid:%s" % gen_uuid()]
        agent = Agent(uri[0])
        if name:
            agent.name = name[0]
        if mbox:
            mb = mbox[0]
            if mb[:7] != "mailto:":
                mb = "mailto:%s" % mb
            agent.mbox = mb
        what.add_agent(agent, type)

    def handle_category(self, elem, what):
        # As AtomParser.handle_category but without the ORE datetime-scheme
        # special case.
        uri = elem.attrib['term']
        scheme = elem.attrib.get('scheme', '')
        label = elem.attrib.get('label', '')
        what._rdf.type = URIRef(uri)
        if scheme or label:
            t = ArbitraryResource(uri)
            if label:
                t._rdfs.label = label
            if scheme:
                t._rdfs.isDefinedBy = scheme
            what.add_triple(t)

    def handle_link(self, elem, what):
        # Map known rel values onto predicates via aggrRels/entryRels.
        uri = elem.attrib['href']
        type = elem.attrib['rel']
        format = elem.attrib.get('type', '')
        lang = elem.attrib.get('hreflang', '')
        title = elem.attrib.get('title', '')
        extent = elem.attrib.get('length', '')
        # These don't map 'self', 'next', 'archive' etc
        if isinstance(what, Aggregation):
            pred = self.aggrRels.get(type, '')
        else:
            pred = self.entryRels.get(type, '')
        if pred:
            if pred.has_key('ns'):
                # presumably switches the active namespace so the setattr
                # below resolves in it -- TODO confirm against ore.py
                getattr(what, "_%s" % pred['ns'])
            setattr(what, pred['p'], URIRef(uri))
        if format or lang or title or extent:
            t = ArbitraryResource(uri)
            if format:
                t._dc.format = format
            if lang:
                t._dc.language = lang
            if title:
                t._dc.title = title
            if extent:
                t._dc.extent = extent
            what.add_triple(t)

    def handle_rdf(self, elem, what):
        # Simplified version of AtomParser.handle_rdf: no Proxy/nodeID
        # handling, everything becomes `what` or an ArbitraryResource.
        # Create AT for @about
        uri_at = elem.attrib['{%s}about' % namespaces['rdf']]
        if uri_at == str(what.uri):
            at = what
        else:
            at = ArbitraryResource(uri_at)
            what.add_triple(at)
        for kid in elem:
            # set attribute on at from kid
            full = kid.tag # {ns}elem
            match = namespaceElemRe.search(full)
            if match:
                name = match.groups()[1]
            else:
                name = full
            val = kid.text
            if not val:
                # look in @rdf:resource
                try:
                    val = kid.attrib['{%s}resource' % namespaces['rdf']]
                    val = URIRef(val)
                except:
                    continue
            setattr(at, name, val)

    def parse(self, doc):
        # Parse an ORE 0.9 Atom-feed ReMDocument; returns the ResourceMap.
        root = etree.fromstring(doc.data)
        self.curr_root = root
        graph = Graph()
        # first construct aggr and rem
        # NOTE(review): mutates the shared `namespaces` dict to drop the
        # default namespace (lxml xpath rejects empty prefixes).
        try:
            del namespaces['']
        except:
            pass
        uri_a = root.xpath('/atom:feed/atom:id/text()', namespaces=namespaces)
        uri_r = root.xpath("/atom:feed/atom:link[@rel='self']/@href", namespaces=namespaces)
        rem = ResourceMap(uri_r[0])
        aggr = Aggregation(uri_a[0])
        rem.set_aggregation(aggr)
        # ResourceMap fields from remMap
        for (xp, pred) in self.remMap.iteritems():
            val = root.xpath(xp, namespaces=namespaces)
            for v in val:
                if pred.has_key('ns'):
                    getattr(rem, "_%s" % pred['ns'])
                if pred.has_key('type'):
                    v = pred['type'](v)
                setattr(rem, pred['p'], v)
        # Handle generator
        gen = root.xpath('/atom:feed/atom:generator', namespaces=namespaces)
        if gen:
            gen = gen[0]
            try:
                uri = gen.attrib['uri']
            except:
                uri = "urn:uuid:%s" % gen_uuid()
            name = gen.text
            agent = Agent(uri)
            agent.name = name
            rem.add_agent(agent, 'creator')
        # Aggregation fields from aggrMap
        for (xp, pred) in self.aggrMap.iteritems():
            val = root.xpath(xp, namespaces=namespaces)
            for v in val:
                if pred.has_key('ns'):
                    getattr(aggr, "_%s" % pred['ns'])
                if pred.has_key('type'):
                    v = pred['type'](v)
                setattr(aggr, pred['p'], v)
        # Now handle types, agents, links
        for auth in root.xpath('/atom:feed/atom:author', namespaces=namespaces):
            self.handle_person(auth, aggr, 'creator')
        for auth in root.xpath('/atom:feed/atom:contributor', namespaces=namespaces):
            self.handle_person(auth, aggr, 'contributor')
        for cat in root.xpath('/atom:feed/atom:category', namespaces=namespaces):
            self.handle_category(cat, aggr)
        for link in root.xpath('/atom:feed/atom:link', namespaces=namespaces):
            self.handle_link(link, aggr)
        # RDF blocks. Put everything on aggregation
        for rdf in root.xpath('/atom:feed/rdf:Description', namespaces=namespaces):
            if rdf.attrib['{%s}about' % namespaces['rdf']] == uri_r[0]:
                self.handle_rdf(rdf, rem)
            else:
                self.handle_rdf(rdf, aggr)
        # One atom:entry per aggregated resource; atom:id is the proxy URI
        for entry in root.xpath('/atom:feed/atom:entry', namespaces=namespaces):
            uri_p = entry.xpath('atom:id/text()', namespaces=namespaces)
            uri_ar = entry.xpath("atom:link[@rel='alternate']/@href", namespaces=namespaces)
            res = AggregatedResource(uri_ar[0])
            proxy = Proxy(uri_p[0])
            proxy.set_forIn(res, aggr)
            aggr.add_resource(res, proxy)
            # look for via
            via = entry.xpath("atom:link[@rel='via']/@href", namespaces=namespaces)
            if via:
                proxy._ore.lineage = URIRef(via[0])
            for (xp, pred) in self.entryMap.iteritems():
                val = entry.xpath(xp, namespaces=namespaces)
                for v in val:
                    if pred.has_key('ns'):
                        getattr(res, "_%s" % pred['ns'])
                    if pred.has_key('type'):
                        v = pred['type'](v)
                    setattr(res, pred['p'], v)
            for auth in entry.xpath('atom:author', namespaces=namespaces):
                self.handle_person(auth, res, 'creator')
            for auth in entry.xpath('atom:contributor', namespaces=namespaces):
                self.handle_person(auth, res, 'contributor')
            for cat in entry.xpath('atom:category', namespaces=namespaces):
                self.handle_category(cat, res)
            for link in entry.xpath('atom:link', namespaces=namespaces):
                self.handle_link(link, res)
            # RDF blocks. Put everything on aggregation
            for rdf in entry.xpath('rdf:Description', namespaces=namespaces):
                self.handle_rdf(rdf, res)
        return rem
| Python |
from ore import *
from utils import namespaces
from rdflib import URIRef, plugin, store
# Store raw triples into a TripleStore somewhere
class TripleStore(object):
    """Persist and reload Aggregation graphs in an rdflib triple store.

    Concrete subclasses must set self.storeType (an rdflib store plugin
    name such as 'SQLite' or 'MySQL') before calling this __init__.
    """

    def __init__(self, configuration, db, create):
        self.configuration = configuration
        self.create = create
        self.db = db
        # self.storeType is supplied by the concrete subclass
        if db:
            self.store = plugin.get(self.storeType,store.Store)(db)
        else:
            self.store = plugin.get(self.storeType,store.Store)()
        self.store.open(configuration, create)

    def close(self):
        # Release the underlying store
        self.store.close()

    def store_aggregation(self, aggr):
        """Write an Aggregation and everything attached to it (triples,
        agents, resources, proxies) into the store; return the committed
        Graph."""
        # Non memory graph
        g = Graph(self.store, aggr.uri)
        # Don't want to serialise resource map info?
        [g.add(t) for t in aggr._graph_]
        for at in aggr._triples_:
            [g.add(t) for t in at._graph_]
        for c in aggr._agents_:
            [g.add(t) for t in c._graph_]
        for (res, proxy) in aggr._resources_:
            [g.add(t) for t in res._graph_]
            if proxy:
                [g.add(t) for t in proxy._graph_]
            for at in res._triples_:
                [g.add(t) for t in at._graph_]
            for c in res._agents_:
                [g.add(t) for t in c._graph_]
            if isinstance(res, Aggregation):
                # don't recurse, remove aggregates
                for a in res._ore.aggregates:
                    g.remove((res._uri_, namespaces['ore']['aggregates'], a))
                # but keep all other triples
        g.commit()
        return g

    def set_fields(self, what, graph):
        # Copy every triple about `what` into the object's own graph
        for (pred, obj) in graph.predicate_objects(what.uri):
            # assert to what's graph
            what.graph.add((what.uri, pred, obj))

    def load_aggregation(self, identifier):
        """Rebuild an Aggregation object model from the stored graph.

        Returns None when no triples exist for the identifier.
        """
        if not isinstance(identifier, URIRef):
            identifier = URIRef(identifier)
        graph = Graph(self.store, identifier)
        if not len(graph):
            aggr = None
        else:
            uri_a = identifier
            aggr = Aggregation(uri_a)
            self.set_fields(aggr, graph)
            things = {uri_a : aggr}
            res2 = graph.query("PREFIX ore: <http://www.openarchives.org/ore/terms/> SELECT ?b WHERE {<%s> ore:aggregates ?b .}" % uri_a )
            for uri_ar in res2:
                uri_ar = uri_ar[0]
                res = AggregatedResource(uri_ar)
                things[uri_ar] = res
                proxy = list(graph.query("PREFIX ore: <http://www.openarchives.org/ore/terms/> SELECT ?a WHERE {?a ore:proxyFor <%s> .}" % uri_ar ))
                try:
                    uri_p = proxy[0][0]
                    p = Proxy(uri_p)
                    p.set_forIn(res, aggr)
                    things[uri_p] = p
                    aggr.add_resource(res, p)
                    self.set_fields(res, graph)
                    self.set_fields(p, graph)
                except IndexError:
                    # no proxy for this resource
                    aggr.add_resource(res, None)
                    self.set_fields(res, graph)
            allThings = things.copy()
            agents = list(graph.query("PREFIX foaf: <%s> PREFIX dcterms: <%s> SELECT ?a WHERE { { ?a foaf:name ?b } UNION { ?a foaf:mbox ?b } UNION { ?b dcterms:creator ?a } UNION { ?b dcterms:contributor ?a } }" % (namespaces['foaf'], namespaces['dcterms'])))
            for a_uri in agents:
                a_uri = a_uri[0]
                a = Agent(a_uri)
                allThings[a_uri] = a
                self.set_fields(a, graph)
                for (subj, pred) in graph.subject_predicates(URIRef(a_uri)):
                    if things.has_key(subj):
                        # direct manipulation, as will have already added predicate in set_fields
                        things[subj]._agents_.append(a)
            for at in aggr.triples:
                allThings[at.uri] = at
            for subj in graph.subjects():
                if not allThings.has_key(subj):
                    # triple needed
                    ar = ArbitraryResource(subj)
                    allThings[subj] = ar
                    # find our graph
                    for (pred, obj) in graph.predicate_objects(subj):
                        ar.graph.add((subj, pred, obj))
                    # find shortest distance to main object to link to main graph
                    # Breadth First Search
                    found = 0
                    checked = {}
                    tocheck = list(graph.subject_predicates(subj))
                    while tocheck:
                        subsubj = tocheck.pop(0)[0]
                        # BUGFIX: track visited nodes (as the parser's BFS
                        # does) so a cyclic graph cannot loop forever.
                        if checked.has_key(subsubj):
                            continue
                        checked[subsubj] = 1
                        if things.has_key(subsubj):
                            things[subsubj]._triples_.append(ar)
                            found = 1
                            break
                        else:
                            tocheck.extend(graph.subject_predicates(subsubj))
                    if not found:
                        # Input graph is not connected!
                        aggr._triples_.append(ar)
        return aggr
# types: Sleepycat, MySQL, SQLite. Others: ZODB, Redland
class SQLiteTripleStore(TripleStore):
    """TripleStore backed by rdflib's SQLite store plugin."""

    def __init__(self, configuration='', db="rdfstore.sql", create=False):
        """ configuration = path to create store in """
        self.storeType = 'SQLite'
        super(SQLiteTripleStore, self).__init__(configuration, db, create)
class MySQLTripleStore(TripleStore):
    """TripleStore backed by rdflib's MySQL store plugin."""

    def __init__(self, configuration='', db="rdfstore", create=False):
        """ configuration = dbapi2 config:
            host=SQL-HOST,password=PASSWORD,user=USER,db=DB"
        """
        #"
        self.storeType = 'MySQL'
        super(MySQLTripleStore, self).__init__(configuration, db, create)
class BdbTripleStore(TripleStore):
    """TripleStore backed by rdflib's Sleepycat (Berkeley DB) store plugin."""

    def __init__(self, configuration='', db='', create=False):
        """ configuration = path to create files in """
        #"
        self.storeType = 'Sleepycat'
        super(BdbTripleStore, self).__init__(configuration, db, create)
| Python |
#
# Simple Mod_Python handler for validating and transforming
# ORE Resource Maps
#
# apache config:
# <Directory /home/cheshire/install/htdocs/txr>
# SetHandler mod_python
# PythonDebug On
# PythonPath "['/path/to/validateHandler.py/']+sys.path"
# PythonHandler validateHandler
# </Directory>
import cgitb
from mod_python import apache
from mod_python.util import FieldStorage
import re
from foresite import *
from foresite import conneg
from foresite.utils import namespaces, OreException
from foresite.serializer import OldAtomSerializer
from xml.sax._exceptions import SAXParseException
# One serializer instance per supported output extension.
srlzHash = {'rdf.xml' : RdfLibSerializer('xml'),
            'pretty.xml' : RdfLibSerializer('pretty-xml'),
            'rem.nt' : RdfLibSerializer('nt'),
            'rem.n3' : RdfLibSerializer('n3'),
            'rem.turtle' : RdfLibSerializer('turtle'),
            'rdfa.html' : RdfLibSerializer('rdfa'),
            'atom.xml' : AtomSerializer(),
            'old-atom.xml' : OldAtomSerializer()}
srlzHash['old-atom.xml'].mimeType = "application/atom+xml;version=0.9"
srlzHash['pretty.xml'].mimeType += ";format=pretty"
# Shared parser instances; all start strict and are toggled per request in
# validateHandler.handle().
p = RdfLibParser()
p.strict = True
ap = AtomParser()
# BUGFIX: the next two assignments previously re-set p.strict, so ap and
# rdfap were silently left in their non-strict default state.
ap.strict = True
rdfap = RdfAParser()
rdfap.strict = True
# Reverse map (mime type -> extension) for content negotiation.
mimeHash = {}
for (k,v) in srlzHash.items():
    mimeHash[v.mimeType] = k
mimestr = ', '.join(mimeHash.keys())
mimeList = conneg.parse(mimestr)
protoUriRe = re.compile("^([s]?http[s]?://|[t]?ftp:/|z39.50r:|gopher:|imap://|news:|nfs:|nntp:|rtsp:)")
class validateHandler:
    """mod_python handler: fetch a Resource Map, validate it by parsing,
    and re-serialize it in a content-negotiated format."""

    def send(self, text, req, code=200, ct="text/xml"):
        # Write a successful response body.
        # NOTE(review): `code` is accepted but never applied to the request.
        req.content_type = ct
        req.content_length = len(text)
        req.send_http_header()
        if type(text) == unicode:
            req.write(text.encode('utf-8'))
        else:
            req.write(text)

    def error(self, msg, req):
        # Minimal HTML error page.
        text = "<html><body><h3>Error</h3><p>%s</p></body></html>" % msg
        req.content_type = "text/html"
        req.content_length = len(text)
        req.send_http_header()
        req.write(text)

    def handle(self, req):
        # Drop the first five characters of the request path (the handler's
        # mount prefix -- presumably '/txr/'; confirm against apache config).
        path = req.uri[5:]
        form = FieldStorage(req)
        strict = form.get('strict', True)
        if strict in ['false', 'False', '0', None, '']:
            strict = False
        # Output format selection: explicit mimeType param, then extension
        # param, then HTTP Accept negotiation, then rdf.xml default.
        mt = form.get('mimeType', '')
        # '+' arrives as a space in form data; restore it
        mt = mt.replace(' ', '+')
        if not mt:
            xtn = form.get('extension', '')
            if xtn:
                if not srlzHash.has_key(xtn):
                    # can't continue
                    raise ValueError(xtn)
                else:
                    mt = srlzHash[xtn].mimeType
        if not mt:
            try:
                wanted = req.headers_in['Accept']
                mts = conneg.parse(wanted)
                mt = conneg.best(mts, mimeList)
            except:
                mt = ''
        if mt:
            xtn = mimeHash[str(mt)]
        else:
            # default to rdf/xml
            xtn = "rdf.xml"
        srlz = srlzHash[xtn]
        if form.has_key('aggregation'):
            uri = form.get('aggregation')
        else:
            uri = path
        if not uri:
            data = '<html><body>Instructions etc. goes here</body></html>'
            self.send(data, req, ct="text/html");
            return
        elif not protoUriRe.match(uri):
            self.error("Resource Map URI must be a protocol based URI", req)
            return
        try:
            # fetch
            rd = ReMDocument(uri)
        except Exception, e:
            self.error("Could not retrieve Resource Map from '%s': %s" % (uri, e.message), req)
            return
        try:
            # parse
            if rd.format == 'atom':
                parser = ap
            elif rd.format == 'rdfa':
                parser = rdfap
            else:
                parser = p
            if not strict:
                parser.strict = False
            try:
                rem = parser.parse(rd)
                parser.strict = True
            except:
                # restore strictness on the shared parser before re-raising
                parser.strict = True
                raise
        except OreException, e:
            # get exception message
            self.error("Resource Map Invalid: %s" % e.message, req)
            return
        except SAXParseException, e:
            self.error("Could not parse XML: %s (line %s, column %s)" % (e.getMessage(), e.getLineNumber(), e.getColumnNumber()), req)
            return
        except:
            raise
        try:
            # serialize
            rem2 = rem._aggregation_.register_serialization(srlz, 'http://foresite.cheshire3.org/%s#rem' % req.uri)
            rd = rem2.get_serialization()
            data = rd.data
            if srlz == srlzHash['rdfa.html']:
                data = '<xhtml xmlns="http://www.w3.org/1999/xhtml"><body><i>Invisible RDFa resource map follows, it must have validated okay. [view source] :)</i>' + data + "</body></xhtml>"
        except Exception, e:
            self.error("Could not serialize Aggregation to Resource Map: %s" % e.message, req)
            return
        self.send(data, req, ct=srlz.mimeType)
def handler(req):
    """mod_python entry point: dispatch to validateHandler, rendering a
    cgitb traceback page on any failure."""
    dispatcher = validateHandler()
    try:
        dispatcher.handle(req)
    except:
        # Render the traceback as HTML straight into the response
        req.content_type = "text/html"
        cgitb.Hook(file=req).handle()
    return apache.OK
| Python |
#!/home/cheshire/install/bin/python -i
# depth first web crawler
import sys, os, re
import urllib
import urlparse
from lxml import etree
import StringIO
import hashlib
from foresite import *
from foresite import conneg
from rdflib import URIRef, Literal
# Shared crawler state (module globals, mutated by crawl()).
parser = etree.HTMLParser()
# URL schemes never fetched
nonHttpRe = re.compile("^(mailto|ftp|telnet):(.*)", re.I)
# Extensions assumed non-HTML; such links are never queued
nonHtmlRe = re.compile("\.(pdf|doc|ppt|odp|jpg|png|gif|zip|gz|tgz|bz2|ps|mpg|java|py|c|h|txt|num)$", re.I)
contentTypes = {}  # mime type -> count of non-HTML resources fetched
md5Hash = {}  # page digest -> page id, for duplicate detection
pageHash = {}  # url -> page id
starts = []
webGraphs = [{}]  # list of link graphs; crawl() appends to the last one
start = "http://www.openarchives.org/ore/1.0/"
# A URL is only queued if it matches every template here
restrictTemplates = [re.compile("http://www\.openarchives\.org/ore/1\.0.*")]
stack = [(start, -1)]  # (url, source page id); -1 marks the seed
srlz = RdfLibSerializer(format='pretty-xml')
aggr = Aggregation(start + '#aggregation')
def crawl(uri, src):
    # Fetch one URI, record it in the shared link graph and aggregation,
    # and queue the HTML links it contains. `src` is the page id of the
    # referring page (-1 for the seed).
    if not pageHash.has_key(uri):
        pid = len(pageHash)
        pageHash[uri] = pid
    else:
        pid = pageHash[uri]
    linkHash = webGraphs[-1]
    if not linkHash.has_key(pid):
        linkHash[pid] = []
    else:
        # already processed this page
        return
    print "processing %s->%s: %s" % (src, pid, uri)
    if src != -1:
        linkHash[src].append(pid)
    #fetch, find links, record, crawl
    try:
        fh = urllib.urlopen(uri)
    except:
        print "... BROKEN"
        return
    ar = AggregatedResource(uri)
    ct = fh.headers['content-type']
    try:
        cl = fh.headers['content-length']
        ar._dc.extent = Literal(cl)
    except:
        pass
    try:
        lm = fh.headers['last-modified']
        ar._dcterms.modified = Literal(lm)
    except:
        pass
    # Normalize the content type via content negotiation parsing
    mt = conneg.parse(ct)
    if mt:
        ct = mt[0].mimetype1 + '/' + mt[0].mimetype2
    ar._dc.format = Literal(ct)
    if ct != 'text/html':
        # Non-HTML: record and count it, but don't parse for links
        aggr.add_resource(ar)
        try:
            contentTypes[ct] += 1
        except KeyError:
            contentTypes[ct] = 1
        return
    data = fh.read()
    fh.close()
    # hash page for redirects/duplicates etc
    md5 = hashlib.new('md5')
    md5.update(data)
    hd = md5.hexdigest()
    if md5Hash.has_key(hd):
        print "%s == %s" % (pid, md5Hash[hd])
        return
    else:
        md5Hash[hd] = pid
    # only add it here
    aggr.add_resource(ar)
    try:
        dom = etree.parse(StringIO.StringIO(data), parser)
    except:
        print " --- failed to parse"
        return
    title = dom.xpath('//title/text()')
    if title:
        ar._dc.title = Literal(title[0])
    # Collect candidate links: anchors, frames, images and stylesheets
    links = dom.xpath('//a/@href')
    frames = dom.xpath('//frame/@src')
    links.extend(frames)
    imgs = dom.xpath('//img/@src')
    links.extend(imgs)
    css = dom.xpath('//link/@href')
    links.extend(css)
    for l in links:
        l = l.strip()
        # strip fragment identifiers
        if l.find('#') > -1:
            l = l[:l.find('#')]
        if not l:
            # was just a hash URL
            continue
        if l[0] == "/":
            l = urlparse.urljoin(uri, l)
        elif l[:7].lower() != "http://" and l[:8].lower() != "https://":
            # check other protocols
            if nonHttpRe.search(l):
                continue
            # put in current directory
            l = urlparse.urljoin(uri, l)
        # check if we really want to crawl...
        if nonHtmlRe.search(l):
            # ignore common stuff
            # print "Skipping: %s" % chk
            pass
        elif pageHash.has_key(l):
            # ignore already done
            # print "Skipping: %s" % chk
            pass
        else:
            # queue only URLs matching every restriction template
            match = 1
            for t in restrictTemplates:
                if not t.match(l):
                    match = 0
                    break
            if match:
                stack.append((l, pid))
# Drive loop: crawl() pushes discovered links onto `stack`, drained here.
# NOTE(review): pop(0) makes this breadth-first despite the file header's
# "depth first" comment.
while stack:
    (l, pid) = stack.pop(0)
    crawl(l, pid)
# Serialize the aggregation built during the crawl and print it
rem = aggr.register_serialization(srlz, '#rem')
rd = rem.get_serialization()
print rd.data
| Python |
#!/usr/bin/env python
from setuptools import setup, find_packages
version = '1.1'

# setuptools metadata for the foresite package distribution.
setup(name='foresite',
      version=version,
      description='Library for constructing, parsing, manipulating and serializing OAI-ORE Resource Maps',
      long_description="""\
""",
      classifiers=[],
      author='Rob Sanderson',
      author_email='azaroth@liv.ac.uk',
      url='http://code.google.com/p/foresite-toolkit/',
      license='BSD',
      packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),
      include_package_data=True,
      zip_safe=False,
      install_requires=['rdflib', 'lxml'],
      test_suite='foresite.tests.test_suite'
      )
| Python |
#!/home/cheshire/install/bin/python -i
from foresite import *
import urllib2
import os, sys
import getopt
# given an initial starting point, crawl nested and linked ORE aggregations
# download aggregated resources
# content negotiation for prefered ReM format
def usage():
print """Usage:
%s [-r] [-d DEPTH] [-f ReM-Format] [-remDir ReM-Directory]
%s [-resDir Resource-Directory] URI
ReM-Format is one of: xml, atom, rdfa, nt, n3, turtle""" % (sys.argv[0], ' ' * len(sys.argv[0]))
sys.exit(0)
optstr = "rd:f:"
longs = ['remDir=', 'arDir=']
mimeHash = {'atom' : 'application/atom+xml',
'rdfa' : 'application/xhtml+xml',
'xml' : 'application/rdf+xml',
'nt' : 'text/plain',
'n3' : 'text/rdf+n3',
'turtle' : 'application/x-turtle'}
optlist, args = getopt.getopt(sys.argv[1:], optstr, longs)
if len(args) != 1:
usage()
else:
uri = args[0]
maxDepth = -1
fetchAR = 0
remDirectory = 'rems'
arDirectory = 'resources'
accept_header = ''
for o in optlist:
if o[0] == '-d':
try:
maxDepth = int(o[1])
except:
print "DEPTH must be an integer"
usage()
elif o[0] == '-r':
fetchAR = 1
elif o[0] == '--remDir':
remDirectory = o[1]
elif o[0] == '--resDir':
arDirectory = o[1]
elif o[0] == '-f':
if not mimeHash.has_key(o[1]):
print "Unknown format '%s'" % o[1]
usage()
else:
# pass through accept_header
accept_header = '%s;q=1.0' % mimeHash[o[1]]
else:
print "Unknown option: %s" % o[0]
usage()
# Crawl state: URIs already processed, aggregated resources already
# downloaded, and the work queue mapping URI -> discovery depth.
done = {}
doneAr = {}
stack = {}
# One parser per supported ReM serialization.
p = RdfLibParser()
ap = AtomParser()
rdfap = RdfAParser()
if not os.path.exists(remDirectory):
    os.mkdir(remDirectory)
if not os.path.exists(arDirectory):
    os.mkdir(arDirectory)
stack[uri] = 0
while stack:
    # NB unordered pop
    (next, depth) = stack.popitem()
    done[next] = 1
    if maxDepth > -1 and depth > maxDepth:
        continue
    print "Fetching %s..." % next
    rd = ReMDocument(next, accept=accept_header)
    # Flatten the (possibly redirected) URI into a safe local filename.
    fn = rd.uri.replace('http://', '')
    fn = fn.replace('/', '_')
    fn = fn.replace('\\', '_')
    fn = os.path.join(remDirectory, fn)
    fh = open(fn, 'w')
    fh.write(rd.data)
    fh.close()
    try:
        # pick the parser matching the negotiated format
        if rd.format == 'atom':
            rem = ap.parse(rd)
        elif rd.format == 'rdfa':
            rem = rdfap.parse(rd)
        else:
            rem = p.parse(rd)
    except:
        # unparsable
        print 'URI %s is unparsable' % next
        raise
    # XXX Maybe write in alternative formats?
    # find refs to all other aggregations: nested ones...
    oas = rem.aggregation.do_sparql('SELECT ?a WHERE {?a a ore:Aggregation }')
    for oa in oas:
        oa = str(oa[0])
        if not done.has_key(oa) and not stack.has_key(oa):
            stack[oa] = depth + 1
    # ...and ones referenced via ore:isAggregatedBy
    oas = rem.aggregation.do_sparql('SELECT ?a WHERE {?b ore:isAggregatedBy ?a }')
    for oa in oas:
        oa = str(oa[0])
        if not done.has_key(oa) and not stack.has_key(oa):
            stack[oa] = depth + 1
    if fetchAR:
        # find aggregated resources and mirror them locally
        ars = rem.aggregation.do_sparql('SELECT ?a WHERE {?b ore:aggregates ?a }')
        for ar in ars:
            ar = str(ar[0])
            if not done.has_key(ar) and not stack.has_key(ar) and not doneAr.has_key(ar):
                print "Fetching Aggregated Resource: %s..." % ar
                req = urllib2.Request(ar)
                fh = urllib2.urlopen(req)
                data = fh.read()
                fh.close()
                fn = ar.replace('http://', '')
                fn = fn.replace('/', '_')
                fn = fn.replace('\\', '_')
                fn = os.path.join(arDirectory, fn)
                fh = open(fn, 'w')
                fh.write(data)
                fh.close()
                doneAr[ar] = 1
| Python |
#!/usr/bin/env python
# setuptools build script for the foresite library.
from setuptools import setup, find_packages

version = '1.1'

setup(name='foresite',
      version=version,
      description='Library for constructing, parsing, manipulating and serializing OAI-ORE Resource Maps',
      long_description="""\
""",
      classifiers=[],
      author='Rob Sanderson',
      author_email='azaroth@liv.ac.uk',
      url='http://code.google.com/p/foresite-toolkit/',
      license='BSD',
      # ship everything except the bootstrap helper and test scaffolding
      packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),
      include_package_data=True,
      zip_safe=False,
      install_requires=['rdflib', 'lxml'],
      test_suite='foresite.tests.test_suite'
      )
| Python |
#!/home/cheshire/install/bin/python -i
# depth first web crawler
import sys, os, re
import urllib
import urlparse
from lxml import etree
import StringIO
import hashlib
from foresite import *
from foresite import conneg
from rdflib import URIRef, Literal
parser = etree.HTMLParser()
# Links using these protocols are never followed.
nonHttpRe = re.compile("^(mailto|ftp|telnet):(.*)", re.I)
# File extensions assumed not to be HTML; such links are not crawled.
nonHtmlRe = re.compile("\.(pdf|doc|ppt|odp|jpg|png|gif|zip|gz|tgz|bz2|ps|mpg|java|py|c|h|txt|num)$", re.I)
contentTypes = {}  # content-type -> count of non-HTML resources seen
md5Hash = {}       # body digest -> page id, used to detect duplicate content
pageHash = {}      # URI -> numeric page id
starts = []
webGraphs = [{}]   # link graph: page id -> list of linked page ids
start = "http://www.openarchives.org/ore/1.0/"
# Only URIs matching one of these templates are queued for crawling.
restrictTemplates = [re.compile("http://www\.openarchives\.org/ore/1\.0.*")]
stack = [(start, -1)]  # work queue of (uri, source page id); -1 = seed
srlz = RdfLibSerializer(format='pretty-xml')
aggr = Aggregation(start + '#aggregation')
def crawl(uri, src):
    # Fetch *uri* (reached from page id *src*; -1 for the seed), record it
    # in the link graph, aggregate it as an AggregatedResource, and queue
    # any outbound links that pass the protocol/extension/template filters.
    if not pageHash.has_key(uri):
        pid = len(pageHash)
        pageHash[uri] = pid
    else:
        pid = pageHash[uri]
    linkHash = webGraphs[-1]
    if not linkHash.has_key(pid):
        linkHash[pid] = []
    else:
        # page already processed
        return
    print "processing %s->%s: %s" % (src, pid, uri)
    if src != -1:
        linkHash[src].append(pid)
    #fetch, find links, record, crawl
    try:
        fh = urllib.urlopen(uri)
    except:
        print "... BROKEN"
        return
    ar = AggregatedResource(uri)
    ct = fh.headers['content-type']
    # record optional size/modification metadata when the server sends it
    try:
        cl = fh.headers['content-length']
        ar._dc.extent = Literal(cl)
    except:
        pass
    try:
        lm = fh.headers['last-modified']
        ar._dcterms.modified = Literal(lm)
    except:
        pass
    # normalise the content type (strip parameters such as charset)
    mt = conneg.parse(ct)
    if mt:
        ct = mt[0].mimetype1 + '/' + mt[0].mimetype2
    ar._dc.format = Literal(ct)
    if ct != 'text/html':
        # non-HTML: aggregate it, count it, but do not parse for links
        aggr.add_resource(ar)
        try:
            contentTypes[ct] += 1
        except KeyError:
            contentTypes[ct] = 1
        return
    data = fh.read()
    fh.close()
    # hash page for redirects/duplicates etc
    md5 = hashlib.new('md5')
    md5.update(data)
    hd = md5.hexdigest()
    if md5Hash.has_key(hd):
        # identical body already seen under a different URI
        print "%s == %s" % (pid, md5Hash[hd])
        return
    else:
        md5Hash[hd] = pid
    # only add it here (after the duplicate-content check)
    aggr.add_resource(ar)
    try:
        dom = etree.parse(StringIO.StringIO(data), parser)
    except:
        print " --- failed to parse"
        return
    title = dom.xpath('//title/text()')
    if title:
        ar._dc.title = Literal(title[0])
    # harvest candidate links: anchors, frames, images, stylesheets
    links = dom.xpath('//a/@href')
    frames = dom.xpath('//frame/@src')
    links.extend(frames)
    imgs = dom.xpath('//img/@src')
    links.extend(imgs)
    css = dom.xpath('//link/@href')
    links.extend(css)
    for l in links:
        l = l.strip()
        if l.find('#') > -1:
            # strip the fragment
            l = l[:l.find('#')]
            if not l:
                # was just a hash URL
                continue
        if l[0] == "/":
            # site-relative: resolve against the current page
            l = urlparse.urljoin(uri, l)
        elif l[:7].lower() != "http://" and l[:8].lower() != "https://":
            # check other protocols
            if nonHttpRe.search(l):
                continue
            # put in current directory
            l = urlparse.urljoin(uri, l)
        # check if we really want to crawl...
        if nonHtmlRe.search(l):
            # ignore common stuff
            # print "Skipping: %s" % chk
            pass
        elif pageHash.has_key(l):
            # ignore already done
            # print "Skipping: %s" % chk
            pass
        else:
            # only queue URIs matching every restriction template
            match = 1
            for t in restrictTemplates:
                if not t.match(l):
                    match = 0
                    break
            if match:
                stack.append((l, pid))
# Drive the crawl from the seed: the queue holds (uri, source-page-id)
# pairs; pop(0) makes this breadth-first.  When the queue drains, attach
# a resource map to the aggregation and print its serialization.
while stack:
    (l, pid) = stack.pop(0)
    crawl(l, pid)
rem = aggr.register_serialization(srlz, '#rem')
rd = rem.get_serialization()
print rd.data
| Python |
#!/home/cheshire/install/bin/python -i
from foresite import *
import urllib2
import os, sys
import getopt
# given an initial starting point, crawl nested and linked ORE aggregations
# download aggregated resources
# content negotiation for prefered ReM format
def usage():
print """Usage:
%s [-r] [-d DEPTH] [-f ReM-Format] [-remDir ReM-Directory]
%s [-resDir Resource-Directory] URI
ReM-Format is one of: xml, atom, rdfa, nt, n3, turtle""" % (sys.argv[0], ' ' * len(sys.argv[0]))
sys.exit(0)
optstr = "rd:f:"
longs = ['remDir=', 'arDir=']
mimeHash = {'atom' : 'application/atom+xml',
'rdfa' : 'application/xhtml+xml',
'xml' : 'application/rdf+xml',
'nt' : 'text/plain',
'n3' : 'text/rdf+n3',
'turtle' : 'application/x-turtle'}
optlist, args = getopt.getopt(sys.argv[1:], optstr, longs)
if len(args) != 1:
usage()
else:
uri = args[0]
maxDepth = -1
fetchAR = 0
remDirectory = 'rems'
arDirectory = 'resources'
accept_header = ''
for o in optlist:
if o[0] == '-d':
try:
maxDepth = int(o[1])
except:
print "DEPTH must be an integer"
usage()
elif o[0] == '-r':
fetchAR = 1
elif o[0] == '--remDir':
remDirectory = o[1]
elif o[0] == '--resDir':
arDirectory = o[1]
elif o[0] == '-f':
if not mimeHash.has_key(o[1]):
print "Unknown format '%s'" % o[1]
usage()
else:
# pass through accept_header
accept_header = '%s;q=1.0' % mimeHash[o[1]]
else:
print "Unknown option: %s" % o[0]
usage()
# Crawl state: URIs already processed, aggregated resources already
# downloaded, and the work queue mapping URI -> discovery depth.
done = {}
doneAr = {}
stack = {}
# One parser per supported ReM serialization.
p = RdfLibParser()
ap = AtomParser()
rdfap = RdfAParser()
if not os.path.exists(remDirectory):
    os.mkdir(remDirectory)
if not os.path.exists(arDirectory):
    os.mkdir(arDirectory)
stack[uri] = 0
while stack:
    # NB unordered pop
    (next, depth) = stack.popitem()
    done[next] = 1
    if maxDepth > -1 and depth > maxDepth:
        continue
    print "Fetching %s..." % next
    rd = ReMDocument(next, accept=accept_header)
    # Flatten the (possibly redirected) URI into a safe local filename.
    fn = rd.uri.replace('http://', '')
    fn = fn.replace('/', '_')
    fn = fn.replace('\\', '_')
    fn = os.path.join(remDirectory, fn)
    fh = open(fn, 'w')
    fh.write(rd.data)
    fh.close()
    try:
        # pick the parser matching the negotiated format
        if rd.format == 'atom':
            rem = ap.parse(rd)
        elif rd.format == 'rdfa':
            rem = rdfap.parse(rd)
        else:
            rem = p.parse(rd)
    except:
        # unparsable
        print 'URI %s is unparsable' % next
        raise
    # XXX Maybe write in alternative formats?
    # find refs to all other aggregations: nested ones...
    oas = rem.aggregation.do_sparql('SELECT ?a WHERE {?a a ore:Aggregation }')
    for oa in oas:
        oa = str(oa[0])
        if not done.has_key(oa) and not stack.has_key(oa):
            stack[oa] = depth + 1
    # ...and ones referenced via ore:isAggregatedBy
    oas = rem.aggregation.do_sparql('SELECT ?a WHERE {?b ore:isAggregatedBy ?a }')
    for oa in oas:
        oa = str(oa[0])
        if not done.has_key(oa) and not stack.has_key(oa):
            stack[oa] = depth + 1
    if fetchAR:
        # find aggregated resources and mirror them locally
        ars = rem.aggregation.do_sparql('SELECT ?a WHERE {?b ore:aggregates ?a }')
        for ar in ars:
            ar = str(ar[0])
            if not done.has_key(ar) and not stack.has_key(ar) and not doneAr.has_key(ar):
                print "Fetching Aggregated Resource: %s..." % ar
                req = urllib2.Request(ar)
                fh = urllib2.urlopen(req)
                data = fh.read()
                fh.close()
                fn = ar.replace('http://', '')
                fn = fn.replace('/', '_')
                fn = fn.replace('\\', '_')
                fn = os.path.join(arDirectory, fn)
                fh = open(fn, 'w')
                fh.write(data)
                fh.close()
                doneAr[ar] = 1
| Python |
import os
import urllib, urllib2
from rdflib import ConjunctiveGraph, URIRef, BNode, Literal
from utils import *
from StringIO import StringIO
from utils import unconnectedAction
from foresite import libraryName, libraryUri, libraryEmail
from foresite import conneg
# --- Object Class Definitions ---
class Graph(ConjunctiveGraph):
    """ConjunctiveGraph pre-bound with all of the known namespace prefixes."""

    def __init__(self, store=None, id=None):
        # id *should* be aggregation URI
        if store != None and id != None:
            ConjunctiveGraph.__init__(self, store, id)
        else:
            ConjunctiveGraph.__init__(self)
        for (prefix, nsobj) in namespaces.iteritems():
            self.bind(prefix, nsobj)

    def find_namespace(self, name):
        """Return the best namespace defining *name*, or '' if none does."""
        for prefix in namespaceSearchOrder:
            if name in elements[prefix]:
                return namespaces[prefix]
        return ''

    def split_uri(self, uri):
        """Split a namespaced URI into a (namespace, local-name) pair."""
        local = uri.split('/')[-1].split('#')[-1]
        return (uri[:0 - len(local)], local)
class OREResource(object):
    """Base class for ORE objects whose attributes are backed by an RDF graph.

    Attribute conventions (implemented by __getattr__/__setattr__):
      * obj.name       -- triples (obj.uri, ns:name, value); ns guessed via
                          Graph.find_namespace
      * obj._dc.name   -- the same, with the explicit namespace prefix 'dc'
      * obj._name_     -- plain Python attribute 'name', bypassing the graph
    Reading a property returns the list of matching objects; assigning adds
    a triple, and assigning [] removes all values of that property.
    """
    # Class-level defaults; every instance shadows them in __init__.
    graph = None
    uri = ""
    currNs = ""
    agents = {}
    triples = {}
    aggregations = []

    def __init__(self, uri):
        graph = Graph()
        self._graph_ = graph
        if isinstance(uri, URIRef) or isinstance(uri, BNode):
            self._uri_ = uri
        else:
            self._uri_ = URIRef(uri)
        self._currNs_ = ''
        self._agents_ = {}
        self._triples_ = {}
        # (aggregation, proxy) pairs this resource has been added to
        self._aggregations_ = []

    def __str__(self):
        return str(self.uri)

    def __getattr__(self, name):
        # fetch value from graph
        cns = self.currNs
        if name[0] == "_" and name[-1] == "_":
            # _name_ -> the plain attribute 'name'
            return getattr(self, name[1:-1])
        elif name[0] == "_" and namespaces.has_key(name[1:]):
            # we're looking for self.namespace.property: remember the
            # prefix and return self so the following access uses it
            self._currNs_ = name[1:]
            return self
        elif cns:
            val = self.get_value(name, cns)
            self._currNs_ = ''
        else:
            val = self.get_value(name)
        return val

    def __setattr__(self, name, value):
        if name[0] == "_" and name[-1] == "_":
            # _name_ -> set the plain attribute 'name'
            return object.__setattr__(self, name[1:-1], value)
        elif name[0] == "_" and namespaces.has_key(name[1:]):
            # we're looking for self.namespace.property
            object.__setattr__(self, 'currNs', name[1:])
            return self
        elif self.currNs:
            val = self.set_value(name, value, self.currNs)
        else:
            val = self.set_value(name, value)
        object.__setattr__(self, 'currNs', '')
        return val

    def set_value(self, name, value, ns=None):
        # Add (or, for value == [], clear) property *name* in the graph.
        if ns:
            nsobj = namespaces[ns]
        else:
            nsobj = self.graph.find_namespace(name)
        if value == []:
            # assigning an empty list removes all current values
            for val in self.graph.objects(self.uri, nsobj[name]):
                self.graph.remove((self.uri, nsobj[name], val))
        else:
            if not isinstance(value, URIRef) and not isinstance(value, BNode):
                value = Literal(value)
            self.graph.add((self.uri, nsobj[name], value))
        return 1

    def get_value(self, name, ns=None):
        # Return all objects of (self.uri, ns:name, ?) as a list.
        if ns:
            nsobj = namespaces[ns]
        else:
            nsobj = self.graph.find_namespace(name)
        l = []
        for obj in self.graph.objects(self.uri, nsobj[name]):
            l.append(obj)
        return l

    def add_triple(self, trip):
        # trip is an ArbitraryResource carrying extra triples to serialize
        self._triples_[trip._uri_] = trip

    def remove_triple(self, trip):
        del self._triples_[trip._uri_]

    def predicates(self):
        return list(self.graph.predicates())

    def add_agent(self, who, type):
        # e.g. add_agent(agent, 'creator') also asserts <type> -> agent URI
        self._agents_[who._uri_] = who
        setattr(self, type, who._uri_)

    def remove_agent(self, who, type):
        del self._agents_[who._uri_]
        ns = self.graph.find_namespace(type)
        self._graph_.remove((self._uri_, ns[type], who._uri_))

    def on_add(self, aggr, proxy):
        # callback from Aggregation.add_resource
        self._aggregations_.append((aggr, proxy))

    def on_remove(self, aggr, proxy):
        # callback from Aggregation.remove_resource
        self._aggregations_.remove((aggr, proxy))

    def get_proxy(self, aggr=None):
        # Return this resource's proxy in *aggr*, or in the first
        # aggregation it was added to when aggr is not given.
        if aggr:
            for (a,p) in self._aggregations_:
                if a == aggr:
                    return p
            return None
        elif self._aggregations_:
            return self._aggregations_[0][1]
        else:
            return None
class ResourceMap(OREResource):
    """An ore:ResourceMap: a named description of exactly one Aggregation."""
    aggregation = None
    serializer = None

    def __init__(self, uri):
        OREResource.__init__(self, uri)
        self._aggregation_ = None
        self._serializer_ = None
        self.type = namespaces['ore']['ResourceMap']
        self.add_triple(rem_type)

    def register_serializer(self, serializer):
        # Deprecated alias, kept for backwards compatibility.
        self.register_serialization(serializer)

    def register_serialization(self, serializer):
        """Attach the serializer that renders this map (one-shot).

        Raises OreException if a serializer is already registered.
        """
        if self.serializer:
            raise OreException("ResourceMap already has serializer")
        # record dc:format only once per mime type
        if not serializer.mimeType in self._dc.format:
            self.format = serializer.mimeType
        self._serializer_ = serializer

    def get_serialization(self, page=-1):
        """Serialize this map; page > 0 selects one page of the aggregation.

        BUG FIX: the page argument was previously dropped, so paged
        serializations always rendered the full aggregation.
        """
        return self._serializer_.serialize(self, page)

    def set_aggregation(self, agg):
        """Bind this map to the Aggregation it describes (one-shot)."""
        if self.aggregation:
            raise OreException("ResourceMap already has an aggregation set")
        self._aggregation_ = agg
        self.describes = agg.uri
        agg.on_describe(self)
class Aggregation(OREResource):
    """An ore:Aggregation: a set of resources plus their optional proxies.

    Offers list-like (append/remove, index access) and set-like
    (add/discard, membership) access to the aggregated resources.
    """
    resourceMaps = []
    resources = []
    fullGraph = None

    def __init__(self, uri):
        OREResource.__init__(self, uri)
        self._resources_ = []       # list of (resource, proxy-or-None)
        self._resourceMaps_ = []    # ResourceMaps describing this aggregation
        self._fullGraph_ = None
        self._generateProxies_ = False
        self.type = namespaces['ore']['Aggregation']
        self.add_triple(aggr_type)

    def __iter__(self):
        l = [x[0] for x in self._resources_]
        return l.__iter__()

    def __len__(self):
        return len(self._resources_)

    def __contains__(self, what):
        # Accepts a resource/proxy object or the string form of either URI.
        for x in self._resources_:
            # BUG FIX: str(x[1].uri) crashed with AttributeError whenever a
            # resource was added without a proxy (x[1] is None).
            if what in x or what == str(x[0].uri) or \
               (x[1] is not None and what == str(x[1].uri)):
                return True
        return False

    def __getitem__(self, x):
        # Integer -> positional lookup; string/URIRef -> lookup by URI.
        if isinstance(x, int):
            return self.resources[x][0]
        if isinstance(x, str):
            x = URIRef(x)
        for r in self.resources:
            if x == r[0].uri:
                return r[0]
        raise KeyError(x)

    def on_describe(self, rem):
        # callback from ResourceMap.set_aggregation
        self._resourceMaps_.append(rem)

    def add_resource(self, res, proxy=None):
        """Aggregate *res*; returns its Proxy (explicit or generated), or None.

        Raises KeyError if *res* is already aggregated here.
        """
        for x in self._resources_:
            if x[0] == res:
                raise KeyError('Aggregation %s already aggregates %s' % (self.uri, res.uri))
        self.aggregates = res.uri
        if proxy or self.generateProxies:
            if not proxy:
                uri = gen_proxy_uri(res, self)
                proxy = Proxy(uri)
            proxy.set_forIn(res, self)
        else:
            proxy = None
        self._resources_.append((res, proxy))
        res.on_add(self, proxy)
        return proxy

    # List API
    def append(self, res):
        self.add_resource(res)

    # Set API
    def add(self, res):
        self.add_resource(res)

    def remove_resource(self, res):
        """Stop aggregating *res*; its proxy pairing is discarded."""
        tup = None
        for x in self._resources_:
            if x[0] == res:
                tup = x
                break
        if tup:
            self._resources_.remove(tup)
            res.on_remove(self, tup[1])
            # BUG FIX: the old code ended with `del tup[1]`, which always
            # raised TypeError (tuples don't support item deletion); the
            # proxy is simply dropped along with the pair.
            # NOTE(review): the ore:aggregates triple added by add_resource
            # is not removed here -- confirm whether that is intentional.

    # List, Set API
    def remove(self, res):
        self.remove_resource(res)

    # Set API
    def discard(self, res):
        self.remove_resource(res)

    def get_authoritative(self):
        """Return the ResourceMaps that declare themselves authoritative."""
        rems = []
        for rem in self.resourceMaps:
            if self.uri in rem._orex.isAuthoritativeFor:
                rems.append(rem)
        return rems

    def _merge_all_graphs(self, public=1, top=1):
        # Only used for sparql query across everything, not serialization.
        # Merges the graphs of every ReM, agent, extra triple, resource and
        # proxy (recursing into nested Aggregations).
        g = Graph()
        for rem in self.resourceMaps:
            g += rem._graph_
            for at in rem._triples_.values():
                g += at._graph_
            for c in rem._agents_.values():
                g += c._graph_
            if not rem.created:
                g.add((rem._uri_, namespaces['dcterms']['created'], Literal(now())))
            g.add((rem._uri_, namespaces['dcterms']['modified'], Literal(now())))
        aggr = self
        g += aggr._graph_
        for at in aggr._triples_.values():
            g += at._graph_
        for c in aggr._agents_.values():
            g += c._graph_
        for (res, proxy) in aggr._resources_:
            g += res._graph_
            if proxy:
                g += proxy._graph_
            for at in res._triples_.values():
                g += at._graph_
            for c in res._agents_.values():
                g += c._graph_
            if isinstance(res, Aggregation):
                # include nestings recursively
                g += res._merge_all_graphs(public, top=0)
        if not g.connected() and unconnectedAction != 'ignore':
            raise OreException("Must have connected graph")
        if public:
            # Remove internal methods
            for p in internalPredicates:
                for (s,o) in g.subject_objects(p):
                    g.remove((s,p,o))
        # BUG FIX: objects() takes subject and predicate as two arguments
        # (the old call passed a single tuple), and returns a generator,
        # which is always truthy -- so this check could never fire.
        if top and not list(g.objects(aggr._uri_, namespaces['ore']['aggregates'])):
            raise OreException("Aggregation must aggregate something")
        return g

    def do_sparql(self, sparql):
        """Run a SPARQL query over the merged graph of everything here."""
        # first merge graphs
        g = self._merge_all_graphs()
        # now do sparql query on merged graph
        return g.query(sparql, initNs=namespaces)

    def register_serialization(self, serializer, uri='', **kw):
        """Create and return a ResourceMap over this aggregation.

        Extra keywords become properties of the new map: Agents are added
        via add_agent, ArbitraryResources are linked and recorded, anything
        else is assigned directly.
        """
        if not uri:
            # derive a map URI that won't collide with an existing fragment
            if self.uri.find('#') > -1:
                uri = self.uri + "_ResourceMap"
            else:
                uri = self.uri + "#ResourceMap"
        rem = ResourceMap(uri)
        rem.set_aggregation(self)
        rem.register_serializer(serializer)
        for (k,v) in kw.iteritems():
            if isinstance(v, Agent):
                rem.add_agent(v, k)
            elif isinstance(v, ArbitraryResource):
                setattr(rem, k, v.uri)
                rem.add_triple(v)
            else:
                setattr(rem, k, v)
        return rem

    def get_serialization(self, uri='', page=-1):
        """Serialize one of this aggregation's maps (default: the first)."""
        if not uri:
            rem = self.resourceMaps[0]
        else:
            rem = None
            for r in self.resourceMaps:
                if str(r.uri) == uri:
                    rem = r
                    break
            if not rem:
                raise OreException("Unknown Resource Map: %s" % uri)
        # BUG FIX: forward the page selector instead of dropping it.
        return rem.get_serialization(page)
class Proxy(OREResource):
    """An ore:Proxy: a resource seen in the context of one aggregation."""
    resource = None
    aggregation = None

    def __init__(self, uri):
        OREResource.__init__(self, uri)
        self._resource_ = None
        self._aggregation_ = None
        self.type = namespaces['ore']['Proxy']

    def set_forIn(self, res, aggr):
        """Record which resource this proxy stands for, and in which aggregation."""
        self._resource_ = res
        self._aggregation_ = aggr
        self.proxyFor = res.uri
        self.proxyIn = aggr.uri
class Agent(OREResource):
    """A person, organisation or piece of software (foaf:Agent)."""

    def __init__(self, uri=''):
        if not uri:
            # mint a urn:uuid URI or fall back to a blank node,
            # depending on the global assignAgentUri policy
            uri = ("urn:uuid:%s" % gen_uuid()) if assignAgentUri else BNode()
        OREResource.__init__(self, uri)
class AggregatedResource(OREResource):
    # Convenience class for OREResource: a resource that is (or will be)
    # aggregated.  Adds no behaviour of its own.
    pass
class ArbitraryResource(OREResource):
    # To allow for arbitrary triples that aren't one of the major
    # ORE classes
    def __init__(self, uri=None):
        # default to an anonymous node when no URI is supplied
        OREResource.__init__(self, BNode() if uri == None else uri)
class ReMDocument(StringIO):
    # Serialisation of objects: holds the bytes of one Resource Map
    # representation plus its URI, MIME type and rdflib format name.
    uri = ""
    mimeType = ""
    data = ""
    format = ""  # rdflib name for format
    def __init__(self, uri, data='', filename='', mimeType='', format='', accept=''):
        # Data can come from three places, in priority order: the *data*
        # argument, a local *filename*, or an HTTP fetch of *uri* (with
        # content negotiation via *accept*).
        self.uri = uri
        if data:
            self.data = data
        elif filename:
            if os.path.exists(filename):
                fh = file(filename)
                self.data = fh.read()
                fh.close()
        else:
            # try to fetch uri
            try:
                req = urllib2.Request(uri)
                if accept:
                    # add custom accept header
                    req.add_header('Accept', accept)
                else:
                    # otherwise add default
                    req.add_header('Accept', accept_header)
                fh = urllib2.urlopen(req)
                self.data = fh.read()
                self.info = fh.info()
                # prefer the server-declared content type
                mimeType = self.info.dict.get('content-type', mimeType)
                # record the post-redirect URI
                self.uri = fh.geturl()
                fh.close()
            except:
                # NOTE(review): this bare except also swallows network and
                # HTTP errors, misreporting them with this message --
                # consider letting urllib2 errors propagate.
                raise OreException('ReMDocument must either have data or filename')
        if not format:
            # derive the rdflib format name from the MIME type
            try:
                mt = conneg.parse(mimeType)
                if mt:
                    # strip parameters such as charset
                    mimeType = mt[0].mimetype1 + '/' + mt[0].mimetype2
            except:
                pass
            mimeHash = {'application/atom+xml' : 'atom',
                        'application/xhtml+xml' : 'rdfa',
                        'application/rdf+xml' : 'xml',
                        'text/plain' : 'nt', # yes, really
                        'text/rdf+n3' : 'n3',
                        'application/x-turtle' : 'turtle',
                        'application/rdf+nt' : 'nt'}
            format = mimeHash.get(mimeType, '')
        self.mimeType = mimeType
        self.format = format
        StringIO.__init__(self, self.data)
# Shared ArbitraryResources carrying the rdfs:label / rdfs:isDefinedBy
# triples for the two core ORE classes; attached to every ResourceMap
# and Aggregation instance via add_triple.
rem_type = ArbitraryResource(namespaces['ore']['ResourceMap'])
rem_type.label = "ResourceMap"
rem_type.isDefinedBy = namespaces['ore']
aggr_type = ArbitraryResource(namespaces['ore']['Aggregation'])
aggr_type.label = "Aggregation"
aggr_type.isDefinedBy = namespaces['ore']
# Agent describing this library itself; added as creator of generated ReMs.
foresiteAgent = Agent(libraryUri)
foresiteAgent._foaf.name = libraryName
foresiteAgent._foaf.mbox = libraryEmail
| Python |
import doctest
import unittest
import glob
import os
# Doctest options: report only the first failure per file, ignore
# whitespace differences, and allow "..." ellipsis in expected output.
optionflags = (doctest.REPORT_ONLY_FIRST_FAILURE |
               doctest.NORMALIZE_WHITESPACE |
               doctest.ELLIPSIS)
def open_file(filename, mode='r'):
    """Open *filename* relative to this package's directory."""
    here = os.path.dirname(__file__)
    return open(os.path.join(here, filename), mode)
def setUp(test):
    """Install the package-local helpers into the doctest globals."""
    test.globs['open_file'] = open_file
def test_suite():
    """Build the suite running the doctests embedded in README.txt."""
    readme = doctest.DocFileSuite(
        'README.txt',
        package='foresite',
        optionflags=optionflags,
        setUp=setUp,
    )
    return unittest.TestSuite([readme])
# Allow running the suite directly instead of via setup.py test.
if __name__ == "__main__":
    runner = unittest.TextTestRunner(verbosity=1)
    runner.run(test_suite())
| Python |
import re
from ore import *
from ore import foresiteAgent
from foresite import libraryName, libraryUri, libraryVersion
from utils import namespaces, OreException, unconnectedAction, pageSize
from utils import gen_uuid, build_html_atom_content
from rdflib import URIRef, BNode, Literal, plugin, RDF #syntax, RDF
from rdflib.serializer import Serializer
from rdflib.util import uniq
from lxml import etree
from lxml.etree import Element, SubElement
# Register foresite's extra serializer plugins with rdflib so that
# graph.serialize(format=...) can find them by name.
plugin.register('rdfa', Serializer, 'foresite.RDFaSerializer', 'RDFaSerializer')
plugin.register('json', Serializer, 'foresite.JsonSerializer', 'JsonSerializer')
plugin.register('pretty-json', Serializer, 'foresite.JsonSerializer', 'PrettyJsonSerializer')
class ORESerializer(object):
    # Take objects and produce data.
    # Base class: maps a format keyword to MIME type / file extension and
    # prepares the merged, connected graph that subclasses render.
    mimeType = ""
    format = ""
    public = 1
    def __init__(self, format, public=1):
        # public=1 strips library-internal predicates before serializing
        mimetypes = {'atom' : 'application/atom+xml',
                     'rdfa' : 'application/xhtml+xml',
                     'xml' : 'application/rdf+xml',
                     'nt' : 'text/plain',
                     'n3' : 'text/rdf+n3',
                     'turtle' : 'application/x-turtle',
                     'pretty-xml' : 'application/rdf+xml'
                     }
        self.extensions = {'atom': 'atom',
                           'rdfa' : 'xhtml',
                           'xml' : 'xml',
                           'nt' : 'nt',
                           'n3' : 'n3',
                           'turtle' : 'ttl',
                           'pretty-xml' : 'pretty.xml'
                           }
        self.format = format
        self.public = public
        self.mimeType = mimetypes.get(format, '')
    def merge_graphs(self, rem, page=-1):
        # Merge the ReM, its aggregation, the (optionally paged) resources
        # and all attached agents/extra triples into a single graph.
        g = Graph()
        # Put in some sort of recognition of library?
        n = now()
        # stamp created/modified, and default the creator to this library
        if not rem.created:
            rem._dcterms.created = n
        rem._dcterms.modified = n
        if not rem._dcterms.creator:
            rem.add_agent(foresiteAgent, 'creator')
        aggr = rem.aggregation
        stack = [rem, aggr]
        if page != -1:
            # first is 1, 2, 3 ...
            start = (page-1) * pageSize
            tosrlz = aggr._resources_[start:start+pageSize]
        else:
            tosrlz = aggr._resources_
        remove = []
        for (r, p) in tosrlz:
            if isinstance(r, Aggregation):
                # nested aggregation: suppress its own ore:aggregates
                # triples in this serialization
                for a in r._ore.aggregates:
                    remove.append((r._uri_, namespaces['ore']['aggregates'], a))
            stack.extend([r, p])
        done = []
        # breadth-first accumulation of every reachable object's graph
        while stack:
            what = stack.pop(0)
            if what == None or what in done:
                continue
            done.append(what)
            g += what._graph_
            for at in what._triples_.values():
                stack.append(at)
            for who in what._agents_.values():
                stack.append(who)
        if self.public:
            # Remove internal methods
            for p in internalPredicates:
                for (s,o) in g.subject_objects(p):
                    g.remove((s,p,o))
        for trip in remove:
            g.remove(trip)
        if not aggr._resources_:
            raise OreException("Aggregation must aggregate something")
        g = self.connected_graph(g, aggr._uri_)
        return g
    def connected_graph(self, graph, uri):
        # Return the subgraph reachable (in either direction) from *uri*;
        # what happens to dropped nodes depends on unconnectedAction.
        if unconnectedAction == 'ignore':
            return graph
        g = Graph()
        all_nodes = list(graph.all_nodes())
        all_nodes = filter(lambda y: not isinstance(y, Literal), all_nodes)
        discovered = {}
        visiting = [uri]
        # undirected walk over the triples starting from the aggregation
        while visiting:
            x = visiting.pop()
            if not discovered.has_key(x):
                discovered[x] = 1
                for (p, new_x) in graph.predicate_objects(subject=x):
                    g.add((x,p,new_x))
                    if (isinstance(new_x, URIRef) or isinstance(new_x, BNode)) and not discovered.has_key(new_x) and not new_x in visiting:
                        visiting.append(new_x)
                for (new_x, p) in graph.subject_predicates(object=x):
                    g.add((new_x,p,x))
                    if (isinstance(new_x, URIRef) or isinstance(new_x, BNode)) and not discovered.has_key(new_x) and not new_x in visiting:
                        visiting.append(new_x)
        if len(discovered) != len(all_nodes):
            # some nodes were unreachable from the aggregation
            if unconnectedAction == 'warn':
                print "Warning: Graph is unconnected, some nodes being dropped"
            elif unconnectedAction == 'raise':
                raise OreException('Graph to be serialized is unconnected')
            elif unconnectedAction != 'drop':
                raise ValueError('Unknown unconnectedAction setting: %s' % unconnectedAction)
        return g
class RdfLibSerializer(ORESerializer):
    """Serializer delegating to rdflib's built-in graph serializers."""

    def serialize(self, rem, page=-1):
        """Render *rem* (optionally one page of it) as a ReMDocument."""
        merged = self.merge_graphs(rem, page)
        return ReMDocument(str(rem._uri_),
                           merged.serialize(format=self.format),
                           format=self.format,
                           mimeType=self.mimeType)
class AtomSerializer(ORESerializer):
    """Serialize a ResourceMap as an ORE Atom entry.

    Triples that are rendered as native Atom elements are collected in
    self.done_triples and excluded from the trailing ore:triples
    rdf/xml block emitted by generate_rdf.
    """

    def __init__(self, format="atom", public=1):
        ORESerializer.__init__(self, format)
        self.spacesub = re.compile('(?<=>)[ ]+(?=<)')
        # triples already expressed as Atom markup for the current entry
        self.done_triples = []

    def generate_rdf(self, parent, sg):
        # remove already done, then serialize the remainder to rdf/xml
        # and graft its children under *parent*
        for t in self.done_triples:
            sg.remove(t)
        data = sg.serialize(format='xml')
        root = etree.fromstring(data)
        for child in root:
            parent.append(child)

    def make_agent(self, parent, agent):
        """Render *agent* as atom:name / atom:email / atom:uri children."""
        n = SubElement(parent, '{%s}name' % namespaces['atom'])
        try:
            name = agent._foaf.name[0]
            n.text = str(name)
            self.done_triples.append((agent._uri_, namespaces['foaf']['name'], name))
        except:
            pass
        if agent._foaf.mbox:
            n = SubElement(parent, '{%s}email' % namespaces['atom'])
            mb = agent._foaf.mbox[0]
            self.done_triples.append((agent._uri_, namespaces['foaf']['mbox'], mb))
            mb = str(mb)
            if mb[:7] == "mailto:":
                mb = mb[7:]
            n.text = mb
        # There's currently nowhere for URI to go!
        #if not isinstance(agent._uri_, BNode):
        #    n = SubElement(parent, 'uri')
        #    n.text = str(agent._uri_)
        # Silly, but it's what the spec says...
        if agent._foaf.page:
            n = SubElement(parent, '{%s}uri' % namespaces['atom'])
            fp = agent._foaf.page[0]
            self.done_triples.append((agent._uri_, namespaces['foaf']['page'], fp))
            n.text = fp

    def make_link(self, parent, rel, t, g):
        """Emit atom:link rel=*rel* href=*t*, decorated with dc metadata."""
        iana = str(namespaces['iana'])
        if rel.startswith(iana):
            # IANA-registered relations are written as their bare token
            rel = rel[len(iana):]
        e = SubElement(parent, '{%s}link' % namespaces['atom'], rel=rel, href=str(t))
        fmts = list(g.objects(t, namespaces['dc']['format']))
        if fmts:
            f = fmts[0]
            e.set('type', str(f))
            self.done_triples.append((t, namespaces['dc']['format'], f))
        langs = list(g.objects(t, namespaces['dc']['language']))
        if langs:
            l = langs[0]
            e.set('hreflang', str(langs[0]))
            self.done_triples.append((t, namespaces['dc']['language'], l))
        exts = list(g.objects(t, namespaces['dc']['extent']))
        if exts:
            l = exts[0]
            e.set('length', str(l))
            self.done_triples.append((t, namespaces['dc']['extent'], l))
        titls = list(g.objects(t, namespaces['dc']['title']))
        if titls:
            l = titls[0]
            e.set('title', str(l))
            self.done_triples.append((t, namespaces['dc']['title'], l))

    def serialize(self, rem, page=-1):
        """Render *rem* as an Atom entry ReMDocument."""
        aggr = rem._aggregation_
        # BUG FIX: the page selector was dropped, so paged serializations
        # always rendered the full aggregation.
        g = self.merge_graphs(rem, page)
        # make nsmap better
        nm = g.namespace_manager
        nsmap = {'atom' : str(namespaces['atom'])}
        poss = uniq(g.predicates()) + uniq(g.objects(None, RDF.type))
        for pred in poss:
            pf,ns,l = nm.compute_qname(pred)
            nsmap[pf] = ns
        root = Element("{%s}entry" % namespaces['atom'], nsmap=nsmap)
        # entry/id == tag for entry == ReM dc:identifier
        # if not exist, generate Yet Another uuid
        e = SubElement(root, '{%s}id' % namespaces['atom'])
        if rem._dc.identifier:
            dcid = rem._dc.identifier[0]
            e.text = str(dcid)
            self.done_triples.append((rem._uri_, namespaces['dc']['identifier'], dcid))
        else:
            e.text = "urn:uuid:%s" % gen_uuid()
        # entry/title == Aggr's dc:title (falling back to dcterms:title)
        title = aggr._dc.title
        tns = 'dc'
        if not title:
            title = aggr._dcterms.title
            tns = 'dcterms'
        if not title:
            raise OreException("Atom Serialisation requires title on aggregation")
        else:
            e = SubElement(root, '{%s}title' % namespaces['atom'])
            dctit = title[0]
            e.text = str(dctit)
            self.done_triples.append((aggr._uri_, namespaces[tns]['title'], dctit))
        # entry/author == Aggr's dcterms:creator
        for who in aggr._dcterms.creator:
            e = SubElement(root, '{%s}author' % namespaces['atom'])
            agent = aggr._agents_[who]
            self.make_agent(e, agent)
            self.done_triples.append((aggr._uri_, namespaces['dcterms']['creator'], agent._uri_))
        # entry/contributor == Aggr's dcterms:contributor
        for bn in aggr._dcterms.contributor:
            e = SubElement(root, '{%s}contributor' % namespaces['atom'])
            # BUG FIX: previously looked up aggr._agents_[who] -- the stale
            # loop variable from the creator loop above -- so contributors
            # were rendered with the wrong agent (or crashed if there were
            # no creators).
            agent = aggr._agents_[bn]
            self.make_agent(e, agent)
            self.done_triples.append((aggr._uri_, namespaces['dcterms']['contributor'], agent._uri_))
        # entry/category[@scheme="(magic)"][@term="(datetime)"]
        for t in aggr._dcterms.created:
            t = t.strip()
            e = SubElement(root, '{%s}category' % namespaces['atom'], term=str(t),
                           scheme="http://www.openarchives.org/ore/terms/datetime/created")
        for t in aggr._dcterms.modified:
            t = t.strip()
            e = SubElement(root, '{%s}category' % namespaces['atom'], term=str(t),
                           scheme="http://www.openarchives.org/ore/terms/datetime/modified")
        # entry/category == Aggr's rdf:type
        for t in aggr._rdf.type:
            e = SubElement(root, '{%s}category' % namespaces['atom'], term=str(t))
            try:
                scheme = list(g.objects(t, namespaces['rdfs']['isDefinedBy']))[0]
                e.set('scheme', str(scheme))
                self.done_triples.append((t, namespaces['rdfs']['isDefinedBy'], scheme))
            except:
                pass
            try:
                label = list(g.objects(t, namespaces['rdfs']['label']))[0]
                e.set('label', str(label))
                self.done_triples.append((t, namespaces['rdfs']['label'], label))
            except:
                pass
            self.done_triples.append((aggr._uri_, namespaces['rdf']['type'], t))
        # entry/summary
        if aggr._dc.description:
            e = SubElement(root, '{%s}summary' % namespaces['atom'])
            desc = aggr._dc.description[0]
            e.text = str(desc)
            self.done_triples.append((aggr._uri_, namespaces['dc']['description'], desc))
        # All aggr links (URIRef-valued props not already handled above):
        done = [namespaces['rdf']['type'],
                namespaces['ore']['aggregates'],
                namespaces['dcterms']['creator'],
                namespaces['dcterms']['contributor'],
                namespaces['dc']['title'],
                namespaces['dc']['description']
                ]
        for (p, o) in g.predicate_objects(aggr.uri):
            if not p in done:
                if isinstance(o, URIRef):
                    self.make_link(root, p, o, g)
                    self.done_triples.append((aggr._uri_, p, o))
        # entry/content // link[@rel="alternate"]
        # Do we have a splash page?
        altDone = 0
        # BUG FIX: was aggr._rdf._type, which queried the nonexistent
        # rdf:_type predicate and therefore always returned [] -- the
        # shared-type alternate heuristic below could never match.
        atypes = aggr._rdf.type
        possAlts = []
        for (r, p) in aggr.resources:
            mytypes = r._rdf.type
            if namespaces['eurepo']['humanStartPage'] in mytypes:
                altDone = 1
                self.make_link(root, 'alternate', r.uri, g)
                break
            # check if share non Aggregation type
            # eg aggr == article and aggres == article, likely
            # to be good alternate
            for m in mytypes:
                if m != namespaces['ore']['Aggregation'] and \
                   m in atypes:
                    possAlts.append(r.uri)
        if not altDone and possAlts:
            # XXX more intelligent algorithm here
            # BUG FIX: the rel was written in Clark notation
            # ('{...atom}alternate'); the rel attribute value is just the
            # token, as in the humanStartPage branch above.
            self.make_link(root, 'alternate', possAlts[0], g)
            altDone = 1
        if not altDone and build_html_atom_content:
            e = SubElement(root, '{%s}content' % namespaces['atom'])
            e.set('type', 'html')
            # make some representative html
            # this can get VERY LONG so default to not doing this
            html = ['<ul>']
            for (r, p) in aggr.resources:
                html.append('<li><a href="%s">%s</a></li>' % (r.uri, r.title[0]))
            html.append('</ul>')
            e.text = '\n'.join(html)
        else:
            e = SubElement(root, '{%s}content' % namespaces['atom'])
            e.set('type', 'html')
            e.text = "No Content"
        # entry/link[@rel='self'] == URI-R
        self.make_link(root, 'self', rem._uri_, g)
        # entry/link[@rel='ore:describes'] == URI-A
        self.make_link(root, namespaces['ore']['describes'], aggr._uri_, g)
        ### These are generated automatically in merge_graphs
        # entry/published == ReM's dcterms:created
        if rem._dcterms.created:
            e = SubElement(root, '{%s}published' % namespaces['atom'])
            c = rem._dcterms.created[0]
            md = str(c)
            if md.find('Z') == -1:
                # append Z
                md += "Z"
            e.text = md
            self.done_triples.append((rem._uri_, namespaces['dcterms']['created'], c))
        # entry/updated == ReM's dcterms:modified
        e = SubElement(root, '{%s}updated' % namespaces['atom'])
        if rem._dcterms.modified:
            c = rem._dcterms.modified[0]
            md = str(c)
            if md.find('Z') == -1:
                # append Z
                md += "Z"
            e.text = str(md)
            self.done_triples.append((rem._uri_, namespaces['dcterms']['modified'], c))
        else:
            e.text = now()
        # entry/rights == ReM's dc:rights
        if rem._dc.rights:
            e = SubElement(root, '{%s}rights' % namespaces['atom'])
            r = rem._dc.rights[0]
            e.text = str(r)
            self.done_triples.append((rem._uri_, namespaces['dc']['rights'], r))
        # entry/source/author == ReM's dcterms:creator
        if rem._dcterms.creator:
            # Should at least be our generator! (right?)
            src = SubElement(root, '{%s}source' % namespaces['atom'])
            for who in rem._dcterms.creator:
                e = SubElement(src, '{%s}author' % namespaces['atom'])
                agent = rem._agents_[who]
                self.make_agent(e, agent)
                self.done_triples.append((rem._uri_, namespaces['dcterms']['creator'], agent._uri_))
            for who in rem._dcterms.contributor:
                e = SubElement(src, '{%s}contributor' % namespaces['atom'])
                agent = rem._agents_[who]
                self.make_agent(e, agent)
                self.done_triples.append((rem._uri_, namespaces['dcterms']['contributor'], agent._uri_))
            e = SubElement(src, '{%s}generator' % namespaces['atom'], uri=str(libraryUri), version=str(libraryVersion))
            e.text = str(libraryName)
        # Remove aggregation, resource map props already done
        # All of agg res needs to be done
        for (r, p) in aggr.resources:
            self.make_link(root, namespaces['ore']['aggregates'], r.uri, g)
            self.done_triples.append((aggr._uri_, namespaces['ore']['aggregates'], r._uri_))
        # Now create ore:triples
        # and populate with rdf/xml
        trips = SubElement(root, '{%s}triples' % namespaces['ore'])
        self.generate_rdf(trips, g)
        data = etree.tostring(root, pretty_print=True)
        #data = data.replace('\n', '')
        #data = self.spacesub.sub('', data)
        uri = str(rem._uri_)
        # reset per-entry state so the serializer can be reused
        self.done_triples = []
        return ReMDocument(uri, data, format='atom', mimeType=self.mimeType)
class OldAtomSerializer(ORESerializer):
    """Serialise a Resource Map as an ORE-0.9-style Atom feed.

    The aggregation becomes the feed, each aggregated resource becomes an
    atom:entry, and any triples not expressible as Atom markup are embedded
    as RDF/XML (see generate_rdf).  self.done_triples accumulates triples
    already serialised as Atom so they are not duplicated in the RDF.
    """

    def __init__(self, format="atom0.9", public=1):
        ORESerializer.__init__(self, format)
        # collapses inter-element whitespace when flattening the output
        self.spacesub = re.compile('(?<=>)[ ]+(?=<)')
        self.done_triples = []

    def remove_link_attrs(self, sg, a):
        """Remove format/title/language/extent triples for `a` from graph `sg`.

        Only the first object per predicate is removed, since only one value
        can be carried on an atom:link element's attributes.
        """
        # only remove first from each list
        for ns in (namespaces['dc']['format'], namespaces['dc']['title'], namespaces['dc']['language'], namespaces['dc']['extent']):
            objs = list(sg.objects(a, ns))
            if objs:
                sg.remove((a, ns, objs[0]))

    def generate_rdf(self, parent, what):
        """Serialise the not-yet-handled triples of `what` as RDF/XML under `parent`."""
        # extract not processed parts of graph
        # serialise with rdflib
        # parse with lxml and add to parent element
        sg = Graph()
        sg += what.graph
        for at in what.triples.values():
            sg += at.graph
        for a in what.agents.values():
            sg += a.graph
        for a in what.type:
            for b in sg.objects(a, namespaces['rdfs']['isDefinedBy']):
                sg.remove((a, namespaces['rdfs']['isDefinedBy'], b))
            for b in sg.objects(a, namespaces['rdfs']['label']):
                sg.remove((a, namespaces['rdfs']['label'], b))
            sg.remove((what.uri, namespaces['rdf']['type'], a))
        for t in self.done_triples:
            sg.remove(t)
        if isinstance(what, Aggregation) or isinstance(what, AggregatedResource):
            # remove atom srlzd bits
            self.remove_link_attrs(sg, what.uri)
            try:
                sg.remove((what.uri, namespaces['dc']['description'], what.description[0]))
            except:
                pass
            for a in what.creator:
                sg.remove((what.uri, namespaces['dcterms']['creator'], a))
            for a in what.contributor:
                sg.remove((what.uri, namespaces['dcterms']['contributor'], a))
            for a in what._ore.similarTo:
                self.remove_link_attrs(sg, a)
                sg.remove((what.uri, namespaces['ore']['similarTo'], a))
            for a in what._ore.aggregates:
                sg.remove((what.uri, namespaces['ore']['aggregates'], a))
            try:
                # aggregation uses dcterms rights, as it's a URI
                for a in what._dcterms.rights:
                    self.remove_link_attrs(sg, a)
                    sg.remove((what.uri, namespaces['dcterms']['rights'], a))
            except:
                pass
            try:
                sg.remove((what.uri, namespaces['foaf']['logo'], what._foaf.logo))
            except:
                pass
            if isinstance(what, Aggregation):
                for a in sg.objects(what.uri, namespaces['ore']['isDescribedBy']):
                    self.remove_link_attrs(sg, a)
                    sg.remove((what.uri, namespaces['ore']['isDescribedBy'], a))
                # remember what was emitted so the ReM pass can skip it
                self.done_triples.extend(list(sg))
            else:
                # remove isAggregatedBy == rel=related
                for a in what._ore.isAggregatedBy:
                    sg.remove((what.uri, namespaces['ore']['isAggregatedBy'], a))
                self.done_triples = []
                # and add in proxy info
                proxy = what._currProxy_
                if proxy:
                    sg += proxy.graph
                    for a in proxy._agents_.values():
                        sg += a.graph
                    # remove proxyFor, proxyIn
                    for a in proxy._ore.proxyFor:
                        sg.remove((proxy.uri, namespaces['ore']['proxyFor'], a))
                    for a in proxy._ore.proxyIn:
                        sg.remove((proxy.uri, namespaces['ore']['proxyIn'], a))
                    for a in proxy.type:
                        for b in sg.objects(a, namespaces['rdfs']['isDefinedBy']):
                            sg.remove((a, namespaces['rdfs']['isDefinedBy'], b))
                        for b in sg.objects(a, namespaces['rdfs']['label']):
                            sg.remove((a, namespaces['rdfs']['label'], b))
                        sg.remove((proxy.uri, namespaces['rdf']['type'], a))
        elif isinstance(what, ResourceMap):
            self.remove_link_attrs(sg, what.uri)
            for a in what.describes:
                sg.remove((what.uri, namespaces['ore']['describes'], a))
            for a in what.creator:
                sg.remove((what.uri, namespaces['dcterms']['creator'], a))
            try:
                # ReM uses dc rights, as it's a string
                sg.remove((what.uri, namespaces['dc']['rights'], what._dc.rights[0]))
            except:
                pass
            try:
                sg.remove((what.uri, namespaces['dcterms']['modified'], what._dcterms.modified[0]))
            except:
                pass
            try:
                sg.remove((what.uri, namespaces['foaf']['logo'], what._foaf.logo[0]))
            except:
                pass
            try:
                sg.remove((what.uri, namespaces['ore']['describes'], what._ore.describes[0]))
            except:
                pass
            self.done_triples = []
        data = sg.serialize(format='xml')
        root = etree.fromstring(data)
        for child in root:
            parent.append(child)

    def make_agent(self, parent, agent):
        """Emit atom name/email/uri children for a foaf Agent under `parent`."""
        n = SubElement(parent, 'name')
        try:
            name = agent._foaf.name[0]
            n.text = str(name)
            self.done_triples.append((agent._uri_, namespaces['foaf']['name'], name))
        except:
            # allow blank names where unknown
            pass
        if agent._foaf.mbox:
            n = SubElement(parent, 'email')
            mb = agent._foaf.mbox[0]
            self.done_triples.append((agent._uri_, namespaces['foaf']['mbox'], mb))
            mb = str(mb)
            # Strip mailto: (eg not a URI any more)
            if mb[:7] == "mailto:":
                mb = mb[7:]
            n.text = mb
        if not isinstance(agent._uri_, BNode):
            n = SubElement(parent, 'uri')
            n.text = str(agent._uri_)

    def make_link(self, parent, rel, t, g):
        """Add atom:link rel=`rel` href=`t`, copying dc attrs from graph `g`."""
        e = SubElement(parent, 'link', rel=rel, href=str(t))
        # look for format, language, extent of t
        fmts = list(g.objects(t, namespaces['dc']['format']))
        if fmts:
            e.set('type', str(fmts[0]))
        langs = list(g.objects(t, namespaces['dc']['language']))
        if langs:
            e.set('hreflang', str(langs[0]))
        exts = list(g.objects(t, namespaces['dc']['extent']))
        if exts:
            e.set('length', str(exts[0]))
        titls = list(g.objects(t, namespaces['dc']['title']))
        if titls:
            e.set('title', str(titls[0]))

    def serialize(self, rem, page=-1):
        """Serialise Resource Map `rem` and return a ReMDocument (atom0.9 style)."""
        aggr = rem._aggregation_
        # Check entire graph is connected
        g = self.merge_graphs(rem)
        if namespaces.has_key(''):
            del namespaces[u'']
        root = Element("feed", nsmap=namespaces)
        #namespaces[''] = myNamespace
        ## Aggregation Info
        e = SubElement(root, 'id')
        e.text = str(aggr.uri)
        if not aggr._dc.title:
            raise OreException("Atom Serialisation requires title on aggregation")
        else:
            e = SubElement(root, 'title')
            e.text = str(aggr._dc.title[0])
        if aggr._dc.description:
            e = SubElement(root, 'subtitle')
            e.text = str(aggr._dc.description[0])
        for who in aggr._dcterms.creator:
            e = SubElement(root, 'author')
            agent = aggr._agents_[who]
            self.make_agent(e, agent)
        for bn in aggr._dcterms.contributor:
            e = SubElement(root, 'contributor')
            agent = aggr._agents_[bn]
            self.make_agent(e, agent)
        for t in aggr._ore.similarTo:
            self.make_link(root, 'related', t, g)
        for t in aggr._dcterms.rights:
            self.make_link(root, 'license', t, g)
        for t in aggr._rdf.type:
            e = SubElement(root, 'category', term=str(t))
            try:
                scheme = list(g.objects(t, namespaces['rdfs']['isDefinedBy']))[0]
                e.set('scheme', str(scheme))
            except:
                pass
            try:
                label = list(g.objects(t, namespaces['rdfs']['label']))[0]
                e.set('label', str(label))
            except:
                pass
        orms = []
        for orm in aggr._resourceMaps_:
            if orm != rem:
                self.make_link(root, 'alternate', orm.uri, g)
            orms.append(orm.uri)
        for t in aggr._ore.isDescribedBy:
            # check not in orms
            if not t in orms:
                self.make_link(root, 'alternate', t, g)
        self.generate_rdf(root, aggr)
        ## ReM Info
        self.make_link(root, 'self', rem.uri, g)
        e = SubElement(root, 'updated')
        e.text = now()
        # ReM Author
        if rem._dcterms.creator:
            uri = rem._dcterms.creator[0]
            e = SubElement(root, 'generator', uri=str(uri))
            agent = rem._agents_[uri]
            n = agent._foaf.name[0]
            e.text = str(n)
            self.done_triples.append((uri, namespaces['foaf']['name'], n))
        # if no logo, put in nice ORE icon
        e = SubElement(root, 'icon')
        if aggr._foaf.logo:
            e.text = str(aggr._foaf.logo[0])
        elif rem._foaf.logo:
            e.text = str(rem._foaf.logo[0])
        else:
            e.text = "http://www.openarchives.org/ore/logos/ore_icon.png"
        if rem._dc.rights:
            e = SubElement(root, 'rights')
            # NOTE(review): not wrapped in str() unlike the other text fields
            e.text = rem._dc.rights[0]
        self.generate_rdf(root, rem)
        ## Process Entries
        for (res, proxy) in aggr._resources_:
            entry = SubElement(root, 'entry')
            e = SubElement(entry, 'id')
            if proxy:
                e.text = str(proxy.uri)
            else:
                e.text = "urn:uuid:%s" % gen_uuid()
            e = SubElement(entry, 'link', rel="alternate", href=str(res.uri))
            # type = dc:format
            fmt = list(g.objects(res.uri, namespaces['dc']['format']))
            if fmt:
                e.set('type', str(fmt[0]))
            if not res._dc.title:
                raise ValueError("All entries must have a title for ATOM serialisation")
            else:
                e = SubElement(entry, 'title')
                e.text = str(res._dc.title[0])
            for t in res._rdf.type:
                e = SubElement(entry, 'category', term=str(t))
                try:
                    scheme = list(g.objects(t, namespaces['rdfs']['isDefinedBy']))[0]
                    e.set('scheme', str(scheme))
                except:
                    pass
                try:
                    label = list(g.objects(t, namespaces['rdfs']['label']))[0]
                    e.set('label', str(label))
                except:
                    pass
            for a in res._dcterms.creator:
                e = SubElement(entry, 'author')
                agent = res._agents_[a]
                self.make_agent(e, agent)
            for a in res._dcterms.contributor:
                e = SubElement(entry, 'contributor')
                agent = res._agents_[a]
                self.make_agent(e, agent)
            if res._dcterms.abstract:
                e = SubElement(entry, 'summary')
                e.text = str(res._dcterms.abstract[0])
            # Not sure about this at object level?
            for oa in res._ore.isAggregatedBy:
                if oa != aggr._uri_:
                    e = SubElement(entry, 'link', rel="related", href=str(oa))
            e = SubElement(entry, 'updated')
            e.text = now()
            if proxy and proxy._ore.lineage:
                e = SubElement(entry, 'link', rel="via", href=str(proxy._ore.lineage[0]))
            # generate_rdf reads _currProxy_ to fold proxy triples into the entry
            res._currProxy_ = proxy
            self.generate_rdf(entry, res)
            res._currProxy_ = None
        data = etree.tostring(root)
        data = data.replace('\n', '')
        data = self.spacesub.sub('', data)
        uri = str(rem._uri_)
        self.done_triples = []
        return ReMDocument(uri, data)
| Python |
try:
import json
except ImportError:
import simplejson as json
from rdflib.parser import Parser
from rdflib import URIRef, BNode, Literal
class JsonParser(Parser):
    """rdflib Parser plugin for the RDF/JSON triple format.

    Accepts both plain RDF/JSON ({subject: {predicate: [valueobj, ...]}})
    and the 'pretty' GData-style variant written by PrettyJsonSerializer,
    where predicates are prefixed names ('dc$title' / 'dc:title') and the
    prefixes are declared in 'xmlns$...' / 'xmlns:...' entries.
    """

    def __init__(self):
        pass

    def parse(self, source, sink, **args):
        """Read JSON from `source` and add the resulting triples to `sink`."""
        data = source.getByteStream().read()
        objs = json.loads(data)
        # check if pretty-json: prefix bindings carried inline as xmlns keys
        keys = objs.keys()
        pretty = 0
        bindings = {}
        for k in keys:
            if k.startswith('xmlns$') or k.startswith('xmlns:'):
                pretty = 1
                bindings[k[6:]] = objs[k]
        for k in keys:
            if not k.startswith('xmlns$') and not k.startswith('xmlns:'):
                if k[0] == "_" and k[1] in [':', '$']:
                    # bnode
                    s = BNode(k[2:])
                else:
                    # uri
                    s = URIRef(k)
                # predicates
                preds = objs[k]
                for (p, v) in preds.items():
                    if pretty:
                        dpidx = p.find('$')
                        if dpidx == -1:
                            dpidx = p.find(':')
                        if dpidx > -1:
                            pfx = p[:dpidx]
                            dmn = bindings.get(pfx, '')
                            if dmn:
                                # FIX: expand to namespace + local name; the
                                # original appended the whole prefixed name
                                # ('dc$title') to the namespace URI.
                                pred = URIRef(dmn + p[dpidx + 1:])
                            else:
                                raise ValueError("Unassigned Prefix: %s" % pfx)
                        else:
                            pred = URIRef(p)
                    else:
                        pred = URIRef(p)
                    for vh in v:
                        value = vh['value']
                        vt = vh['type']
                        if vt == 'literal':
                            # FIX: use a local dict; the original rebound the
                            # **args parameter itself on every literal.
                            litargs = {}
                            lang = vh.get('lang', '')
                            if lang:
                                litargs['lang'] = lang
                            datatype = vh.get('datatype', '')
                            if datatype:
                                litargs['datatype'] = datatype
                            val = Literal(value, **litargs)
                        elif vt == 'uri':
                            val = URIRef(value)
                        elif vt == 'bnode':
                            # FIX: was BNode(val[2:]) -- 'val' is undefined or
                            # stale here; strip the '_:'/'_$' prefix off value.
                            val = BNode(value[2:])
                        sink.add((s, pred, val))
        # returns None
| Python |
def skipws(next):
    """Decorator for a tokenizer's next() that skips one whitespace token.

    MiniLex merges consecutive whitespace characters into a single token,
    so pulling one extra token is always enough to step over whitespace.
    (The original wrapped this in a dead ``skip = 1 / if not skip`` guard,
    removed here.)
    """
    def foo(*args):
        tok = next(*args)
        if tok.isspace():
            tok = next(*args)
        return tok
    return foo
class ParseError(Exception):
    """Raised by Parser when an Accept header is malformed."""
    pass
class MiniLex(object):
    """Minimal tokenizer for HTTP Accept-style header strings.

    next() yields runs of plain text, runs of whitespace (stepped over by
    the @skipws decorator), single separator characters, and whole quoted
    strings.  state: 0 = neutral, 1 = in whitespace, 2 = in quoted text,
    3 = in a plain word.
    """

    def __init__(self, data,
                 whitespace=" \t",
                 sep="[](){}<>\\/@:;,?=",
                 quotes="\"",
                 eof="\n\r"):
        self.data = data
        self.whitespace = whitespace
        self.separators = sep
        self.quotes = quotes
        self.eof = eof
        self.state = 0
        self.token = []   # characters of the token currently being built
        self.quoted = ''  # quote character we are inside, or falsy when not
        self.pos = 0

    def __iter__(self):
        return self

    @skipws
    def next(self):
        """Return the next token; raises StopIteration at end of input."""
        while True:
            if self.pos == len(self.data):
                if self.token:
                    tok = ''.join(self.token)
                    self.token = []
                    return tok
                else:
                    raise StopIteration
            char = self.data[self.pos]
            tok = ''
            if self.quoted and not char in self.quotes:
                self.token.append(char)
                self.pos += 1
            elif char in self.quotes:
                if char == self.quoted:
                    # we're in quoted text
                    if self.data[self.pos - 1] == "\\":
                        # escaped quote: keep accumulating
                        self.token.append(char)
                        self.pos += 1
                    else:
                        # closing quote: emit the whole quoted token
                        self.token.append(char)
                        tok = ''.join(self.token)
                        self.token = []
                        self.pos += 1
                        self.quoted = 0
                        self.state = 0
                        return tok
                elif self.quoted:
                    # other quotes
                    self.token.append(char)
                    self.pos += 1
                else:
                    # begin quoted text; flush any pending token first
                    if self.token:
                        tok = ''.join(self.token)
                    self.quoted = char
                    self.token = [char]
                    self.pos += 1
                    self.state = 2
                    if tok:
                        return tok
            elif char in self.whitespace:
                if self.state == 1:
                    self.token.append(char)
                else:
                    if self.token:
                        tok = ''.join(self.token)
                    self.state = 1
                    self.token = [char]
                self.pos += 1
                if tok:
                    return tok
            elif char in self.separators:
                # can't join seps (currently)
                if self.token:
                    # flush the pending token; the separator is re-read next call
                    tok = ''.join(self.token)
                else:
                    tok = char
                    self.pos += 1
                self.token = []
                self.state = 0
                return tok
            elif char in self.eof:
                # NOTE(review): returns the pending token without clearing it
                # or advancing pos -- repeated calls at an eof char would
                # return the same token again; confirm eof chars never occur
                # mid-header in practice.
                if self.token:
                    return ''.join(self.token)
                else:
                    raise StopIteration
            else:
                if self.state == 3:
                    self.token.append(char)
                else:
                    if self.token:
                        tok = ''.join(self.token)
                    self.token = [char]
                    self.state = 3
                self.pos += 1
                if tok:
                    return tok
class MimeType(object):
    """One media-range from an Accept header: type/subtype, parameters, qval."""

    def __init__(self):
        self.mimetype1 = ""
        self.mimetype2 = ""
        self.params = {}
        self.qval = 1.0

    def __str__(self):
        # The qval is deliberately not serialised back out.
        base = "%s/%s" % (self.mimetype1, self.mimetype2)
        if self.params:
            tail = ";".join("%s=%s" % pair for pair in self.params.items())
            return base + ";" + tail
        return base

    def __repr__(self):
        return "<MimeType: %s>" % self

    def sort2(self):
        """Secondary sort key: specificity (wildcards lowest, params highest)."""
        if self.mimetype1 == "*":
            return 0
        if self.mimetype2 == "*":
            return 1
        if self.params:
            return 2 + len(self.params)
        return 2

    def matches(self, other):
        """True if self and other agree on type/subtype (wildcards allowed) and params."""
        type_ok = (self.mimetype1 == other.mimetype1
                   or '*' in (self.mimetype1, other.mimetype1))
        subtype_ok = (self.mimetype2 == other.mimetype2
                      or '*' in (self.mimetype2, other.mimetype2))
        return bool(type_ok and subtype_ok and self.params == other.params)
class Parser(object):
    """Turns a MiniLex token stream into a list of MimeType objects."""

    def __init__(self, ml):
        self.ml = ml

    def process(self):
        """Return every media-range in the header, in document order."""
        found = []
        mt = self.top()
        while mt:
            if mt.mimetype1 == "*" and mt.mimetype2 == "*" and mt.qval == 1.0:
                # A bare */* sinks below everything else, otherwise the
                # ordering is non-deterministic.  See apache conneg rules.
                mt.qval = 0.001
            found.append(mt)
            mt = self.top()
        return found

    def top(self):
        """Read one media-range (type/subtype plus parameters); None at end of input."""
        mt = MimeType()
        try:
            mt.mimetype1 = self.ml.next()   # e.g. "text"
        except StopIteration:
            return None
        slash = self.ml.next()
        if slash != "/":
            raise ParseError("Expected /, got: " + slash)
        mt.mimetype2 = self.ml.next()       # e.g. "html"
        while True:
            try:
                tok = self.ml.next()
            except StopIteration:
                return mt
            if tok == ',':
                return mt
            if tok == ';':
                (key, val) = self.param()
                if key == "q":
                    mt.qval = float(val)
                else:
                    mt.params[key] = val
            else:
                raise ParseError("Expected , or ; got: %r" % tok)

    def param(self):
        """Read one 'key=value' parameter (the leading ';' is already consumed)."""
        key = self.ml.next()
        eq = self.ml.next()
        if eq != "=":
            raise ParseError("Expected =, got: " + eq)
        return (key, self.ml.next())
def best(client, server):
    """Return the client's most-preferred MimeType that the server can supply.

    Both arguments are lists of MimeType objects; `client` must already be
    sorted by preference (qval descending), `server` may be unsorted.
    Per HTTP conneg, any parameters in the request must be honoured, so a
    media-range with parameters only matches exactly (MimeType.matches
    enforces this) -- hence */*;params still requires the params to match.
    Returns None when nothing matches.
    """
    for wanted in client:
        for offered in server:
            if offered.matches(wanted):
                return wanted
    return None
def parse(data):
    """Parse an Accept-style header string into MimeTypes, best-first.

    Sorted primarily by qval (descending) and secondarily by specificity
    (sort2), relying on Python's stable sort.
    """
    mtypes = Parser(MiniLex(data)).process()
    mtypes.sort(key=lambda m: m.sort2(), reverse=True)
    mtypes.sort(key=lambda m: m.qval, reverse=True)
    return mtypes
if __name__ == '__main__':
    # Smoke test: negotiate the RFC 2616 example Accept header against a
    # server-side list and print the winning media type.
    ml = MiniLex("text/*;q=0.3, text/html;q=0.7, text/html;level=1, text/html;level=2;q=0.4, */*;q=0.2")
    p = Parser(ml)
    mts = p.process()
    mts.sort(key=lambda x: x.sort2(), reverse=True)
    mts.sort(key=lambda x: x.qval, reverse=True)
    ml2 = MiniLex("text/xhtml+xml, text/xml, application/atom+xml, text/html;level=2")
    p2 = Parser(ml2)
    mts2 = p2.process()
    b = best(mts, mts2)
    print b
| Python |
import urllib
import time
import re
from rdflib import Namespace
### Configuration Options

### Assign a UUID URI or Blank Node for autogenerating agent URIs
### if not present in data
assignAgentUri = False
#assignAgentUri = True

### Use UUID or oreproxy.org for autogenerating proxy URIs if
### not present in data (key into proxyTypeHash below)
proxyType = 'proxy'
# proxyType = 'UUID'

### What to do when encountering an unconnected graph:
unconnectedAction = 'ignore' # produce unconnected graph
#unconnectedAction = 'drop' # drop any unconnected triples silently
#unconnectedAction = 'warn' # print a warning
#unconnectedAction = 'raise' # raise an Exception

# Number of resources per page to serialise
pageSize = 10

# XSLT server to create alternate representation from Atom Entry
atomXsltUri = ""
# atomXsltUri = "http://www.oreproxy.org/alt?what=%s"

build_html_atom_content = False
accept_header = 'application/rdf+xml;q=1.0, application/atom+xml;q=0.9, text/rdf+n3;q=0.8'

# Matches URIs whose scheme is a retrievable protocol (used for strict checks)
protocolUriRe = re.compile("^([s]?http[s]?://|[t]?ftp:/|z39.50r:|gopher:|imap://|news:|nfs:|nntp:|rtsp:)")
def gen_proxy_uuid(res, aggr):
    """Mint a urn:uuid: Proxy URI; the resource and aggregation are ignored."""
    return "urn:uuid:%s" % gen_uuid()
def gen_proxy_oreproxy(res, aggr):
    """Mint an oreproxy.org resolver URI from the resource and aggregation URIs."""
    where = urllib.quote(str(aggr.uri))
    what = urllib.quote(str(res.uri))
    return "http://oreproxy.org/r?what=%s&where=%s" % (what, where)
# Hash must come after function definitions.
# Define your own generator function, set proxyType above, and add it here;
# gen_proxy_uri() dispatches through this table.
proxyTypeHash = {'UUID' : gen_proxy_uuid,
                 'proxy' : gen_proxy_oreproxy
                 }
### Namespace Definitions
### If you need a new namespace you MUST add it into this hash
# FIX: the 'rdfs' URI previously read 'http://www.w3.org/2001/01/rdf-schema#';
# the canonical RDF Schema namespace is 2000/01, so emitted rdfs triples were
# not interoperable.
namespaces = {'ore' : Namespace('http://www.openarchives.org/ore/terms/'),
              'orex' : Namespace('http://foresite.cheshire3.org/orex/terms/'),
              'dc' : Namespace('http://purl.org/dc/elements/1.1/'),
              'mesur' : Namespace('http://www.mesur.org/schemas/2007-01/mesur#'),
              'dcterms' : Namespace('http://purl.org/dc/terms/'),
              'swap' : Namespace('http://purl.org/eprint/type/'),
              'rdf' : Namespace('http://www.w3.org/1999/02/22-rdf-syntax-ns#'),
              'foaf' : Namespace('http://xmlns.com/foaf/0.1/'),
              'rdfs' : Namespace('http://www.w3.org/2000/01/rdf-schema#'),
              'dcmitype' : Namespace('http://purl.org/dc/dcmitype/'),
              'atom' : Namespace('http://www.w3.org/2005/Atom'),
              'owl' : Namespace('http://www.w3.org/2002/07/owl#'),
              # NOTE(review): XML Schema datatype URIs normally end in '#'
              # (http://www.w3.org/2001/XMLSchema#); left unchanged pending
              # confirmation of how 'xsd' is used downstream.
              'xsd' : Namespace('http://www.w3.org/2001/XMLSchema'),
              'xhtml' : Namespace('http://www.w3.org/1999/xhtml'),
              'grddl' : Namespace('http://www.w3.org/2003/g/data-view#'),
              'swetodblp' : Namespace('http://lsdis.cs.uga.edu/projects/semdis/opus#'),
              'skos' : Namespace('http://www.w3.org/2004/02/skos/core#'),
              'eurepo' : Namespace('info:eu-repo/semantics/'),
              'at' : Namespace('http://purl.org/syndication/atomtriples/1/'),
              'iana' : Namespace('http://www.iana.org/assignments/relation/'),
              'bibo' : Namespace('http://purl.org/ontology/bibo/'),
              'prism' : Namespace('http://prismstandard.org/namespaces/1.2/basic/'),
              'vcard' : Namespace('http://nwalsh.com/rdf/vCard#'),
              'zotero' : Namespace('http://www.zotero.org/namespaces/exprt#')
              }
### Elements commonly used in ORE
### If an element is in this list, you can do object.predicate,
### rather than object._namespace.predicate
# (Not complete for most namespaces, just common terms)
elements = {
    'ore' : ['describes', 'isDescribedBy', 'aggregates', 'isAggregatedBy', 'similarTo', 'proxyFor', 'proxyIn', 'lineage'],
    'orex' : ['isAuthoritativeFor', 'AnonymousAgent', 'page', 'follows', 'firstPage', 'lastPage'],
    'dc' : ['coverage', 'date', 'description', 'format', 'identifier', 'language', 'publisher', 'relation', 'rights', 'source', 'subject', 'title'], # no creator, contributor
    'dcterms': ['abstract', 'accessRights', 'accrualMethod', 'accrualPeriodicity', 'accrualPolicy', 'alternative', 'audience', 'available', 'bibliographicCitation', 'conformsTo', 'contributor', 'created', 'creator', 'dateAccepted', 'dateCopyrighted', 'dateSubmitted', 'educationLevel', 'extent', 'hasFormat', 'hasPart', 'hasVersion', 'instructionalMethod', 'isFormatOf', 'isPartOf', 'isReferencedBy', 'isReplacedBy', 'isRequiredBy', 'issued', 'isVersionOf', 'license', 'mediator', 'medium', 'modified', 'provenance', 'references', 'replaces', 'requires', 'rights', 'rightsHolder', 'spatial', 'tableOfContents', 'temporal', 'valid'], # also rights
    'foaf' : ['accountName', 'aimChatID', 'birthday', 'depiction', 'depicts', 'family_name', 'firstName', 'gender', 'givenname', 'homepage', 'icqChatID', 'img', 'interest', 'jabberID', 'knows', 'logo', 'made', 'maker', 'mbox', 'member', 'msnChatID', 'name', 'nick', 'openid', 'page', 'phone', 'surname', 'thumbnail', 'weblog', 'yahooChatID'],
    'owl' : ['sameAs'],
    'rdf' : ['type'],
    'rdfs' : ['seeAlso', 'label', 'isDefinedBy'],
    'mesur' : ['hasAccess', 'hasAffiliation', 'hasIssue', 'hasVolume', 'used', 'usedBy'],
    'skos' : ['prefLabel', 'inScheme', 'broader', 'narrower', 'related', 'Concept', 'ConceptScheme', 'changeNote', 'editorialNote'],
    'iana' : ['alternate', 'current' ,'enclosure', 'edit', 'edit-media', 'first', 'last', 'next', 'next-archive', 'previous', 'payment', 'prev-archive', 'related', 'replies', 'service', 'via'], # -self, -license
    'bibo' : ['Article', 'Issue', 'Journal', 'pageStart', 'pageEnd', 'volume']
    }

### The order in which to search the above hash
namespaceSearchOrder = ['ore', 'dc', 'dcterms', 'foaf', 'rdf', 'rdfs', 'orex', 'owl', 'mesur', 'skos', 'iana']

# Predicates used for internal bookkeeping; never serialised to output
internalPredicates = [namespaces['orex']['isAuthoritativeFor'],
                      namespaces['orex']['page'],
                      ]

# Splits an lxml Clark-notation tag '{namespace}localname' into its parts
namespaceElemRe = re.compile('^\{(.+)\}(.+)$')
# UUID generator: pick the best available implementation at import time.
try:
    # only in Python 2.5+
    import uuid
    def gen_uuid():
        return str(uuid.uuid4())
except:
    # Try 4Suite if installed
    try:
        from Ft.Lib.Uuid import GenerateUuid, UuidAsString
        def gen_uuid():
            return UuidAsString(GenerateUuid())
    except:
        # No luck, try to generate using unix command
        import commands
        def gen_uuid():
            return commands.getoutput('uuidgen')
        # Sanity-check the command's output looks like a UUID
        uuidre = re.compile("[0-9a-fA-F-]{36}")
        uuid = gen_uuid()
        if not uuidre.match(uuid):
            # probably sh: command not found or other similar
            # weakest version: just build random token
            import random
            chrs = ['0','1','2','3','4','5','6','7','8','9','a','b','c','d','e','f']
            def gen_uuid():
                uuidl = []
                for y in [8,4,4,4,12]:
                    for x in range(y):
                        uuidl.append(random.choice(chrs))
                    uuidl.append('-')
                uuidl.pop(-1) # strip trailing -
                return ''.join(uuidl)
def now():
    """Return the current UTC time as an ISO-8601 'YYYY-MM-DDTHH:MM:SSZ' string.

    FIX: the original formatted *local* time while still appending the 'Z'
    (UTC) designator; pass time.gmtime() so the timestamp really is UTC.
    """
    return time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime())
def gen_proxy_uri(res, aggr):
    """Generate a Proxy URI for `res` within `aggr` using the configured scheme.

    Allow for easier expansion by adding a generator fn to proxyTypeHash.
    Raises KeyError for an unknown proxyType setting.
    """
    try:
        generator = proxyTypeHash[proxyType]
    except KeyError:
        raise KeyError("Unknown proxyType setting: %s" % proxyType)
    return generator(res, aggr)
class OreException(Exception):
    """Base exception for ORE model and serialisation errors in this package."""
    pass
| Python |
# Dependencies: rdflib
#               lxml
# Library identity, advertised in serialised output via atom:generator
libraryName = "Foresite Toolkit (Python)"
libraryUri = "http://foresite-toolkit.googlecode.com/#pythonAgent"
libraryVersion = "1.1"
libraryEmail = "foresite@googlegroups.com"
# Public API re-exported by the wildcard imports below
__all__ = ['ore', 'utils','parser', 'serializer', 'tripleStore', 'Aggregation', 'ResourceMap', 'AggregatedResource', 'Agent', 'ArbitraryResource', 'Proxy', 'ReMDocument', 'AtomSerializer', 'AtomParser', 'RdfLibSerializer', 'RdfLibParser', 'RdfAParser', 'RDFaSerializer', 'SQLiteTripleStore', 'MySQLTripleStore', 'BdbTripleStore']
from ore import *
from utils import *
from parser import *
from serializer import *
from tripleStore import *
from RDFaSerializer import *
| Python |
from __future__ import generators
from rdflib.serializer import Serializer
from rdflib.term import URIRef
from rdflib.term import Literal
from rdflib.term import BNode
from rdflib.util import uniq
from rdflib.exceptions import Error
#from rdflib.syntax.xml_names import split_uri
from xml.sax.saxutils import quoteattr, escape
class RDFaSerializer(Serializer):
    """rdflib Serializer that emits the graph as minimal 'invisible' RDFa.

    Output shape (no visible content, machine-readable only):
        <div about="subject">
          <a rel="predicate" href="object"></a>
          <span property="predicate" content="literal"></span>
        </div>
    """

    def __init__(self, store):
        super(RDFaSerializer, self).__init__(store)

    def __bindings(self):
        """Yield (prefix, namespace) pairs for every predicate in the store."""
        store = self.store
        nm = store.namespace_manager
        bindings = {}
        for predicate in uniq(store.predicates()):
            prefix, namespace, name = nm.compute_qname(predicate)
            bindings[prefix] = URIRef(namespace)
        RDFNS = URIRef("http://www.w3.org/1999/02/22-rdf-syntax-ns#")
        if "rdf" in bindings:
            assert bindings["rdf"] == RDFNS
        else:
            bindings["rdf"] = RDFNS
        for prefix, namespace in bindings.iteritems():
            yield prefix, namespace

    def serialize(self, stream, base=None, encoding=None, **args):
        """Write the whole store to `stream` as one xmlns-annotated div."""
        self.base = base
        self.__stream = stream
        self.__serialized = {}   # subjects already written
        encoding = self.encoding
        self.write = lambda uni: stream.write(uni.encode(encoding, 'replace'))
        # Basic invisible RDFa
        # <div about="subject">
        #   <a rel="predicate" href="object"></a>
        #   <span property="predicate" content="literal"></span>
        xmlns = []
        for b in self.__bindings():
            xmlns.append('xmlns:%s=\"%s\"' % b)
        self.write('<div id="ore:ResourceMap" %s>\n' % ' '.join(xmlns))
        for subject in self.store.subjects():
            self.subject(subject, 1)
        self.write('</div>')
        del self.__serialized

    def subject(self, subject, depth=1):
        """Write one subject div and all of its predicate/object children."""
        if not subject in self.__serialized:
            self.__serialized[subject] = 1
            indent = " " * depth
            if isinstance(subject, URIRef):
                uri = quoteattr(self.relativize(subject))
            else:
                # Blank Node: RDFa CURIE-in-brackets form
                uri = '"[%s]"' % subject.n3()
            self.write('%s<div about=%s>\n' % (indent, uri))
            for predicate, object in self.store.predicate_objects(subject):
                self.predicate(predicate, object, depth + 1)
            self.write("%s</div>\n" % (indent))

    def predicate(self, predicate, object, depth=1):
        """Write one predicate/object pair as a span (literal) or anchor (resource)."""
        indent = " " * depth
        qname = self.store.namespace_manager.qname(predicate)
        if isinstance(object, Literal):
            attributes = ""
            if object.language:
                attributes += ' xml:lang="%s"' % object.language
            #if object.datatype:
            #    attributes += ' rdf:datatype="%s"'%object.datatype
            # FIX: escape '"' as &quot; -- the original mapped the quote to
            # itself (a no-op), so literals containing quotes could break out
            # of the quote-delimited content attribute.
            self.write('%s<span property="%s" content="%s"%s></span>\n' %
                       (indent, qname, escape(object, {'"': '&quot;'}), attributes))
        else:
            if isinstance(object, URIRef):
                href = quoteattr(self.relativize(object))
            else:
                # BNode
                href = '"[%s]"' % object.n3()
            self.write('%s<a rel="%s" href=%s></a>\n' % (indent, qname, href))
| Python |
from __future__ import generators
from rdflib.serializer import Serializer
from rdflib import URIRef
from rdflib import Literal
from rdflib import BNode
from rdflib.util import uniq
from rdflib.exceptions import Error
from rdflib.namespace import split_uri
from xml.sax.saxutils import quoteattr, escape
try:
import json
except ImportError:
import simplejson as json
class JsonSerializer(Serializer):
    """rdflib Serializer emitting the resource-centric RDF/JSON format.

    Output: { subject: { predicate: [ {value, type[, lang][, datatype]}, ... ] } }
    Subclasses toggle gdataColon (':' -> '$' in emitted names) and
    prettyPredName (qnames instead of full predicate URIs).
    """
    gdataColon = 0

    def __init__(self, store):
        super(JsonSerializer, self).__init__(store)
        self.gdataColon = 0
        self.prettyPredName = 0

    def serialize(self, stream, base=None, encoding=None, **args):
        """Write every subject in the store to `stream` as one JSON object."""
        self.base = base
        self.__stream = stream
        self.__serialized = {}  # subjects already emitted
        self.write = lambda u: stream.write(u.encode(self.encoding, 'replace'))
        self.jsonObj = {}
        self.initObj()
        for subject in self.store.subjects():
            self.subject(subject)
        srlzd = json.dumps(self.jsonObj, indent=2)
        self.write(srlzd)
        del self.__serialized

    def initObj(self):
        # Hook for subclasses to seed self.jsonObj (e.g. prefix bindings)
        pass

    def subject(self, subject):
        """Add one subject and all of its predicate/object pairs to jsonObj."""
        if not subject in self.__serialized:
            self.__serialized[subject] = 1
            if isinstance(subject, URIRef):
                uri = self.relativize(subject)
            else:
                # Blank Node: n3() form '_:id'
                uri = '%s' % subject.n3()
                if self.gdataColon:
                    # GData style: '_:id' becomes '_$id'
                    uri = uri.replace(':', '$')
            data = {}
            for predicate, objt in self.store.predicate_objects(subject):
                if self.prettyPredName:
                    predname = self.store.namespace_manager.qname(predicate)
                else:
                    predname = self.relativize(predicate)
                if self.gdataColon:
                    predname = predname.replace(':', '$')
                value = self.value(objt)
                if data.has_key(predname):
                    data[predname].append(value)
                else:
                    data[predname] = [value]
            self.jsonObj[uri] = data

    def value(self, objt):
        """Encode one RDF term as an RDF/JSON value object."""
        data = {}
        if isinstance(objt, Literal):
            data['type'] = 'literal'
            if objt.language:
                data['lang'] = objt.language
            if objt.datatype:
                data['datatype'] = objt.datatype
            data['value'] = objt
        else:
            if isinstance(objt, URIRef):
                href = self.relativize(objt)
                data['type'] = 'uri'
            else:
                # BNode
                href = '%s' % objt.n3()
                if self.gdataColon:
                    href = href.replace(':', '$')
                data['type'] = 'bnode'
            data['value'] = href
        return data
class PrettyJsonSerializer(JsonSerializer):
    """GData-flavoured RDF/JSON: qname predicates, ':' -> '$', inline bindings.

    The xmlns$prefix entries written by initObj are what JsonParser reads
    back to re-expand prefixed names.
    """

    def __init__(self, store):
        super(PrettyJsonSerializer, self).__init__(store)
        self.gdataColon = 1
        self.prettyPredName = 1

    def __bindings(self):
        """Yield (prefix, namespace) pairs for every predicate in the store."""
        store = self.store
        nm = store.namespace_manager
        bindings = {}
        for predicate in uniq(store.predicates()):
            prefix, namespace, name = nm.compute_qname(predicate)
            bindings[prefix] = URIRef(namespace)
        RDFNS = URIRef("http://www.w3.org/1999/02/22-rdf-syntax-ns#")
        if "rdf" in bindings:
            assert bindings["rdf"] == RDFNS
        else:
            bindings["rdf"] = RDFNS
        for prefix, namespace in bindings.iteritems():
            yield prefix, namespace

    def initObj(self):
        # Seed the output object with the prefix table, GData style
        for b in self.__bindings():
            self.jsonObj['xmlns$%s' % b[0]] = '%s' % b[1]
| Python |
from ore import *
from utils import namespaces, OreException, unconnectedAction, protocolUriRe
from lxml import etree
from xml.dom import minidom
from rdflib.parser import StringInputSource
from rdflib import URIRef, plugin, query #, syntax
from rdflib.parser import Parser
# Register SPARQL query support and the RDF/JSON parser with rdflib's plugin
# system so that Graph.query(...) and parsing with format='json' work.
plugin.register('sparql', query.Processor,
                'rdfextras.sparql.processor', 'Processor')
plugin.register('sparql', query.Result,
                'rdfextras.sparql.query', 'SPARQLQueryResult')
plugin.register('json', Parser, 'foresite.JsonParser', 'JsonParser')
class OREParser(object):
    """Abstract base for parsers that turn serialised data into the ORE model."""
    # Take some data and produce objects/graph
    def __init__(self):
        # strict: enforce the ORE spec's mandatory-triple and URI-scheme rules
        self.strict = False
class RdfLibParser(OREParser):
def set_fields(self, what, graph):
for (pred, obj) in graph.predicate_objects(what.uri):
# assert to what's graph
what.graph.add((what.uri, pred, obj))
def process_graph(self, graph):
# take graph and find objects, split up stuff into graph
# Find ReM/Aggr
lres = list(graph.query("PREFIX ore: <%s> SELECT ?a ?b WHERE {?a ore:describes ?b .}" % namespaces['ore']))
try:
uri_r = lres[0][0]
uri_a = lres[0][1]
except IndexError:
raise OreException("Graph does not have mandatory ore:describes triple")
if self.strict and len(lres) != 1:
raise OreException("Graph must contain exactly one ore:describes triple")
if self.strict and not protocolUriRe.match(uri_r):
raise OreException("Resource Map URI must be protocol-based URI: %s" % uri_r)
if self.strict and not protocolUriRe.match(uri_a):
raise OreException("Aggregation URI must be protocol-based URI: %s" % uri_a)
remc = list(graph.query("PREFIX dcterms: <%s> SELECT ?a WHERE { <%s> dcterms:creator ?a .}" % (namespaces['dcterms'], uri_r)))
if self.strict and not remc:
raise OreException("Graph does not have mandatory 'ResourceMap dcterms:creator ?x' triple")
remc = list(graph.query("PREFIX dcterms: <%s> SELECT ?a WHERE { <%s> dcterms:modified ?a .}" % (namespaces['dcterms'], uri_r)))
if self.strict and not remc:
raise OreException("Graph does not have mandatory 'ResourceMap dcterms:modified timestamp' triple")
rem = ResourceMap(uri_r)
aggr = Aggregation(uri_a)
rem.set_aggregation(aggr)
self.set_fields(rem, graph)
self.set_fields(aggr, graph)
things = {uri_r : rem, uri_a : aggr}
res2 = graph.query("PREFIX ore: <http://www.openarchives.org/ore/terms/> SELECT ?b WHERE {<%s> ore:aggregates ?b .}" % uri_a )
for uri_ar in res2:
uri_ar = uri_ar[0]
if self.strict and not protocolUriRe.match(uri_ar):
raise OreException("Aggregated Resource URI must be protocol-based URI: %s" % uri_ar)
res = AggregatedResource(uri_ar)
things[uri_ar] = res
proxy = list(graph.query("PREFIX ore: <http://www.openarchives.org/ore/terms/> SELECT ?a WHERE {?a ore:proxyFor <%s> .}" % uri_ar ))
try:
uri_p = proxy[0][0]
p = Proxy(uri_p)
p.set_forIn(res, aggr)
things[uri_p] = p
aggr.add_resource(res, p)
self.set_fields(res, graph)
self.set_fields(p, graph)
except IndexError:
aggr.add_resource(res, None)
self.set_fields(res, graph)
allThings = things.copy()
agents = list(graph.query("PREFIX foaf: <%s> PREFIX dcterms: <%s> SELECT ?a WHERE { { ?a foaf:name ?b } UNION { ?a foaf:mbox ?b } UNION { ?b dcterms:creator ?a } UNION { ?b dcterms:contributor ?a } }" % (namespaces['foaf'], namespaces['dcterms'])))
for a_uri in agents:
a_uri = a_uri[0]
a = Agent(a_uri)
allThings[a_uri] = a
self.set_fields(a, graph)
for (subj, pred) in graph.subject_predicates(URIRef(a_uri)):
if things.has_key(subj):
# direct manipulation, as will have already added predicate in set_fields
what = things[subj]
what._agents_[a_uri] = a
# rem and aggr will have default rdf:type triples already
allThings.update(rem.triples)
allThings.update(aggr.triples)
for subj in graph.subjects():
if not allThings.has_key(subj):
# triple needed
ar = ArbitraryResource(subj)
allThings[subj] = ar
# find our graph
for (pred, obj) in graph.predicate_objects(subj):
ar.graph.add((subj, pred, obj))
# find shortest distance to main object to link to main graph
# Breadth First Search
found = 0
checked = {}
tocheck = list(graph.subject_predicates(subj))
while tocheck:
subsubj = tocheck.pop(0)[0]
checked[subsubj] = 1
if things.has_key(subsubj):
things[subsubj]._triples_[ar.uri] = ar
found = 1
break
else:
extd = list(graph.subject_predicates(subsubj))
if extd:
for e in extd[0]:
if not checked.has_key(e):
tocheck.append(e)
if not found:
if unconnectedAction == 'ignore':
# Input graph is not connected!
rem._triples_[ar.uri] = ar
elif unconnectedAction == 'warn':
print "Input Graph Not Connected at: %s" % subj
elif unconnectedAction == 'raise':
raise OreException("Input Graph Not Connected at: %s" % subj)
return rem
def parse(self, doc):
    """Parse an ORE document with rdflib and process the resulting graph.

    Uses doc.format as the rdflib format hint when present; otherwise
    lets rdflib guess the serialization.
    """
    source = StringInputSource(doc.data)
    graph = Graph()
    if doc.format:
        graph.parse(source, format=doc.format)
    else:
        graph.parse(source)
    return self.process_graph(graph)
# Prefer the more featureful pyRdfa implementation when it is installed;
# fall back to rdflib's built-in RDFa support otherwise.  Either way the
# module ends up exporting a class named RdfAParser.
try:
    # Try to use more featureful pyRDFa parser
    from pyRdfa import parseRDFa, Options
    rdfaOptions = Options(warnings=False)
    rdfaOptions.warning_graph = None

    class RdfAParser(RdfLibParser):
        def parse(self, doc):
            """Extract RDFa from the document with pyRdfa, then process
            the graph via the inherited process_graph()."""
            # NOTE(review): minidom.parse() expects a filename or a file
            # object -- assumes the ReMDocument is file-like; confirm.
            root = minidom.parse(doc)
            graph = parseRDFa(root, doc.uri, options=rdfaOptions)
            return self.process_graph(graph)
except ImportError:
    # No pyRdfa lib, default to using rdflib's parser
    class RdfAParser(RdfLibParser):
        pass
class AtomParser(OREParser):
    # 1.0's entry style atom ReM
    """Parser for ORE 1.0 Resource Maps serialized as a single Atom entry.

    parse() builds a ResourceMap/Aggregation pair from the entry and
    dispatches element handling to the handle_* methods below.  Objects
    created while parsing are tracked in self.all_objects keyed by URI.
    """

    def handle_person(self, elem, what, type):
        """Build an Agent from an atom:author/atom:contributor element and
        attach it to ``what`` under relationship ``type`` (e.g. 'creator')."""
        name = elem.xpath('atom:name/text()', namespaces=namespaces)
        mbox = elem.xpath('atom:email/text()', namespaces=namespaces)
        uri = elem.xpath('atom:uri/text()', namespaces=namespaces)
        if uri:
            agent = Agent(uri[0])
        else:
            # No URI given: Agent() presumably mints a blank node -- confirm.
            agent = Agent()
        self.all_objects[agent._uri_] = agent
        if name:
            agent.name = name[0]
        if mbox:
            mb = mbox[0]
            # Normalise bare addresses to mailto: URIs
            if mb[:7] != "mailto:":
                mb = "mailto:%s" % mb
            agent.mbox = mb
        what.add_agent(agent, type)

    def handle_category(self, elem, what):
        """Map an atom:category onto an rdf:type triple; scheme/label get an
        auxiliary ArbitraryResource with rdfs:label / rdfs:isDefinedBy."""
        uri = elem.attrib['term']
        scheme = elem.attrib.get('scheme', '')
        label = elem.attrib.get('label', '')
        # ORE datetime categories are structural noise, not types.
        if scheme[:47] == "http://www.openarchives.org/ore/terms/datetime/":
            # magic, ignore
            return
        what._rdf.type = URIRef(uri)
        if scheme or label:
            t = ArbitraryResource(uri)
            if label:
                t._rdfs.label = label
            if scheme:
                t._rdfs.isDefinedBy = scheme
            what.add_triple(t)
            self.all_objects[t._uri_] = t

    def handle_link(self, elem, what):
        """Translate an atom:link on the entry into either an
        AggregatedResource (rel=ore:aggregates) or a generic triple."""
        type = elem.attrib['rel']
        if type in ['self', 'license']:
            # already handled
            return
        uri = elem.attrib['href']
        format = elem.attrib.get('type', '')
        lang = elem.attrib.get('hreflang', '')
        title = elem.attrib.get('title', '')
        extent = elem.attrib.get('length', '')
        # links only apply to aggregations now
        # and can be anything
        t = None
        if type == str(namespaces['ore']['aggregates']):
            # Build Aggregated Resource
            t = AggregatedResource(uri)
            what.aggregates = t._uri_
            what._resources_.append((t, None))
            t._aggregations_.append((what, None))
            # in RDF, if proxy check for AggRes
        else:
            # Known IANA rels map to the iana namespace; anything else is
            # used verbatim as the predicate URI.
            if type in elements['iana']:
                pred = namespaces['iana'][type]
            else:
                pred = URIRef(type)
            # direct graph manipulation rather than try to split
            what.graph.add((what._uri_, pred, URIRef(uri)))
            if format or lang or title or extent:
                t = ArbitraryResource(uri)
        # Attach any dc metadata carried on the link itself to ``t``
        # (AggregatedResource or ArbitraryResource).
        if format or lang or title or extent:
            self.all_objects[t._uri_] = t
            if format:
                t._dc.format = format
            if lang:
                t._dc.language = lang
            if title:
                t._dc.title = title
            if extent:
                t._dc.extent = extent
        if isinstance(t, ArbitraryResource):
            what.add_triple(t)

    def handle_rdf(self, elem, what):
        """Fold an rdf:Description block into ``what``: resolve the subject
        (same URI, a Proxy, or a new ArbitraryResource), copy child
        elements as attributes, and re-wire proxyFor/proxyIn links."""
        # Create AT for @about
        try:
            uri_at = elem.attrib['{%s}about' % namespaces['rdf']]
        except:
            # blank node reference
            uri_at = elem.attrib['{%s}nodeID' % namespaces['rdf']]
        if uri_at == str(what.uri):
            at = what
        elif elem.xpath('ore:proxyFor', namespaces=namespaces):
            # proxy
            at = Proxy(uri_at)
        else:
            at = ArbitraryResource(uri_at)
            what.add_triple(at)
        self.all_objects[at._uri_] = at
        for kid in elem:
            # set attribute on at from kid
            full = kid.tag  # {ns}elem
            match = namespaceElemRe.search(full)
            if match:
                name = match.groups()[1]
            else:
                name = full
            val = kid.text
            if not val:
                # look in @rdf:resource
                try:
                    val = kid.attrib['{%s}resource' % namespaces['rdf']]
                    val = URIRef(val)
                except:
                    # could be a ref to a blank node
                    try:
                        val = kid.attrib['{%s}nodeID' % namespaces['rdf']]
                        val = URIRef(val)
                    except:
                        # no text, no resource, no nodeID: nothing to record
                        continue
            try:
                setattr(at, name, val)
            except:
                # Probably failed to resolve attribute name -> ns
                pass
        if isinstance(at, Proxy):
            # try to update proxyIn and proxyFor
            try:
                aggr = self.all_objects[at._ore.proxyIn[0]]
                res = self.all_objects[at._ore.proxyFor[0]]
                # Replace the placeholder (res, None) pairing with the proxy.
                aggr._resources_.remove((res, None))
                aggr._resources_.append((res, at))
                res._aggregations_.remove((aggr, None))
                res._aggregations_.append((aggr, at))
                at._resource_ = res
                at._aggregation_ = aggr
            except KeyError:
                # third party proxy
                pass

    def parse(self, doc):
        """Parse an atom:entry Resource Map document and return the
        ResourceMap, with its Aggregation and resources populated."""
        root = etree.fromstring(doc.data)
        self.curr_root = root
        graph = Graph()
        # first construct aggr and rem
        self.all_objects = {}
        uri_a = root.xpath("/atom:entry/atom:link[@rel='http://www.openarchives.org/ore/terms/describes']/@href", namespaces=namespaces)
        uri_r = root.xpath("/atom:entry/atom:link[@rel='self']/@href", namespaces=namespaces)
        rem = ResourceMap(uri_r[0])
        aggr = Aggregation(uri_a[0])
        rem.set_aggregation(aggr)
        self.all_objects[rem._uri_] = rem
        self.all_objects[aggr._uri_] = aggr
        # Aggregation Info
        title = root.xpath("/atom:entry/atom:title/text()", namespaces=namespaces)
        aggr._dc.title = title[0]
        for auth in root.xpath('/atom:entry/atom:author', namespaces=namespaces):
            self.handle_person(auth, aggr, 'creator')
        for auth in root.xpath('/atom:entry/atom:contributor', namespaces=namespaces):
            self.handle_person(auth, aggr, 'contributor')
        for cat in root.xpath('/atom:entry/atom:category', namespaces=namespaces):
            self.handle_category(cat, aggr)
        for link in root.xpath('/atom:entry/atom:link', namespaces=namespaces):
            self.handle_link(link, aggr)
        summary = root.xpath("/atom:entry/atom:summary/text()", namespaces=namespaces)
        if summary:
            aggr._dc.description = summary[0]
        # Resource Map Info
        aid = root.xpath("/atom:entry/atom:id/text()", namespaces=namespaces)
        # The atom:id becomes a resource that dcterms:hasVersion's the ReM.
        at = ArbitraryResource(aid[0])
        at._dcterms.hasVersion = rem._uri_
        rem.add_triple(at)
        self.all_objects[at._uri_] = at
        updated = root.xpath("/atom:entry/atom:updated/text()", namespaces=namespaces)
        if updated:
            rem._dcterms.modified = updated[0]
        elif self.strict:
            # dcterms:modified is mandatory in strict mode
            raise OreException("Graph does not have mandatory 'ResourceMap dcterms:modified timestamp' triple")
        published = root.xpath("/atom:entry/atom:published/text()", namespaces=namespaces)
        if published:
            rem._dcterms.created = published[0]
        rights = root.xpath("/atom:entry/atom:rights/text()", namespaces=namespaces)
        if rights:
            rem._dc.rights = rights[0]
        lic = root.xpath("/atom:entry/atom:link[@rel='license']/@href", namespaces=namespaces)
        if lic:
            rem._dcterms.rights = URIRef(lic[0])
        for rauth in root.xpath('/atom:entry/atom:source/atom:author', namespaces=namespaces):
            self.handle_person(rauth, rem, 'creator')
        for rauth in root.xpath('/atom:entry/atom:source/atom:contributor', namespaces=namespaces):
            self.handle_person(rauth, rem, 'contributor')
        # Arbitrary RDF carried in ore:triples
        for rdf in root.xpath('/atom:entry/ore:triples/rdf:Description', namespaces=namespaces):
            try:
                about = URIRef(rdf.attrib['{%s}about' % namespaces['rdf']])
            except:
                # probably a blank node
                try:
                    about = BNode(rdf.attrib['{%s}nodeID' % namespaces['rdf']])
                except:
                    raise
            if about in self.all_objects:
                self.handle_rdf(rdf, self.all_objects[about])
            else:
                self.handle_rdf(rdf, aggr)
        # Reset per-parse state before returning.
        self.all_objects = {}
        return rem
class OldAtomParser(AtomParser):
    # 0.9's feed style atom ReM
    """Parser for the older ORE 0.9 serialization: one atom:feed per
    Resource Map, one atom:entry per aggregated resource.

    The *Map dicts map XPath expressions to a predicate spec:
    'p' = attribute name to set, optional 'ns' = namespace prefix,
    optional 'type' = coercion callable (e.g. URIRef).
    """
    # Class-level defaults; rebuilt per instance in __init__.
    remMap = {}
    aggrMap = {}
    entryMap = {}
    aggrRels = {}
    entryRels = {}

    def __init__(self):
        # Feed-level fields that describe the ResourceMap.
        self.remMap = {'/atom:feed/atom:updated/text()' :
                       {'p' : 'modified',
                        'ns' : 'dcterms'},
                       '/atom:feed/atom:rights/text()' :
                       {'p' : 'rights',
                        'ns' : 'dc'},
                       "atom:link[@rel='self']/@type" :
                       {'p' : 'format'},
                       "atom:link[@rel='self']/@hreflang" :
                       {'p' : 'language'},
                       "atom:link[@rel='self']/@title" :
                       {'p' : 'title'},
                       "atom:link[@rel='self']/@length" :
                       {'p' : 'extent'}
                       }
        # Feed-level fields that describe the Aggregation.
        self.aggrMap = {'/atom:feed/atom:title/text()' :
                        {'p' : 'title'},
                        '/atom:feed/atom:icon/text()' :
                        {'p' : 'logo', 'type' : URIRef},
                        '/atom:feed/atom:logo/text()' :
                        {'p' : 'logo', 'type' : URIRef},
                        '/atom:feed/atom:subtitle/text()' :
                        {'p' : 'description'}
                        }
        # about aggregated resource
        self.entryMap = {'atom:title/text()' : {'p' : 'title'},
                         'atom:summary/text()' : {'p' : 'abstract', 'ns' : 'dcterms'},
                         "atom:link[@rel='alternate']/@type" :
                         {'p' : 'format'},
                         "atom:link[@rel='alternate']/@hreflang" :
                         {'p' : 'language'},
                         "atom:link[@rel='alternate']/@title" :
                         {'p' : 'title'},
                         "atom:link[@rel='alternate']/@length" :
                         {'p' : 'extent'}
                         }
        # atom:link @rel -> predicate, when the target is an Aggregation...
        self.aggrRels = {'related' : {'p' : 'similarTo'},
                         'alternate' : {'p' : 'isDescribedBy'},
                         'license' : {'p' : 'rights', 'ns' : 'dcterms'}
                         }
        # self = no map, alternate = URI-AR, via = Proxy
        # ...and when the target is an aggregated resource (entry).
        self.entryRels = {'related' : {'p' : 'isAggregatedBy'},
                          'license' : {'p' : 'rights', 'ns' : 'dcterms'}
                          }

    def handle_person(self, elem, what, type):
        """Build an Agent from an author/contributor element; mints a
        urn:uuid URI when none is given (unlike the 1.0 parser)."""
        name = elem.xpath('atom:name/text()', namespaces=namespaces)
        mbox = elem.xpath('atom:email/text()', namespaces=namespaces)
        uri = elem.xpath('atom:uri/text()', namespaces=namespaces)
        if not uri:
            uri = ["urn:uuid:%s" % gen_uuid()]
        agent = Agent(uri[0])
        if name:
            agent.name = name[0]
        if mbox:
            mb = mbox[0]
            # Normalise bare addresses to mailto: URIs
            if mb[:7] != "mailto:":
                mb = "mailto:%s" % mb
            agent.mbox = mb
        what.add_agent(agent, type)

    def handle_category(self, elem, what):
        """atom:category -> rdf:type, plus an auxiliary resource for
        scheme/label (no ORE-datetime filtering in the 0.9 format)."""
        uri = elem.attrib['term']
        scheme = elem.attrib.get('scheme', '')
        label = elem.attrib.get('label', '')
        what._rdf.type = URIRef(uri)
        if scheme or label:
            t = ArbitraryResource(uri)
            if label:
                t._rdfs.label = label
            if scheme:
                t._rdfs.isDefinedBy = scheme
            what.add_triple(t)

    def handle_link(self, elem, what):
        """Map an atom:link via aggrRels/entryRels; unmapped rels
        ('self', 'next', 'archive', ...) are ignored."""
        uri = elem.attrib['href']
        type = elem.attrib['rel']
        format = elem.attrib.get('type', '')
        lang = elem.attrib.get('hreflang', '')
        title = elem.attrib.get('title', '')
        extent = elem.attrib.get('length', '')
        # These don't map 'self', 'next', 'archive' etc
        if isinstance(what, Aggregation):
            pred = self.aggrRels.get(type, '')
        else:
            pred = self.entryRels.get(type, '')
        if pred:
            if pred.has_key('ns'):
                # getattr switches the active namespace before setattr;
                # the return value is intentionally unused.
                getattr(what, "_%s" % pred['ns'])
            setattr(what, pred['p'], URIRef(uri))
            if format or lang or title or extent:
                # Carry link metadata on an auxiliary resource.
                t = ArbitraryResource(uri)
                if format:
                    t._dc.format = format
                if lang:
                    t._dc.language = lang
                if title:
                    t._dc.title = title
                if extent:
                    t._dc.extent = extent
                what.add_triple(t)

    def handle_rdf(self, elem, what):
        """Fold an rdf:Description into ``what`` (no proxy handling and no
        nodeID fallbacks in this older format)."""
        # Create AT for @about
        uri_at = elem.attrib['{%s}about' % namespaces['rdf']]
        if uri_at == str(what.uri):
            at = what
        else:
            at = ArbitraryResource(uri_at)
            what.add_triple(at)
        for kid in elem:
            # set attribute on at from kid
            full = kid.tag  # {ns}elem
            match = namespaceElemRe.search(full)
            if match:
                name = match.groups()[1]
            else:
                name = full
            val = kid.text
            if not val:
                # look in @rdf:resource
                try:
                    val = kid.attrib['{%s}resource' % namespaces['rdf']]
                    val = URIRef(val)
                except:
                    continue
            setattr(at, name, val)

    def parse(self, doc):
        """Parse an atom:feed Resource Map document and return the
        populated ResourceMap."""
        root = etree.fromstring(doc.data)
        self.curr_root = root
        graph = Graph()
        # first construct aggr and rem
        try:
            # An empty-prefix entry breaks lxml xpath; drop it if present.
            del namespaces['']
        except:
            pass
        uri_a = root.xpath('/atom:feed/atom:id/text()', namespaces=namespaces)
        uri_r = root.xpath("/atom:feed/atom:link[@rel='self']/@href", namespaces=namespaces)
        rem = ResourceMap(uri_r[0])
        aggr = Aggregation(uri_a[0])
        rem.set_aggregation(aggr)
        # Apply the declarative field maps to the ResourceMap.
        for (xp, pred) in self.remMap.iteritems():
            val = root.xpath(xp, namespaces=namespaces)
            for v in val:
                if pred.has_key('ns'):
                    # namespace switch side effect; value unused
                    getattr(rem, "_%s" % pred['ns'])
                if pred.has_key('type'):
                    v = pred['type'](v)
                setattr(rem, pred['p'], v)
        # Handle generator
        gen = root.xpath('/atom:feed/atom:generator', namespaces=namespaces)
        if gen:
            gen = gen[0]
            try:
                uri = gen.attrib['uri']
            except:
                uri = "urn:uuid:%s" % gen_uuid()
            name = gen.text
            agent = Agent(uri)
            agent.name = name
            rem.add_agent(agent, 'creator')
        # ...and to the Aggregation.
        for (xp, pred) in self.aggrMap.iteritems():
            val = root.xpath(xp, namespaces=namespaces)
            for v in val:
                if pred.has_key('ns'):
                    getattr(aggr, "_%s" % pred['ns'])
                if pred.has_key('type'):
                    v = pred['type'](v)
                setattr(aggr, pred['p'], v)
        # Now handle types, agents, links
        for auth in root.xpath('/atom:feed/atom:author', namespaces=namespaces):
            self.handle_person(auth, aggr, 'creator')
        for auth in root.xpath('/atom:feed/atom:contributor', namespaces=namespaces):
            self.handle_person(auth, aggr, 'contributor')
        for cat in root.xpath('/atom:feed/atom:category', namespaces=namespaces):
            self.handle_category(cat, aggr)
        for link in root.xpath('/atom:feed/atom:link', namespaces=namespaces):
            self.handle_link(link, aggr)
        # RDF blocks. Put everything on aggregation
        for rdf in root.xpath('/atom:feed/rdf:Description', namespaces=namespaces):
            if rdf.attrib['{%s}about' % namespaces['rdf']] == uri_r[0]:
                self.handle_rdf(rdf, rem)
            else:
                self.handle_rdf(rdf, aggr)
        # One entry per aggregated resource; atom:id is the Proxy URI,
        # the rel='alternate' link is the resource itself.
        for entry in root.xpath('/atom:feed/atom:entry', namespaces=namespaces):
            uri_p = entry.xpath('atom:id/text()', namespaces=namespaces)
            uri_ar = entry.xpath("atom:link[@rel='alternate']/@href", namespaces=namespaces)
            res = AggregatedResource(uri_ar[0])
            proxy = Proxy(uri_p[0])
            proxy.set_forIn(res, aggr)
            aggr.add_resource(res, proxy)
            # look for via
            via = entry.xpath("atom:link[@rel='via']/@href", namespaces=namespaces)
            if via:
                proxy._ore.lineage = URIRef(via[0])
            for (xp, pred) in self.entryMap.iteritems():
                val = entry.xpath(xp, namespaces=namespaces)
                for v in val:
                    if pred.has_key('ns'):
                        getattr(res, "_%s" % pred['ns'])
                    if pred.has_key('type'):
                        v = pred['type'](v)
                    setattr(res, pred['p'], v)
            for auth in entry.xpath('atom:author', namespaces=namespaces):
                self.handle_person(auth, res, 'creator')
            for auth in entry.xpath('atom:contributor', namespaces=namespaces):
                self.handle_person(auth, res, 'contributor')
            for cat in entry.xpath('atom:category', namespaces=namespaces):
                self.handle_category(cat, res)
            for link in entry.xpath('atom:link', namespaces=namespaces):
                self.handle_link(link, res)
            # RDF blocks. Put everything on aggregation
            for rdf in entry.xpath('rdf:Description', namespaces=namespaces):
                self.handle_rdf(rdf, res)
        return rem
| Python |
from ore import *
from utils import namespaces
from rdflib import URIRef, plugin, store
# Store raw triples into a TripleStore somewhere
class TripleStore(object):
    """Persist and reload Aggregations via an rdflib store plugin.

    Subclasses must set ``self.storeType`` (an rdflib store plugin name,
    e.g. 'SQLite', 'MySQL', 'Sleepycat') before delegating to __init__.
    """

    def __init__(self, configuration, db, create):
        # configuration: backend-specific connect string or path
        # db: store identifier (file/database name); optional
        # create: whether to create the store if it does not exist
        self.configuration = configuration
        self.create = create
        self.db = db
        if db:
            self.store = plugin.get(self.storeType,store.Store)(db)
        else:
            self.store = plugin.get(self.storeType,store.Store)()
        self.store.open(configuration, create)

    def close(self):
        """Close the underlying rdflib store."""
        self.store.close()

    def store_aggregation(self, aggr):
        """Write every triple reachable from ``aggr`` (its own graph, its
        arbitrary triples, agents, resources, proxies) into a graph named
        by the aggregation URI, commit, and return the graph."""
        # Non memory graph
        g = Graph(self.store, aggr.uri)
        # Don't want to serialise resource map info?
        [g.add(t) for t in aggr._graph_]
        for at in aggr._triples_:
            [g.add(t) for t in at._graph_]
        for c in aggr._agents_:
            [g.add(t) for t in c._graph_]
        for (res, proxy) in aggr._resources_:
            [g.add(t) for t in res._graph_]
            if proxy:
                [g.add(t) for t in proxy._graph_]
            for at in res._triples_:
                [g.add(t) for t in at._graph_]
            for c in res._agents_:
                [g.add(t) for t in c._graph_]
            if isinstance(res, Aggregation):
                # don't recurse, remove aggregates
                for a in res._ore.aggregates:
                    g.remove((res._uri_, namespaces['ore']['aggregates'], a))
                # but keep all other triples
        g.commit()
        return g

    def set_fields(self, what, graph):
        """Copy every (pred, obj) about ``what.uri`` from ``graph`` into
        the object's own graph."""
        for (pred, obj) in graph.predicate_objects(what.uri):
            # assert to what's graph
            what.graph.add((what.uri, pred, obj))

    def load_aggregation(self, identifier):
        """Rebuild an Aggregation object graph from the store.

        Returns None when no graph is stored under ``identifier``.
        Unconnected subjects are attached directly to the aggregation.
        """
        if not isinstance(identifier, URIRef):
            identifier = URIRef(identifier)
        graph = Graph(self.store, identifier)
        if not len(graph):
            aggr = None
        else:
            uri_a = identifier
            aggr = Aggregation(uri_a)
            self.set_fields(aggr, graph)
            things = {uri_a : aggr}
            # Recreate each aggregated resource, with its proxy when one
            # was stored.
            res2 = graph.query("PREFIX ore: <http://www.openarchives.org/ore/terms/> SELECT ?b WHERE {<%s> ore:aggregates ?b .}" % uri_a )
            for uri_ar in res2:
                uri_ar = uri_ar[0]
                res = AggregatedResource(uri_ar)
                things[uri_ar] = res
                proxy = list(graph.query("PREFIX ore: <http://www.openarchives.org/ore/terms/> SELECT ?a WHERE {?a ore:proxyFor <%s> .}" % uri_ar ))
                try:
                    uri_p = proxy[0][0]
                    p = Proxy(uri_p)
                    p.set_forIn(res, aggr)
                    things[uri_p] = p
                    aggr.add_resource(res, p)
                    self.set_fields(res, graph)
                    self.set_fields(p, graph)
                except IndexError:
                    # no proxy stored for this resource
                    aggr.add_resource(res, None)
                    self.set_fields(res, graph)
            allThings = things.copy()
            # Recreate agents: anything with a foaf name/mbox, or referenced
            # as a dcterms creator/contributor.
            agents = list(graph.query("PREFIX foaf: <%s> PREFIX dcterms: <%s> SELECT ?a WHERE { { ?a foaf:name ?b } UNION { ?a foaf:mbox ?b } UNION { ?b dcterms:creator ?a } UNION { ?b dcterms:contributor ?a } }" % (namespaces['foaf'], namespaces['dcterms'])))
            for a_uri in agents:
                a_uri = a_uri[0]
                a = Agent(a_uri)
                allThings[a_uri] = a
                self.set_fields(a, graph)
                for (subj, pred) in graph.subject_predicates(URIRef(a_uri)):
                    if things.has_key(subj):
                        # direct manipulation, as will have already added predicate in set_fields
                        things[subj]._agents_.append(a)
            for at in aggr.triples:
                allThings[at.uri] = at
            # Any remaining subject becomes an ArbitraryResource, attached
            # to the nearest known object (breadth first search).
            for subj in graph.subjects():
                if not allThings.has_key(subj):
                    # triple needed
                    ar = ArbitraryResource(subj)
                    allThings[subj] = ar
                    # find our graph
                    for (pred, obj) in graph.predicate_objects(subj):
                        ar.graph.add((subj, pred, obj))
                    # find shortest distance to main object to link to main graph
                    # Breadth First Search
                    found = 0
                    checked = {}
                    # NOTE(review): ``checked`` is never consulted here
                    # (unlike the parser-side copy of this search), so the
                    # walk can revisit nodes and, on a cyclic graph, loop.
                    tocheck = list(graph.subject_predicates(subj))
                    while tocheck:
                        subsubj = tocheck.pop(0)[0]
                        if things.has_key(subsubj):
                            things[subsubj]._triples_.append(ar)
                            found = 1
                            break
                        else:
                            tocheck.extend(graph.subject_predicates(subsubj))
                    if not found:
                        # Input graph is not connected!
                        aggr._triples_.append(ar)
        return aggr
# types: Sleepycat, MySQL, SQLite. Others: ZODB, Redland
class SQLiteTripleStore(TripleStore):
    """Triple store backed by rdflib's 'SQLite' store plugin.

    ``configuration`` is the path in which the store is created.
    """

    def __init__(self, configuration='', db="rdfstore.sql", create=False):
        self.storeType = 'SQLite'
        super(SQLiteTripleStore, self).__init__(configuration, db, create)
class MySQLTripleStore(TripleStore):
    """Triple store backed by rdflib's 'MySQL' store plugin.

    ``configuration`` is a dbapi2-style connect string:
    host=SQL-HOST,password=PASSWORD,user=USER,db=DB
    """

    def __init__(self, configuration='', db="rdfstore", create=False):
        self.storeType = 'MySQL'
        super(MySQLTripleStore, self).__init__(configuration, db, create)
class BdbTripleStore(TripleStore):
    """Triple store backed by rdflib's Berkeley DB ('Sleepycat') plugin.

    ``configuration`` is the directory in which the store files live.
    """

    def __init__(self, configuration='', db='', create=False):
        self.storeType = 'Sleepycat'
        super(BdbTripleStore, self).__init__(configuration, db, create)
| Python |
#
# Simple Mod_Python handler for validating and transforming
# ORE Resource Maps
#
# apache config:
# <Directory /home/cheshire/install/htdocs/txr>
# SetHandler mod_python
# PythonDebug On
# PythonPath "['/path/to/validateHandler.py/']+sys.path"
# PythonHandler validateHandler
# </Directory>
import cgitb
from mod_python import apache
from mod_python.util import FieldStorage
import re
from foresite import *
from foresite import conneg
from foresite.utils import namespaces, OreException
from foresite.serializer import OldAtomSerializer
from xml.sax._exceptions import SAXParseException
# Map of output extension -> serializer instance, shared by all requests.
srlzHash = {'rdf.xml' : RdfLibSerializer('xml'),
            'pretty.xml' : RdfLibSerializer('pretty-xml'),
            'rem.nt' : RdfLibSerializer('nt'),
            'rem.n3' : RdfLibSerializer('n3'),
            'rem.turtle' : RdfLibSerializer('turtle'),
            'rdfa.html' : RdfLibSerializer('rdfa'),
            'atom.xml' : AtomSerializer(),
            'old-atom.xml' : OldAtomSerializer()}
srlzHash['old-atom.xml'].mimeType = "application/atom+xml;version=0.9"
srlzHash['pretty.xml'].mimeType += ";format=pretty"

# Shared parser instances; all validate strictly by default.
p = RdfLibParser()
p.strict = True
ap = AtomParser()
# BUGFIX: the original set p.strict three times (copy-paste error), so the
# Atom and RDFa parsers were never switched into strict mode.
ap.strict = True
rdfap = RdfAParser()
rdfap.strict = True

# Reverse map mime type -> extension, used for content negotiation.
mimeHash = {}
for (k,v) in srlzHash.items():
    mimeHash[v.mimeType] = k
mimestr = ', '.join(mimeHash.keys())
mimeList = conneg.parse(mimestr)

# Accept only protocol-based URIs as Resource Map locations.
protoUriRe = re.compile("^([s]?http[s]?://|[t]?ftp:/|z39.50r:|gopher:|imap://|news:|nfs:|nntp:|rtsp:)")
class validateHandler:
    """mod_python handler: fetch a Resource Map URI, validate it by
    parsing, and return it re-serialized in the negotiated format."""

    def send(self, text, req, code=200, ct="text/xml"):
        # Write ``text`` on ``req`` with the given content type.
        # NOTE(review): ``code`` is accepted but never applied to the
        # response status.
        req.content_type = ct
        req.content_length = len(text)
        req.send_http_header()
        if type(text) == unicode:
            req.write(text.encode('utf-8'))
        else:
            req.write(text)

    def error(self, msg, req):
        # Render ``msg`` as a minimal HTML error page (always status 200).
        text = "<html><body><h3>Error</h3><p>%s</p></body></html>" % msg
        req.content_type = "text/html"
        req.content_length = len(text)
        req.send_http_header()
        req.write(text)

    def handle(self, req):
        """Process one request: negotiate the output format, fetch the map,
        parse it (strict unless ?strict=false), re-serialize and send."""
        # Strip the mount prefix from the request path -- assumes the
        # handler is mounted 5 characters deep (e.g. '/txr/'); confirm.
        path = req.uri[5:]
        form = FieldStorage(req)
        strict = form.get('strict', True)
        if strict in ['false', 'False', '0', None, '']:
            strict = False
        # Output selection: explicit mimeType param, then extension param,
        # then Accept-header negotiation, then rdf.xml.
        mt = form.get('mimeType', '')
        mt = mt.replace(' ', '+')
        if not mt:
            xtn = form.get('extension', '')
            if xtn:
                if not srlzHash.has_key(xtn):
                    # can't continue
                    raise ValueError(xtn)
                else:
                    mt = srlzHash[xtn].mimeType
        if not mt:
            try:
                wanted = req.headers_in['Accept']
                mts = conneg.parse(wanted)
                mt = conneg.best(mts, mimeList)
            except:
                mt = ''
        if mt:
            xtn = mimeHash[str(mt)]
        else:
            # default to rdf/xml
            xtn = "rdf.xml"
        srlz = srlzHash[xtn]
        # Target URI: ?aggregation= param wins over the path.
        if form.has_key('aggregation'):
            uri = form.get('aggregation')
        else:
            uri = path
        if not uri:
            data = '<html><body>Instructions etc. goes here</body></html>'
            self.send(data, req, ct="text/html");
            return
        elif not protoUriRe.match(uri):
            self.error("Resource Map URI must be a protocol based URI", req)
            return
        try:
            # fetch
            rd = ReMDocument(uri)
        except Exception, e:
            self.error("Could not retrieve Resource Map from '%s': %s" % (uri, e.message), req)
            return
        try:
            # parse
            if rd.format == 'atom':
                parser = ap
            elif rd.format == 'rdfa':
                parser = rdfap
            else:
                parser = p
            if not strict:
                parser.strict = False
            # Parsers are module-level singletons: always restore strict
            # mode, even when parsing raises.
            try:
                rem = parser.parse(rd)
                parser.strict = True
            except:
                parser.strict = True
                raise
        except OreException, e:
            # get exception message
            self.error("Resource Map Invalid: %s" % e.message, req)
            return
        except SAXParseException, e:
            self.error("Could not parse XML: %s (line %s, column %s)" % (e.getMessage(), e.getLineNumber(), e.getColumnNumber()), req)
            return
        except:
            raise
        try:
            # serialize
            rem2 = rem._aggregation_.register_serialization(srlz, 'http://foresite.cheshire3.org/%s#rem' % req.uri)
            rd = rem2.get_serialization()
            data = rd.data
            if srlz == srlzHash['rdfa.html']:
                data = '<xhtml xmlns="http://www.w3.org/1999/xhtml"><body><i>Invisible RDFa resource map follows, it must have validated okay. [view source] :)</i>' + data + "</body></xhtml>"
        except Exception, e:
            self.error("Could not serialize Aggregation to Resource Map: %s" % e.message, req)
            return
        self.send(data, req, ct=srlz.mimeType)
def handler(req):
    """mod_python entry point: run the validator and render any uncaught
    traceback back to the client via cgitb."""
    try:
        validateHandler().handle(req)
    except:
        req.content_type = "text/html"
        cgitb.Hook(file = req).handle()
    return apache.OK
| Python |
# -*- coding: utf-8 -*-
import urllib2
import re
from foreignsites.parse.parsehtml import parsehtml, HtmlTag
from foreignsites.browser import Browser, BrowserError
# Target wiki host (the commented alternative is a test mirror).
SITE = 'ru.wikipedia.org'
#SITE = 'test.tfolder.ru'
# URL path prefix of article pages.
WIKI_PAGES = '/wiki/'
# Local directory where fetched pages are saved.
RESULT_DIR = 'ru_wikipedia/'
def main():
    """Scratch script: extract the header, body text and /wiki/ links from
    a locally saved Wikipedia article page (w.html)."""
    page = 'А._А._Зализняк'
    # browser = Browser()
    # browser.open('http://%s%s%s' % (SITE, WIKI_PAGES, page))
    # browser.save('RESULT_DIR%s.html' % page)
    html = open('w.html').read().decode('utf-8')
    res = open('w.txt', 'w')
    links = []
    header = None
    #list(skiptagcontent(skip, parsehtml(html)))
    i = 0
    for elem, stat in parsehtml(html):
        openednames = [opened.name for opened in stat]
        # NOTE(review): this ``continue`` only advances the inner loop over
        # tag names, so the whole loop is a no-op; it presumably meant to
        # skip elements inside <script>/<style> -- confirm intent.
        for n in openednames:
            if n in ('script', 'style'):
                continue
        # NOTE(review): ``opened`` is the Python 2 list-comprehension loop
        # variable leaking out of the comprehension above, i.e. the
        # innermost currently-open tag; this is a NameError on Python 3.
        if opened == '<div class="printfooter">':
            break
        if opened == '<h1 class="firstHeading">' and elem.is_text:
            header = elem.text
            continue
        if opened.name == 'a' and elem.is_text:
            href = opened.attrs.get('href')
            if href and href.startswith('/wiki/'):
                links.append((elem.text, urllib2.unquote(href)))
            else:
                continue
        if elem.is_text:
            print elem.text.encode('utf-8')
    print ('HEADER: %s' % header).encode('utf-8')
    print ('LINKS: %s' % '\n'.join(('%s %s' % (k, v)) for k, v in links)).encode('utf-8')
    # i += 1
    # if elem.is_text and elem.text.strip():
    #     print '%04d: %s' % (i, elem.text.encode('utf-8'))
    # else:
    #     print '%04d: %s' % (i, elem.__str__().encode('utf-8'))
    # #print 'xxxx: %s' % [elem.__str__().encode('utf-8') for elem in stat]
    # print 'xxxx: %s' % [elem.name for elem in stat]
    # for elem in skiptagcontent(skip, parsehtml(html)):
    #     if elem.is_tag:
    #         if elem == '<h1 class="firstHeading">':
    #             isheader = True
    #             continue
    #         elif elem == '</h1>':
    #             isheader = False
    #             continue
    #
    #     if elem.is_text:
    #         if isheader:
    #             print 'HEADER: %s' % elem.text.encode('utf-8')
    #         else:
    #             print elem.text.encode('utf-8')
    # NOTE(review): ``res`` (w.txt) is opened but never written to.
    res.close()
#begin_text = '<h1 class="firstHeading">'
#html = re.sub("\n|\r|\t", "", html)
# html = re.sub(">\s+<", "><", html)
# matches = re.search( self.archiveexp, html )
#print html
main() | Python |
import re
# Russian Railways passenger portal.
SITE_URL = 'http://pass.rzd.ru'
# Entry page of the "Express" timetable portlet.
START_PATH = '/wps/portal/pass/express?STRUCTURE_ID=735'
# Matches the link whose (Russian) label reads "ticket availability from
# station to station" and captures its href.
TICKETS_PAGE_URL_REGEX = re.compile('<a href=[\'"]([^\'"]*)[\'"]>Наличие билетов от станции до станции</a>')
class Agent(object):
    """Session-bound client stub for querying train availability.

    BUGFIX: the class previously derived from ``type``, which made
    ``Agent(session_id)`` raise TypeError (``type.__new__`` accepts either
    one or three arguments); a plain ``object`` base was intended.
    """

    def __init__(self, session_id):
        # Opaque session token for the remote service -- TODO confirm format.
        self._session_id = session_id

    def get_trains(self):
        """Fetch the train list (not implemented yet)."""
        pass

    def get_trains_again(self):
        """Re-fetch the train list (not implemented yet)."""
        pass
def format_time(time):
    """Replace a numeric ``.MM`` month suffix in *time* with the Russian
    genitive month name and strip the leading zero from the day, e.g.
    '01.01' -> u'1 января'.

    BUGFIX: ``str.replace`` returns a new string (strings are immutable);
    the original discarded the result, so no substitution ever happened.
    Also corrected the misspelled month u'февряля' -> u'февраля'.
    """
    replaces = {
        '.01': u' января',
        '.02': u' февраля',
        '.03': u' марта',
        '.04': u' апреля',
        '.05': u' мая',
        '.06': u' июня',
        '.07': u' июля',
        '.08': u' августа',
        '.09': u' сентября',
        '.10': u' октября',
        '.11': u' ноября',
        '.12': u' декабря',
    }
    for k, v in replaces.items():
        # Keys are mutually exclusive, so application order is irrelevant.
        time = time.replace(k, v)
    return time.lstrip('0')
| Python |
from loggers import *
from random import randint
from datetime import datetime
# LiveJournal flat-interface endpoint host and port.
lj_host = 'www.livejournal.com'
PORT = 80
TIMEOUT_SECONDS = 20
# NOTE(review): MAX_ATTEMPTS is never referenced in the visible code.
MAX_ATTEMPTS = 5
class LjClient(type):
    """Unfinished port of a PHP LiveJournal flat-interface client.

    NOTE(review): this class does not run as-is -- see the inline notes;
    in particular ``_request`` still has PHP syntax and the transport is
    commented-out PHP.  Inheriting from ``type`` also looks like a mistake
    (a plain ``object`` base was presumably intended).
    """

    def __init__(self, user, password, logger_class=None):
        # NOTE(review): ``debug_mode`` is not defined anywhere -- NameError
        # on construction; likely a leftover parameter from the PHP original.
        self.user, self.password, self.debug_mode = user, password, debug_mode
        # NOTE(review): ``fake_logger`` is undefined (loggers defines
        # FakeLogger), and ``logger_class()`` raises when logger_class is
        # None -- probably meant (logger_class or FakeLogger)().
        self.logger = logger_class() or fake_logger()
        self.name = ''
        self.login()

    def get_friends(self):
        """Fetch the friends list via mode=getfriends."""
        self.logger.request('Getting friends...')
        response = self._request(
            mode='getfriends',
        )
        self.logger.response('Friends: %s' % response)
        return response

    def delete_friend(self, username):
        """Remove ``username`` from the friends list."""
        self.logger.request('Delete: %s' % username)
        params = {
            'mode': 'editfriends',
            'editfriend_delete_%s' % username: 1,
        }
        response = self._request(**params)
        self.logger.response('Done: %s' % response)

    def add_friend(self, username):
        """Add ``username`` to the friends list."""
        self.logger.request('Adding: %s' % username)
        # The flat protocol wants a unique index per added friend.
        any_unique_integer = randint(1,25000)
        params = {
            'mode': 'editfriends',
            'editfriend_add_%d_user' % any_unique_integer: username,
        }
        response = self._request(**params)
        self.logger.response('Done: %s' % response)

    def post(self, text, subj='',):
        """Publish a public journal entry dated now."""
        self.logger.request('Posting...')
        dt = datetime.today()
        params = {
            'mode': 'postevent',
            'event': text.replace('\r', ''),
            'lineendings': 'unix', # 'unix', 'pc', 'mac'
            'subject': subj,
            'year': dt.year,
            'mon': u'%02d' % dt.month,
            'day': dt.day,
            'hour': dt.hour,
            'min': u'%02d' % dt.minute,
        }
        security_params = { 'security': 'public' }
        #security_params = { 'security': 'private' }
        #security_params = { 'security': 'usemask', 'allowmask': mask }
        params.update(security_params)
        response = self._request(**params)
        self.logger.response('Done: %s' % response)

    def login(self):
        """Log in (mode=login) and remember the display name."""
        self.logger.request('Login as %s' % self.user)
        response = self._request(
            mode='login',
        )
        ## unset($hResponse['access']['count']);
        ## $this->arrAccess = array_values($hResponse['access']);
        self.name = response['name']
        self.logger.response('Welcome, %s' % self.name)

    # private function checkResponseOk(array $hResponse) {
    #     if ($hResponse['success'] != 'OK')
    #         throw new Exception('Failed:' . print_r($hResponse, 1));
    # }
    #

    def _get_login_params(self):
        """Common auth parameters merged into every request."""
        # NOTE(review): ``md5`` is not imported here -- hashlib.md5 of the
        # utf-8 password hexdigest is presumably intended; confirm.
        return {
            'user': self.user,
            'auth_method': 'clear', # deprecated. TODO make better
            'hpassword': md5(self.password),
            'ver': 1,
        }

    def _convetFlatResponseToHash(self, response):
        """Fold the flat key/value line pairs into a nested dict, splitting
        keys on '_' (e.g. friend_1_name -> result['friend']['1']['name'])."""
        result = {}
        response.reverse()
        while len(response) > 1:
            key, val = response.pop(), response.pop()
            key_parts = key.split('_')
            pointer = result
            for i, k in enumerate(key_parts):
                is_last = (i == (len(key_parts) - 1))
                if is_last:
                    pointer[k] = val
                else:
                    if not k in pointer:
                        pointer[k] = {}
                    pointer = pointer[k]
        return result

    # NOTE(review): unfinished PHP->Python port -- the "{" instead of ":"
    # below is a syntax error, and the HTTP transport that should follow is
    # still the commented-out PHP reference implementation.
    def _request(self, **kwargs) {
        params = self._get_login_params()
        params.update(kwargs)
        # kwargs
        # $connect = new SocketConnect(self::lj_host, self::PORT, self::TIMEOUT_SECONDS);
        #
        # $content = SocketConnect::composeData($hFields);
        #
        # if ($isDebug) {
        #     echo $content;
        #     return array();
        # }
        #
        # $connect->addLine('POST /interface/flat HTTP/1.0');
        # $connect->addLine('Host: ' . self::lj_host);
        # $connect->addLine('Content-type: application/x-www-form-urlencoded');
        # $connect->addLine('Content-length: ' . strlen($content));
        # $connect->addSeparator();
        # $connect->addLine($content);
        #
        # $response = $connect->send();
        #
        # $this->arrCookies = $response->getCookies();
        #
        # $hResponse = $this->convetFlatResponseToHash($response->getContentAsLinesArray());
        #
        # if (empty($hResponse))
        #     throw new Exception('Empty response');
        #
        # $this->checkResponseOk($hResponse);
        #
        # return $hResponse;
        # }
// NOTE(review): this is PHP source pasted into a Python module -- it is not
// executable here; kept only as the reference implementation being ported.
// Value object for a LiveJournal post: subject/text (CR/LF-normalised),
// timestamp accessors, and the security (visibility) settings.
class LjPost {
    private $subj = '';
    public function setSubj($str) { $this->subj = strtr($str, array("\r" => '', "\n" => '')); }
    public function getSubj() { return $this->subj; }
    private $text = '';
    public function setText($str) { $this->text = strtr($str, array("\r" => '')); }
    public function getText() { return $this->text; }
    private $time = 0;
    public function setDate(Date $oDate = null) { $this->time = $oDate ? $oDate->getTimestamp() : time(); }
    //public function getTime() { return $this->time; }
    public function getYear() { return date('Y', $this->time); }
    public function getMon() { return date('n', $this->time); }
    public function getDay() { return date('j', $this->time); }
    public function getHour() { return date('G', $this->time); }
    public function getMin() { return date('i', $this->time); }
    private $security = '';
    private $allowmask = 0;
    public function setSecurityPublic() { $this->security = 'public'; }
    public function setSecurityPrivate() { $this->security = 'private'; }
    public function setSecurityMask($mask) {
        $this->security = 'usemask';
        $this->allowmask = $mask;
    }
    public function getSecurity() { return $this->security; }
    public function getAllowmask() { return $this->allowmask; }
    public function getLineEndings() { return 'unix'; } // unix, pc, mac
    public function checkIsCorrect() {
        if (!$this->getText())
            throw new Exception('Text cannot be emty!');
        return true;
    }
}
| Python |
class FakeLogger(object):
    """No-op logger: satisfies the request/response interface while
    discarding everything."""

    def request(self, str):
        """Ignore an outgoing-request message."""
        pass

    def response(self, str):
        """Ignore an incoming-response message."""
        pass
class ScreenLogger(FakeLogger):
    """Logger that echoes request/response messages to stdout.

    BUGFIX: this was declared with ``def`` instead of ``class``, which made
    ScreenLogger a function taking FakeLogger as a parameter; it could never
    be instantiated as a logger.  (print is written in its parenthesized
    form, identical in behavior on Python 2 for a single argument.)
    """

    def request(self, str):
        print(' --> %s' % str)

    def response(self, str):
        print(' <-- %s' % str)
| Python |
# -*- coding: utf-8 -*-
import re
from helpers import EntityProcessor, OpenedTagsStat
# Callable that decodes the named HTML entities below into their unicode
# characters -- presumably EntityProcessor also handles numeric character
# references; confirm against helpers.EntityProcessor.
process_entities = EntityProcessor({
    'amp': '&',
    'mdash': u'—',
    'ndash': u'–',
    'minus': u'−',
    'hellip': u'…',
    'copy': u'©',
    'trade': u'™',
})
class HtmlElem(object):
    """
    Base entity for html elements: text, comment, tag.

    Provides is_text / is_tag / is_comment classification and common
    string conversions for the concrete subclasses.
    """

    @property
    def is_text(self):
        # Exact-class comparison on purpose: HtmlComment subclasses
        # HtmlText but must not be classified as text.
        return self.__class__ == HtmlText

    @property
    def is_tag(self):
        return self.__class__ == HtmlTag

    @property
    def is_comment(self):
        return self.__class__ == HtmlComment

    def __repr__(self):
        return "%s('%s')" % (self.__class__.__name__, str(self))

    def __unicode__(self):
        # BUGFIX: the original returned the bound method object
        # ``self.__str__`` without calling it, so unicode(elem) yielded
        # a method, not the element's text.
        return self.__str__()
class HtmlText(HtmlElem):
    """
    Wrapper for html text.

    Whitespace-only text collapses to a single space, CDATA text is
    stored verbatim, and anything else has its entities decoded.
    """

    def __init__(self, text, cdata=False):
        is_blank = text != '' and text.strip() == ''
        if is_blank:
            self.text = u' '
        else:
            self.text = text if cdata else process_entities(text)

    def __str__(self):
        return self.text

    def __eq__(self, other):
        # Compare against another HtmlText by payload, or directly
        # against a plain/unicode string; anything else is unequal.
        if isinstance(other, self.__class__):
            return other.text == self.text
        if isinstance(other, basestring):
            return other == self.text
        return False
class HtmlComment(HtmlText):
    """
    Wrapper for an html comment.

    Deliberately bypasses HtmlText.__init__: comment bodies are kept
    verbatim, with no entity decoding or whitespace collapsing.
    """

    def __init__(self, text):
        self.text = text
class HtmlTag(HtmlElem):
    """
    Wrapper for html tag.

    Parses '<...>' source into name (lowercased), attrs dict, and the
    is_closed / is_pair flags.
    """
    # Tags that never take a closing counterpart.
    SINGLE_TAGS = (
        'br',
        'hr',
        'img',
        'link',
        'meta',
        '!doctype',
    )
    # (strip-chars, regex) pairs tried in order: double-quoted,
    # single-quoted, then unquoted attribute values.
    _PARSE_ATTRS_RULES = (
        ('"', re.compile('(\w+)\s*=\s*"([^"]*)"')),
        ('\'', re.compile("(\w+)\s*=\s*'([^']*)'")),
        ('', re.compile("(\w+)\s*=\s*([^'\"\s]+)")),
        #('', re.compile("([^\s\"'=])()(?=\s|$)")), # TODO single attrs
    )

    def __init__(self, content):
        # ``content`` must be the full tag source including the brackets.
        if content[0] != '<' or content[-1] != '>':
            raise Exception('Invalid tag `%s`' % content)
        content = content[1:-1]
        if content.startswith('/'): # close tag
            content = content[1:]
            self.is_closed = True
            self.is_pair = True
        elif content.endswith('/'): # non-pair tag
            self.is_closed = True
            self.is_pair = False
        else: # open tag
            self.is_closed = False
            self.is_pair = None # get it from name later
        # Split into tag name and the raw attribute string (if any).
        bits = filter(None, content.strip().split(None, 1))
        self.name = bits[0].lower()
        if self.is_pair is None:
            self.is_pair = not self.name in self.SINGLE_TAGS
        self.attrs = {}
        if len(bits) == 2: # has attributes
            str_attrs = bits[1]
            for strip, pattern in self._PARSE_ATTRS_RULES:
                for m in pattern.finditer(str_attrs):
                    self.attrs[m.group(1)] = m.group(2).strip(strip)
        # print self.attrs

    def __eq__(self, other):
        # A string compares equal if it parses to an equivalent tag;
        # unparsable strings (and other types) are simply unequal.
        if isinstance(other, basestring):
            try:
                return self == self.__class__(other)
            except:
                return False
        elif isinstance(other, HtmlTag):
            return (self.name == other.name and
                    self.is_closed == other.is_closed and
                    self.is_pair == other.is_pair and
                    self.attrs == other.attrs)
        else:
            return False

    def __str__(self):
        # Re-serialize; attribute order follows dict iteration order.
        attrs = ''.join((u' %s="%s"' % (k, v)) for k, v in self.attrs.items())
        if self.is_pair:
            if self.is_closed:
                return u'</%s>' % self.name
            else:
                return u'<%s%s>' % (self.name, attrs)
        else:
            return u'<%s%s />' % (self.name, attrs)
# Delimiters for the comment and CDATA sections recognized by htmlchunks().
_LCOMMENT, _RCOMMENT = '<!--', '-->'
_LCDATA, _RCDATA = '<![CDATA[', ']]>'
def htmlchunks(html, return_comments=False):
    """
    Generator over an html string.

    Yields HtmlText and HtmlTag objects (plus HtmlComment objects when
    *return_comments* is True) in document order.  Comment and CDATA
    sections are recognized before ordinary tags; CDATA bodies skip
    entity processing.  Raises Exception when *html* cannot be coerced
    to unicode.
    """
    if not isinstance(html, unicode):
        try:
            html = unicode(html)
        except UnicodeDecodeError:
            # BUG FIX: was `raise Exception, '...'` -- Python-2-only
            # raise syntax; the call form works on both 2 and 3.
            raise Exception('`html` arg of parsehtml must be unicode')
    curpos = 0
    size = len(html)
    while curpos < size:
        starttag = html.find('<', curpos)
        if starttag == -1:
            # No more markup: flush any trailing text and stop.
            text = html[curpos:]
            if text: yield HtmlText(text)
            break
        # Comment section starting here?
        begin_lcomment = starttag
        end_lcomment = begin_lcomment + len(_LCOMMENT)
        if html[begin_lcomment:end_lcomment] == _LCOMMENT:
            begin_rcomment = html.find(_RCOMMENT, end_lcomment)
            if begin_rcomment == -1:
                # Unterminated comment: emit the remainder as text.
                text = html[curpos:].strip()
                if text: yield HtmlText(text)
                break
            text = html[curpos:begin_lcomment]
            if text: yield HtmlText(text)
            if return_comments:
                yield HtmlComment(html[end_lcomment:begin_rcomment])
            curpos = begin_rcomment + len(_RCOMMENT)
            continue
        # CDATA section starting here?
        begin_lcdata = starttag
        end_lcdata = begin_lcdata + len(_LCDATA)
        if html[begin_lcdata:end_lcdata] == _LCDATA:
            begin_rcdata = html.find(_RCDATA, end_lcdata)
            if begin_rcdata == -1:
                text = html[curpos:].strip()
                if text: yield HtmlText(text)
                break
            # Text before the CDATA gets entity processing; the CDATA
            # body itself is passed through verbatim.
            text = process_entities(html[curpos:begin_lcdata]) + html[end_lcdata:begin_rcdata]
            yield HtmlText(text, cdata=True)
            curpos = begin_rcdata + len(_RCDATA)
            continue
        stoptag = html.find('>', starttag)
        if stoptag == -1:
            text = html[curpos:]
            if text: yield HtmlText(text)
            break
        # TODO check for incorrect '<' and '>' and re-search tag begin/end
        tag = HtmlTag(html[starttag:stoptag + 1])
        text = html[curpos:starttag]
        if text: yield HtmlText(text)
        yield tag
        curpos = stoptag + 1
    # BUG FIX: removed the trailing `raise StopIteration` -- under
    # PEP 479 that surfaces as RuntimeError, and falling off the end of
    # a generator raises StopIteration anyway.
def parsehtml(s, return_comments=False):
    """
    Generator yielding (elem, opened) pairs.

    *elem* comes straight from htmlchunks(); *opened* is an
    OpenedTagsStat tracking the currently open pair tags (the same,
    mutated, object is yielded every time).
    """
    opened = OpenedTagsStat()
    for elem in htmlchunks(s, return_comments=return_comments):
        if elem.is_tag and elem.is_pair:
            if elem.is_closed:
                opened.drop_last(elem)
            else:
                opened.append(elem)
        yield elem, opened
    # BUG FIX: removed the trailing `raise StopIteration` -- redundant
    # in a generator and a RuntimeError under PEP 479.
| Python |
import re
class EntityProcessor(object):
    """
    Replace &...; entities in a string.

    Numeric entities (&#NNN;) are decoded automatically; named entities
    are looked up in the dictionary given to the constructor and left
    untouched when unknown.

    >>> replacer = EntityProcessor({'amp': '&', 'apos': '`'})
    >>> print replacer('A&amp;B')
    A&B
    """
    _entity_re = re.compile('&([^;\s]+);')

    def __init__(self, entity_dict=None):
        # BUG FIX: was `entity_dict={}` -- a shared mutable default
        # argument; None now stands for "no named entities".
        self._entity_dict = entity_dict if entity_dict is not None else {}

    def __call__(self, s):
        """Return *s* with all recognized entities replaced."""
        return self._entity_re.sub(self._replace_entity, s)

    def _replace_entity(self, m):
        # Replacement callback for one `&...;` match.
        e = m.group(1)
        replacement = self._entity_dict.get(e)
        if replacement is not None:
            return replacement
        if e[0] == '#':
            try:
                num = int(e[1:])
            except ValueError:  # was a bare except; only int() can fail here
                return u'&%s;' % e
            return unichr(num)
        # Unknown named entity: leave it as-is.
        return u'&%s;' % e
class OpenedTagsStat(object):
    """
    Tracks statistics about the currently opened tags.

    tags     -- list of HtmlTag objects, oldest first
    tagnames -- lowercased open-tag names concatenated without
                attributes, like '<html><body><div><div>'
    last     -- most recently appended tag, or None when empty
    """
    def __init__(self):
        self._tags = []
        self._refresh()

    @property
    def tags(self):
        return self._tags

    @property
    def tagnames(self):
        return self._tagnames

    @property
    def last(self):
        return self._last

    def drop_last(self, tag):
        """Remove the most recently opened tag whose name matches *tag*."""
        for i in xrange(len(self._tags) - 1, -1, -1):
            if self._tags[i].name != tag.name:
                continue
            del self._tags[i]
            self._refresh()
            break

    def append(self, tag):
        """Push *tag* as the newest opened tag."""
        self._tags.append(tag)
        self._refresh()

    def _refresh(self):
        # Recompute the cached summary after every mutation.
        names = ['<%s>' % t.name for t in self._tags]
        self._tagnames = ''.join(names)
        # Mirrors the original `tags and tags[-1] or None` exactly,
        # including the falsy-last-element corner case.
        self._last = (self._tags[-1] or None) if self._tags else None
| Python |
import unittest
from parse.htmlchunks import htmlchunks, parsehtml, HtmlTag, HtmlText, HtmlComment
#class TestSkiptagcontent(unittest.TestCase):
# def test001(self):
# html = """<style>h1 {color: green}</style> <h1>HELLO!</h1>"""
# chunks = tuple(skiptagcontent(['style'], htmlchunks(html)))
# control_chunks = (
# " ",
# HtmlTag("<h1>"),
# "HELLO!",
# HtmlTag("</h1>"),
# )
# self.assertEqual(chunks, control_chunks)
class TestParseHtml(unittest.TestCase):
    """Unit tests for HtmlTag parsing and the htmlchunks generator."""

    def testHtmlTag(self):
        # Tag names are lowercased during parsing.
        self.assertEqual(HtmlTag("<h1>").name, 'h1')
        self.assertEqual(HtmlTag("<TITLE>").name, 'title')

    def testHtmlTagAttrs(self):
        # Double-quoted, single-quoted and unquoted attributes all parse.
        self.assertEqual(HtmlTag("<div id='kk'>").attrs, {'id': 'kk'})
        self.assertEqual(HtmlTag('<div id="ll">').attrs, {'id': 'll'})
        self.assertEqual(HtmlTag('<div id=mm>').attrs, {'id': 'mm'})
        self.assertEqual(HtmlTag('<div id=mm title=none>').attrs, {'id': 'mm', 'title': 'none'})
        self.assertEqual(HtmlTag('<div id="mm" title=none>').attrs, {'id': 'mm', 'title': 'none'})

    def testHtmlTagPair(self):
        # is_pair is False for known single tags and explicit '<x />'.
        self.assertTrue(HtmlTag("<div>").is_pair)
        self.assertTrue(HtmlTag("<a>").is_pair)
        self.assertFalse(HtmlTag("<img>").is_pair)
        self.assertFalse(HtmlTag("<br>").is_pair)
        self.assertFalse(HtmlTag("<tag />").is_pair)

    def test001(self):
        html = """<h1>HELLO!</h1> <p>It's me.</P>"""
        chunks = tuple(htmlchunks(html))
        control_chunks = (
            HtmlTag("<h1>"), "HELLO!", HtmlTag("</h1>"),
            " ",
            HtmlTag("<p>"), "It's me.", HtmlTag("</p>"),
        )
        self.assertEqual(chunks, control_chunks)

    def test002(self):
        html = """<h1 id="kuku">HELLO!</h1>"""
        chunks = tuple(htmlchunks(html))
        control_chunks = (
            HtmlTag("<h1 id='kuku'>"), "HELLO!", HtmlTag("</h1>"),
        )
        self.assertEqual(chunks, control_chunks)

    def test003(self):
        # BUG FIX: this method used to be silently shadowed by a second
        # `test003` below, and its control tuple built HtmlTag('text'),
        # which raises 'Invalid tag' on construction.  Plain text is
        # yielded as HtmlText, which compares equal to a string.
        html = """text"""
        chunks = tuple(htmlchunks(html))
        control_chunks = (
            'text',
        )
        self.assertEqual(chunks, control_chunks)

    def test004(self):
        # BUG FIX: renamed from a duplicate `test003` so both tests run.
        html = """ohm<br><br>padme"""
        chunks = tuple(htmlchunks(html))
        control_chunks = (
            'ohm',
            HtmlTag('<br>'),
            HtmlTag('<br>'),
            'padme',
        )
        self.assertEqual(chunks, control_chunks)

    def testComment001(self):
        # Comments are dropped by default.
        html = """A<!-- B -->C"""
        chunks = tuple(htmlchunks(html))
        control_chunks = (
            'A',
            'C',
        )
        self.assertEqual(chunks, control_chunks)

    def testComment002(self):
        # ...and yielded as HtmlComment when requested.
        html = """A<!-- B -->C"""
        chunks = tuple(htmlchunks(html, return_comments=True))
        control_chunks = (
            'A',
            HtmlComment(' B '),
            'C',
        )
        self.assertEqual(chunks, control_chunks)

    def testComment003(self):
        html = """t <b>1</b> A<!-- B -->C"""
        chunks = tuple(htmlchunks(html))
        control_chunks = (
            't ',
            HtmlTag('<b>'),
            '1',
            HtmlTag('</b>'),
            ' A',
            'C',
        )
        self.assertEqual(chunks, control_chunks)

    def testCdata001(self):
        # CDATA bodies are emitted verbatim, without the markers.
        html = """z<script><![CDATA[123]]></script>x"""
        chunks = tuple(htmlchunks(html))
        control_chunks = (
            'z',
            HtmlTag('<script>'),
            '123',
            HtmlTag('</script>'),
            'x',
        )
        self.assertEqual(chunks, control_chunks)

    def testCdata002(self):
        # Text before CDATA is entity-processed; the CDATA body is not.
        html = """z<script>&amp;<![CDATA[&amp;]]></script>x"""
        chunks = tuple(htmlchunks(html))
        control_chunks = (
            'z',
            HtmlTag('<script>'),
            '&&amp;',
            HtmlTag('</script>'),
            'x',
        )
        self.assertEqual(chunks, control_chunks)

    def testEntity(self):
        html = """m&amp;m's &amp;"""
        chunks = tuple(htmlchunks(html))
        control_chunks = (
            "m&m's &",
        )
        self.assertEqual(chunks, control_chunks)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| Python |
import urllib2
class BrowserError(Exception):
    """Raised by Browser for its own error conditions (e.g. saving before any page was opened)."""
    pass
class Browser(object):
    """
    Create 'browser': a urllib2 opener with a fixed user-agent (and an
    optional proxy) that remembers the last opened url and its content.
    """
    def __init__(self,
            user_agent = 'Mozilla/5.0 (Windows; U; Windows NT 5.1; ru; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3',
            proxy = None
            ):
        handlers = []
        if proxy:
            handlers.append(urllib2.ProxyHandler(proxy))
        self.opener = urllib2.build_opener(*handlers)
        self.opener.addheaders = [('User-agent', user_agent)]
        # Last successfully opened url / page body (None until open()).
        self.current_url = None
        self.current_content = None

    def open(self, url):
        """
        Open url and return page content.

        Raises BrowserError on an HTTP error status.
        """
        try:
            c = self.opener.open(url)
            content = c.read()
            c.close()  # BUG FIX: the response object was never closed
        except urllib2.HTTPError as error:  # was Py2-only `except X, e` syntax
            # BUG FIX: was `raise Exception, "[HTTP Error %s] %s, on %s]"`
            # -- Py2-only raise syntax plus an unbalanced ']' in the
            # message; BrowserError (an Exception subclass) is this
            # module's own error type.
            raise BrowserError("[HTTP Error %s] %s, on %s" % (error.code, error.msg, url))
        self.current_url = url
        self.current_content = content
        return self.current_content

    def save(self, filename):
        """
        Save last opened page to file.

        Raises BrowserError when no page has been opened yet.
        """
        if self.current_content is None:
            raise BrowserError('No pages was opened!')
        # BUG FIX: `open(filename, 'w').write(...)` leaked the handle;
        # `with` guarantees it is closed.
        with open(filename, 'w') as f:
            f.write(self.current_content)
| Python |
#!/usr/bin/python
# Build script: produces a classic sdist tarball and a Debian package
# for the `currencies` library via the dcinstall helpers.
import os
from dcinstall import setup_py, debian, AbstractPackageInfo
from dcinstall.version import get_revisions, get_changelog_core, save_last_revision

class build(AbstractPackageInfo.build):
    # Build layout: sources, build and output dirs all rooted at this file.
    sources_dir = os.path.realpath(os.path.dirname(__file__))
    build_dir = sources_dir
    result_dir = sources_dir + '/dist'

# Revision span since the last successful build (drives the changelog).
last_revision, cur_revision = get_revisions(build)

class info(AbstractPackageInfo):
    """Package metadata for the `currencies` library."""
    build = build
    version = '1.1-SVN%d' % cur_revision
    changelog_core = get_changelog_core(last_revision, cur_revision)
    name = 'currencies'
    deb_package_name = 'foreignsites-currencies'
    maintainer = 'Maxim Oransky'
    maintainer_email = 'maxim.oransky@gmail.com'
    description = 'Library for work with currencies (load rates)'
    url = 'http://code.google.com/p/foreignsites/'
    deb_depends = [
        'python (>= 2.4)',
    ]
    license = 'GNU Lesser General Public License'

###################
# 1. Classic setup.py installer
setup_py.create(info, skipfiles='.*\.pyc|.*\.pyo|\.svn|\.sqlite')
os.system('python setup.py sdist --formats=gztar')
setup_py.clean()

###################
# 2. Debian package
debian.build_package(
    info,
    ['dist/%s-%s.tar.gz' % (info.name, info.version)],
    custom_deb_templates = {
        'postinst': '%s/postinst.template' % build.build_dir,
    },
)
os.system('mv %s*deb %s' % (info.deb_package_name, build.result_dir))

# On success, mark the current revision as "last built".
save_last_revision(build, cur_revision)
# Numeric currency codes keyed by their three-letter names.
BYR = 974
USD = 840
EUR = 978
UAH = 980
GBP = 826
CHF = 756
JPY = 392
RUR = 810 # before denomination
RUB = 643 # after denomination. 1 RUB = 1000 * RUR

# Reverse map: numeric code -> three-letter name, built from the module
# constants above (every constant name is exactly 3 characters long).
_strcodes = dict((v, k) for k, v in locals().items() if len(k) == 3)

def currency2str(intcode):
    """
    Return the three-letter name for a numeric currency code.

    Raises ValueError for unknown codes.  (BUG FIX: the original did
    `raise 'Unknown currency ...'` -- string exceptions are illegal in
    modern Python and produced a TypeError instead of a useful error.)
    """
    try:
        return _strcodes[intcode]
    except KeyError:
        raise ValueError('Unknown currency `%s`' % intcode)
# -*- coding: utf-8 -*-
import datetime
import os
import re
import sys
import urllib2
from codes import *
from errors import CurrencyConvertError
from db import CurrenciesDb
import banks
import conf
def equals(a, b):
    """Return True when *a* and *b* (floats or ints) are equal after
    rounding their difference to two decimal places."""
    diff = round(a - b, 2)
    return diff == 0
# Lazily created module-level db connection; see convert().
cdb = None

def convert(value, source_currency, target_currency, date=None):
    """
    Convert money value from source_currency to target_currency.

    Currencies are numeric codes (see codes.py); *date* defaults to
    today.  Returns a float.  Raises CurrencyConvertError when the rate
    server is unreachable or no rate exists for the pair/date.
    """
    if value == 0:
        return float(value)
    global cdb
    if cdb is None: # lazy connection
        # On win32, or on hosts without /var/currencies/, fall back to a
        # sqlite file next to this module.
        if sys.platform == 'win32' or not os.path.exists('/var/currencies/'):
            dbpath = os.path.join(os.path.dirname(__file__), 'currencies.sqlite')
        else:
            dbpath = '/var/currencies/db.sqlite'
        cdb = CurrenciesDb(dbpath)
    if date is None:
        date = datetime.date.today()
#    if date.year <= conf.FIRST_DENOMINATION_YEAR:
#        if source_currency == RUB:
    try:
        # Identity and RUR<->RUB conversions short-circuit the db lookup.
        rate = specialcase_rate(source_currency, target_currency, date) or \
            cdb.get_rate(source_currency, target_currency, date, banks.CBR_RU)
    except urllib2.HTTPError:
        raise CurrencyConvertError('Cannot connect to currency rates server. Try again.')
    if rate is None:
        raise CurrencyConvertError('Cannot get rate for source_currency %d, '
            'target_currency %d, date %s' % (source_currency, target_currency, date))
    return float(value) * rate
def specialcase_rate(source_currency, target_currency, date):
    """Return the rate for conversions that need no bank lookup
    (identity and RUR<->RUB denomination), or None otherwise."""
    if source_currency == target_currency:
        return 1
    if source_currency == RUR:
        if target_currency == RUB:
            return 0.001
    elif source_currency == RUB:
        if target_currency == RUR:
            return 1000.0
    return None
| Python |
import datetime
import unittest
from core import convert, equals, CurrencyConvertError
from codes import *
class TestConvert(unittest.TestCase):
    """Tests for core.equals / core.convert.

    NOTE(review): most convert() cases need the local rates db or the
    live CBR server, so these are integration tests, not pure unit tests.
    """
    def testEquals(self):
        # equals() compares after rounding the difference to 2 digits.
        self.assertTrue(equals(1.00, 1))
        self.assertTrue(equals(1.001, 1))
        self.assertTrue(equals(0.331, 0.33))
        self.assertTrue(equals(0.3333333, 0.33))
        self.assertTrue(equals(0.334, 0.33))
        self.assertTrue(equals(0.339, 0.34))
    def test_rub_rur(self):
        # Denomination special cases: handled without any bank lookup.
        self.assertEquals(convert(1, RUR, RUR), 1.0)
        self.assertEquals(convert(10000.0, RUR, RUB), 10.0)
    def test_known_date(self):
        # Historic rates fixed in the db for these dates.
        v = convert(1, USD, RUB, datetime.date(2000, 10, 16))
        self.assertTrue(equals(v, 27.90))
        v = convert(1, UAH, RUB, datetime.date(2000, 10, 15))
        self.assertTrue(equals(v, 5.10))
    def test_current_date(self):
        # Today's rate is not fixed, so only sanity-check positivity.
        v = convert(1, USD, RUB)
        self.assertTrue(v > 0)
    def test_1995_date(self):
        # NOTE(review): asserts nothing -- the expected value below is
        # commented out; the call only exercises the pre-denomination path.
        v = convert(1, USD, RUB, datetime.date(1995, 10, 14))
        #self.assertTrue(equals(v, 5.10))
    def testErrorCurrency(self):
        # An unknown numeric code must raise CurrencyConvertError.
        self.assertRaises(CurrencyConvertError,
            lambda: convert(1, 1234567, RUB, datetime.date(2000, 10, 10)))
#    def testErrorDate(self):
#        try:
#            convert(1, EUR, RUB, datetime.date(1983, 10, 10))
#        except CurrencyConvertError:
#            self.assertTrue(True)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| Python |
# Known bank identifiers.
CBR_RU = 1

def loader(bank):
    """Return a rate-loader instance for *bank*; None for unknown banks."""
    if bank != CBR_RU:
        return None
    from loaders import CbrRateLoader
    return CbrRateLoader()
| Python |
import datetime
try:
import pysqlite2.dbapi2 as sqlite3
except ImportError:
import sqlite3
import banks
class DbError(Exception):
    """Raised by CurrenciesDb for db open/query failures."""
    pass
class CurrenciesDb(object):
    """
    Sqlite-backed cache of currency rates, filled on demand from a bank
    loader (see banks.loader).
    """
    def __init__(self, db_name):
        """Open (creating the schema if needed) the sqlite db *db_name*.

        Raises DbError when the file cannot be opened.
        """
        try:
            self.conn = sqlite3.connect(db_name)
        except sqlite3.OperationalError:
            raise DbError('Cannot open `%s`' % db_name)
        self._create()

    def _create(self):
        # Create the rates table on first use; probing with a SELECT
        # keeps the happy path a single cheap statement.
        c = self.conn.cursor()
        try:
            c.execute('select 1 from rates')
        except sqlite3.OperationalError:
            c.execute('''
                CREATE TABLE rates (
                    source_quantity float,
                    source_currency int,
                    target_quantity float,
                    target_currency int,
                    date datetime,
                    bank int
                )''')
            self.conn.commit()

    def get_rate(self, source_currency, target_currency, date, bank):
        """
        Return k, where:
            source_currency_money * k = target_currency_money
            k = target_currency_money / source_currency_money
        or None when no rate can be found.

        Lookup order: direct pair, reversed pair, then -- after force-
        loading that day's rates from the bank -- the same two again.
        """
        for currency1, currency2, is_opposite, force_load in (
                (source_currency, target_currency, False, False),
                (target_currency, source_currency, True, False),
                (source_currency, target_currency, False, True),
                (target_currency, source_currency, True, False),
                ):
            if force_load:
                # Give up if this date/bank was already loaded once:
                # re-fetching would not produce new rows.
                if self._num_entries({
                        'date': date.strftime('%Y-%m-%d'),
                        'bank': bank,
                        }) > 0: return None
                data = banks.loader(bank)(date)
                self.insert_data(data, date, bank)
            res = self._fetch_one({
                'source_currency': currency1,
                'target_currency': currency2,
                'date': date.strftime('%Y-%m-%d'),
                'bank': bank,
                })
            if res:
                source_currency_money, target_currency_money = res
                if is_opposite:
                    return float(source_currency_money) / target_currency_money
                else:
                    return float(target_currency_money) / source_currency_money
        return None

    def _fetch_one(self, cond):
        """Return (source_quantity, target_quantity) for the single row
        matching *cond* (a column-name -> value dict), or None.

        Raises DbError when more than one row matches.
        """
        c = self.conn.cursor()
        # Column names come from internal callers only, so building the
        # WHERE clause by interpolation is safe; values stay bound.
        sql = '''
            SELECT
                source_quantity, target_quantity
            FROM rates WHERE %s
        ''' % ' AND '.join('%s = ? ' % k for k in cond)
        c.execute(sql, tuple(cond.values()))
        rows = c.fetchall()
        if rows:
            if len(rows) > 1:
                # BUG FIX: was `raise DbError, 'To many results retured'`
                # -- Py2-only raise syntax plus typos in the message.
                raise DbError('Too many results returned')
            return rows[0]
        return None

    def _num_entries(self, cond):
        """Return the number of rows matching *cond*."""
        c = self.conn.cursor()
        sql = '''
            SELECT COUNT(*)
            FROM rates WHERE %s
        ''' % ' AND '.join('%s = ? ' % k for k in cond)
        c.execute(sql, tuple(cond.values()))
        return int(c.fetchall()[0][0])

    def insert_data(self, data, date, bank):
        """Insert rows of (source_quantity, source_currency,
        target_quantity, target_currency) tuples for *date*/*bank*."""
        c = self.conn.cursor()
        for sq, sc, tq, tc in data:
            c.execute('''
                INSERT INTO rates
                    (source_quantity, source_currency,
                     target_quantity, target_currency,
                     date, bank)
                VALUES
                    (?, ?,
                     ?, ?,
                     ?, ?)''',
                (sq, sc,
                 tq, tc,
                 date.strftime('%Y-%m-%d'), bank))
        self.conn.commit()
| Python |
# -*- coding: utf-8 -*-
from codes import *
# Russian currency names as (singular, paucal 2-4, plural 5+) forms,
# keyed by the numeric codes from codes.py.
_ru = {
    BYR: (u'белорусский рубль', u'белорусских рубля', u'белорусских рублей'),
    USD: (u'доллар США', u'доллара США', u'долларов США'),
    EUR: (u'евро', u'евро', u'евро'),
    UAH: (u'украинская гривна', u'украинских гривны', u'украинских гривен'),
    # BUG FIX: was `GBR`, which codes.py never defines (NameError on
    # import); the pound constant is `GBP`.
    GBP: (u'фунт стерлингов соединенного королевства', u'фунта стерлингов соединенного королевства', u'фунтов стерлингов соединенного королевства'),
    CHF: (u'швейцарский франк', u'швейцарский франка', u'швейцарских франков'),
    JPY: (u'японская иена', u'японских иены', u'японских иен'),
}

def ru(currency):
    """Return the Russian name forms tuple for *currency*, or None when unknown."""
    return _ru.get(currency)
import re
import urllib2
from codes import *
import conf
class CbrRateLoader(object):
    """
    Loads daily currency rates from the Central Bank of Russia
    printable rates page.
    """
    # Sample of the html row the regex below matches:
    #<tr bgcolor="#ffffff"><td align="right" >36</td>
    #<td align="left" > AUD<td align="right" >1</td>
    #<td> Австралийский доллар</td>
    #<td align="right">14,8100</td></tr>
    # Groups: (numeric currency code, quantity, value with comma decimal).
    # NOTE(review): the `(?: )*` groups look like they should match
    # &nbsp;/space padding -- verify the space character survived
    # copy-paste intact.
    _re_currency = re.compile(
        '''<tr bgcolor="#ffffff"><td align="right" >(\d+)(?:</td>)?\s*'''
        '''<td align="left" >(?: )*[^<]+(?:</td>)?\s*<td align="right" >(\d+)(?:</td>)?\s*'''
        '''<td>(?: )*[^<]+(?:</td>)?\s*'''
        '''<td align="right">(\d+,\d+)(?:</td>)?</tr>'''
    )
    MIN_YEAR = 1992
    BASE_CURRENCY = RUB

    def __call__(self, date):
        """
        Fetch rates for *date* from cbr.ru.

        Return list of:
            (source_quantity, source_currency, target_quantity, target_currency)
        where the target is always BASE_CURRENCY.
        """
        url = 'http://cbr.ru/currency_base/D_print.aspx?date_req=%02d.%02d.%d'
        html = urllib2.urlopen(url % (date.day, date.month, date.year)).read()
        # Pre-denomination pages quote old rubles: scale values by 1/1000.
        if date.year < conf.FIRST_DENOMINATION_YEAR:
            process_value = lambda v: float(v.replace(',', '.')) / 1000
        else:
            process_value = lambda v: float(v.replace(',', '.'))
        # BUG FIX: removed the leftover debug `print 'res = ', res`
        # statement (and the commented-out debug dumps around the fetch).
        return [(int(quantity), int(code), process_value(value), self.BASE_CURRENCY)
            for code, quantity, value in self._re_currency.findall(html)]
| Python |
class CurrencyConvertError(Exception):
    """Raised by core.convert when a rate cannot be obtained."""
    pass
FIRST_DENOMINATION_YEAR = 1998 | Python |
Subsets and Splits
SQL Console for ajibawa-2023/Python-Code-Large
Provides a useful breakdown of language distribution in the training data, showing which languages have the most samples and helping identify potential imbalances across different language groups.