code stringlengths 1 1.72M | language stringclasses 1
value |
|---|---|
#!/usr/bin/env python
# Parse output trace file from a binary compiled with -finstrument-functions flag
# Replaces function addresses with name and location of functions
# ...using addr2line utility from binutils
import sys
import getopt
import re
import os.path
from os import popen
def usage() :
print "Usage: parse.py -p <program binary> -t <trace file>"
print " Example: parse.py -p prog -t trace.out"
def main(argv):
argCount = 0
# Parse command line arguments
try:
opts, args = getopt.getopt(argv, "hp:t:", ["help", "program=", "trace="])
except getopt.GetoptError:
usage()
sys.exit(1)
for opt, arg in opts:
if opt in ("-h", "--help"):
usage()
sys.exit(1)
elif opt in ("-p", "--program"):
program = arg
if os.path.exists(program):
print "Parse traces from %s ..." % (program)
argCount = argCount +1
else:
print "Inexisent binary file"
sys.exit(2)
elif opt in ("-t", "--trace"):
traceFile = arg
if os.path.exists(traceFile):
print ".. using tracefile %s." % (traceFile)
argCount = argCount +1
else:
print "Inexisent trace file"
sys.exit(2)
if argCount != 2 :
usage()
print "Not enough arguments. Exiting..."
sys.exit(2)
# Search and replace addresses with names and location
cmd = "cat "+traceFile
cmdOut = popen(cmd)
# Parse each line and extract each function once
for eachLine in cmdOut.readlines():
# Keep line indentation
indent = 0
while eachLine[indent] == ' ':
print ' ',
indent = indent + 1
for word in eachLine.rsplit():
funcAddr = re.search("0x[0-9A-F]*", word)
if funcAddr != None:
funcName = ((popen("addr2line -f -e " + program +" " + funcAddr.group(0) + "|head -1")).readlines()[0]).strip()
funcLoc = ((popen("addr2line -s -e " + program +" " + funcAddr.group(0))).readlines()[0]).strip()
print "%s(%s)" % (funcName, funcLoc),
else:
print word,
print "\n",
cmdOut.close()
if __name__ == "__main__":
main(sys.argv[1:])
| Python |
#-----------------------------------------------------------------------------
# ply: lex.py
#
# Author: David M. Beazley (dave@dabeaz.com)
#
# Copyright (C) 2001-2007, David M. Beazley
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# See the file COPYING for a complete copy of the LGPL.
#-----------------------------------------------------------------------------
__version__ = "2.3"

import re, sys, types

# Regular expression used to match valid token names
_is_identifier = re.compile(r'^[a-zA-Z0-9_]+$')

# Available instance types. This is used when lexers are defined by a class.
# It's a little funky because I want to preserve backwards compatibility
# with Python 2.0 where types.ObjectType is undefined.

try:
    _INSTANCETYPE = (types.InstanceType, types.ObjectType)
except AttributeError:
    _INSTANCETYPE = types.InstanceType
    # Fallback for interpreters without new-style classes; this
    # deliberately shadows the builtin name `object` in this module.
    class object: pass       # Note: needed if no new-style classes present
# Exception thrown when invalid token encountered and no default error
# handler is defined.
class LexError(Exception):
    """Raised when the lexer hits input it cannot tokenize and no
    error rule is available.

    Attributes:
        args -- one-tuple containing the error message.
        text -- the remaining (unconsumed) input text.
    """
    def __init__(self, message, s):
        self.args = (message,)
        self.text = s
# Token class
class LexToken(object):
    """A single token returned by Lexer.token().

    The lexer assigns .type, .value, .lineno, .lexpos and .lexer
    externally; no constructor is defined.
    """

    def __str__(self):
        return "LexToken(%s,%r,%d,%d)" % (self.type, self.value, self.lineno, self.lexpos)

    def __repr__(self):
        return self.__str__()

    def skip(self, n):
        # Delegate to the owning lexer so user rules can skip input.
        self.lexer.skip(n)
# -----------------------------------------------------------------------------
# Lexer class
#
# This class encapsulates all of the methods and data associated with a lexer.
#
# input() - Store a new string in the lexer
# token() - Get the next token
# -----------------------------------------------------------------------------
class Lexer:
    """Encapsulates all of the state and methods of a generated lexer.

    Main entry points:
        input(s) -- store a new string in the lexer
        token()  -- return the next LexToken (or None at end of input)

    State-machine helpers (begin/push_state/pop_state/current_state)
    switch between the lexer states that lex() built.

    NOTE(review): deliberately an old-style class; the module header says
    it preserves compatibility with very old Python 2 releases.
    """
    def __init__(self):
        self.lexre = None             # Master regular expression. This is a list of
                                      # tuples (re,findex) where re is a compiled
                                      # regular expression and findex is a list
                                      # mapping regex group numbers to rules
        self.lexretext = None         # Current regular expression strings
        self.lexstatere = {}          # Dictionary mapping lexer states to master regexs
        self.lexstateretext = {}      # Dictionary mapping lexer states to regex strings
        self.lexstate = "INITIAL"     # Current lexer state
        self.lexstatestack = []       # Stack of lexer states
        self.lexstateinfo = None      # State information
        self.lexstateignore = {}      # Dictionary of ignored characters for each state
        self.lexstateerrorf = {}      # Dictionary of error functions for each state
        self.lexreflags = 0           # Optional re compile flags
        self.lexdata = None           # Actual input data (as a string)
        self.lexpos = 0               # Current position in input text
        self.lexlen = 0               # Length of the input text
        self.lexerrorf = None         # Error rule (if any)
        self.lextokens = None         # List of valid tokens
        self.lexignore = ""           # Ignored characters
        self.lexliterals = ""         # Literal characters that can be passed through
        self.lexmodule = None         # Module
        self.lineno = 1               # Current line number
        self.lexdebug = 0             # Debugging mode
        self.lexoptimize = 0          # Optimized mode

    def clone(self, object=None):
        """Return a copy of this lexer.

        If *object* is supplied, all bound rule methods in the copied
        tables are re-bound to that object (used when attaching the lexer
        to a new class instance); otherwise the clone shares this lexer's
        tables.
        """
        c = Lexer()
        c.lexstatere = self.lexstatere
        c.lexstateinfo = self.lexstateinfo
        c.lexstateretext = self.lexstateretext
        c.lexstate = self.lexstate
        c.lexstatestack = self.lexstatestack
        c.lexstateignore = self.lexstateignore
        c.lexstateerrorf = self.lexstateerrorf
        c.lexreflags = self.lexreflags
        c.lexdata = self.lexdata
        c.lexpos = self.lexpos
        c.lexlen = self.lexlen
        c.lextokens = self.lextokens
        c.lexdebug = self.lexdebug
        c.lineno = self.lineno
        c.lexoptimize = self.lexoptimize
        c.lexliterals = self.lexliterals
        c.lexmodule = self.lexmodule

        # If the object parameter has been supplied, it means we are attaching the
        # lexer to a new object.  In this case, we have to rebind all methods in
        # the lexstatere and lexstateerrorf tables.
        if object:
            newtab = { }
            for key, ritem in self.lexstatere.items():
                newre = []
                for cre, findex in ritem:
                    newfindex = []
                    for f in findex:
                        if not f or not f[0]:
                            newfindex.append(f)
                            continue
                        newfindex.append((getattr(object,f[0].__name__),f[1]))
                    newre.append((cre,newfindex))
                newtab[key] = newre
            c.lexstatere = newtab
            c.lexstateerrorf = { }
            for key, ef in self.lexstateerrorf.items():
                c.lexstateerrorf[key] = getattr(object,ef.__name__)
            c.lexmodule = object

        # Set up other attributes
        c.begin(c.lexstate)
        return c

    # ------------------------------------------------------------
    # writetab() - Write lexer information to a table file
    # ------------------------------------------------------------
    def writetab(self,tabfile):
        """Dump the lexer's tables to <tabfile>.py for optimized mode."""
        tf = open(tabfile+".py","w")
        tf.write("# %s.py. This file automatically created by PLY (version %s). Don't edit!\n" % (tabfile,__version__))
        tf.write("_lextokens = %s\n" % repr(self.lextokens))
        tf.write("_lexreflags = %s\n" % repr(self.lexreflags))
        tf.write("_lexliterals = %s\n" % repr(self.lexliterals))
        tf.write("_lexstateinfo = %s\n" % repr(self.lexstateinfo))

        # Rule functions cannot be repr()'d, so they are written out
        # by name (see _funcs_to_names) and resolved again in readtab().
        tabre = { }
        for key, lre in self.lexstatere.items():
            titem = []
            for i in range(len(lre)):
                titem.append((self.lexstateretext[key][i],_funcs_to_names(lre[i][1])))
            tabre[key] = titem
        tf.write("_lexstatere = %s\n" % repr(tabre))
        tf.write("_lexstateignore = %s\n" % repr(self.lexstateignore))

        taberr = { }
        for key, ef in self.lexstateerrorf.items():
            if ef:
                taberr[key] = ef.__name__
            else:
                taberr[key] = None
        tf.write("_lexstateerrorf = %s\n" % repr(taberr))
        tf.close()

    # ------------------------------------------------------------
    # readtab() - Read lexer information from a tab file
    # ------------------------------------------------------------
    def readtab(self,tabfile,fdict):
        """Load previously written tables; *fdict* maps rule names back
        to the actual functions (typically the caller's namespace)."""
        exec "import %s as lextab" % tabfile
        self.lextokens = lextab._lextokens
        self.lexreflags = lextab._lexreflags
        self.lexliterals = lextab._lexliterals
        self.lexstateinfo = lextab._lexstateinfo
        self.lexstateignore = lextab._lexstateignore
        self.lexstatere = { }
        self.lexstateretext = { }
        for key,lre in lextab._lexstatere.items():
            titem = []
            txtitem = []
            for i in range(len(lre)):
                # Recompile each saved regex and rebind rule names to functions
                titem.append((re.compile(lre[i][0],lextab._lexreflags),_names_to_funcs(lre[i][1],fdict)))
                txtitem.append(lre[i][0])
            self.lexstatere[key] = titem
            self.lexstateretext[key] = txtitem
        self.lexstateerrorf = { }
        for key,ef in lextab._lexstateerrorf.items():
            self.lexstateerrorf[key] = fdict[ef]
        self.begin('INITIAL')

    # ------------------------------------------------------------
    # input() - Push a new string into the lexer
    # ------------------------------------------------------------
    def input(self,s):
        """Store string *s* as the input to tokenize; resets position."""
        if not (isinstance(s,types.StringType) or isinstance(s,types.UnicodeType)):
            raise ValueError, "Expected a string"
        self.lexdata = s
        self.lexpos = 0
        self.lexlen = len(s)

    # ------------------------------------------------------------
    # begin() - Changes the lexing state
    # ------------------------------------------------------------
    def begin(self,state):
        """Switch to *state*; raises ValueError if it was never defined."""
        if not self.lexstatere.has_key(state):
            raise ValueError, "Undefined state"
        self.lexre = self.lexstatere[state]
        self.lexretext = self.lexstateretext[state]
        self.lexignore = self.lexstateignore.get(state,"")
        self.lexerrorf = self.lexstateerrorf.get(state,None)
        self.lexstate = state

    # ------------------------------------------------------------
    # push_state() - Changes the lexing state and saves old on stack
    # ------------------------------------------------------------
    def push_state(self,state):
        self.lexstatestack.append(self.lexstate)
        self.begin(state)

    # ------------------------------------------------------------
    # pop_state() - Restores the previous state
    # ------------------------------------------------------------
    def pop_state(self):
        self.begin(self.lexstatestack.pop())

    # ------------------------------------------------------------
    # current_state() - Returns the current lexing state
    # ------------------------------------------------------------
    def current_state(self):
        return self.lexstate

    # ------------------------------------------------------------
    # skip() - Skip ahead n characters
    # ------------------------------------------------------------
    def skip(self,n):
        self.lexpos += n

    # ------------------------------------------------------------
    # token() - Return the next token from the Lexer
    #
    # Note: This function has been carefully implemented to be as fast
    # as possible.  Don't make changes unless you really know what
    # you are doing
    # ------------------------------------------------------------
    def token(self):
        """Return the next LexToken, or None at end of input.

        Raises LexError on input no rule matches (unless an error rule
        handles it) and RuntimeError if input() was never called.
        """
        # Make local copies of frequently referenced attributes
        # (locals are faster to access inside the hot loop below)
        lexpos = self.lexpos
        lexlen = self.lexlen
        lexignore = self.lexignore
        lexdata = self.lexdata

        while lexpos < lexlen:
            # This code provides some short-circuit code for whitespace, tabs, and other ignored characters
            if lexdata[lexpos] in lexignore:
                lexpos += 1
                continue

            # Look for a regular expression match
            for lexre,lexindexfunc in self.lexre:
                m = lexre.match(lexdata,lexpos)
                if not m: continue

                # Set last match in lexer so that rules can access it if they want
                self.lexmatch = m

                # Create a token for return
                tok = LexToken()
                tok.value = m.group()
                tok.lineno = self.lineno
                tok.lexpos = lexpos
                tok.lexer = self

                lexpos = m.end()
                i = m.lastindex
                func,tok.type = lexindexfunc[i]
                self.lexpos = lexpos

                if not func:
                    # If no token type was set, it's an ignored token
                    if tok.type: return tok
                    break

                # if func not callable, it means it's an ignored token
                if not callable(func):
                    break

                # If token is processed by a function, call it
                newtok = func(tok)

                # Every function must return a token, if nothing, we just move to next token
                if not newtok:
                    lexpos = self.lexpos        # This is here in case user has updated lexpos.
                    break

                # Verify type of the token.  If not in the token map, raise an error
                if not self.lexoptimize:
                    if not self.lextokens.has_key(newtok.type):
                        raise LexError, ("%s:%d: Rule '%s' returned an unknown token type '%s'" % (
                            func.func_code.co_filename, func.func_code.co_firstlineno,
                            func.__name__, newtok.type),lexdata[lexpos:])

                return newtok
            else:
                # No match, see if in literals
                if lexdata[lexpos] in self.lexliterals:
                    tok = LexToken()
                    tok.value = lexdata[lexpos]
                    tok.lineno = self.lineno
                    tok.lexer = self
                    tok.type = tok.value        # literal tokens use the character itself as type
                    tok.lexpos = lexpos
                    self.lexpos = lexpos + 1
                    return tok

                # No match. Call t_error() if defined.
                if self.lexerrorf:
                    tok = LexToken()
                    tok.value = self.lexdata[lexpos:]
                    tok.lineno = self.lineno
                    tok.type = "error"
                    tok.lexer = self
                    tok.lexpos = lexpos
                    self.lexpos = lexpos
                    newtok = self.lexerrorf(tok)
                    if lexpos == self.lexpos:
                        # Error method didn't change text position at all. This is an error.
                        raise LexError, ("Scanning error. Illegal character '%s'" % (lexdata[lexpos]), lexdata[lexpos:])
                    lexpos = self.lexpos
                    if not newtok: continue
                    return newtok

                self.lexpos = lexpos
                raise LexError, ("Illegal character '%s' at index %d" % (lexdata[lexpos],lexpos), lexdata[lexpos:])

        # End of input reached
        self.lexpos = lexpos + 1
        if self.lexdata is None:
            raise RuntimeError, "No input string given with input()"
        return None
# -----------------------------------------------------------------------------
# _validate_file()
#
# This checks to see if there are duplicated t_rulename() functions or strings
# in the parser input file. This is done using a simple regular expression
# match on each line in the filename.
# -----------------------------------------------------------------------------
def _validate_file(filename):
import os.path
base,ext = os.path.splitext(filename)
if ext != '.py': return 1 # No idea what the file is. Return OK
try:
f = open(filename)
lines = f.readlines()
f.close()
except IOError:
return 1 # Oh well
fre = re.compile(r'\s*def\s+(t_[a-zA-Z_0-9]*)\(')
sre = re.compile(r'\s*(t_[a-zA-Z_0-9]*)\s*=')
counthash = { }
linen = 1
noerror = 1
for l in lines:
m = fre.match(l)
if not m:
m = sre.match(l)
if m:
name = m.group(1)
prev = counthash.get(name)
if not prev:
counthash[name] = linen
else:
print >>sys.stderr, "%s:%d: Rule %s redefined. Previously defined on line %d" % (filename,linen,name,prev)
noerror = 0
linen += 1
return noerror
# -----------------------------------------------------------------------------
# _funcs_to_names()
#
# Given a list of regular expression functions, this converts it to a list
# suitable for output to a table file
# -----------------------------------------------------------------------------
def _funcs_to_names(funclist):
result = []
for f in funclist:
if f and f[0]:
result.append((f[0].__name__,f[1]))
else:
result.append(f)
return result
# -----------------------------------------------------------------------------
# _names_to_funcs()
#
# Given a list of regular expression function names, this converts it back to
# functions.
# -----------------------------------------------------------------------------
def _names_to_funcs(namelist,fdict):
result = []
for n in namelist:
if n and n[0]:
result.append((fdict[n[0]],n[1]))
else:
result.append(n)
return result
# -----------------------------------------------------------------------------
# _form_master_re()
#
# This function takes a list of all of the regex components and attempts to
# form the master regular expression. Given limitations in the Python re
# module, it may be necessary to break the master regex into separate expressions.
# -----------------------------------------------------------------------------
def _form_master_re(relist,reflags,ldict,toknames):
if not relist: return []
regex = "|".join(relist)
try:
lexre = re.compile(regex,re.VERBOSE | reflags)
# Build the index to function map for the matching engine
lexindexfunc = [ None ] * (max(lexre.groupindex.values())+1)
for f,i in lexre.groupindex.items():
handle = ldict.get(f,None)
if type(handle) in (types.FunctionType, types.MethodType):
lexindexfunc[i] = (handle,toknames[handle.__name__])
elif handle is not None:
# If rule was specified as a string, we build an anonymous
# callback function to carry out the action
if f.find("ignore_") > 0:
lexindexfunc[i] = (None,None)
else:
lexindexfunc[i] = (None, toknames[f])
return [(lexre,lexindexfunc)],[regex]
except Exception,e:
m = int(len(relist)/2)
if m == 0: m = 1
llist, lre = _form_master_re(relist[:m],reflags,ldict,toknames)
rlist, rre = _form_master_re(relist[m:],reflags,ldict,toknames)
return llist+rlist, lre+rre
# -----------------------------------------------------------------------------
# def _statetoken(s,names)
#
# Given a declaration name s of the form "t_" and a dictionary whose keys are
# state names, this function returns a tuple (states,tokenname) where states
# is a tuple of state names and tokenname is the name of the token. For example,
# calling this with s = "t_foo_bar_SPAM" might return (('foo','bar'),'SPAM')
# -----------------------------------------------------------------------------
def _statetoken(s,names):
nonstate = 1
parts = s.split("_")
for i in range(1,len(parts)):
if not names.has_key(parts[i]) and parts[i] != 'ANY': break
if i > 1:
states = tuple(parts[1:i])
else:
states = ('INITIAL',)
if 'ANY' in states:
states = tuple(names.keys())
tokenname = "_".join(parts[i:])
return (states,tokenname)
# -----------------------------------------------------------------------------
# lex(module)
#
# Build all of the regular expression rules from definitions in the supplied module
# -----------------------------------------------------------------------------
def lex(module=None,object=None,debug=0,optimize=0,lextab="lextab",reflags=0,nowarn=0):
    """Build and return a Lexer from t_* rule definitions.

    Rules (`tokens`, optional `states` and `literals`, and t_* functions
    or strings) are collected from *module*/*object* if given, otherwise
    from the caller's global namespace via frame introspection.

    debug    -- print the rules being added
    optimize -- skip most validation; read/write the lextab table file
    lextab   -- base name of the table file for optimized mode
    reflags  -- extra flags OR'ed into each re.compile()
    nowarn   -- suppress warning diagnostics

    Side effects: sets the module-level globals `lexer`, `token` and
    `input`.  Raises SyntaxError if the rule set is invalid.
    """
    global lexer
    ldict = None
    stateinfo = { 'INITIAL' : 'inclusive'}
    error = 0
    files = { }
    lexobj = Lexer()
    lexobj.lexdebug = debug
    lexobj.lexoptimize = optimize
    global token,input

    if nowarn: warn = 0
    else: warn = 1

    if object: module = object

    if module:
        # User supplied a module object.
        if isinstance(module, types.ModuleType):
            ldict = module.__dict__
        elif isinstance(module, _INSTANCETYPE):
            _items = [(k,getattr(module,k)) for k in dir(module)]
            ldict = { }
            for (i,v) in _items:
                ldict[i] = v
        else:
            raise ValueError,"Expected a module or instance"
        lexobj.lexmodule = module
    else:
        # No module given.  We might be able to get information from the caller.
        # The raise/except dance is just a portable way to obtain the
        # caller's stack frame via sys.exc_info().
        try:
            raise RuntimeError
        except RuntimeError:
            e,b,t = sys.exc_info()
            f = t.tb_frame
            f = f.f_back                    # Walk out to our calling function
            ldict = f.f_globals             # Grab its globals dictionary

    if optimize and lextab:
        try:
            lexobj.readtab(lextab,ldict)
            token = lexobj.token
            input = lexobj.input
            lexer = lexobj
            return lexobj
        except ImportError:
            # No table file yet; fall through and build from scratch.
            pass

    # Get the tokens, states, and literals variables (if any)
    if (module and isinstance(module,_INSTANCETYPE)):
        tokens = getattr(module,"tokens",None)
        states = getattr(module,"states",None)
        literals = getattr(module,"literals","")
    else:
        tokens = ldict.get("tokens",None)
        states = ldict.get("states",None)
        literals = ldict.get("literals","")

    if not tokens:
        raise SyntaxError,"lex: module does not define 'tokens'"
    if not (isinstance(tokens,types.ListType) or isinstance(tokens,types.TupleType)):
        raise SyntaxError,"lex: tokens must be a list or tuple."

    # Build a dictionary of valid token names
    lexobj.lextokens = { }
    if not optimize:
        for n in tokens:
            if not _is_identifier.match(n):
                print >>sys.stderr, "lex: Bad token name '%s'" % n
                error = 1
            if warn and lexobj.lextokens.has_key(n):
                print >>sys.stderr, "lex: Warning. Token '%s' multiply defined." % n
            lexobj.lextokens[n] = None
    else:
        for n in tokens: lexobj.lextokens[n] = None

    if debug:
        print "lex: tokens = '%s'" % lexobj.lextokens.keys()

    # Validate literals: each must be a single-character string.
    try:
        for c in literals:
            if not (isinstance(c,types.StringType) or isinstance(c,types.UnicodeType)) or len(c) > 1:
                print >>sys.stderr, "lex: Invalid literal %s. Must be a single character" % repr(c)
                error = 1
                continue
    except TypeError:
        print >>sys.stderr, "lex: Invalid literals specification. literals must be a sequence of characters."
        error = 1

    lexobj.lexliterals = literals

    # Build statemap
    if states:
        if not (isinstance(states,types.TupleType) or isinstance(states,types.ListType)):
            print >>sys.stderr, "lex: states must be defined as a tuple or list."
            error = 1
        else:
            for s in states:
                if not isinstance(s,types.TupleType) or len(s) != 2:
                    print >>sys.stderr, "lex: invalid state specifier %s. Must be a tuple (statename,'exclusive|inclusive')" % repr(s)
                    error = 1
                    continue
                name, statetype = s
                if not isinstance(name,types.StringType):
                    print >>sys.stderr, "lex: state name %s must be a string" % repr(name)
                    error = 1
                    continue
                if not (statetype == 'inclusive' or statetype == 'exclusive'):
                    print >>sys.stderr, "lex: state type for state %s must be 'inclusive' or 'exclusive'" % name
                    error = 1
                    continue
                if stateinfo.has_key(name):
                    print >>sys.stderr, "lex: state '%s' already defined." % name
                    error = 1
                    continue
                stateinfo[name] = statetype

    # Get a list of symbols with the t_ or s_ prefix
    tsymbols = [f for f in ldict.keys() if f[:2] == 't_' ]

    # Now build up a list of functions and a list of strings
    funcsym = { }           # Symbols defined as functions
    strsym = { }            # Symbols defined as strings
    toknames = { }          # Mapping of symbols to token names

    for s in stateinfo.keys():
        funcsym[s] = []
        strsym[s] = []

    ignore = { }            # Ignore strings by state
    errorf = { }            # Error functions by state

    if len(tsymbols) == 0:
        raise SyntaxError,"lex: no rules of the form t_rulename are defined."

    # Partition the t_* symbols into function rules and string rules,
    # per state.
    for f in tsymbols:
        t = ldict[f]
        states, tokname = _statetoken(f,stateinfo)
        toknames[f] = tokname

        if callable(t):
            for s in states: funcsym[s].append((f,t))
        elif (isinstance(t, types.StringType) or isinstance(t,types.UnicodeType)):
            for s in states: strsym[s].append((f,t))
        else:
            print >>sys.stderr, "lex: %s not defined as a function or string" % f
            error = 1

    # Sort the functions by line number (so rules match in definition order)
    for f in funcsym.values():
        f.sort(lambda x,y: cmp(x[1].func_code.co_firstlineno,y[1].func_code.co_firstlineno))

    # Sort the strings by regular expression length (longest first)
    for s in strsym.values():
        s.sort(lambda x,y: (len(x[1]) < len(y[1])) - (len(x[1]) > len(y[1])))

    regexs = { }

    # Build the master regular expressions
    for state in stateinfo.keys():
        regex_list = []

        # Add rules defined by functions first
        for fname, f in funcsym[state]:
            line = f.func_code.co_firstlineno
            file = f.func_code.co_filename
            files[file] = None
            tokname = toknames[fname]

            ismethod = isinstance(f, types.MethodType)

            if not optimize:
                # Methods carry an implicit `self`, hence one extra argument
                nargs = f.func_code.co_argcount
                if ismethod:
                    reqargs = 2
                else:
                    reqargs = 1
                if nargs > reqargs:
                    print >>sys.stderr, "%s:%d: Rule '%s' has too many arguments." % (file,line,f.__name__)
                    error = 1
                    continue

                if nargs < reqargs:
                    print >>sys.stderr, "%s:%d: Rule '%s' requires an argument." % (file,line,f.__name__)
                    error = 1
                    continue

                if tokname == 'ignore':
                    print >>sys.stderr, "%s:%d: Rule '%s' must be defined as a string." % (file,line,f.__name__)
                    error = 1
                    continue

            if tokname == 'error':
                errorf[state] = f
                continue

            if f.__doc__:
                if not optimize:
                    try:
                        c = re.compile("(?P<%s>%s)" % (f.__name__,f.__doc__), re.VERBOSE | reflags)
                        if c.match(""):
                            print >>sys.stderr, "%s:%d: Regular expression for rule '%s' matches empty string." % (file,line,f.__name__)
                            error = 1
                            continue
                    except re.error,e:
                        print >>sys.stderr, "%s:%d: Invalid regular expression for rule '%s'. %s" % (file,line,f.__name__,e)
                        if '#' in f.__doc__:
                            print >>sys.stderr, "%s:%d. Make sure '#' in rule '%s' is escaped with '\\#'." % (file,line, f.__name__)
                        error = 1
                        continue

                    if debug:
                        print "lex: Adding rule %s -> '%s' (state '%s')" % (f.__name__,f.__doc__, state)

                # Okay. The regular expression seemed okay.  Let's append it to the master regular
                # expression we're building
                regex_list.append("(?P<%s>%s)" % (f.__name__,f.__doc__))
            else:
                print >>sys.stderr, "%s:%d: No regular expression defined for rule '%s'" % (file,line,f.__name__)

        # Now add all of the simple rules
        for name,r in strsym[state]:
            tokname = toknames[name]

            if tokname == 'ignore':
                if "\\" in r:
                    print >>sys.stderr, "lex: Warning. %s contains a literal backslash '\\'" % name
                ignore[state] = r
                continue

            if not optimize:
                if tokname == 'error':
                    raise SyntaxError,"lex: Rule '%s' must be defined as a function" % name
                    # NOTE(review): the next two lines are unreachable
                    # after the raise above -- kept as-is.
                    error = 1
                    continue

                if not lexobj.lextokens.has_key(tokname) and tokname.find("ignore_") < 0:
                    print >>sys.stderr, "lex: Rule '%s' defined for an unspecified token %s." % (name,tokname)
                    error = 1
                    continue

                try:
                    c = re.compile("(?P<%s>%s)" % (name,r),re.VERBOSE | reflags)
                    if (c.match("")):
                        print >>sys.stderr, "lex: Regular expression for rule '%s' matches empty string." % name
                        error = 1
                        continue
                except re.error,e:
                    print >>sys.stderr, "lex: Invalid regular expression for rule '%s'. %s" % (name,e)
                    if '#' in r:
                        print >>sys.stderr, "lex: Make sure '#' in rule '%s' is escaped with '\\#'." % name
                    error = 1
                    continue

                if debug:
                    print "lex: Adding rule %s -> '%s' (state '%s')" % (name,r,state)

            regex_list.append("(?P<%s>%s)" % (name,r))

        if not regex_list:
            print >>sys.stderr, "lex: No rules defined for state '%s'" % state
            error = 1

        regexs[state] = regex_list

    if not optimize:
        # Check the source files for duplicated rule definitions
        for f in files.keys():
            if not _validate_file(f):
                error = 1

    if error:
        raise SyntaxError,"lex: Unable to build lexer."

    # From this point forward, we're reasonably confident that we can build the lexer.
    # No more errors will be generated, but there might be some warning messages.

    # Build the master regular expressions
    for state in regexs.keys():
        lexre, re_text = _form_master_re(regexs[state],reflags,ldict,toknames)
        lexobj.lexstatere[state] = lexre
        lexobj.lexstateretext[state] = re_text
        if debug:
            for i in range(len(re_text)):
                print "lex: state '%s'. regex[%d] = '%s'" % (state, i, re_text[i])

    # For inclusive states, we need to add the INITIAL state
    for state,type in stateinfo.items():
        if state != "INITIAL" and type == 'inclusive':
            lexobj.lexstatere[state].extend(lexobj.lexstatere['INITIAL'])
            lexobj.lexstateretext[state].extend(lexobj.lexstateretext['INITIAL'])

    lexobj.lexstateinfo = stateinfo
    lexobj.lexre = lexobj.lexstatere["INITIAL"]
    lexobj.lexretext = lexobj.lexstateretext["INITIAL"]

    # Set up ignore variables
    lexobj.lexstateignore = ignore
    lexobj.lexignore = lexobj.lexstateignore.get("INITIAL","")

    # Set up error functions
    lexobj.lexstateerrorf = errorf
    lexobj.lexerrorf = errorf.get("INITIAL",None)
    if warn and not lexobj.lexerrorf:
        print >>sys.stderr, "lex: Warning. no t_error rule is defined."

    # Check state information for ignore and error rules
    for s,stype in stateinfo.items():
        if stype == 'exclusive':
            if warn and not errorf.has_key(s):
                print >>sys.stderr, "lex: Warning. no error rule is defined for exclusive state '%s'" % s
            if warn and not ignore.has_key(s) and lexobj.lexignore:
                print >>sys.stderr, "lex: Warning. no ignore rule is defined for exclusive state '%s'" % s
        elif stype == 'inclusive':
            # Inclusive states inherit the INITIAL error/ignore rules
            if not errorf.has_key(s):
                errorf[s] = errorf.get("INITIAL",None)
            if not ignore.has_key(s):
                ignore[s] = ignore.get("INITIAL","")

    # Create global versions of the token() and input() functions
    token = lexobj.token
    input = lexobj.input
    lexer = lexobj

    # If in optimize mode, we write the lextab
    if lextab and optimize:
        lexobj.writetab(lextab)

    return lexobj
# -----------------------------------------------------------------------------
# runmain()
#
# This runs the lexer as a main program
# -----------------------------------------------------------------------------
def runmain(lexer=None,data=None):
if not data:
try:
filename = sys.argv[1]
f = open(filename)
data = f.read()
f.close()
except IndexError:
print "Reading from standard input (type EOF to end):"
data = sys.stdin.read()
if lexer:
_input = lexer.input
else:
_input = input
_input(data)
if lexer:
_token = lexer.token
else:
_token = token
while 1:
tok = _token()
if not tok: break
print "(%s,%r,%d,%d)" % (tok.type, tok.value, tok.lineno,tok.lexpos)
# -----------------------------------------------------------------------------
# @TOKEN(regex)
#
# This decorator function can be used to set the regex expression on a function
# when its docstring might need to be set in an alternative way
# -----------------------------------------------------------------------------
def TOKEN(r):
    """Decorator that attaches regex *r* as the docstring of a rule
    function, for rules whose docstring must be computed rather than
    written literally."""
    def _attach(f):
        f.__doc__ = r
        return f
    return _attach

# Alternative spelling of the TOKEN decorator
Token = TOKEN
| Python |
# PLY package
# Author: David Beazley (dave@dabeaz.com)

# Public submodules exported by `from ply import *`.
__all__ = ['lex','yacc']
| Python |
#-----------------------------------------------------------------------------
# ply: yacc.py
#
# Author(s): David M. Beazley (dave@dabeaz.com)
#
# Copyright (C) 2001-2007, David M. Beazley
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# See the file COPYING for a complete copy of the LGPL.
#
#
# This implements an LR parser that is constructed from grammar rules defined
# as Python functions. The grammar is specified by supplying the BNF inside
# Python documentation strings. The inspiration for this technique was borrowed
# from John Aycock's Spark parsing system.  PLY might be viewed as a cross between
# Spark and the GNU bison utility.
#
# The current implementation is only somewhat object-oriented. The
# LR parser itself is defined in terms of an object (which allows multiple
# parsers to co-exist). However, most of the variables used during table
# construction are defined in terms of global variables. Users shouldn't
# notice unless they are trying to define multiple parsers at the same
# time using threads (in which case they should have their head examined).
#
# This implementation supports both SLR and LALR(1) parsing. LALR(1)
# support was originally implemented by Elias Ioup (ezioup@alumni.uchicago.edu),
# using the algorithm found in Aho, Sethi, and Ullman "Compilers: Principles,
# Techniques, and Tools" (The Dragon Book). LALR(1) has since been replaced
# by the more efficient DeRemer and Pennello algorithm.
#
# :::::::: WARNING :::::::
#
# Construction of LR parsing tables is fairly complicated and expensive.
# To make this module run fast, a *LOT* of work has been put into
# optimization---often at the expense of readability and what one might
# consider to be good Python "coding style."   Modify the code at your
# own risk!
# ----------------------------------------------------------------------------
__version__ = "2.3"

#-----------------------------------------------------------------------------
# === User configurable parameters ===
#
# Change these to modify the default behavior of yacc (if you wish)
#-----------------------------------------------------------------------------

yaccdebug = 1                # Debugging mode.  If set, yacc generates a
                             # 'parser.out' file in the current directory

debug_file = 'parser.out'    # Default name of the debugging file
tab_module = 'parsetab'      # Default name of the table module
default_lr = 'LALR'          # Default LR table generation method

error_count = 3              # Number of symbols that must be shifted to leave recovery mode

import re, types, sys, cStringIO, md5, os.path
# Exception raised for yacc-related errors
class YaccError(Exception):
    """Base class for all yacc-related errors."""
    pass
# Available instance types. This is used when parsers are defined by a class.
# It's a little funky because I want to preserve backwards compatibility
# with Python 2.0 where types.ObjectType is undefined.

try:
    _INSTANCETYPE = (types.InstanceType, types.ObjectType)
except AttributeError:
    _INSTANCETYPE = types.InstanceType
    # Fallback for interpreters without new-style classes; this
    # deliberately shadows the builtin name `object` in this module.
    class object: pass       # Note: needed if no new-style classes present
#-----------------------------------------------------------------------------
# === LR Parsing Engine ===
#
# The following classes are used for the LR parser itself. These are not
# used during table construction and are independent of the actual LR
# table generation algorithm
#-----------------------------------------------------------------------------
# This class is used to hold non-terminal grammar symbols during parsing.
# It normally has the following attributes set:
# .type = Grammar symbol type
# .value = Symbol value
# .lineno = Starting line number
# .endlineno = Ending line number (optional, set automatically)
# .lexpos = Starting lex position
# .endlexpos = Ending lex position (optional, set automatically)
class YaccSymbol(object):
    """Lightweight container for a grammar symbol on the parsing stack.

    Attributes (.type, .value, .lineno, .endlineno, .lexpos, .endlexpos)
    are assigned dynamically by the parser; see the comment block above.
    """
    def __str__(self): return self.type
    def __repr__(self): return str(self)
# This class is a wrapper around the objects actually passed to each
# grammar rule. Index lookup and assignment actually assign the
# .value attribute of the underlying YaccSymbol object.
# The lineno() method returns the line number of a given
# item (or 0 if not defined). The linespan() method returns
# a tuple of (startline,endline) representing the range of lines
# for a symbol. The lexspan() method returns a tuple (lexpos,endlexpos)
# representing the range of positional information for a symbol.
class YaccProduction:
    """Wrapper around the symbols matched by a grammar rule.

    Index lookup (p[n]) and assignment (p[n] = v) operate on the .value
    attribute of the underlying YaccSymbol objects in self.slice.
    Negative indices reach into the parser's full symbol stack.
    """
    def __init__(self,s,stack=None):
        self.slice = s          # YaccSymbols matched by the current rule
        self.pbstack = []       # Symbols pushed back via pushback()
        self.stack = stack      # Reference to the parser's symbol stack
    def __getitem__(self,n):
        if n >= 0: return self.slice[n].value
        else: return self.stack[n].value
    def __setitem__(self,n,v):
        self.slice[n].value = v
    def __getslice__(self,i,j):
        return [s.value for s in self.slice[i:j]]
    def __len__(self):
        return len(self.slice)
    def lineno(self,n):
        # Line number of item n, or 0 if tracking info was never recorded
        return getattr(self.slice[n],"lineno",0)
    def linespan(self,n):
        # (startline, endline) range covered by item n
        startline = getattr(self.slice[n],"lineno",0)
        endline = getattr(self.slice[n],"endlineno",startline)
        return startline,endline
    def lexpos(self,n):
        # Lexer position of item n, or 0 if tracking info was never recorded
        return getattr(self.slice[n],"lexpos",0)
    def lexspan(self,n):
        # (lexpos, endlexpos) range covered by item n
        startpos = getattr(self.slice[n],"lexpos",0)
        endpos = getattr(self.slice[n],"endlexpos",startpos)
        return startpos,endpos
    def pushback(self,n):
        # Push the last n matched symbols back onto the input stream.
        # The parser drains pbstack into its lookahead stack after the
        # rule's action function returns.
        if n <= 0:
            raise ValueError, "Expected a positive value"
        if n > (len(self.slice)-1):
            raise ValueError, "Can't push %d tokens. Only %d are available." % (n,len(self.slice)-1)
        for i in range(0,n):
            self.pbstack.append(self.slice[-i-1])
# The LR Parsing engine. This is defined as a class so that multiple parsers
# can exist in the same process. A user never instantiates this directly.
# Instead, the global yacc() function should be used to create a suitable Parser
# object.
class Parser:
    """The LR parsing engine.

    Users never instantiate this directly; the global yacc() function
    builds one, installs the action/goto tables and productions, and
    returns it.
    """
    def __init__(self,magic=None):
        # This is a hack to keep users from trying to instantiate a Parser
        # object directly.
        if magic != "xyzzy":
            raise YaccError, "Can't instantiate Parser. Use yacc() instead."
        # Reset internal state
        self.productions = None # List of productions
        self.errorfunc = None # Error handling function
        self.action = { } # LR Action table
        self.goto = { } # LR goto table
        self.require = { } # Attribute require table
        self.method = "Unknown LR" # Table construction method used
    def errok(self):
        """Signal that error recovery is complete (callable from p_error)."""
        self.errorok = 1
    def restart(self):
        """Throw away the entire parse so far and return to the start state."""
        del self.statestack[:]
        del self.symstack[:]
        sym = YaccSymbol()
        sym.type = '$end'
        self.symstack.append(sym)
        self.statestack.append(0)
    def parse(self,input=None,lexer=None,debug=0,tracking=0):
        """Run the LR parse loop and return the value of the start symbol.

        input    - optional string handed to lexer.input()
        lexer    - token source; defaults to the global lex.lexer
        debug    - >0 prints trace information (>1 is more verbose)
        tracking - if true, propagate line/position info to nonterminals

        Returns the .value of the start symbol, or None on a parse error.
        """
        lookahead = None # Current lookahead symbol
        lookaheadstack = [ ] # Stack of lookahead symbols
        actions = self.action # Local reference to action table
        goto = self.goto # Local reference to goto table
        prod = self.productions # Local reference to production list
        pslice = YaccProduction(None) # Production object passed to grammar rules
        errorcount = 0 # Used during error recovery
        # If no lexer was given, we will try to use the lex module
        if not lexer:
            import lex
            lexer = lex.lexer
        pslice.lexer = lexer
        pslice.parser = self
        # If input was supplied, pass to lexer
        if input:
            lexer.input(input)
        # Tokenize function
        get_token = lexer.token
        statestack = [ ] # Stack of parsing states
        self.statestack = statestack
        symstack = [ ] # Stack of grammar symbols
        self.symstack = symstack
        pslice.stack = symstack # Put in the production
        errtoken = None # Err token
        # The start state is assumed to be (0,$end)
        statestack.append(0)
        sym = YaccSymbol()
        sym.type = '$end'
        symstack.append(sym)
        state = 0
        while 1:
            # Get the next symbol on the input. If a lookahead symbol
            # is already set, we just use that. Otherwise, we'll pull
            # the next token off of the lookaheadstack or from the lexer
            if debug > 1:
                print 'state', state
            if not lookahead:
                if not lookaheadstack:
                    lookahead = get_token() # Get the next token
                else:
                    lookahead = lookaheadstack.pop()
                if not lookahead:
                    lookahead = YaccSymbol()
                    lookahead.type = '$end'
            if debug:
                errorlead = ("%s . %s" % (" ".join([xx.type for xx in symstack][1:]), str(lookahead))).lstrip()
            # Check the action table
            ltype = lookahead.type
            t = actions[state].get(ltype)
            if debug > 1:
                print 'action', t
            if t is not None:
                if t > 0:
                    # shift a symbol on the stack (positive entries are states)
                    if ltype == '$end':
                        # Error, end of input
                        sys.stderr.write("yacc: Parse error. EOF\n")
                        return
                    statestack.append(t)
                    state = t
                    if debug > 1:
                        sys.stderr.write("%-60s shift state %s\n" % (errorlead, t))
                    symstack.append(lookahead)
                    lookahead = None
                    # Decrease error count on successful shift
                    if errorcount: errorcount -=1
                    continue
                if t < 0:
                    # reduce a symbol on the stack, emit a production
                    # (negative entries are production numbers)
                    p = prod[-t]
                    pname = p.name
                    plen = p.len
                    # Get production function
                    sym = YaccSymbol()
                    sym.type = pname # Production name
                    sym.value = None
                    if debug > 1:
                        sys.stderr.write("%-60s reduce %d\n" % (errorlead, -t))
                    if plen:
                        targ = symstack[-plen-1:]
                        targ[0] = sym
                        if tracking:
                            # Span of the new nonterminal covers its first
                            # and last right-hand-side symbols
                            t1 = targ[1]
                            sym.lineno = t1.lineno
                            sym.lexpos = t1.lexpos
                            t1 = targ[-1]
                            sym.endlineno = getattr(t1,"endlineno",t1.lineno)
                            sym.endlexpos = getattr(t1,"endlexpos",t1.lexpos)
                        del symstack[-plen:]
                        del statestack[-plen:]
                    else:
                        # Empty production: position info comes from the lexer
                        if tracking:
                            sym.lineno = lexer.lineno
                            sym.lexpos = lexer.lexpos
                        targ = [ sym ]
                    pslice.slice = targ
                    # Call the grammar rule with our special slice object
                    p.func(pslice)
                    # If there was a pushback, put that on the stack
                    if pslice.pbstack:
                        lookaheadstack.append(lookahead)
                        for _t in pslice.pbstack:
                            lookaheadstack.append(_t)
                        lookahead = None
                        pslice.pbstack = []
                    symstack.append(sym)
                    state = goto[statestack[-1]][pname]
                    statestack.append(state)
                    continue
                if t == 0:
                    # Accept: the start symbol is on top of the stack
                    n = symstack[-1]
                    return getattr(n,"value",None)
            if t == None:
                if debug:
                    sys.stderr.write(errorlead + "\n")
                # We have some kind of parsing error here. To handle
                # this, we are going to push the current token onto
                # the tokenstack and replace it with an 'error' token.
                # If there are any synchronization rules, they may
                # catch it.
                #
                # In addition to pushing the error token, we call call
                # the user defined p_error() function if this is the
                # first syntax error. This function is only called if
                # errorcount == 0.
                if errorcount == 0 or self.errorok:
                    errorcount = error_count
                    self.errorok = 0
                    errtoken = lookahead
                    if errtoken.type == '$end':
                        errtoken = None # End of file!
                    if self.errorfunc:
                        global errok,token,restart
                        errok = self.errok # Set some special functions available in error recovery
                        token = get_token
                        restart = self.restart
                        tok = self.errorfunc(errtoken)
                        del errok, token, restart # Delete special functions
                        if self.errorok:
                            # User must have done some kind of panic
                            # mode recovery on their own. The
                            # returned token is the next lookahead
                            lookahead = tok
                            errtoken = None
                            continue
                    else:
                        if errtoken:
                            if hasattr(errtoken,"lineno"): lineno = lookahead.lineno
                            else: lineno = 0
                            if lineno:
                                sys.stderr.write("yacc: Syntax error at line %d, token=%s\n" % (lineno, errtoken.type))
                            else:
                                sys.stderr.write("yacc: Syntax error, token=%s" % errtoken.type)
                        else:
                            sys.stderr.write("yacc: Parse error in input. EOF\n")
                        return
                else:
                    errorcount = error_count
                # case 1: the statestack only has 1 entry on it. If we're in this state, the
                # entire parse has been rolled back and we're completely hosed. The token is
                # discarded and we just keep going.
                if len(statestack) <= 1 and lookahead.type != '$end':
                    lookahead = None
                    errtoken = None
                    # Nuke the pushback stack
                    del lookaheadstack[:]
                    continue
                # case 2: the statestack has a couple of entries on it, but we're
                # at the end of the file. nuke the top entry and generate an error token
                # Start nuking entries on the stack
                if lookahead.type == '$end':
                    # Whoa. We're really hosed here. Bail out
                    return
                if lookahead.type != 'error':
                    sym = symstack[-1]
                    if sym.type == 'error':
                        # Hmmm. Error is on top of stack, we'll just nuke input
                        # symbol and continue
                        lookahead = None
                        continue
                    t = YaccSymbol()
                    t.type = 'error'
                    if hasattr(lookahead,"lineno"):
                        t.lineno = lookahead.lineno
                    t.value = lookahead
                    lookaheadstack.append(lookahead)
                    lookahead = t
                else:
                    symstack.pop()
                    statestack.pop()
                continue
            # Call an error function here
            raise RuntimeError, "yacc: internal parser error!!!\n"
# -----------------------------------------------------------------------------
# === Parser Construction ===
#
# The following functions and variables are used to implement the yacc() function
# itself. This is pretty hairy stuff involving lots of error checking,
# construction of LR items, kernels, and so forth. Although a lot of
# this work is done using global variables, the resulting Parser object
# is completely self contained--meaning that it is safe to repeatedly
# call yacc() with different grammars in the same application.
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# validate_file()
#
# This function checks to see if there are duplicated p_rulename() functions
# in the parser module file. Without this function, it is really easy for
# users to make mistakes by cutting and pasting code fragments (and it's a real
# bugger to try and figure out why the resulting parser doesn't work). Therefore,
# we just do a little regular expression pattern matching of def statements
# to try and detect duplicates.
# -----------------------------------------------------------------------------
def validate_file(filename):
    """Check *filename* for duplicated p_rulename() definitions.

    Returns 1 if the file looks okay (or cannot be inspected), 0 if at
    least one p_ function is defined more than once.  Duplicates are
    reported on stderr with both line numbers.
    """
    ext = os.path.splitext(filename)[1]
    if ext != '.py':
        return 1                  # No idea what this is. Assume it's okay.
    try:
        f = open(filename)
        lines = f.readlines()
        f.close()
    except IOError:
        return 1                  # Can't read it. Oh well.
    # Match "def p_funcname(" at the start of a line
    fre = re.compile(r'\s*def\s+(p_[a-zA-Z_0-9]*)\(')
    firstseen = { }               # rule name -> line where first defined
    noerror = 1
    for idx, l in enumerate(lines):
        m = fre.match(l)
        if not m:
            continue
        name = m.group(1)
        prev = firstseen.get(name)
        if not prev:
            firstseen[name] = idx + 1
        else:
            sys.stderr.write("%s:%d: Function %s redefined. Previously defined on line %d\n" % (filename,idx + 1,name,prev))
            noerror = 0
    return noerror
# This function looks for functions that might be grammar rules, but which don't have the proper p_suffix.
def validate_dict(d):
for n,v in d.items():
if n[0:2] == 'p_' and type(v) in (types.FunctionType, types.MethodType): continue
if n[0:2] == 't_': continue
if n[0:2] == 'p_':
sys.stderr.write("yacc: Warning. '%s' not defined as a function\n" % n)
if 1 and isinstance(v,types.FunctionType) and v.func_code.co_argcount == 1:
try:
doc = v.__doc__.split(" ")
if doc[1] == ':':
sys.stderr.write("%s:%d: Warning. Possible grammar rule '%s' defined without p_ prefix.\n" % (v.func_code.co_filename, v.func_code.co_firstlineno,n))
except StandardError:
pass
# -----------------------------------------------------------------------------
# === GRAMMAR FUNCTIONS ===
#
# The following global variables and functions are used to store, manipulate,
# and verify the grammar rules specified by the user.
# -----------------------------------------------------------------------------
# Initialize all of the global variables used during grammar construction
# Initialize all of the global variables used during grammar construction
def initialize_vars():
    """Reset all module-level grammar-construction state.

    Called before processing a new grammar so that repeated yacc() calls
    in one process don't see stale data.
    """
    global Productions, Prodnames, Prodmap, Terminals
    global Nonterminals, First, Follow, Precedence, LRitems
    global Errorfunc, Signature, Requires
    Productions = [None] # A list of all of the productions. The first
                         # entry is always reserved for the purpose of
                         # building an augmented grammar
    Prodnames = { } # A dictionary mapping the names of nonterminals to a list of all
                    # productions of that nonterminal.
    Prodmap = { } # A dictionary that is only used to detect duplicate
                  # productions.
    Terminals = { } # A dictionary mapping the names of terminal symbols to a
                    # list of the rules where they are used.
    Nonterminals = { } # A dictionary mapping names of nonterminals to a list
                       # of rule numbers where they are used.
    First = { } # A dictionary of precomputed FIRST(x) symbols
    Follow = { } # A dictionary of precomputed FOLLOW(x) symbols
    Precedence = { } # Precedence rules for each terminal. Contains tuples of the
                     # form ('right',level) or ('nonassoc', level) or ('left',level)
    LRitems = [ ] # A list of all LR items for the grammar. These are the
                  # productions with the "dot" like E -> E . PLUS E
    Errorfunc = None # User defined error handler
    Signature = md5.new() # Digital signature of the grammar rules, precedence
                          # and other information. Used to determined when a
                          # parsing table needs to be regenerated.
    Requires = { } # Requires list
    # File objects used when creating the parser.out debugging file
    global _vf, _vfc
    _vf = cStringIO.StringIO()
    _vfc = cStringIO.StringIO()
# -----------------------------------------------------------------------------
# class Production:
#
# This class stores the raw information about a single production or grammar rule.
# It has a few required attributes:
#
# name - Name of the production (nonterminal)
# prod - A list of symbols making up its production
# number - Production number.
#
# In addition, a few additional attributes are used to help with debugging or
# optimization of table generation.
#
# file - File where production action is defined.
# lineno - Line number where action is defined
# func - Action function
# prec - Precedence level
# lr_next - Next LR item. Example, if we are ' E -> E . PLUS E'
# then lr_next refers to 'E -> E PLUS . E'
# lr_index - LR item index (location of the ".") in the prod list.
# lookaheads - LALR lookahead symbols for this item
# len - Length of the production (number of symbols on right hand side)
# -----------------------------------------------------------------------------
class Production:
    """Raw information about a single grammar rule (see the block comment
    above for the full list of attributes).  Attributes are supplied as
    keyword arguments and attached directly."""
    def __init__(self,**kw):
        for k,v in kw.items():
            setattr(self,k,v)
        self.lr_index = -1          # Position of the "dot"; -1 = not an LR item
        self.lr0_added = 0 # Flag indicating whether or not added to LR0 closure
        self.lr1_added = 0 # Flag indicating whether or not added to LR1
        self.usyms = [ ]            # Unique symbols appearing in the production
        self.lookaheads = { }       # LALR lookahead symbols, keyed by state
        self.lk_added = { }
        self.setnumbers = [ ]
    def __str__(self):
        if self.prod:
            s = "%s -> %s" % (self.name," ".join(self.prod))
        else:
            s = "%s -> <empty>" % self.name
        return s
    def __repr__(self):
        return str(self)
    # Compute lr_items from the production
    def lr_item(self,n):
        """Return a new Production representing this rule with the dot at
        position n, or None if n is past the end of the right-hand side."""
        if n > len(self.prod): return None
        p = Production()
        p.name = self.name
        p.prod = list(self.prod)
        p.number = self.number
        p.lr_index = n
        p.lookaheads = { }
        p.setnumbers = self.setnumbers
        p.prod.insert(n,".")
        p.prod = tuple(p.prod)
        p.len = len(p.prod)
        p.usyms = self.usyms
        # Precompute list of productions immediately following
        try:
            p.lrafter = Prodnames[p.prod[n+1]]
        except (IndexError,KeyError),e:
            # Dot at the end, or the next symbol is a terminal
            p.lrafter = []
        try:
            p.lrbefore = p.prod[n-1]    # Symbol immediately before the dot
        except IndexError:
            p.lrbefore = None
        return p
class MiniProduction:
    # Empty attribute-bag stand-in for Production; attributes are attached
    # dynamically elsewhere (presumably when tables are loaded from a saved
    # parsetab module -- not visible in this file chunk, confirm).
    pass
# Regex matching legal grammar symbol names (note: '-' is also accepted)
_is_identifier = re.compile(r'^[a-zA-Z0-9_-]+$')
# -----------------------------------------------------------------------------
# add_production()
#
# Given an action function, this function assembles a production rule.
# The production rule is assumed to be found in the function's docstring.
# This rule has the general syntax:
#
# name1 ::= production1
# | production2
# | production3
# ...
# | productionn
# name2 ::= production1
# | production2
# ...
# -----------------------------------------------------------------------------
def add_production(f,file,line,prodname,syms):
    """Add one production (prodname -> syms) to the global grammar tables.

    f        - the rule's action function
    file     - filename where the rule is defined (for error messages)
    line     - line number of the rule inside the docstring
    prodname - left-hand-side nonterminal name
    syms     - list of right-hand-side symbol names (mutated in place)

    Returns 0 on success, -1 on error (after writing to stderr).
    Updates Productions, Prodmap, Prodnames, Terminals, Nonterminals.
    """
    if Terminals.has_key(prodname):
        sys.stderr.write("%s:%d: Illegal rule name '%s'. Already defined as a token.\n" % (file,line,prodname))
        return -1
    if prodname == 'error':
        sys.stderr.write("%s:%d: Illegal rule name '%s'. error is a reserved word.\n" % (file,line,prodname))
        return -1
    if not _is_identifier.match(prodname):
        sys.stderr.write("%s:%d: Illegal rule name '%s'\n" % (file,line,prodname))
        return -1
    for x in range(len(syms)):
        s = syms[x]
        if s[0] in "'\"":
            # Quoted single-character literal tokens, e.g. '+'
            try:
                c = eval(s)
                if (len(c) > 1):
                    sys.stderr.write("%s:%d: Literal token %s in rule '%s' may only be a single character\n" % (file,line,s, prodname))
                    return -1
                if not Terminals.has_key(c):
                    Terminals[c] = []
                syms[x] = c
                continue
            except SyntaxError:
                pass
        if not _is_identifier.match(s) and s != '%prec':
            sys.stderr.write("%s:%d: Illegal name '%s' in rule '%s'\n" % (file,line,s, prodname))
            return -1
    # See if the rule is already in the rulemap
    # (NOTE: 'map' shadows the builtin here)
    map = "%s -> %s" % (prodname,syms)
    if Prodmap.has_key(map):
        m = Prodmap[map]
        sys.stderr.write("%s:%d: Duplicate rule %s.\n" % (file,line, m))
        sys.stderr.write("%s:%d: Previous definition at %s:%d\n" % (file,line, m.file, m.line))
        return -1
    p = Production()
    p.name = prodname
    p.prod = syms
    p.file = file
    p.line = line
    p.func = f
    p.number = len(Productions)
    Productions.append(p)
    Prodmap[map] = p
    if not Nonterminals.has_key(prodname):
        Nonterminals[prodname] = [ ]
    # Add all terminals to Terminals
    i = 0
    while i < len(p.prod):
        t = p.prod[i]
        if t == '%prec':
            try:
                precname = p.prod[i+1]
            except IndexError:
                sys.stderr.write("%s:%d: Syntax error. Nothing follows %%prec.\n" % (p.file,p.line))
                return -1
            prec = Precedence.get(precname,None)
            if not prec:
                sys.stderr.write("%s:%d: Nothing known about the precedence of '%s'\n" % (p.file,p.line,precname))
                return -1
            else:
                p.prec = prec
            # Remove both '%prec' and its argument from the symbol list
            del p.prod[i]
            del p.prod[i]
            continue
        if Terminals.has_key(t):
            Terminals[t].append(p.number)
            # Is a terminal. We'll assign a precedence to p based on this
            if not hasattr(p,"prec"):
                p.prec = Precedence.get(t,('right',0))
        else:
            if not Nonterminals.has_key(t):
                Nonterminals[t] = [ ]
            Nonterminals[t].append(p.number)
        i += 1
    if not hasattr(p,"prec"):
        p.prec = ('right',0)
    # Set final length of productions
    p.len = len(p.prod)
    p.prod = tuple(p.prod)
    # Calculate unique syms in the production
    p.usyms = [ ]
    for s in p.prod:
        if s not in p.usyms:
            p.usyms.append(s)
    # Add to the global productions list
    try:
        Prodnames[p.name].append(p)
    except KeyError:
        Prodnames[p.name] = [ p ]
    return 0
# Given a raw rule function, this function rips out its doc string
# and adds rules to the grammar
def add_function(f):
    """Extract grammar rules from the docstring of rule function f.

    Each docstring line of the form "name : sym1 sym2 ..." (or "::="),
    or a "| ..." continuation, is handed to add_production().  Returns
    0 on success or a negative error count.
    """
    line = f.func_code.co_firstlineno
    file = f.func_code.co_filename
    error = 0
    # Bound methods take (self, p); plain functions take (p)
    if isinstance(f,types.MethodType):
        reqdargs = 2
    else:
        reqdargs = 1
    if f.func_code.co_argcount > reqdargs:
        sys.stderr.write("%s:%d: Rule '%s' has too many arguments.\n" % (file,line,f.__name__))
        return -1
    if f.func_code.co_argcount < reqdargs:
        sys.stderr.write("%s:%d: Rule '%s' requires an argument.\n" % (file,line,f.__name__))
        return -1
    if f.__doc__:
        # Split the doc string into lines
        pstrings = f.__doc__.splitlines()
        lastp = None
        dline = line
        for ps in pstrings:
            dline += 1
            p = ps.split()
            if not p: continue
            ##########################################
            #hacked by taofei
            #using @ add meta more meta info
            # NOTE(review): local modification -- currently a no-op; '@'
            # lines still fall through to the normal parsing below.
            if p[0] == '@':
                pass
            ##########################################
            try:
                if p[0] == '|':
                    # This is a continuation of a previous rule
                    if not lastp:
                        sys.stderr.write("%s:%d: Misplaced '|'.\n" % (file,dline))
                        return -1
                    prodname = lastp
                    if len(p) > 1:
                        syms = p[1:]
                    else:
                        syms = [ ]
                else:
                    prodname = p[0]
                    lastp = prodname
                    # p[1] may raise IndexError; caught by the handler below
                    assign = p[1]
                    if len(p) > 2:
                        syms = p[2:]
                    else:
                        syms = [ ]
                    if assign != ':' and assign != '::=':
                        sys.stderr.write("%s:%d: Syntax error. Expected ':'\n" % (file,dline))
                        return -1
                e = add_production(f,file,dline,prodname,syms)
                error += e
            except StandardError:
                sys.stderr.write("%s:%d: Syntax error in rule '%s'\n" % (file,dline,ps))
                error -= 1
    else:
        sys.stderr.write("%s:%d: No documentation string specified in function '%s'\n" % (file,line,f.__name__))
    return error
# Cycle checking code (Michael Dyck)
def compute_reachable():
    '''
    Find each symbol that can be reached from the start symbol.
    Print a warning for any nonterminals that can't be reached.
    (Unused terminals have already had their warning.)
    '''
    Reachable = { }
    # Python 2: dict.keys() returns lists, so '+' concatenates them
    for s in Terminals.keys() + Nonterminals.keys():
        Reachable[s] = 0
    # Productions[0] is the augmented start rule S' -> start
    mark_reachable_from( Productions[0].prod[0], Reachable )
    for s in Nonterminals.keys():
        if not Reachable[s]:
            sys.stderr.write("yacc: Symbol '%s' is unreachable.\n" % s)
def mark_reachable_from(s, Reachable):
    '''
    Mark all symbols that are reachable from symbol s.
    '''
    if not Reachable[s]:
        Reachable[s] = 1
        # Recurse through every production of s; terminals have no
        # entry in Prodnames, so the default empty list stops descent.
        for production in Prodnames.get(s, []):
            for rhs_sym in production.prod:
                mark_reachable_from(rhs_sym, Reachable)
# -----------------------------------------------------------------------------
# compute_terminates()
#
# This function looks at the various parsing rules and tries to detect
# infinite recursion cycles (grammar rules where there is no possible way
# to derive a string of only terminals).
# -----------------------------------------------------------------------------
def compute_terminates():
    '''
    Raise an error for any symbols that don't terminate.
    Returns 1 if any infinite recursion was reported, 0 otherwise.
    '''
    Terminates = {}
    # Terminals:
    for t in Terminals.keys():
        Terminates[t] = 1
    Terminates['$end'] = 1
    # Nonterminals:
    # Initialize to false:
    for n in Nonterminals.keys():
        Terminates[n] = 0
    # Then propagate termination until no change (fixed-point iteration):
    while 1:
        some_change = 0
        for (n,pl) in Prodnames.items():
            # Nonterminal n terminates iff any of its productions terminates.
            for p in pl:
                # Production p terminates iff all of its rhs symbols terminate.
                for s in p.prod:
                    if not Terminates[s]:
                        # The symbol s does not terminate,
                        # so production p does not terminate.
                        p_terminates = 0
                        break
                else:
                    # didn't break from the loop,
                    # so every symbol s terminates
                    # so production p terminates.
                    p_terminates = 1
                if p_terminates:
                    # symbol n terminates!
                    if not Terminates[n]:
                        Terminates[n] = 1
                        some_change = 1
                    # Don't need to consider any more productions for this n.
                    break
        if not some_change:
            break
    some_error = 0
    for (s,terminates) in Terminates.items():
        if not terminates:
            if not Prodnames.has_key(s) and not Terminals.has_key(s) and s != 'error':
                # s is used-but-not-defined, and we've already warned of that,
                # so it would be overkill to say that it's also non-terminating.
                pass
            else:
                sys.stderr.write("yacc: Infinite recursion detected for symbol '%s'.\n" % s)
                some_error = 1
    return some_error
# -----------------------------------------------------------------------------
# verify_productions()
#
# This function examines all of the supplied rules to see if they seem valid.
# -----------------------------------------------------------------------------
def verify_productions(cycle_check=1):
    """Sanity-check the assembled grammar.

    Reports undefined symbols, unused tokens and rules (stderr), dumps
    the grammar to the debug buffer _vf, and optionally runs the
    reachability/termination checks.  Returns an error count.
    """
    error = 0
    for p in Productions:
        if not p: continue          # Skip the reserved Productions[0] slot
        for s in p.prod:
            if not Prodnames.has_key(s) and not Terminals.has_key(s) and s != 'error':
                sys.stderr.write("%s:%d: Symbol '%s' used, but not defined as a token or a rule.\n" % (p.file,p.line,s))
                error = 1
                continue
    unused_tok = 0
    # Now verify all of the tokens
    if yaccdebug:
        _vf.write("Unused terminals:\n\n")
    for s,v in Terminals.items():
        if s != 'error' and not v:
            sys.stderr.write("yacc: Warning. Token '%s' defined, but not used.\n" % s)
            if yaccdebug: _vf.write(" %s\n"% s)
            unused_tok += 1
    # Print out all of the productions
    if yaccdebug:
        _vf.write("\nGrammar\n\n")
        for i in range(1,len(Productions)):
            _vf.write("Rule %-5d %s\n" % (i, Productions[i]))
    unused_prod = 0
    # Verify the use of all productions
    for s,v in Nonterminals.items():
        if not v:
            p = Prodnames[s][0]
            sys.stderr.write("%s:%d: Warning. Rule '%s' defined, but not used.\n" % (p.file,p.line, s))
            unused_prod += 1
    if unused_tok == 1:
        sys.stderr.write("yacc: Warning. There is 1 unused token.\n")
    if unused_tok > 1:
        sys.stderr.write("yacc: Warning. There are %d unused tokens.\n" % unused_tok)
    if unused_prod == 1:
        sys.stderr.write("yacc: Warning. There is 1 unused rule.\n")
    if unused_prod > 1:
        sys.stderr.write("yacc: Warning. There are %d unused rules.\n" % unused_prod)
    if yaccdebug:
        _vf.write("\nTerminals, with rules where they appear\n\n")
        ks = Terminals.keys()
        ks.sort()
        for k in ks:
            _vf.write("%-20s : %s\n" % (k, " ".join([str(s) for s in Terminals[k]])))
        _vf.write("\nNonterminals, with rules where they appear\n\n")
        ks = Nonterminals.keys()
        ks.sort()
        for k in ks:
            _vf.write("%-20s : %s\n" % (k, " ".join([str(s) for s in Nonterminals[k]])))
    if (cycle_check):
        compute_reachable()
        error += compute_terminates()
#       error += check_cycles()
    return error
# -----------------------------------------------------------------------------
# build_lritems()
#
# This function walks the list of productions and builds a complete set of the
# LR items. The LR items are stored in two ways: First, they are uniquely
# numbered and placed in the list _lritems. Second, a linked list of LR items
# is built for each production. For example:
#
# E -> E PLUS E
#
# Creates the list
#
# [E -> . E PLUS E, E -> E . PLUS E, E -> E PLUS . E, E -> E PLUS E . ]
# -----------------------------------------------------------------------------
def build_lritems():
    """Walk Productions and build the complete set of LR items.

    Each item is appended to the global LRitems list (receiving a unique
    .lr_num), and every production gets a linked list of its items chained
    through .lr_next, terminated by None.
    """
    for p in Productions:
        lastlri = p
        i = 0
        # NOTE: the original code computed p.lr_item(0) once before this
        # loop and immediately discarded it (the first iteration recomputes
        # it); that dead call has been removed.
        while 1:
            # lr_item(i) returns None once the dot is past the end,
            # which terminates both the chain and the loop.
            lri = p.lr_item(i)
            lastlri.lr_next = lri
            if not lri: break
            lri.lr_num = len(LRitems)
            LRitems.append(lri)
            lastlri = lri
            i += 1
    # In order for the rest of the parser generator to work, we need to
    # guarantee that no more lritems are generated. Therefore, we nuke
    # the p.lr_item method. (Only used in debugging)
    # Production.lr_item = None
# -----------------------------------------------------------------------------
# add_precedence()
#
# Given a list of precedence rules, add to the precedence table.
# -----------------------------------------------------------------------------
def add_precedence(plist):
    """Add the declarations in plist to the global Precedence table.

    plist is a sequence of tuples ('left'|'right'|'nonassoc', term, ...)
    in increasing order of binding strength.  Returns 0 on success, -1 on
    a fatally malformed associativity keyword, or a positive count of
    non-fatal errors (duplicates / unpackable entries).
    """
    plevel = 0
    error = 0
    for p in plist:
        plevel += 1       # Each declaration gets the next precedence level
        try:
            prec = p[0]
            terms = p[1:]
            if prec != 'left' and prec != 'right' and prec != 'nonassoc':
                sys.stderr.write("yacc: Invalid precedence '%s'\n" % prec)
                return -1
            for t in terms:
                if Precedence.has_key(t):
                    sys.stderr.write("yacc: Precedence already specified for terminal '%s'\n" % t)
                    error += 1
                    continue
                Precedence[t] = (prec,plevel)
        except Exception:
            # BUGFIX: was a bare 'except:', which also swallowed
            # SystemExit/KeyboardInterrupt.
            sys.stderr.write("yacc: Invalid precedence table.\n")
            error += 1
    return error
# -----------------------------------------------------------------------------
# augment_grammar()
#
# Compute the augmented grammar. This is just a rule S' -> start where start
# is the starting symbol.
# -----------------------------------------------------------------------------
def augment_grammar(start=None):
    """Install the augmented start rule S' -> start into Productions[0].

    If start is not given, the first user rule (Productions[1]) supplies
    the start symbol.
    """
    if not start:
        start = Productions[1].name
    Productions[0] = Production(name="S'",prod=[start],number=0,len=1,prec=('right',0),func=None)
    Productions[0].usyms = [ start ]
    Nonterminals[start].append(0)
# -------------------------------------------------------------------------
# first()
#
# Compute the value of FIRST1(beta) where beta is a tuple of symbols.
#
# During execution of compute_first1, the result may be incomplete.
# Afterward (e.g., when called from compute_follow()), it will be complete.
# -------------------------------------------------------------------------
def first(beta):
    """Return FIRST1(beta), where beta is a sequence of grammar symbols.

    Draws on the (possibly still-incomplete) global First table; the
    result includes '<empty>' only when every symbol in beta can
    derive the empty string (so an empty beta yields ['<empty>']).
    """
    result = [ ]
    for x in beta:
        x_produces_empty = 0
        # Merge the non-<empty> members of First[x] into the result.
        for f in First[x]:
            if f == '<empty>':
                x_produces_empty = 1
            elif f not in result:
                result.append(f)
        if not x_produces_empty:
            # x cannot vanish, so later symbols of beta are irrelevant.
            break
    else:
        # Every x could produce empty (or beta was empty), so beta
        # produces empty as well.
        result.append('<empty>')
    return result
# FOLLOW(x)
# Given a non-terminal. This function computes the set of all symbols
# that might follow it. Dragon book, p. 189.
def compute_follow(start=None):
    """Compute FOLLOW(x) for every nonterminal into the global Follow table.

    Fixed-point iteration per the Dragon book, p. 189.  Assumes the First
    table has already been completed by compute_first1().
    """
    # Add '$end' to the follow list of the start symbol
    for k in Nonterminals.keys():
        Follow[k] = [ ]
    if not start:
        start = Productions[1].name
    Follow[start] = [ '$end' ]
    while 1:
        didadd = 0
        for p in Productions[1:]:
            # Here is the production set
            for i in range(len(p.prod)):
                B = p.prod[i]
                if Nonterminals.has_key(B):
                    # Okay. We got a non-terminal in a production
                    fst = first(p.prod[i+1:])
                    hasempty = 0
                    for f in fst:
                        if f != '<empty>' and f not in Follow[B]:
                            Follow[B].append(f)
                            didadd = 1
                        if f == '<empty>':
                            hasempty = 1
                    if hasempty or i == (len(p.prod)-1):
                        # B is at the end (or everything after it can vanish):
                        # Add elements of follow(a) to follow(b)
                        for f in Follow[p.name]:
                            if f not in Follow[B]:
                                Follow[B].append(f)
                                didadd = 1
        if not didadd: break
    # Disabled debug dump of the Follow sets
    if 0 and yaccdebug:
        _vf.write('\nFollow:\n')
        for k in Nonterminals.keys():
            _vf.write("%-20s : %s\n" % (k, " ".join([str(s) for s in Follow[k]])))
# -------------------------------------------------------------------------
# compute_first1()
#
# Compute the value of FIRST1(X) for all symbols
# -------------------------------------------------------------------------
def compute_first1():
    """Compute FIRST1(X) for every grammar symbol into the global First table."""
    # Terminals:
    for t in Terminals.keys():
        First[t] = [t]
    First['$end'] = ['$end']
    # '#' is used as a placeholder/end-marker terminal by the LALR(1)
    # lookahead computation (presumably -- not visible in this chunk, confirm)
    First['#'] = ['#']
    # Nonterminals:
    # Initialize to the empty set:
    for n in Nonterminals.keys():
        First[n] = []
    # Then propagate symbols until no change (fixed-point iteration):
    while 1:
        some_change = 0
        for n in Nonterminals.keys():
            for p in Prodnames[n]:
                for f in first(p.prod):
                    if f not in First[n]:
                        First[n].append( f )
                        some_change = 1
        if not some_change:
            break
    # Disabled debug dump of the First sets
    if 0 and yaccdebug:
        _vf.write('\nFirst:\n')
        for k in Nonterminals.keys():
            _vf.write("%-20s : %s\n" %
                (k, " ".join([str(s) for s in First[k]])))
# -----------------------------------------------------------------------------
# === SLR Generation ===
#
# The following functions are used to construct SLR (Simple LR) parsing tables
# as described on p.221-229 of the dragon book.
# -----------------------------------------------------------------------------
# Global variables for the LR parsing engine
# Global variables for the LR parsing engine
def lr_init_vars():
    """(Re)initialize the module-level state used by LR table generation."""
    global _lr_action, _lr_goto, _lr_method
    global _lr_goto_cache, _lr0_cidhash
    _lr_action = { } # Action table
    _lr_goto = { } # Goto table
    _lr_method = "Unknown" # LR method used
    _lr_goto_cache = { } # Memoizes lr0_goto() results
    _lr0_cidhash = { } # Maps id(itemset) -> state number
# Compute the LR(0) closure operation on I, where I is a set of LR(0) items.
# prodlist is a list of productions.
_add_count = 0 # Counter used to detect cycles
def lr0_closure(I):
    """Return the LR(0) closure of item set I as a new list."""
    global _add_count
    # Bump the stamp so lr0_added marks from previous calls are stale
    _add_count += 1
    prodlist = Productions
    # Add everything in I to J
    J = I[:]
    didadd = 1
    while didadd:
        didadd = 0
        # NOTE: J grows while being iterated; the for loop also visits
        # items appended during the scan.
        for j in J:
            for x in j.lrafter:
                # Skip items already folded into this closure
                if x.lr0_added == _add_count: continue
                # Add B --> .G to J
                J.append(x.lr_next)
                x.lr0_added = _add_count
                didadd = 1
    return J
# Compute the LR(0) goto function goto(I,X) where I is a set
# of LR(0) items and X is a grammar symbol. This function is written
# in a way that guarantees uniqueness of the generated goto sets
# (i.e. the same goto set will never be returned as two different Python
# objects). With uniqueness, we can later do fast set comparisons using
# id(obj) instead of element-wise comparison.
def lr0_goto(I,x):
    """Compute goto(I,x) for item set I and grammar symbol x.

    Results are memoized so that equal goto sets are always the SAME
    list object, allowing later set comparisons by id().
    """
    # First we look for a previously cached entry
    g = _lr_goto_cache.get((id(I),x),None)
    if g: return g
    # Now we generate the goto set in a way that guarantees uniqueness
    # of the result.  _lr_goto_cache[x] is a trie keyed by the ids of the
    # successive kernel items; '$end' marks the stored result at a node.
    s = _lr_goto_cache.get(x,None)
    if not s:
        s = { }
        _lr_goto_cache[x] = s
    gs = [ ]
    for p in I:
        n = p.lr_next
        if n and n.lrbefore == x:
            # n is a kernel item of goto(I,x); walk/extend the trie
            s1 = s.get(id(n),None)
            if not s1:
                s1 = { }
                s[id(n)] = s1
            gs.append(n)
            s = s1
    g = s.get('$end',None)
    if not g:
        if gs:
            g = lr0_closure(gs)
            s['$end'] = g
        else:
            s['$end'] = gs
    _lr_goto_cache[(id(I),x)] = g
    return g
_lr0_cidhash = { } # Maps id(itemset) -> state number; module-level default, reset by lr_init_vars()
# Compute the LR(0) sets of item function
def lr0_items():
C = [ lr0_closure([Productions[0].lr_next]) ]
i = 0
for I in C:
_lr0_cidhash[id(I)] = i
i += 1
# Loop over the items in C and each grammar symbols
i = 0
while i < len(C):
I = C[i]
i += 1
# Collect all of the symbols that could possibly be in the goto(I,X) sets
asyms = { }
for ii in I:
for s in ii.usyms:
asyms[s] = None
for x in asyms.keys():
g = lr0_goto(I,x)
if not g: continue
if _lr0_cidhash.has_key(id(g)): continue
_lr0_cidhash[id(g)] = len(C)
C.append(g)
return C
# -----------------------------------------------------------------------------
# ==== LALR(1) Parsing ====
#
# LALR(1) parsing is almost exactly the same as SLR except that instead of
# relying upon Follow() sets when performing reductions, a more selective
# lookahead set that incorporates the state of the LR(0) machine is utilized.
# Thus, we mainly just have to focus on calculating the lookahead sets.
#
# The method used here is due to DeRemer and Pennelo (1982).
#
# DeRemer, F. L., and T. J. Pennelo: "Efficient Computation of LALR(1)
# Lookahead Sets", ACM Transactions on Programming Languages and Systems,
# Vol. 4, No. 4, Oct. 1982, pp. 615-649
#
# Further details can also be found in:
#
# J. Tremblay and P. Sorenson, "The Theory and Practice of Compiler Writing",
# McGraw-Hill Book Company, (1985).
#
# Note: This implementation is a complete replacement of the LALR(1)
# implementation in PLY-1.x releases. That version was based on
# a less efficient algorithm and it had bugs in its implementation.
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# compute_nullable_nonterminals()
#
# Creates a dictionary containing all of the non-terminals that might produce
# an empty production.
# -----------------------------------------------------------------------------
def compute_nullable_nonterminals():
    """Return a dict whose keys are the non-terminals that can derive
    the empty string (directly or through other nullable symbols).

    Iterates to a fixed point: a production is nullable if it is empty or
    if every symbol on its right-hand side is already known nullable.
    """
    nullable = {}
    known = 0
    while True:
        for p in Productions[1:]:
            if p.len == 0:
                nullable[p.name] = 1
                continue
            for t in p.prod:
                if t not in nullable:
                    break
            else:
                # Every RHS symbol was nullable, so the LHS is too
                nullable[p.name] = 1
        if len(nullable) == known:
            break           # fixed point reached
        known = len(nullable)
    return nullable
# -----------------------------------------------------------------------------
# find_nonterminal_trans(C)
#
# Given a set of LR(0) items, this functions finds all of the non-terminal
# transitions. These are transitions in which a dot appears immediately before
# a non-terminal. Returns a list of tuples of the form (state,N) where state
# is the state number and N is the nonterminal symbol.
#
# The input C is the set of LR(0) items.
# -----------------------------------------------------------------------------
def find_nonterminal_transitions(C):
    """Find all non-terminal transitions of the LR(0) machine.

    C is the list of LR(0) item sets.  Returns a list of unique tuples
    (state, N) recorded whenever some item in C[state] has its dot
    immediately in front of the non-terminal N.
    """
    # NOTE: a leftover "state = state + 1" at the bottom of the outer loop
    # was removed -- it pointlessly rebound the for-loop variable, which
    # range() reassigns on the next iteration anyway.
    trans = []
    for state in range(len(C)):
        for p in C[state]:
            if p.lr_index < p.len - 1:
                # Symbol immediately to the right of the dot
                t = (state, p.prod[p.lr_index + 1])
                if t[1] in Nonterminals:
                    if t not in trans:
                        trans.append(t)
    return trans
# -----------------------------------------------------------------------------
# dr_relation()
#
# Computes the DR(p,A) relationships for non-terminal transitions. The input
# is a tuple (state,N) where state is a number and N is a nonterminal symbol.
#
# Returns a list of terminals.
# -----------------------------------------------------------------------------
def dr_relation(C,trans,nullable):
    """Compute DR(p,A) -- the Direct Read set for one non-terminal transition.

    ``trans`` is a tuple (state, N).  Returns the list of terminals that can
    be read directly after taking the transition, i.e. the terminals found
    immediately after the dot in goto(C[state], N).  '$end' is appended for
    the transition out of the start state.  ``nullable`` is accepted only
    for signature symmetry with the other relation functions; it is unused.
    """
    # (removed an unused local "dr_set" left over from an earlier version)
    state, N = trans
    terms = []

    g = lr0_goto(C[state], N)
    for p in g:
        if p.lr_index < p.len - 1:
            a = p.prod[p.lr_index + 1]
            if Terminals.has_key(a):
                if a not in terms:
                    terms.append(a)

    # This extra bit is to handle the start state
    if state == 0 and N == Productions[0].prod[0]:
        terms.append('$end')

    return terms
# -----------------------------------------------------------------------------
# reads_relation()
#
# Computes the READS() relation (p,A) READS (t,C).
# -----------------------------------------------------------------------------
def reads_relation(C, trans, empty):
    """Compute the READS relation for one non-terminal transition.

    ``trans`` is (state, N); ``empty`` is the set (dict) of nullable
    symbols.  Returns the pairs (j, A) such that (state, N) READS (j, A):
    j is the state reached through the transition and A is a nullable
    symbol found right after the dot in some item of that state.
    """
    state, N = trans
    goto_set = lr0_goto(C[state], N)
    target = _lr0_cidhash.get(id(goto_set), -1)

    result = []
    for item in goto_set:
        if item.lr_index >= item.len - 1:
            continue
        symbol = item.prod[item.lr_index + 1]
        if symbol in empty:
            result.append((target, symbol))
    return result
# -----------------------------------------------------------------------------
# compute_lookback_includes()
#
# Determines the lookback and includes relations
#
# LOOKBACK:
#
# This relation is determined by running the LR(0) state machine forward.
# For example, starting with a production "N : . A B C", we run it forward
# to obtain "N : A B C ." We then build a relationship between this final
# state and the starting state. These relationships are stored in a dictionary
# lookdict.
#
# INCLUDES:
#
# Computes the INCLUDE() relation (p,A) INCLUDES (p',B).
#
# This relation is used to determine non-terminal transitions that occur
# inside of other non-terminal transition states. (p,A) INCLUDES (p', B)
# if the following holds:
#
# B -> LAT, where T -> epsilon and p' -L-> p
#
# L is essentially a prefix (which may be empty), T is a suffix that must be
# able to derive an empty string. State p' must lead to state p with the string L.
#
# -----------------------------------------------------------------------------
def compute_lookback_includes(C,trans,nullable):
    """Determine the LOOKBACK and INCLUDES relations for LALR generation.

    C        -- list of LR(0) item sets
    trans    -- list of non-terminal transitions (state, N)
    nullable -- dict of nullable non-terminals

    Returns (lookdict, includedict):
      lookdict maps each transition (state, N) to a list of (state', rule)
      final configurations reached by running the production forward;
      includedict maps a transition to the transitions that INCLUDE it.
    """
    lookdict = {}          # Dictionary of lookback relations
    includedict = {}       # Dictionary of include relations

    # Make a dictionary of non-terminal transitions
    dtrans = {}
    for t in trans:
        dtrans[t] = 1

    # Loop over all transitions and compute lookbacks and includes
    for state,N in trans:
        lookb = []
        includes = []
        for p in C[state]:
            if p.name != N: continue

            # Okay, we have a name match.  We now follow the production all the way
            # through the state machine until we get the . on the right hand side
            lr_index = p.lr_index
            j = state
            while lr_index < p.len - 1:
                lr_index = lr_index + 1
                t = p.prod[lr_index]

                # Check to see if this symbol and state are a non-terminal transition
                if dtrans.has_key((j,t)):
                    # Yes.  Okay, there is some chance that this is an includes relation
                    # the only way to know for certain is whether the rest of the
                    # production derives empty
                    li = lr_index + 1
                    while li < p.len:
                        if Terminals.has_key(p.prod[li]): break      # No forget it
                        if not nullable.has_key(p.prod[li]): break
                        li = li + 1
                    else:
                        # Appears to be a relation between (j,t) and (state,N)
                        includes.append((j,t))

                g = lr0_goto(C[j],t)               # Go to next set
                j = _lr0_cidhash.get(id(g),-1)     # Go to next state

            # When we get here, j is the final state, now we have to locate the production
            for r in C[j]:
                if r.name != p.name: continue
                if r.len != p.len: continue
                i = 0
                # This loop is comparing a production ". A B C" with "A B C ."
                while i < r.lr_index:
                    if r.prod[i] != p.prod[i+1]: break
                    i = i + 1
                else:
                    lookb.append((j,r))
        for i in includes:
            if not includedict.has_key(i): includedict[i] = []
            includedict[i].append((state,N))
        lookdict[(state,N)] = lookb

    return lookdict,includedict
# -----------------------------------------------------------------------------
# digraph()
# traverse()
#
# The following two functions are used to compute set valued functions
# of the form:
#
# F(x) = F'(x) U U{F(y) | x R y}
#
# This is used to compute the values of Read() sets as well as FOLLOW sets
# in LALR(1) generation.
#
# Inputs: X - An input set
# R - A relation
# FP - Set-valued function
# ------------------------------------------------------------------------------
def digraph(X,R,FP):
    """Compute F(x) = F'(x) U union{ F(y) | x R y } over all x in X.

    X  -- input set; R -- relation function; FP -- base set-valued function.
    Drives the DeRemer/Pennello graph traversal via traverse(); returns the
    dictionary F mapping each element of X to its computed set.
    """
    marks = {}
    for element in X:
        marks[element] = 0            # 0 = not yet visited
    stack = []
    F = {}
    for element in X:
        if marks[element] == 0:
            traverse(element, marks, stack, F, X, R, FP)
    return F
def traverse(x,N,stack,F,X,R,FP):
    """One step of the digraph algorithm (Tarjan-style SCC traversal).

    N holds visitation depths, stack the current DFS path, F the sets
    being computed.  When a strongly connected component is detected
    (N[x] == its own depth), every member of the component is popped and
    shares x's final set.
    """
    stack.append(x)
    d = len(stack)
    N[x] = d
    F[x] = FP(x)             # F(X) <- F'(x)

    rel = R(x)               # Get y's related to x
    for y in rel:
        if N[y] == 0:
            traverse(y,N,stack,F,X,R,FP)
        N[x] = min(N[x],N[y])
        for a in F.get(y,[]):
            if a not in F[x]: F[x].append(a)
    if N[x] == d:
        # x is the root of an SCC: mark every member as finished
        # (sys.maxint = "done") and give it the component's set.
        N[stack[-1]] = sys.maxint
        F[stack[-1]] = F[x]
        element = stack.pop()
        while element != x:
            N[stack[-1]] = sys.maxint
            F[stack[-1]] = F[x]
            element = stack.pop()
# -----------------------------------------------------------------------------
# compute_read_sets()
#
# Given a set of LR(0) items, this function computes the read sets.
#
# Inputs: C = Set of LR(0) items
# ntrans = Set of nonterminal transitions
# nullable = Set of empty transitions
#
# Returns a set containing the read sets
# -----------------------------------------------------------------------------
def compute_read_sets(C, ntrans, nullable):
    """Compute the Read() sets for all non-terminal transitions.

    C -- LR(0) item sets; ntrans -- non-terminal transitions;
    nullable -- nullable non-terminals.  Returns a dict of read sets.
    """
    def FP(x):
        return dr_relation(C, x, nullable)

    def R(x):
        return reads_relation(C, x, nullable)

    return digraph(ntrans, R, FP)
# -----------------------------------------------------------------------------
# compute_follow_sets()
#
# Given a set of LR(0) items, a set of non-terminal transitions, a readset,
# and an include set, this function computes the follow sets
#
# Follow(p,A) = Read(p,A) U U {Follow(p',B) | (p,A) INCLUDES (p',B)}
#
# Inputs:
# ntrans = Set of nonterminal transitions
# readsets = Readset (previously computed)
# inclsets = Include sets (previously computed)
#
# Returns a set containing the follow sets
# -----------------------------------------------------------------------------
def compute_follow_sets(ntrans,readsets,inclsets):
    """Compute LALR FOLLOW sets from read sets and the INCLUDES relation:

        Follow(p,A) = Read(p,A) U union{ Follow(p',B) | (p,A) INCLUDES (p',B) }
    """
    def FP(x):
        return readsets[x]

    def R(x):
        return inclsets.get(x, [])

    return digraph(ntrans, R, FP)
# -----------------------------------------------------------------------------
# add_lookaheads()
#
# Attaches the lookahead symbols to grammar rules.
#
# Inputs: lookbacks - Set of lookback relations
# followset - Computed follow set
#
# This function directly attaches the lookaheads to productions contained
# in the lookbacks set
# -----------------------------------------------------------------------------
def add_lookaheads(lookbacks,followset):
    """Attach LALR lookahead symbols directly to grammar productions.

    For each lookback relation (state, production), merge the transition's
    follow set into production.lookaheads[state], skipping duplicates.
    """
    for trans, lb in lookbacks.items():
        follow = followset.get(trans, [])
        # Loop over productions in lookback
        for state, p in lb:
            la = p.lookaheads.setdefault(state, [])
            for symbol in follow:
                if symbol not in la:
                    la.append(symbol)
# -----------------------------------------------------------------------------
# add_lalr_lookaheads()
#
# This function does all of the work of adding lookahead information for use
# with LALR parsing
# -----------------------------------------------------------------------------
def add_lalr_lookaheads(C):
    """Run the full DeRemer/Pennello pipeline to attach LALR(1) lookaheads
    to the grammar's productions, given the LR(0) item sets C."""
    # Determine all of the nullable nonterminals
    nullable = compute_nullable_nonterminals()

    # Find all non-terminal transitions
    trans = find_nonterminal_transitions(C)

    # Compute read sets
    readsets = compute_read_sets(C,trans,nullable)

    # Compute lookback/includes relations
    lookd, included = compute_lookback_includes(C,trans,nullable)

    # Compute LALR FOLLOW sets
    followsets = compute_follow_sets(trans,readsets,included)

    # Add all of the lookaheads
    add_lookaheads(lookd,followsets)
# -----------------------------------------------------------------------------
# lr_parse_table()
#
# This function constructs the parse tables for SLR or LALR
# -----------------------------------------------------------------------------
def lr_parse_table(method):
    """Build the LR parsing tables (action and goto) for 'SLR' or 'LALR'.

    Fills the module-level ``_lr_action`` and ``_lr_goto`` dictionaries,
    resolving shift/reduce conflicts with the Precedence table (default:
    shift) and reduce/reduce conflicts in favor of the earlier rule.
    Conflict diagnostics are written to _vf/_vfc when yaccdebug is set.
    """
    global _lr_method
    goto = _lr_goto           # Goto array
    action = _lr_action       # Action array
    actionp = { }             # Action production array (temporary)

    _lr_method = method

    n_srconflict = 0
    n_rrconflict = 0

    if yaccdebug:
        sys.stderr.write("yacc: Generating %s parsing table...\n" % method)
        _vf.write("\n\nParsing method: %s\n\n" % method)

    # Step 1: Construct C = { I0, I1, ... IN}, collection of LR(0) items
    # This determines the number of states
    C = lr0_items()

    if method == 'LALR':
        add_lalr_lookaheads(C)

    # Build the parser table, state by state
    st = 0
    for I in C:
        # Loop over each production in I
        actlist = [ ]              # List of actions
        st_action  = { }
        st_actionp = { }
        st_goto    = { }
        if yaccdebug:
            _vf.write("\nstate %d\n\n" % st)
            for p in I:
                _vf.write("    (%d) %s\n" % (p.number, str(p)))
            _vf.write("\n")

        for p in I:
            try:
                if p.len == p.lr_index + 1:
                    if p.name == "S'":
                        # Start symbol. Accept!
                        st_action["$end"] = 0
                        st_actionp["$end"] = p
                    else:
                        # We are at the end of a production.  Reduce!
                        # LALR uses per-state lookaheads; SLR falls back to Follow sets.
                        if method == 'LALR':
                            laheads = p.lookaheads[st]
                        else:
                            laheads = Follow[p.name]
                        for a in laheads:
                            actlist.append((a,p,"reduce using rule %d (%s)" % (p.number,p)))
                            r = st_action.get(a,None)
                            if r is not None:
                                # Whoa. Have a shift/reduce or reduce/reduce conflict
                                if r > 0:
                                    # Need to decide on shift or reduce here
                                    # By default we favor shifting. Need to add
                                    # some precedence rules here.
                                    sprec,slevel = Productions[st_actionp[a].number].prec
                                    rprec,rlevel = Precedence.get(a,('right',0))
                                    if (slevel < rlevel) or ((slevel == rlevel) and (rprec == 'left')):
                                        # We really need to reduce here.
                                        st_action[a] = -p.number
                                        st_actionp[a] = p
                                        if not slevel and not rlevel:
                                            _vfc.write("shift/reduce conflict in state %d resolved as reduce.\n" % st)
                                            _vf.write("  ! shift/reduce conflict for %s resolved as reduce.\n" % a)
                                            n_srconflict += 1
                                    elif (slevel == rlevel) and (rprec == 'nonassoc'):
                                        # Non-associative at equal precedence: error entry
                                        st_action[a] = None
                                    else:
                                        # Hmmm. Guess we'll keep the shift
                                        if not rlevel:
                                            _vfc.write("shift/reduce conflict in state %d resolved as shift.\n" % st)
                                            _vf.write("  ! shift/reduce conflict for %s resolved as shift.\n" % a)
                                            n_srconflict +=1
                                elif r < 0:
                                    # Reduce/reduce conflict.   In this case, we favor the rule
                                    # that was defined first in the grammar file
                                    oldp = Productions[-r]
                                    pp = Productions[p.number]
                                    if oldp.line > pp.line:
                                        st_action[a] = -p.number
                                        st_actionp[a] = p
                                    # sys.stderr.write("Reduce/reduce conflict in state %d\n" % st)
                                    n_rrconflict += 1
                                    _vfc.write("reduce/reduce conflict in state %d resolved using rule %d (%s).\n" % (st, st_actionp[a].number, st_actionp[a]))
                                    _vf.write("  ! reduce/reduce conflict for %s resolved using rule %d (%s).\n" % (a,st_actionp[a].number, st_actionp[a]))
                                else:
                                    sys.stderr.write("Unknown conflict in state %d\n" % st)
                            else:
                                st_action[a] = -p.number
                                st_actionp[a] = p
                else:
                    i = p.lr_index
                    a = p.prod[i+1]       # Get symbol right after the "."
                    if Terminals.has_key(a):
                        g = lr0_goto(I,a)
                        j = _lr0_cidhash.get(id(g),-1)
                        if j >= 0:
                            # We are in a shift state
                            actlist.append((a,p,"shift and go to state %d" % j))
                            r = st_action.get(a,None)
                            if r is not None:
                                # Whoa have a shift/reduce or shift/shift conflict
                                if r > 0:
                                    if r != j:
                                        sys.stderr.write("Shift/shift conflict in state %d\n" % st)
                                elif r < 0:
                                    # Do a precedence check.
                                    #   -  if precedence of reduce rule is higher, we reduce.
                                    #   -  if precedence of reduce is same and left assoc, we reduce.
                                    #   -  otherwise we shift
                                    rprec,rlevel = Productions[st_actionp[a].number].prec
                                    sprec,slevel = Precedence.get(a,('right',0))
                                    if (slevel > rlevel) or ((slevel == rlevel) and (rprec != 'left')):
                                        # We decide to shift here... highest precedence to shift
                                        st_action[a] = j
                                        st_actionp[a] = p
                                        if not rlevel:
                                            n_srconflict += 1
                                            _vfc.write("shift/reduce conflict in state %d resolved as shift.\n" % st)
                                            _vf.write("  ! shift/reduce conflict for %s resolved as shift.\n" % a)
                                    elif (slevel == rlevel) and (rprec == 'nonassoc'):
                                        st_action[a] = None
                                    else:
                                        # Hmmm. Guess we'll keep the reduce
                                        if not slevel and not rlevel:
                                            n_srconflict +=1
                                            _vfc.write("shift/reduce conflict in state %d resolved as reduce.\n" % st)
                                            _vf.write("  ! shift/reduce conflict for %s resolved as reduce.\n" % a)
                                else:
                                    sys.stderr.write("Unknown conflict in state %d\n" % st)
                            else:
                                st_action[a] = j
                                st_actionp[a] = p
            except StandardError,e:
                print sys.exc_info()
                raise YaccError, "Hosed in lr_parse_table"

        # Print the actions associated with each terminal
        if yaccdebug:
            _actprint = { }
            for a,p,m in actlist:
                if st_action.has_key(a):
                    if p is st_actionp[a]:
                        _vf.write("    %-15s %s\n" % (a,m))
                        _actprint[(a,m)] = 1
            _vf.write("\n")
            # Show the rejected alternatives of resolved conflicts
            for a,p,m in actlist:
                if st_action.has_key(a):
                    if p is not st_actionp[a]:
                        if not _actprint.has_key((a,m)):
                            _vf.write("  ! %-15s [ %s ]\n" % (a,m))
                            _actprint[(a,m)] = 1

        # Construct the goto table for this state
        if yaccdebug:
            _vf.write("\n")
        nkeys = { }
        for ii in I:
            for s in ii.usyms:
                if Nonterminals.has_key(s):
                    nkeys[s] = None
        for n in nkeys.keys():
            g = lr0_goto(I,n)
            j = _lr0_cidhash.get(id(g),-1)
            if j >= 0:
                st_goto[n] = j
                if yaccdebug:
                    _vf.write("    %-30s shift and go to state %d\n" % (n,j))

        action[st] = st_action
        actionp[st] = st_actionp
        goto[st] = st_goto

        st += 1

    if yaccdebug:
        if n_srconflict == 1:
            sys.stderr.write("yacc: %d shift/reduce conflict\n" % n_srconflict)
        if n_srconflict > 1:
            sys.stderr.write("yacc: %d shift/reduce conflicts\n" % n_srconflict)
        if n_rrconflict == 1:
            sys.stderr.write("yacc: %d reduce/reduce conflict\n" % n_rrconflict)
        if n_rrconflict > 1:
            sys.stderr.write("yacc: %d reduce/reduce conflicts\n" % n_rrconflict)
# -----------------------------------------------------------------------------
# ==== LR Utility functions ====
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# _lr_write_tables()
#
# This function writes the LR parsing tables to a file
# -----------------------------------------------------------------------------
def lr_write_tables(modulename=tab_module,outputdir=''):
    """Write the generated LR tables to <outputdir>/<modulename>.py.

    The emitted module records the method, a grammar signature (used to
    detect staleness on reload), the action/goto tables, and a compact
    production list.  I/O errors are reported to stderr, not raised.
    """
    filename = os.path.join(outputdir,modulename) + ".py"
    try:
        f = open(filename,"w")

        f.write("""
# %s
# This file is automatically generated. Do not edit.
_lr_method = %s
_lr_signature = %s
""" % (filename, repr(_lr_method), repr(Signature.digest())))

        # Change smaller to 0 to go back to original tables
        smaller = 1

        # Factor out names to try and make smaller
        if smaller:
            # Invert the table: for each token name, collect the parallel
            # lists of states and values; the emitted module rebuilds the
            # per-state dictionaries at import time.
            items = { }

            for s,nd in _lr_action.items():
                for name,v in nd.items():
                    i = items.get(name)
                    if not i:
                        i = ([],[])
                        items[name] = i
                    i[0].append(s)
                    i[1].append(v)

            f.write("\n_lr_action_items = {")
            for k,v in items.items():
                f.write("%r:([" % k)
                for i in v[0]:
                    f.write("%r," % i)
                f.write("],[")
                for i in v[1]:
                    f.write("%r," % i)
                f.write("]),")
            f.write("}\n")

            f.write("""
_lr_action = { }
for _k, _v in _lr_action_items.items():
   for _x,_y in zip(_v[0],_v[1]):
      if not _lr_action.has_key(_x): _lr_action[_x] = { }
      _lr_action[_x][_k] = _y
del _lr_action_items
""")
        else:
            f.write("\n_lr_action = { ");
            for k,v in _lr_action.items():
                f.write("(%r,%r):%r," % (k[0],k[1],v))
            f.write("}\n");

        if smaller:
            # Factor out names to try and make smaller
            items = { }

            for s,nd in _lr_goto.items():
                for name,v in nd.items():
                    i = items.get(name)
                    if not i:
                        i = ([],[])
                        items[name] = i
                    i[0].append(s)
                    i[1].append(v)

            f.write("\n_lr_goto_items = {")
            for k,v in items.items():
                f.write("%r:([" % k)
                for i in v[0]:
                    f.write("%r," % i)
                f.write("],[")
                for i in v[1]:
                    f.write("%r," % i)
                f.write("]),")
            f.write("}\n")

            f.write("""
_lr_goto = { }
for _k, _v in _lr_goto_items.items():
   for _x,_y in zip(_v[0],_v[1]):
      if not _lr_goto.has_key(_x): _lr_goto[_x] = { }
      _lr_goto[_x][_k] = _y
del _lr_goto_items
""")
        else:
            f.write("\n_lr_goto = { ");
            for k,v in _lr_goto.items():
                f.write("(%r,%r):%r," % (k[0],k[1],v))
            f.write("}\n");

        # Write production table
        f.write("_lr_productions = [\n")
        for p in Productions:
            if p:
                if (p.func):
                    f.write("  (%r,%d,%r,%r,%d),\n" % (p.name, p.len, p.func.__name__,p.file,p.line))
                else:
                    f.write("  (%r,%d,None,None,None),\n" % (p.name, p.len))
            else:
                # Slot 0 of Productions is the None placeholder
                f.write("  None,\n")
        f.write("]\n")

        f.close()

    except IOError,e:
        print >>sys.stderr, "Unable to create '%s'" % filename
        print >>sys.stderr, e
        return
def lr_read_tables(module=tab_module,optimize=0):
    """Try to load previously generated LR tables from *module*.

    Returns 1 and installs the cached action/goto/production tables into
    the module globals when the import succeeds and (unless *optimize* is
    set) the stored signature matches the current grammar's Signature
    digest.  Returns 0 when the tables are missing, stale or malformed.
    """
    global _lr_action, _lr_goto, _lr_productions, _lr_method
    try:
        # Import the tables module without building an exec() string.
        # __import__ returns the top-level package for a dotted name, so
        # walk down attribute by attribute to reach the leaf module.
        parsetab = __import__(module)
        for _part in module.split(".")[1:]:
            parsetab = getattr(parsetab, _part)

        if (optimize) or (Signature.digest() == parsetab._lr_signature):
            _lr_action = parsetab._lr_action
            _lr_goto = parsetab._lr_goto
            _lr_productions = parsetab._lr_productions
            _lr_method = parsetab._lr_method
            return 1
        else:
            # Grammar changed since the tables were written: regenerate
            return 0

    except (ImportError,AttributeError):
        return 0
# -----------------------------------------------------------------------------
# yacc(module)
#
# Build the parser module
# -----------------------------------------------------------------------------
def yacc(method=default_lr, debug=yaccdebug, module=None, tabmodule=tab_module, start=None, check_recursion=1, optimize=0,write_tables=1,debugfile=debug_file,outputdir=''):
    """Build a parser from p_* rule functions and return a Parser object.

    method     -- 'SLR' or 'LALR'
    module     -- module or instance holding the grammar (default: caller's
                  globals, recovered by walking the traceback)
    tabmodule  -- module name for the cached tables
    start      -- start symbol (default: 'start' in the grammar namespace)
    optimize   -- when true, trust cached tables and skip validation
    write_tables/debugfile/outputdir -- output control

    Side effects: rebinds the module-level ``parse`` and ``parser``
    globals, and (unless optimize) deletes the construction globals.
    Raises YaccError on any grammar problem.
    """
    global yaccdebug
    yaccdebug = debug

    initialize_vars()
    files = { }
    error = 0

    # Add parsing method to signature
    Signature.update(method)

    # If a "module" parameter was supplied, extract its dictionary.
    # Note: a module may in fact be an instance as well.
    if module:
        # User supplied a module object.
        if isinstance(module, types.ModuleType):
            ldict = module.__dict__
        elif isinstance(module, _INSTANCETYPE):
            _items = [(k,getattr(module,k)) for k in dir(module)]
            ldict = { }
            for i in _items:
                ldict[i[0]] = i[1]
        else:
            raise ValueError,"Expected a module"

    else:
        # No module given. We might be able to get information from the caller.
        # Throw an exception and unwind the traceback to get the globals
        try:
            raise RuntimeError
        except RuntimeError:
            e,b,t = sys.exc_info()
            f = t.tb_frame
            f = f.f_back           # Walk out to our calling function
            ldict = f.f_globals    # Grab its globals dictionary

    # Add starting symbol to signature
    if not start:
        start = ldict.get("start",None)
    if start:
        Signature.update(start)

    # If running in optimized mode. We're going to
    # read the tables instead of building them from scratch.
    if (optimize and lr_read_tables(tabmodule,1)):
        # Read parse table
        del Productions[:]
        for p in _lr_productions:
            if not p:
                Productions.append(None)
            else:
                m = MiniProduction()
                m.name = p[0]
                m.len = p[1]
                m.file = p[3]
                m.line = p[4]
                if p[2]:
                    m.func = ldict[p[2]]
                Productions.append(m)

    else:
        # Get the tokens map
        if (module and isinstance(module,_INSTANCETYPE)):
            tokens = getattr(module,"tokens",None)
        else:
            tokens = ldict.get("tokens",None)

        if not tokens:
            raise YaccError,"module does not define a list 'tokens'"
        if not (isinstance(tokens,types.ListType) or isinstance(tokens,types.TupleType)):
            raise YaccError,"tokens must be a list or tuple."

        # Check to see if a requires dictionary is defined.
        requires = ldict.get("require",None)
        if requires:
            if not (isinstance(requires,types.DictType)):
                raise YaccError,"require must be a dictionary."

            for r,v in requires.items():
                try:
                    if not (isinstance(v,types.ListType)):
                        raise TypeError
                    v1 = [x.split(".") for x in v]
                    Requires[r] = v1
                except StandardError:
                    print >>sys.stderr, "Invalid specification for rule '%s' in require. Expected a list of strings" % r

        # Build the dictionary of terminals. We a record a 0 in the
        # dictionary to track whether or not a terminal is actually
        # used in the grammar
        if 'error' in tokens:
            print >>sys.stderr, "yacc: Illegal token 'error'. Is a reserved word."
            raise YaccError,"Illegal token name"

        for n in tokens:
            if Terminals.has_key(n):
                print >>sys.stderr, "yacc: Warning. Token '%s' multiply defined." % n
            Terminals[n] = [ ]

        Terminals['error'] = [ ]

        # Get the precedence map (if any)
        prec = ldict.get("precedence",None)
        if prec:
            if not (isinstance(prec,types.ListType) or isinstance(prec,types.TupleType)):
                raise YaccError,"precedence must be a list or tuple."
            add_precedence(prec)
            Signature.update(repr(prec))

        for n in tokens:
            if not Precedence.has_key(n):
                Precedence[n] = ('right',0)         # Default, right associative, 0 precedence

        # Look for error handler
        ef = ldict.get('p_error',None)
        if ef:
            if isinstance(ef,types.FunctionType):
                ismethod = 0
            elif isinstance(ef, types.MethodType):
                ismethod = 1
            else:
                raise YaccError,"'p_error' defined, but is not a function or method."
            eline = ef.func_code.co_firstlineno
            efile = ef.func_code.co_filename
            files[efile] = None

            # p_error takes one argument (plus self for a method)
            if (ef.func_code.co_argcount != 1+ismethod):
                raise YaccError,"%s:%d: p_error() requires 1 argument." % (efile,eline)
            global Errorfunc
            Errorfunc = ef
        else:
            print >>sys.stderr, "yacc: Warning. no p_error() function is defined."

        # Get the list of built-in functions with p_ prefix
        symbols = [ldict[f] for f in ldict.keys()
                   if (type(ldict[f]) in (types.FunctionType, types.MethodType) and ldict[f].__name__[:2] == 'p_'
                       and ldict[f].__name__ != 'p_error')]

        # Check for non-empty symbols
        if len(symbols) == 0:
            raise YaccError,"no rules of the form p_rulename are defined."

        # Sort the symbols by line number
        symbols.sort(lambda x,y: cmp(x.func_code.co_firstlineno,y.func_code.co_firstlineno))

        # Add all of the symbols to the grammar
        for f in symbols:
            if (add_function(f)) < 0:
                error += 1
            else:
                files[f.func_code.co_filename] = None

        # Make a signature of the docstrings
        for f in symbols:
            if f.__doc__:
                Signature.update(f.__doc__)

        lr_init_vars()

        if error:
            raise YaccError,"Unable to construct parser."

        if not lr_read_tables(tabmodule):

            # Validate files
            for filename in files.keys():
                if not validate_file(filename):
                    error = 1

            # Validate dictionary
            validate_dict(ldict)

            if start and not Prodnames.has_key(start):
                raise YaccError,"Bad starting symbol '%s'" % start

            augment_grammar(start)
            error = verify_productions(cycle_check=check_recursion)
            otherfunc = [ldict[f] for f in ldict.keys()
                         if (type(f) in (types.FunctionType,types.MethodType) and ldict[f].__name__[:2] != 'p_')]

            if error:
                raise YaccError,"Unable to construct parser."

            build_lritems()
            compute_first1()
            compute_follow(start)

            if method in ['SLR','LALR']:
                lr_parse_table(method)
            else:
                raise YaccError, "Unknown parsing method '%s'" % method

            if write_tables:
                lr_write_tables(tabmodule,outputdir)

            if yaccdebug:
                try:
                    f = open(os.path.join(outputdir,debugfile),"w")
                    f.write(_vfc.getvalue())
                    f.write("\n\n")
                    f.write(_vf.getvalue())
                    f.close()
                except IOError,e:
                    print >>sys.stderr, "yacc: can't create '%s'" % debugfile,e

    # Made it here. Create a parser object and set up its internal state.
    # Set global parse() method to bound method of parser object.
    p = Parser("xyzzy")
    p.productions = Productions
    p.errorfunc = Errorfunc
    p.action = _lr_action
    p.goto = _lr_goto
    p.method = _lr_method
    p.require = Requires

    global parse
    parse = p.parse

    global parser
    parser = p

    # Clean up all of the globals we created
    if (not optimize):
        yacc_cleanup()
    return p
# yacc_cleanup function. Delete all of the global variables
# used during table construction
def yacc_cleanup():
    """Delete the module-level globals created during table construction,
    releasing their memory.  Called from yacc() unless running optimized."""
    global _lr_action, _lr_goto, _lr_method, _lr_goto_cache
    del _lr_action, _lr_goto, _lr_method, _lr_goto_cache

    global Productions, Prodnames, Prodmap, Terminals
    global Nonterminals, First, Follow, Precedence, LRitems
    global Errorfunc, Signature, Requires

    del Productions, Prodnames, Prodmap, Terminals
    del Nonterminals, First, Follow, Precedence, LRitems
    del Errorfunc, Signature, Requires

    global _vf, _vfc
    del _vf, _vfc
# Stub that raises an error if parsing is attempted without first calling yacc()
def parse(*args,**kwargs):
    """Placeholder installed at import time; yacc() rebinds this global to
    the real Parser.parse bound method once a parser has been built."""
    raise YaccError, "yacc: No parser built with yacc()"
| Python |
#coding=utf8
#$Id: shell.py 206 2008-06-05 13:17:28Z Filia.Tao@gmail.com $
'''
解释器Shell
可以加载两个不同的引擎,来解释不同的语言
'''
import sys
sys.path.insert(0,"..")
class Shell:
def __init__(self,name,engineer):
self.code = ""
self.engineer = engineer
self.name = name
self.recent_command = []
self.version = "0.1"
def start(self):
print self.name , "shell ", self.version
print "type help for help"
self.run()
def run(self):
while(True):
command = ""
try:
command = raw_input(">>>")
except EOFError,e:
print "Please use 'exit' to exit the shell"
continue
if command:
self.recent_command.append(command)
self.on_command(command)
def on_command(self,command):
command = command.strip()
if command.startswith("clear"):
self.clear()
elif command.startswith("read"):
self.read()
elif command.startswith("load"):
try:
filename = command.split()[1]
if filename[0] == '"' and filename[-1] == '"':
filename = filename[1:-1]
except (IndexError),e:
print "bad argument for load command"
return
self.load(filename)
elif command.startswith("exit"):
self.exit()
elif command.startswith("help"):
self.help()
elif command.startswith("list"):
for i in range(len(self.recent_command)):
print "%d : %s" %(i+1, self.recent_command[i])
print "using exec <number> to exec recent command"
elif command.startswith("exec"):
try:
number = int(command.split()[1])
except (IndexError,ValueError),e:
print "bad argument for exec command"
return
try:
self.on_command(self.recent_command[number-1])
except IndexError,e:
print "No command with id %d" %(number,)
else:
print "error command"
def clear(self):
self.code = ""
def read(self):
self.clear()
while(True):
try:
self.code += raw_input() + '\n'
except EOFError,e:
break
if self.code:
print '----------------------------------------------'
self.engineer.run(self.code)
def exit(self):
sys.exit()
def help(self):
print self.name , "shell ", self.version
print "Author: TaoFei (Filia.Tao@gmail.com)"
print "supported commands:"
print " clear : clear"
print " exit : eixt"
print " exec <command-id>: execute recent command"
print " help : show this message"
print " list : list recent commands"
print " load <filename> : load from file and run the code "
print " read : read from stdin and run the code"
print " using CTRL+Z+Return(Windows) or CTRL+D(*nix) to finish the input"
def load(self,filename):
try:
self.code = open(filename).read()
except IOError,e:
print "cann't load file '%s'" %(filename)
return
self.engineer.run(self.code)
if __name__ == '__main__':
if len(sys.argv) <= 1 or sys.argv[1] not in ("L0","L1", "L2"):
print "Plase choose a engieer"
print "L0, L1 , L2 "
print "eg: %s L1" %(sys.argv[0])
else:
if sys.argv[1] == "L0":
import interpretor.kernelc.interp as engieer
elif sys.argv[1] == "L1":
import interpretor.smallc.interp as engieer
elif sys.argv[1] == "L2":
import interpretor.ooc.interp as engieer
shell = Shell(sys.argv[1],engieer)
if len(sys.argv) > 2:
shell.load(sys.argv[2])
else:
shell.start()
| Python |
#coding=utf8
#$Id: error.py 119 2008-04-27 06:07:41Z Filia.Tao@gmail.com $
'''
错误类型和错误报告系统
'''
class Error(Exception):
    """Base class for diagnostics anchored to a source line.

    Subclasses may override ``error_type``; expected values are "error",
    "warning" or "notice".
    """
    error_type = "error" #can be error, warning , notice
    def __init__(self, lineno, msg):
        # lineno: line where the problem was found; msg: human-readable text
        self.lineno = lineno
        self.msg = msg
    def __str__(self):
        return "line %s: %s: %s" %(self.lineno, self.error_type, self.msg)
class LangError(Exception):
    """Base class for errors raised by the interpreted language itself
    (as opposed to the line-anchored diagnostics in Error)."""
    def __init__(self, msg):
        self.msg = msg
    def __str__(self):
        return self.msg
    # repr() mirrors str() so instances print readably inside containers
    __repr__ = __str__
# Lexical analysis errors
# Syntax (parse) errors
class ParseError(Exception):
    """Raised when the parser encounters an unexpected token.

    The offending token object must carry ``lineno`` and ``value``.
    """
    def __init__(self,token):
        self.token = token

    def __str__(self):
        tok = self.token
        return "Parser error at line %d token '%s'" %(tok.lineno, tok.value)
# Static semantic errors
class StaticSemanticError(LangError):
    '''Static semantic (compile-time) error.'''
    pass
class NameError(StaticSemanticError):
    """Raised when an undefined name is referenced.

    NOTE(review): shadows the builtin NameError inside this module;
    external callers should reference it as error.NameError.
    """
    def __init__(self, name):
        self.name = name
        self.msg = "name '%s' is not defined" %(self.name)
class NameReDefineError(StaticSemanticError):
    """Raised when a name is defined twice in the same namespace."""
    def __init__(self, name):
        self.name = name
        self.msg = "name '%s' is already defined" %(self.name)
class TypeCheckError(StaticSemanticError):
    """Raised when the operands of an operation have incompatible types."""
    def __init__(self, op):
        self.op = op
        self.msg = "type not match for operation '%s'" %(self.op)
class MemberError(StaticSemanticError):
    """Raised when a member access names a field the type does not define.

    *type* must expose a ``name`` attribute (its printable type name).
    """
    def __init__(self, type, member):
        self.type = type
        self.member = member
        # Fixed message typo: "dont't have" -> "doesn't have"
        self.msg = "'%s' doesn't have '%s' member" %(self.type.name, self.member)
class ParamCountNotMatchError(StaticSemanticError):
    """Raised when a call supplies the wrong number of arguments."""
    def __init__(self, expect_count, real_count):
        self.expect_count = expect_count
        self.real_count = real_count
        self.msg = "param count not match , expect %d , got %d" %(self.expect_count, self.real_count)
# Runtime errors
class TCastError(LangError):
    """Runtime error: a value cannot be converted between two types."""
    def __init__(self,lhs,rhs):
        self.lhs = lhs   # source type
        self.rhs = rhs   # destination type
    def __str__(self):
        # Fixed message typo: "Cant't" -> "Can't"
        return "Can't convert from %s to %s" %(self.lhs, self.rhs)
class TypeError(LangError):
    """Runtime error: an object's type differs from the expected one.

    NOTE(review): shadows the builtin TypeError inside this module.
    """
    def __init__(self,lhs,rhs):
        self.lhs = lhs   # expected type
        self.rhs = rhs   # actual type
    def __str__(self):
        # Fixed wording: "Except object with type ... , got" -> "Expected object of type ..., got"
        return "Expected object of type %s, got %s" %(self.lhs,self.rhs)
class UnsupportedOPError(LangError):
    """Runtime error: the requested operation is not supported by the object."""
    def __init__(self,op):
        self.op = op
    def __str__(self):
        return "Unsupported operation '%s'" %(self.op)
class IndexError(LangError):
    """Runtime error: index outside the half-open range [low, high).

    NOTE(review): shadows the builtin IndexError inside this module.
    """
    def __init__(self,ind,range):
        self.index = ind
        self.range = range   # (low, high) pair
    def __str__(self):
        return "Index %s out of range [%d,%d)" %(self.index ,self.range[0], self.range[1])
class ChkFailError(LangError):
    """Runtime error: a language-level check (chk) failed."""
    def __str__(self):
        return "Chk failed"
class NullError(LangError):
    """Runtime error: an operation was attempted on a null object."""
    def __init__(self,obj):
        self.obj = obj
    def __str__(self):
        return "%s is null." %(self.obj)
class EmptyReferenceError(LangError):
    """Runtime error: a reference points at nothing (hanging reference)."""
    def __init__(self,obj):
        self.obj = obj
    def __str__(self):
        return "%s is empty (hanging reference)." %(self.obj)
class EOFError(LangError):
    """Runtime error: input was exhausted unexpectedly.

    NOTE(review): shadows the builtin EOFError inside this module.
    """
    def __init__(self):
        pass
    def __str__(self):
        return "EOF Error."
#coding=utf8
#$Id: function.py 185 2008-05-23 11:58:16Z Filia.Tao@gmail.com $
import copy
import sys
import interpretor.smallc.lang as lang
import interpretor.smallc.error as error
def copy_ns(ns_dict):
    """Return a copy of a namespace dict whose values are shallow-copied too.

    Used to give every function call a fresh set of local Objects.
    """
    return dict((name, copy.copy(obj)) for name, obj in ns_dict.items())
# Shared I/O state for the built-in read/print/eof functions.  Mutated in
# place by set_io() so every importer observes the new streams.
io = {
    'input' : sys.stdin,       # stream the interpreted program reads from
    'output' : sys.stdout,     # stream print/println write to
    'input_buff' : "",         # one line read ahead but not yet consumed
    'is_eof' : 0               # set to 1 once the input stream is exhausted
}
def set_io(input_f, output_f):
    """Rebind the interpreter's shared I/O channels and reset the read state."""
    io.update({
        'input': input_f,
        'output': output_f,
        'input_buff': "",
        'is_eof': 0,
    })
class Namespace:
    '''A lexically nested symbol table: lookups fall back to `upper`.'''
    def __init__(self, upper=None):
        self.upper = upper     # enclosing Namespace, or None for the root
        self.ns = {}           # name -> object bindings of this scope
        self.name = "global"
    def get(self, name):
        '''Look the name up here, then in enclosing scopes; error if unbound.'''
        scope = self
        while scope is not None:
            if name in scope.ns:
                return scope.ns[name]
            scope = scope.upper
        raise error.NameError(name)
    def __getitem__(self, key):
        return self.get(key)
    def set(self, name, value):
        '''Bind name in THIS scope; redefinition is a static error.'''
        if name in self.ns:
            raise error.NameReDefineError(name)
        self.ns[name] = value
    def __setitem__(self, key, value):
        self.set(key, value)
    def __repr__(self):
        return "Namespace %s" %(self.name)
class Function(Namespace):
    '''A user-defined function: a Namespace plus parameters, body and return type.'''
    def __init__(self, name, upper, ret_type = lang.void):
        self.name = name
        self.upper = upper          # enclosing (global) namespace
        self.ns = {}                # local bindings: parameters and locals
        self.ret_type = ret_type
        self.params = []            # parameter names, in declaration order
        self.params_type = []       # parameter types, parallel to `params`
        self.statements = []        # AST statement nodes forming the body
    def add_param(self, name, type):
        'Declare a formal parameter.'
        self.params.append(name)
        self.params_type.append(type)
        self.set(name,lang.Object(type))
    def freeze(self):
        'Freeze: snapshot the pristine namespace so each call starts clean.'
        self.ns_org = copy_ns(self.ns)
    def set_param(self, name, value):
        'Bind an actual argument to an already-declared parameter.'
        if name not in self.ns:
            raise error.NameError(name)
        else:
            self.ns[name].op("assign", value)
    def call(self, args, inter, line_no = None):
        '''Invoke the function with `args`, using interpreter `inter`.

        The local namespace is swapped for a fresh copy of the frozen one,
        which makes (mutual) recursion safe.  The value of the last executed
        statement, cast to the declared return type, is the result.
        '''
        #print "enter function ", self.name
        # Push this call onto the interpreter's call stack (for error reports).
        inter.call_stack.append((self, line_no))
        # Save the current frame's namespace before installing a fresh one.
        ns_now = self.ns
        self.ns = copy_ns(self.ns_org)
        old_current = inter.current_ns
        inter.current_ns = self
        ret = lang.Object(lang.void)
        for i in range(len(self.params)):
            self.set_param(self.params[i], args[i])
        for st in self.statements:
            ret = inter.on_statement(st)
            #print "walking statemnt , ", st
            #inter._walk_node(st)
            #ret = inter.action.get_node_attr(st)
        # Restore the saved frame.
        self.ns = ns_now
        inter.current_ns = old_current
        # Pop this call off the call stack.
        inter.call_stack.pop()
        return ret.op("tcast", self.ret_type)
    def __repr__(self):
        return "Function %s " %self.name
class PrintFunc(Function):
    'Built-in `print`: write each argument to the output stream (no newline).'
    def __init__(self):
        self.name = "print"
        self.ret_type = lang.void
    def call(self, args, inter, line_no = None):
        for x in args:
            print >>io['output'], x.to_str(),
        return lang.Object(lang.void)
class PrintlnFunc(Function):
    'Built-in `println`: like print, but terminates the line.'
    def __init__(self):
        self.name = "println"
        self.ret_type = lang.void
    def call(self,args,inter, line_no = None):
        for x in args:
            print >>io['output'], x.to_str(),
        print >>io['output']
        return lang.Object(lang.void)
class ReadFunc(Function):
    '''Built-in `read`: consume one stripped input line and return it as int.

    Reads one line ahead into io["input_buff"] so that eof() can report
    end-of-input before the next read() is attempted.
    '''
    def __init__(self):
        self.name = "read"
        self.ret_type = lang.intType
    def call(self, args, inter, line_no = None):
        if io["input_buff"]:
            # A line was already buffered by a previous read()/eof(): consume
            # it, then refill the buffer (skipping blank lines) or mark EOF.
            inp = io["input_buff"]
            io["input_buff"] = ""
            while io["input_buff"] == "":
                line = io['input'].readline()
                if line == "":
                    io['is_eof'] = 1
                    break;
                else:
                    io["input_buff"] = line.strip()
        else:
            # Nothing buffered: read until a non-blank line, or raise at EOF.
            while True:
                line = io['input'].readline()
                if line == "":
                    raise error.EOFError()
                else:
                    inp = line.strip()
                    if inp != "":
                        break;
        try:
            inp = int(inp)
        except ValueError,e:
            raise error.LangError("Invalid Input")
        return lang.Object(lang.intType, inp)
class EofFunc(Function):
    '''Built-in `eof`: return 1 if input is exhausted, 0 otherwise.

    Reads ahead (buffering into io["input_buff"]) until it finds either a
    non-empty line or the end of the input stream.
    '''
    def __init__(self):
        self.name = "eof"
        self.ret_type = lang.intType
    def call(self, args, inter, line_no = None):
        while not io["input_buff"] and not io['is_eof']:
            line = io['input'].readline()
            if line == "":
                io['is_eof'] = 1
            else:
                io["input_buff"] = line.strip()
        return lang.Object(lang.intType, io["is_eof"])
def get_built_in_ns():
    """Build the root namespace pre-populated with types and built-in functions."""
    built_ins = Namespace()
    built_ins.ns.update({
        'int': lang.intType,
        'void': lang.void,
        'null': lang.null,
        'print': PrintFunc(),
        'println': PrintlnFunc(),
        'read': ReadFunc(),
        'eof': EofFunc(),
    })
    return built_ins
| Python |
#coding=utf8
#$Id: lex.py 206 2008-06-05 13:17:28Z Filia.Tao@gmail.com $
#Copyright 2007 Tao Fei (filia.tao@gmail.com)
#Released under GPL V3 (or later)
#see http://www.gnu.org/copyleft/gpl.html for more details
import ply.lex as lex
# Token list: identifiers, numbers, multi-character operators and keywords
# (keyword tokens are produced by t_id through the `reserved` table below).
tokens = ('id', 'num',
'orop','andop','eqop', 'neop', 'ltop', 'gtop', 'leop', 'geop', 'chkop', 'incop', 'decop',
'kw_class', 'kw_const', 'kw_var', 'kw_end', 'kw_func', 'kw_while', 'kw_if', 'kw_else', 'kw_new')
# Single-character tokens are handled as PLY literals.
literals = ['{', '}', ';', ",", "[", "]", '(', ')', '=', '+', '-', '*', '/', '%' ,'!', '@' ,'.', '?', ':']
#t_assignop = r'='
#
#t_addop = r'+'
#
#t_minusop = r'-'
#
#t_mulop = r'*'
#
#t_divop = r'/'
#
#t_modop = r'%'
#
#t_notop = r'!'
# Multi-character operator tokens.
t_orop = r'\|\|'
t_andop = r'&&'
t_eqop = r'=='
t_neop = r'!='
t_ltop = r'<'
t_gtop = r'>'
t_leop = r'<='
t_geop = r'>='
t_incop = r'\+\+'
t_decop = r'--'
def t_num(t):
    r'\d+'
    # Decimal integer literal; the token value becomes a Python int.
    try:
        t.value = int(t.value,10);
    except ValueError:
        # Unreachable for \d+ in base 10; kept as a defensive guard.
        print "Number %s is bad!" % t.value
        t.value = 0
    return t
# Reserved words, mapped to their token types by t_id.
reserved = {
    "class": "kw_class",
    "const": "kw_const",
    "var": "kw_var",
    "end": "kw_end",
    "func": "kw_func",
    "while": "kw_while",
    "if": "kw_if",
    "else": "kw_else",
    "new": "kw_new",
    "chk": "chkop"
}
def t_id(t):
    r'\b[a-zA-Z_][a-zA-Z_0-9]*'
    # The leading \b keeps an identifier from starting right after a digit
    # run (e.g. "9abc" will not lex as num 9 followed by id "abc").
    t.type = reserved.get(t.value,'id')    # Check for reserved words
    return t
def t_newline(t):
    r'\n'
    # Track line numbers for error messages.
    t.lexer.lineno += 1
t_ignore = ' \r\t\v'
# Comments
def t_comment1(t):
    r'//.*'
    # Line comment: discarded.
    pass
def t_comment2(t):
    r'/\*(.|\n)*?\*/'
    # Block comment: discarded, but keep the line count accurate.
    t.lexer.lineno += t.value.count('\n')
def t_error(t):
    # NOTE(review): there is no t.lexer.skip(1) here; with PLY that means
    # lexing stops at the first illegal character instead of recovering --
    # confirm whether that is intended.
    print "Illegal character '%s' on line %d " % (t.value[0],t.lexer.lineno)
# Compute column.
# input is the input text string
# token is a token instance
def find_column(input, token):
    """Return the 1-based column of `token` within the source string `input`.

    Scans back from the token's position to the nearest newline (or the
    start of the string), mirroring the classic PLY column recipe.
    """
    newline_at = input.rfind('\n', 0, token.lexpos + 1)
    if newline_at < 0:
        newline_at = 0
    return (token.lexpos - newline_at) + 1
test5 = '''
class Link { Node prob; Link next }
class Node { int level; int[] board }
func void main(){ var int i, j; Link a; Node b end
b = new Node;
b.board = new int[2];
b.board[1] = 2;
b.board[0] = 3;
print (b.board[0]);
print (b.board[1]);
b.level = 1;
print(b.level)
}
'''
test = '''
func int gcd(int a, int b){ var int r end
chk (a>1 && b>1);
while (b!=0)(r=a%b; a=b; b=r);
a }
func void main(){ var int i, j end
while (!eof())( i=read();
if (!eof())( j=read();
print(gcd(i, j)))) }
'''
test3= '''
var int[] a; int n end
func int pow(int m){
var int ans, i end ans=1; i=0;
while (i++<n ) ans=ans*m; ans }
func int r2n(){
var int ans, i end ans=0; i=n-1;
while (i>=0) ans=ans*10+a[i--]; ans }
func int next(){
var int ans, i end i=0; ans=1;
while (i<n && a[i]==9) a[i++]=1;
if (i<n) ++a[i] else ans=0; ans }
func void narci() {
var int i, j, k end i=0;
while (i<n) a[i++]=1;
while (next())( k=r2n(); i=0; j=0;
while (j<n) i=i+pow(a[j++]);
if (k==i) println(i)) }
func void main() { a=new int[10]; n=3;
while (n<=5)( narci(); ++n ) }
'''
test11 = '''
class Link { Node prob; Link next }
class Node { int level; int[] board }
const n=8;
var Link head; int num, bound end
func void depthfirst(){
var
Link t;
Node p, sub;
int m, i
end
p=head.prob;
sub=null;
m=subnodenum(p);
i=0;
if (target(p))( outlist(head); ++num );
while (num<bound && i<m)
if ((sub=down(p, i++))!=null)(
t=new Link;
t.prob=sub;
t.next=head;
head=t;
depthfirst();
head=head.next
)
}
func int subnodenum(Node this){ if (this.level<n) n else 0 }
func int target(Node this){ this.level==n }
func Node down(Node this, int i){
var Node ans; int norm; int k end
ans=null; norm=this.level<n; k=this.level-1;
while (norm && k>=0)(
norm=i!=this.board[k] && i+this.level-k!=this.board[k]
&& i-this.level+k!=this.board[k];
--k
);
if (norm)(
this.board[this.level]=i;
ans=new Node;
ans.level=this.level+1;
ans.board=new int[ans.level+1];
k=0;
while (k<ans.level)(
ans.board[k]=this.board[k]; ++k
)
);
ans
}
func void outlist(Link this){
if (this.next!=null)
outlist(this.next);
outnode(this.prob)
}
func void outnode(Node this){
var int i end
if (this.level==n)(
i=0;
while (i<n)
print(this.board[i++]);
println()
)
}
func void main(){
bound=1;
head=new Link;
head.prob=new Node;
head.prob.board=new int[1];
head.next = null;
depthfirst()
}
'''
test111 = '''
class Link { Node prob; Link next }
class Node { int level; int[] board }
func int bin_search(int[] ar, int start, int en, int target){
var int m,ret;int[] t end
if (start > en)
ret = -1
else (
m = (start + en) / 2;
if (ar[m] == target)
ret = m
else(
if (ar[m] < target)
ret = bin_search(ar, m+1 , en, target)
else
ret = bin_search(ar, start, m-1, target)
)
);
ret
}
func void main(){
var int [] ar end
ar = new int[10];
ar[0] = 0;
ar[1] = 1;
ar[2] = 2;
ar[3] = 3;
ar[4] = 4;
ar[5] = 5;
ar[6] = 6;
ar[7] = 7;
ar[8] = 8;
ar[9] = 9;
print (bin_search(ar,0,9,7));
//ar = 0
}
'''
test1 = '''
func void main(){
var int [] ar; int b end
//b = null;
ar = new int[10];
ar[1] = 1;
//b[1] = 3;
// b = ar;
b = 2;
b && b;
b || b;
b + b;
b - b;
b * b;
b / b;
b % b;
b < b;
b > b;
b <= b;
b >= b;
b == b;
b != b;
!b;
-b;
print(a,b,c,d);
chk(b);
}
'''
test1 = '''
func void main(){
var int i;int[] ar end
i = 0;
i = i+i;
ar = new int[3];
ar;
ar[0];
ar[1] = i;
}
'''
# Build the module-level lexer as an import side effect (used by parse.py).
lex.lex()
if __name__ == '__main__':
    # Ad-hoc smoke test: dump the token stream of the `test` program.
    lexer = lex.lex()
    lexer.input(test)
    while 1:
        tok = lexer.token()
        if not tok: break
        print tok
| Python |
#coding=utf8
#$Id: interp.py 203 2008-06-04 11:55:03Z Filia.Tao@gmail.com $
'''
SmallC language interpreter.
Works directly on the abstract syntax tree.
SmallC does not allow nested functions.
'''
import operator
import sys
import interpretor.smallc.lang as lang
import interpretor.smallc.error as error
from interpretor.smallc.function import Function,get_built_in_ns,copy_ns,set_io
from interpretor.smallc.parse import parse
from interpretor.smallc.lex import test
from interpretor.ast import Node,Leaf,BaseASTWalker,BaseAnnotateAction
from interpretor.common import CommonOPAnnotate as OPAnnotate
class MoreParser:
    '''Second pass over the AST: build namespaces and functions from the
    declaration nodes.

    This pass largely computes inherited attributes (such as the enclosing
    struct or function), which do not fit well on the AST itself.
    '''
    def __init__(self,ast):
        self.ast = ast
        self.global_ns = get_built_in_ns()
        self.current_ns = self.global_ns
        self.errors = []          # collected static-semantic errors
    def add_error(self, e):
        # Record the error against the line of the last token visited.
        self.errors.append(error.Error(self.current_token.lineno, str(e)))
    def parse(self):
        '''Walk the AST and build the global namespace.'''
        # Class definitions: first declare every struct name, then fill in
        # members, so members may refer to structs declared later.
        for n in self.ast.query("class_decls>classdecl"):
            name = self.on_token(n.child("id"))
            struct = lang.Struct(name)
            self.global_ns.set(name, struct)
        for n in self.ast.query("class_decls>classdecl"):
            name = self.on_token(n.child("id"))
            struct = self.global_ns.get(name)
            for x in n.query("decllist>decl"):
                self.on_decl_inside_class(x, struct)
        # Constants
        for n in self.ast.query("condecl>condef"):
            self.on_condef(n,self.global_ns)
        # Global variables
        for decl in self.ast.query("vdecl>decllist>decl"):
            self.on_decl(decl,self.global_ns)
        # Functions
        for n in self.ast.query("fdefs>fdef"):
            self.on_fdef(n,self.global_ns)
    def on_decl(self,node,ns):
        'Variable declaration (inside a function or at global scope).'
        type = self.on_type(node.child("type"))
        for id in node.query("idlist>id"):
            try:
                ns.set(id.value,lang.Object(type))
            except error.StaticSemanticError, e:
                self.add_error(e)
    def on_decl_inside_class(self,node,struct):
        'Member declaration inside a class.'
        type = self.on_type(node.child("type"))
        for id in node.query("idlist>id"):
            struct.add_member(type,id.value)
    def on_paradecl(self,node,ns):
        'Formal-parameter declaration of a function.'
        type = self.on_type(node.child("type"))
        name = self.on_token(node.child("id"))
        ns.add_param(name,type)
    def on_type(self,node):
        'Type reference: wrap the base type in Array when dimensions are given.'
        base = self.on_token(node.child("id"))
        try:
            base_type = self.current_ns.get(base)
        except error.StaticSemanticError, e:
            self.add_error(e)
            return None
        if node.dim > 0:
            return lang.Array(base_type, node.dim)
        else:
            return base_type
    def on_condef(self,node,ns):
        'Constant definition.'
        name = self.on_token(node.child("id"))
        value = self.on_token(node.child("num"))
        if node.child("-"):
            value = -value
        try:
            ns.set(name, lang.ConstObject(lang.intType,value)) # type use lang.intType
        except error.StaticSemanticError, e:
            self.add_error(e)
    def on_fdef(self,node,ns):
        'Function definition.'
        name = self.on_token(node.query("head>id")[0])
        fns = Function(name,self.current_ns)
        fns.ret_type = self.on_type(node.child("type"))
        try:
            ns.set(name,fns)
        except error.StaticSemanticError, e:
            self.add_error(e)
            return None
        for para in node.query("head>paralist>paradecl"):
            self.on_paradecl(para,fns)
        for decl in node.query("funbody>vdecl>decllist>decl"):#vdecl > decllist > decls
            self.on_decl(decl,fns)
        fns.statements = node.query("funbody>stlist>st")
        fns.freeze() # freeze: snapshot the pristine namespace for later calls
    def on_token(self,node):
        'Terminal: remember it (for error reporting) and return its value.'
        self.current_token = node
        return node.value
class Interpreter:
    '''Recursive (tree-walking) interpreter over the annotated AST.'''
    def __init__(self,ast,global_ns):
        self.ast = ast
        self.global_ns = global_ns
        self.current_ns = None      # namespace of the code being executed
        self.current_token = None   # last terminal visited (for error reports)
        self.call_stack = []        # stack of (Function, call-site line no)
    def run(self):
        '''Execute the program by calling main(); report runtime errors.'''
        self.current_ns = self.global_ns
        try:
            self.current_ns.get("main").call([],self)
        except error.LangError,e:
            if self.current_token is None:
                print >>sys.stderr,e
            else:
                print >>sys.stderr, "error at line %d near token '%s': %s" %(self.current_token.lineno,self.current_token.value,str(e))
            print >>sys.stderr, "calling stack "
            for x in self.call_stack:
                if x[1]:
                    print >>sys.stderr, "call %s at line %s" %(x[0], x[1])
                else:
                    print >>sys.stderr, "call %s" % (x[0])
        except StandardError,e:
            # A bug in the interpreter itself, not in the interpreted program.
            print >>sys.stderr, "Interpretor inner error "
            raise
    def on_node(self, node):
        # Generic dispatch: leaves go through on_token; inner nodes go to the
        # on_<type> handler when one exists; otherwise single-child nodes
        # fall through to their only child.
        if isinstance(node, Leaf):
            return self.on_token(node)
        else:
            if hasattr(self, 'on_' + node.type):
                return getattr(self, 'on_' + node.type)(node)
            else:
                if len(node) == 1:
                    return self.on_node(node.child(0))
                else:
                    print >>sys.stderr, "not such node ", node.type, node
    def on_statement(self, node):
        return self.on_node(node.child(0))
    def on_cond(self,node):
        # cond : kw_if '(' exp ')' st [kw_else st] -- exp is child 2, the
        # then-branch is child 4, the optional else-branch is child 6.
        #print node
        exp = node.child(2)
        st = node.child(4)
        if self.on_exp(exp):
            return self.on_statement(st)
        elif len(node) > 6:
            return self.on_statement(node.child(6))
        return lang.Object(lang.void)
    def on_loop(self,node):
        # loop : kw_while '(' exp ')' [st] -- exp is re-evaluated each pass.
        #print node
        exp = node.child(2)
        ret = lang.Object(lang.void)
        while self.on_exp(exp):
            if len(node) > 4:
                ret = self.on_statement(node.child(4))
        return ret
    def on_exp(self,node):
        return self.on_node(node.child(0))
    def on_binexp(self, node):
        # Evaluate both operands, then apply the operator named by the
        # OPAnnotate pass (stored as the 'op_name' attribute of child 1).
        lhs = self.on_node(node.child(0))
        self.on_node(node.child(1))
        op_name = node.child(1).get_attr('op_name')
        rhs = self.on_node(node.child(2))
        return lhs.op(op_name, rhs)
    on_assignexp = on_binexp
    def on_orexp(self,node):
        # Short-circuit: rhs is only evaluated when lhs is falsy
        # (relies on lang.Object truthiness -- presumably the int value).
        lhs = self.on_node(node.child(0))
        if lhs:
            return lhs
        self.on_node(node.child(1))
        rhs = self.on_node(node.child(2))
        return lhs.op("or",rhs)
    def on_andexp(self,node):
        # Short-circuit: rhs is only evaluated when lhs is truthy.
        lhs = self.on_node(node.child(0))
        if not lhs:
            return lhs
        self.on_node(node.child(1))
        rhs = self.on_node(node.child(2))
        return lhs.op("and", rhs)
    def on_uniexp(self,node):
        op_name = node.child(0).get_attr('op_name')
        uniexp = self.on_node(node.child(1))
        return uniexp.op(op_name)
    def on_postexp(self,node):
        return self.on_node(node.child(0)).op(node.child(1).get_attr('op_name'))
    def on_func_call(self, node):
        func = self.on_node(node.child(0))
        args = [self.on_node(x) for x in node.getChildren()[1:]]
        line_no = self.current_token.lineno
        return func.call(args, self, line_no)
    def on_array_index(self, node):
        return self.on_node(node.child(0)).op("index", self.on_node(node.child(1)))
    def on_class_member(self, node):
        return self.on_node(node.child(0)).op("member", self.on_node(node.child(1)))
    def on_type_cast(self, node):
        return self.on_node(node.child(0)).op("tcast", self.on_node(node.child(1)))
    def on_type(self, node):
        base = self.on_node(node.child(0))
        base_type = self.current_ns.get(base)
        if node.dim > 0:
            return lang.Array(base_type, node.dim)
        else:
            return base_type
    def on_entity(self,node):
        # An entity is a cast group, an allocation, an identifier, a number,
        # or the '?' input marker.
        entity = node.child(0)
        if entity.type == "cast":
            return self.on_node(entity)
        elif entity.type == "alloc":
            return self.on_node(entity)
        elif isinstance(entity,Leaf):
            entity = self.on_node(entity)
            if isinstance(entity,str):
                if entity == '?': #input
                    return self.current_ns.get("read").call([],self)
                else:
                    return self.current_ns.get(entity)
            elif isinstance(entity,int):
                return lang.Object(lang.intType, entity)
    def on_cast(self,node):
        '''A parenthesised statement group evaluates to its last statement.'''
        for x in node.query("stlist>st"):
            ret = self.on_node(x)
        return ret
    def on_alloc(self,node):
        # alloc : kw_new type ['[' exp ']'] -- scalar or array allocation.
        if len(node) == 2:
            ret = self.on_node(node.child(1)).alloc()
        else:
            ret = self.on_node(node.child(1)).alloc(self.on_node(node.child(3)))
        return ret
    def on_apara(self,node):
        return [self.on_node(x) for x in node.query("explist>exp")]
    def on_token(self,node):
        # Remember the current terminal so errors can point at a source line.
        self.current_token = node
        return node.value
class StaticTypeChecker(BaseAnnotateAction):
    '''Static type checking: synthesise a 'type' attribute bottom-up.'''
    # Name of the attribute this pass annotates onto AST nodes.
    annotate_attr_name = 'type'
    def __init__(self, ns):
        self.global_ns = ns
        self.current_ns = ns
        self.errors = []
    def add_error(self, e):
        self.errors.append(error.Error(self.current_token.lineno, str(e)))
    def _do_type_trans(self, node, op, *operands):
        node.set_attr(self.annotate_attr_name, self._check_type(op, *operands))
    def _check_type(self, op, *operands):
        # Return the result type of applying `op`, or a falsy value on a
        # mismatch (in which case an error is also recorded).
        main_type = operands[0]
        if len(operands) > 1:
            arg = operands[1]
        else:
            arg = None
        is_type_match = lang.do_type_trans(main_type, op, arg)
        if not is_type_match:
            if op =='member':
                self.add_error(error.MemberError(operands[0], operands[1]))
            else:
                self.add_error(error.TypeCheckError(op))
        return is_type_match
    def on_binexp(self, node):
        if len(node) >1:
            self._do_type_trans(node,
                node.child(1).get_attr('op_name'),
                node.child(0).get_attr('type'),
                node.child(2).get_attr('type')
            )
        else:
            self._copy_from_first_child(node)
    def before_funbody(self, node):
        '''Enter the function's namespace before walking the funbody children.'''
        func_name = node.prev("head").child("id").value
        self.current_ns = self.current_ns.get(func_name)
    def on_funbody(self, node):
        # Leaving the function body: back to the global namespace.
        self.current_ns = self.global_ns
    on_st = BaseAnnotateAction._copy_from_first_child
    def on_cond(self, node):
        node.set_attr(self.annotate_attr_name, lang.void)
    def on_loop(self, node):
        node.set_attr(self.annotate_attr_name, lang.void)
    on_exp = BaseAnnotateAction._copy_from_first_child
    on_assignexp = on_orexp = on_andexp = on_binexp
    def on_uniexp(self, node):
        if len(node) > 1:
            self._do_type_trans(node,
                node.child(0).get_attr('op_name'),
                node.child(1).get_attr('type')
            )
        else:
            self._copy_from_first_child(node)
    def on_postexp(self, node):
        self._do_type_trans(node,
            node.child(1).get_attr('op_name'),
            node.child(0).get_attr('type'),
        )
    def on_func_call(self, node):
        '''Function call: check argument count and argument types.'''
        #FIXME
        func_name = node.child(0).query('**>?')[0].value
        args = node.getChildren()[1:]
        try:
            func = self.current_ns.get(func_name)
        except error.NameError,e:
            self.add_error(e)
            return None
        # a little trick , not check static sem for built in functions
        if func_name not in ['read', 'eof', 'print', 'println']:
            if len(func.params_type) != len(args):
                self.add_error(error.ParamCountNotMatchError(len(func.params_type), len(args)))
            else:
                for i in range(len(func.params_type)):
                    self._check_type('argument_pass', func.params_type[i], args[i].get_attr('type'))
        node.set_attr('type', func.ret_type)
    def on_array_index(self, node):
        '''Array subscript operation.'''
        self._do_type_trans(node, 'index', node.child(0).get_attr('type'), node.child(1).get_attr('type'))
    def on_class_member(self, node):
        '''Struct member access.'''
        self._do_type_trans(node, 'member', node.child(0).get_attr('type'), node.child(1).value)
    def on_type_cast(self, node):
        self._do_type_trans(node, 'tcast', node.child(0).get_attr('type'), node.child(1).get_attr('type'))
    on_entity = BaseAnnotateAction._copy_from_first_child
    def on_cast(self, node):
        node.set_attr('type', node.child('stlist').get_attr('type'))
    def on_stlist(self, node):
        # A statement list has the type of its last statement.
        node.set_attr('type', node.query("st")[-1].get_attr('type'))
    def on_alloc(self,node):
        if node.query('['):
            node.set_attr('type', lang.Array(node.child("type").get_attr('type')))
        else:
            node.set_attr('type', node.child("type").get_attr('type'))
    def on_type(self, node):
        base = node.child(0).value
        try:
            base_type = self.current_ns.get(base)
        except error.StaticSemanticError, e:
            self.add_error(e)
            return None
        if node.dim > 0:
            node.set_attr('type', lang.Array(base_type, node.dim))
        else:
            node.set_attr('type', base_type)
    def _on_token(self, node):
        # Numbers and the '?' input marker are ints; identifiers inside a
        # function body get the type of the object they are bound to.
        if node.type == "num" or node.type == '?':
            node.set_attr('type', lang.intType)
        elif node.type == "id":
            try:
                if node.ancestor("funbody"):
                    v = self.current_ns.get(node.value)
                    if isinstance(v, lang.Object):
                        node.set_attr('type', v.type)
            except error.NameError, e :
                pass
                #self.add_error(e)
        self.current_token = node
def run(data, input_file = sys.stdin, output_file = sys.stdout):
    '''Parse, annotate, statically check, then execute a SmallC program.'''
    set_io(input_file, output_file)
    try:
        ast = parse(data)
        do_op_annotate(ast)
        global_ns = do_namespace_parse(ast)
        if global_ns:
            if check_static_semtanic(ast, global_ns):
                inter = Interpreter(ast, global_ns)
                inter.run()
    except error.ParseError,e:
        print >>sys.stderr,e
#print inter.global_ns.ns
def test_OPAnnotate(data):
    # Debug helper: run only the operator-annotation pass and dump attributes.
    ast = parse(data)
    annotate_action = OPAnnotate()
    ast_walker = BaseASTWalker(ast, annotate_action)
    ast_walker.run()
    for x in ast.query('**>?'):
        print x._attr
def do_op_annotate(ast):
    """Annotate operator nodes in place and return the same AST."""
    BaseASTWalker(ast, OPAnnotate()).run()
    return ast
def do_namespace_parse(ast):
    # Build the global namespace; on static errors, print them and return None.
    parser = MoreParser(ast)
    parser.parse()
    if len(parser.errors) > 0:
        for x in parser.errors:
            print >>sys.stderr, x
        return None
    return parser.global_ns
def check_static_semtanic(ast, global_ns):
    # Run the static type checker; print errors and return False on failure.
    # NOTE(review): the misspelled name ("semtanic") is kept because external
    # callers may depend on it.
    check_action = StaticTypeChecker(global_ns)
    walker2 = BaseASTWalker(ast, check_action)
    walker2.run()
    if len(check_action.errors) > 0:
        for e in check_action.errors:
            print >>sys.stderr, e
        return False
    else:
        return True
if __name__ == '__main__':
    #test_OPAnnotate(test)
    # Run the sample GCD program imported from lex.py.
    run(test)
| Python |
#coding=utf8
#$Id: __init__.py 201 2008-06-03 03:03:11Z Filia.Tao@gmail.com $
# Metadata used by the front end to register this language back end.
lang_info = {
    'name' : 'L1',       # display name of the language
    'path' : 'smallc',   # package sub-directory implementing it
    'suffix' : 'smc',    # source-file extension
}
| Python |
#coding=utf8
#$Id: sementic.py 92 2008-04-22 13:28:20Z Filia.Tao@gmail.com $
#DONT'T READ OR USE THIS FILE
'''Semantic definitions for SmallC.
1. Static semantics
   Starting with the simplest operator type-matching rules.
'''
import interpretor.smallc.lang
# Type constraints
# These should be considered part of the language definition.
# Each constraint rule consists of:
#  * an operator
#  * a list of constraint predicates
# A simple mapping is enough to hold them.
# op_name -> set of predicate functions over operand types.
type_requirements = {}

def add_type_requirement(op_name, requirement):
    """Register `requirement` (a predicate over operand types) for `op_name`."""
    if op_name not in type_requirements:
        type_requirements[op_name] = set()
    type_requirements[op_name].add(requirement)

def check_type_requirement(op_name, *operands):
    """Return True iff every requirement registered for `op_name` accepts `operands`.

    Bug fix: the lookup previously used the literal string 'op_name' instead
    of the `op_name` argument, so every call raised KeyError.
    """
    ret = True
    for func in type_requirements[op_name]:
        if not func(*operands):
            ret = False
            break
    return ret
# This file defines the (placeholder) semantic actions.
class BaseASTWalker:
    """Default semantic actions: every production simply yields its first child.

    All handlers share one implementation; per-node names are kept so that
    callers dispatching on ``on_<type>`` still find every handler.
    """
    def _first_child(self, node):
        # Shared implementation for every on_* handler.
        return node.child(0)
    on_prog = _first_child
    on_class_decls = _first_child
    on_classdecl = _first_child
    on_decllist = _first_child
    on_decl = _first_child
    on_type = _first_child
    on_idlist = _first_child
    on_condecl = _first_child
    on_condef = _first_child
    on_vdecl = _first_child
    on_fdefs = _first_child
    on_fdef = _first_child
    on_head = _first_child
    on_paralist = _first_child
    on_paradecl = _first_child
    on_funbody = _first_child
    on_stlist = _first_child
    on_st = _first_child
    on_cond = _first_child
    on_loop = _first_child
    on_exp = _first_child
    on_orexp = _first_child
    on_andexp = _first_child
    on_relexp = _first_child
    on_relop = _first_child
    on_term = _first_child
    on_addop = _first_child
    on_factor = _first_child
    on_mulop = _first_child
    on_uniexp = _first_child
    on_uniop = _first_child
    on_postexp = _first_child
    on_postfix = _first_child
    on_apara = _first_child
    on_explist = _first_child
    on_sub = _first_child
    on_aselect = _first_child
    on_tcast = _first_child
    on_entity = _first_child
    on_cast = _first_child
    on_alloc = _first_child
| Python |
#coding=utf8
#$Id: parse.py 206 2008-06-05 13:17:28Z Filia.Tao@gmail.com $
from ply import yacc
from interpretor.smallc.lex import *
from interpretor.ast import Node,all_to_node,to_graph
from interpretor.smallc import error
start = 'prog'  # grammar start symbol
def p_empty(p):
    "empty : "
    pass
# Program
def p_prog(p):
    '''prog : class_decls const_decls var_decls fdefs
    '''
    p[0] = Node("prog",p[1:])
# Class declarations
def p_class_decls(p):
    '''class_decls : class_decls classdecl
                    | classdecl
                    | empty
    '''
    # Flatten the left recursion into a single class_decls node.
    if len(p) > 2 :
        if p[1]:
            p[0] = Node("class_decls", p[1].getChildren() + [p[2]])
        else:
            p[0] = Node("class_decls", [p[2]])
    elif p[1]:
        p[0] = Node("class_decls",[p[1]])
def p_classdecl(p):
    "classdecl : kw_class id '{' decllist '}'"
    all_to_node(p)
    p[0] = Node("classdecl",[p[2],p[4]])
def p_decllist(p):
    '''decllist : decl ';' decllist
                | decl
    '''
    all_to_node(p)
    if len(p) > 2:
        p[0] = Node("decllist",[p[1]] + p[3].getChildren())
    else:
        p[0] = Node("decllist",p[1:])
def p_decl(p):
    "decl : type idlist"
    p[0] = Node("decl",p[1:])
# Types
def p_type(p):
    '''type : type '[' ']'
            | id
    '''
    all_to_node(p)
    # FIXME: reuses p[1] and bumps its `dim` counter in place instead of
    # building a fresh node, so `type` is not a pure AST node.
    if len(p) > 2:
        p[0] = p[1]
        p[0].dim = p[0].dim + 1
    else:
        p[0] = Node("type", p[1:])
        p[0].dim = 0
def p_idlist(p):
    '''idlist : id ',' idlist
            | id
    '''
    all_to_node(p)
    if len(p) > 2:
        p[0] = Node("idlist",[p[1]] + p[3].getChildren())
    else:
        p[0] = Node("idlist",p[1:])
# Optional constant declarations
def p_const_decls(p):
    '''const_decls : condecl ';'
            | empty
    '''
    all_to_node(p)
    p[0] = p[1]
def p_condecl(p):
    '''condecl : condecl ',' condef
            | kw_const condef
    '''
    all_to_node(p)
    if len(p) > 3:
        p[0] = Node("condecl",p[1].getChildren() + p[3:])
    else:
        p[0] = Node("condecl",p[1:])
# Constant definition
def p_condef(p):
    '''condef : id '=' num
            | id '=' '-' num
    '''
    all_to_node(p)
    p[0] = Node("condef",p[1:])
# Variable declarations
def p_var_decls(p):
    '''var_decls : vdecl
            | empty
    '''
    p[0] = p[1]
def p_vdecl(p):
    "vdecl : kw_var decllist kw_end"
    all_to_node(p)
    p[0] = Node("vdecl",p[1:])
# Function definitions
def p_fdefs(p):
    ''' fdefs : fdef fdefs
            | fdef
    '''
    if len(p) > 2:
        p[0] = Node("fdefs",[p[1]] + p[2].getChildren())
    else:
        p[0] = Node("fdefs",p[1:])
def p_fdef(p):
    "fdef : kw_func type head '{' funbody '}'"
    all_to_node(p)
    p[0] = Node("fdef", [p[2], p[3], p[5]])
def p_head(p):
    '''head : id '(' ')'
            | id '(' paralist ')'
    '''
    all_to_node(p)
    p[0] = Node("head", p[1:])
def p_paralist(p):
    '''paralist : paradecl
            | paradecl ',' paralist
    '''
    all_to_node(p)
    if len(p) > 2:
        p[0] = Node("paralist",[p[1]] + p[3].getChildren())
    else:
        p[0] = Node("paralist",p[1:])
def p_paradecl(p):
    "paradecl : type id "
    all_to_node(p)
    p[0] = Node("paradecl", p[1:])
def p_funbody(p):
    '''funbody : vdecl stlist
            | stlist
    '''
    p[0] = Node("funbody", p[1:])
# The extra "st ';'" alternative allows one trailing ';' at the end of a list.
def p_stlist(p):
    '''stlist : st ';' stlist
            | st
            | st ';'
    '''
    all_to_node(p)
    if len(p) == 4:
        p[0] = Node("stlist",[p[1]] + p[3].getChildren())
    else:
        p[0] = Node("stlist",[p[1]])
def p_st(p):
    '''st : exp
        | cond
        | loop
    '''
    #p[0] = p[1]
    p[0] = Node("st",p[1:])
def p_cond(p):
    '''cond : kw_if '(' exp ')' st
            | kw_if '(' exp ')' st kw_else st
    '''
    all_to_node(p)
    p[0] = Node("cond",p[1:])
def p_loop(p):
    '''loop : kw_while '(' exp ')'
            | kw_while '(' exp ')' st
    '''
    all_to_node(p)
    p[0] = Node("loop",p[1:])
def p_exp(p):
    '''exp : orexp
        | orexp '=' orexp
    '''
    all_to_node(p)
    # An assignment is wrapped in an inner assignexp node.
    if len(p) > 2:
        p[0] = Node('exp', [Node("assignexp",p[1:])])
    else:
        p[0] = Node('exp', [p[1]])
def p_orexp(p):
    '''orexp : andexp
            | orexp orop andexp
    '''
    all_to_node(p)
    if len(p) > 2:
        p[0] = Node("orexp",p[1:])
    else:
        p[0] = p[1]
def p_andexp(p):
    '''andexp : relexp
            | andexp andop relexp
    '''
    all_to_node(p)
    if len(p) > 2:
        p[0] = Node("andexp",p[1:])
    else:
        p[0] = p[1]
def p_relexp(p):
    '''relexp : term
            | relexp relop term
    '''
    all_to_node(p)
    # Relational expressions share the generic binexp node type.
    if len(p) > 2:
        p[0] = Node("binexp", p[1:])
    else:
        p[0] = p[1]
def p_relop(p):
    '''relop : eqop
            | neop
            | ltop
            | gtop
            | leop
            | geop
    '''
    all_to_node(p)
    p[0] = Node("relop",p[1:])
def p_term(p):
    '''term : factor
            | term addop factor
    '''
    if(len(p) > 2):
        p[0] = Node("binexp",p[1:])
    else:
        p[0] = p[1]
def p_addop(p):
    '''addop : '+'
            | '-'
    '''
    all_to_node(p)
    #p[0] = p[1]
    p[0] = Node("addop",p[1:])
def p_factor(p):
    '''factor : uniexp
            | factor multop uniexp
    '''
    if len(p) > 2:
        p[0] = Node("binexp",p[1:])
    else:
        p[0] = p[1]
def p_mulop(p):
    '''multop : '*'
            | '/'
            | '%'
    '''
    all_to_node(p)
    p[0] = Node("multop",p[1:])
def p_uniexp(p):
    '''uniexp : uniop uniexp
            | postexp
    '''
    if(len(p) > 2):
        p[0] = Node("uniexp",p[1:])
    else:
        p[0] = p[1]
def p_uniop(p):
    '''uniop : '-'
            | '!'
            | incop
            | decop
            | chkop
    '''
    all_to_node(p)
    p[0] = Node("uniop",p[1:])
def p_postexp(p):
    '''postexp : entity
            | postexp postfix
    '''
    # The postfix kind decides the concrete AST node of the whole expression:
    # call, array index, member access, type cast, or generic postexp (++/--).
    if len(p) > 2:
        if p[2].type == 'apara':
            p[0] = Node("func_call", [p[1]] + p[2].query("explist>exp"))
        elif p[2].type == 'sub':
            p[0] = Node("array_index", [p[1], p[2][0]])
        elif p[2].type == 'aselect':
            p[0] = Node("class_member", [p[1], p[2][0]])
        elif p[2].type == 'tcast':
            p[0] = Node("type_cast", [p[1], p[2][0]])
        else:
            p[0] = Node("postexp",p[1:])
    else:
        p[0] = p[1]
def p_postfix(p):
    '''postfix : incop
            | decop
            | apara
            | sub
            | aselect
            | tcast
    '''
    all_to_node(p)
    p[0] = p[1]
def p_apara(p):
    '''apara : '(' explist ')'
            | '(' ')'
    '''
    all_to_node(p)
    p[0] = Node("apara", p[1:])
def p_explist(p):
    '''explist : exp
            | exp ',' explist
    '''
    all_to_node(p)
    if len(p) > 2:
        p[0] = Node("explist", [p[1]] + p[3].getChildren())
    else:
        p[0] = Node("explist", p[1:])
def p_sub(p):
    "sub : '[' exp ']'"
    all_to_node(p)
    #p[0] = p[2]
    p[0] = Node("sub", [p[2]])
def p_aselect(p):
    "aselect : '.' id"
    all_to_node(p)
    p[0] = Node("aselect",[p[2]])
def p_tcast(p):
    "tcast : ':' type"
    all_to_node(p)
    p[0] = Node("tcast",p[1:])
def p_entity(p):
    '''entity : id
            | num
            | cast
            | alloc
            | '?'
    '''
    all_to_node(p)
    p[0] = Node("entity",p[1:])
def p_cast(p):
    "cast : '(' stlist ')'"
    all_to_node(p)
    p[0] = Node("cast",p[1:])
def p_alloc(p):
    '''alloc : kw_new type
            | kw_new type '[' exp ']'
    '''
    all_to_node(p)
    p[0] = Node("alloc",p[1:])
def p_error(p):
    import sys
    #print >>sys.stderr,"parser error at line %d token '%s'" %(p.lineno, p.value)
    raise error.ParseError(p)
# Helper productions that do not produce AST nodes of their own.
ast_ommit = ('error', 'empty', 'var_decls', 'const_decls')
parser = yacc.yacc()
def parse(data):
    # Parse SmallC source text into an AST (raises error.ParseError on failure).
    p = parser.parse(data)
    return p
if __name__ == '__main__':
    #test = open("../../test/smallc/parse.smc").read()
    n = parse(test)
    #print n
    to_graph(n, "test_smallc")
| Python |
#coding=utf8
#$Id: lang.py 119 2008-04-27 06:07:41Z Filia.Tao@gmail.com $
'''
The SmallC language has only these kinds of types:
1. integer
2. void
3. array
4. struct (record)

Note that variables behave like Java-style references.

How is the special `null` value handled?  As Object(nullType, "Null Value").
From the sample programs, `null` may apparently be assigned to objects of any
type (except integers).  The samples also suggest integers default to 0 and
everything else defaults to null; struct members would follow the same rule.
The current implementation does not do that yet -- to be discussed.

Type constraints apply to:
1. assignment
2. operators (arithmetic, logic)
3. argument passing
4. explicit type casts
The constraints live inside each type's definition.
'''
import interpretor.smallc.error as error
def do_type_trans(main_type, op_name, arg = None):
    """Dispatch a static type-compatibility query to `main_type`.

    Argument passing obeys the same rules as assignment.
    """
    effective_op = "assign" if op_name == "argument_pass" else op_name
    return main_type.do_type_trans(effective_op, arg)
#修饰符函数,用于动态类型检查
def require_same(func):
    """Decorator (dynamic check): rhs must have exactly this type."""
    def checked(self, lhs, rhs):
        if self != rhs.type:
            raise error.TypeError(self, rhs)
        return func(self, lhs, rhs)
    return checked
def require_same_or_null(func):
    """Decorator (dynamic check): rhs must have this type or be null."""
    def checked(self, lhs, rhs):
        if self != rhs.type and rhs.type != nullType:
            raise error.TypeError(self, rhs)
        return func(self, lhs, rhs)
    return checked
def require_castable(func):
    """Decorator (dynamic check): casts are allowed only to this type or void."""
    def checked(self, lhs, rhs):
        if self != rhs and rhs != void:
            raise error.TypeError(self, rhs)
        return func(self, lhs, rhs)
    return checked
def require_not_empty(func):
    """Decorator (dynamic check): lhs must hold a value (no dangling refs)."""
    def checked(self, lhs, rhs):
        if lhs.value is None:
            raise error.EmptyReferenceError(lhs)
        return func(self, lhs, rhs)
    return checked
class Type(object):
    '''Base class of SmallC types.

    Operations supported by every type: assign, eq, ne, tcast.
    '''
    def __init__(self):
        raise Exception("DON't USE THIS CLASS DIRECTLY")
        self.name = "type"  # unreachable; subclasses set their own name
    def to_str(self,obj):
        # Human-readable rendering of an object's value (used by print).
        return str(obj.value)
    def do_type_trans(self, op_name, arg = None):
        # Static check: the result type of applying op_name, or None on mismatch.
        if op_name == 'assign':
            if arg == self:
                return self
            else:
                return None
        elif op_name in ('eq','ne'):
            if arg == self:
                return intType
            else:
                return None
        elif op_name == 'tcast':
            # NOTE(review): uses 'voidType' while other code uses 'void';
            # confirm both names are defined at the bottom of this module.
            if self == arg or arg == voidType:
                return arg
            else:
                return None
        else:
            return None
    @require_same
    def op_assign(self,lhs,rhs):
        lhs.value = rhs.value
        return lhs
    @require_same
    def op_eq(self,lhs,rhs):
        # Default equality is reference identity (int overrides this).
        return Object(intType, int(lhs.value is rhs.value))
    @require_same
    def op_ne(self,lhs,rhs):
        # Negation of op_eq, reusing its result object.
        ret = self.op_eq(lhs,rhs)
        ret.value = [1,0][ret.value]
        return ret
    @require_castable
    def op_tcast(self,obj,type):
        'Explicit type cast.'
        return Object(type, obj.value)
    def alloc(self, size = None):
        'Uniform allocation policy; element allocation is alloc_one() in subclasses.'
        if size:
            ret = Object(Array(self))
            ret.value = [self.alloc() for i in range(size.value)]
            return ret
        else:
            return self.alloc_one()
    def repr(self,obj):
        return repr(obj.value)
    def __eq__(self,rhs):
        # Types compare equal by name.
        return self.name == rhs.name
    def __ne__(self,rhs):
        return not self.__eq__(rhs)
    def __repr__(self):
        return "<SmallC Type %s>" %self.name
    __str__ = __repr__
class Void(Type):
    '''The SmallC void type; only the generic Type operations apply.'''
    def __init__(self):
        self.name = "void"
class Integer(Type):
    '''The SmallC int type: arithmetic, comparison and logic operators.'''
    def __init__(self):
        self.name = "int"
    def do_type_trans(self, op_name, arg = None):
        '''Check operand types for op_name; return the result type (int),
        or None when the operand types do not match.'''
        if op_name in ('and', 'or', 'lt', 'gt', 'le',
                'ge','add', 'minus', 'mul', 'div' , 'mod',
                'assign', 'eq', 'ne'):
            if arg and arg == self:
                return self
            else:
                return None
        elif hasattr(self, "op_" + op_name):
            # Unary operators (not, inc, dec, chk, ...): no rhs required.
            return self
        else:
            return super(Integer,self).do_type_trans(op_name, arg)
    def asBool(self,obj):
        return bool(obj.value)
    @require_same
    def op_or(self,lhs,rhs):
        # Logical operators normalize their result to int 0/1.
        return Object(intType, int(bool(lhs.value or rhs.value)))
    @require_same
    def op_and(self,lhs,rhs):
        return Object(intType, int(bool(lhs.value and rhs.value)))
    @require_same
    def op_eq(self,lhs,rhs):
        # ints compare by value, unlike reference types.
        return Object(intType, int(lhs.value == rhs.value))
    @require_same
    def op_ne(self,lhs,rhs):
        return Object(intType, int(lhs.value != rhs.value))
    @require_same
    def op_lt(self,lhs,rhs):
        return Object(intType, int(lhs.value < rhs.value))
    @require_same
    def op_gt(self,lhs,rhs):
        return Object(intType, int(lhs.value > rhs.value))
    @require_same
    def op_le(self,lhs,rhs):
        return Object(intType, int(lhs.value <= rhs.value))
    @require_same
    def op_ge(self,lhs,rhs):
        return Object(intType, int(lhs.value >= rhs.value))
    @require_same
    def op_add(self,lhs,rhs):
        return Object(intType, lhs.value + rhs.value)
    @require_same
    def op_minus(self,lhs,rhs):
        return Object(intType, lhs.value - rhs.value)
    @require_same
    def op_mul(self,lhs,rhs):
        return Object(intType, lhs.value * rhs.value)
    @require_same
    def op_div(self,lhs,rhs):
        # NOTE(review): a zero divisor raises the host ZeroDivisionError,
        # which escapes the interpreter's error reporting — confirm whether
        # this should be wrapped in a language-level error.
        return Object(intType, lhs.value / rhs.value)
    @require_same
    def op_mod(self,lhs,rhs):
        return Object(intType, lhs.value % rhs.value)
    #Unary operators follow.
    def op_minus_(self, rhs):
        return Object(intType, - rhs.value)
    def op_not(self, rhs):
        return Object(intType, int(not rhs.value) )
    def op_inc(self, rhs):
        # Pre-increment: mutate in place, return the operand itself.
        rhs.value += 1
        return rhs
    def op_dec(self, rhs):
        rhs.value -= 1
        return rhs
    def op_chk(self, rhs):
        # chk operator: assert the value is non-zero.
        if rhs.value == 0:
            raise error.ChkFailError()
        return rhs
    def op_inc_(self, lhs):
        # Post-increment: return the old value in a fresh Object.
        ret = Object(intType, lhs.value)
        lhs.value += 1
        return ret
    def op_dec_(self, lhs):
        ret = Object(intType, lhs.value)
        lhs.value -= 1
        return ret
    def alloc_one(self):
        'Allocate an int variable; the default value is 0.'
        #TODO is default value 0 OK ?
        return Object(self,0)
class Array(Type):
    '''Array type.

    Always one-dimensional; multi-dimensional arrays are represented as
    arrays of arrays.
    '''
    def __init__(self,base,dim = 1):
        # dim > 1 builds the nested element type recursively.
        if dim > 1:
            self.base = Array(base, dim-1)
        else:
            self.base = base
        self.name = self.base.name + "[]"
    def do_type_trans(self, op_name, arg = None):
        '''Check operand types for op_name; return the result type, or
        None when the operand types do not match.'''
        if op_name in ('eq', 'ne'):
            if (arg == self or arg == nullType):
                return intType
            else:
                return None
        elif op_name == "assign":
            # Arrays accept assignment from the same type or from null.
            if (arg == self or arg == nullType):
                return self
            else:
                return None
        elif op_name == "index":
            if arg == intType:
                return self.base
            else:
                return None
        elif op_name == "member":
            # The only member of an array is `length`.
            if arg == "length":
                return intType
            else:
                return None
        return super(Array,self).do_type_trans(op_name, arg)
    def to_str(self,obj):
        return '[' + ",".join([x.to_str() for x in obj.value]) + ']'
    @require_same_or_null
    def op_assign(self,lhs,rhs):
        # Reference semantics: both sides share the same backing list.
        lhs.value = rhs.value
        return lhs
    @require_not_empty
    @require_same_or_null
    def op_eq(self,lhs,rhs):
        return Object(intType, int(lhs.value is rhs.value))
    @require_not_empty
    @require_same_or_null
    def op_ne(self,lhs,rhs):
        return Object(intType, int(not (lhs.value is rhs.value)))
    @require_not_empty
    def op_index(self, lhs, rhs):
        # The index must be an int Object and the array must not be null.
        if rhs.type != intType:
            raise error.TypeError(intType,rhs)
        if lhs.value == null.value:
            raise error.NullError(lhs)
        ind = rhs.value
        if ind < 0 or ind >= len(lhs.value): #dynamic (runtime) error
            raise error.IndexError(rhs.value,(0,len(lhs.value)))
        return lhs.value[ind]
    def op_member(self,lhs,rhs):
        '''
        Arrays support a single member: length.
        '''
        if rhs != "length":
            raise error.MemberError(lhs,rhs)
        else:
            return Object(intType,len(lhs.value))
    def alloc_one(self):
        # A fresh array variable starts out null (its value is None).
        return Object(self)
class Struct(Type):
    '''A user-defined struct type with named, typed members.'''
    def __init__(self,name):
        self.name = name
        self.members = {}  # member name -> member type
    def do_type_trans(self, op_name, arg = None):
        '''Check operand types for op_name; return the result type, or
        None when the operand types do not match.'''
        if op_name in ('eq', 'ne'):
            if (arg == self or arg == nullType):
                return intType
            else:
                return None
        elif op_name == "assign":
            # Structs accept assignment from the same type or from null.
            if (arg == self or arg == nullType):
                return self
            else:
                return None
        elif op_name == "member":
            if arg in self.members:
                return self.members[arg]
            else:
                return None
        return super(Struct,self).do_type_trans(op_name, arg)
    def add_member(self,type,member_name):
        self.members[member_name] = type
    @require_same_or_null
    def op_assign(self,lhs,rhs):
        # Reference semantics: both sides share the same member dict.
        lhs.value = rhs.value
        return lhs
    @require_not_empty
    @require_same_or_null
    def op_eq(self,lhs,rhs):
        return Object(intType, int(lhs.value is rhs.value))
    @require_not_empty
    @require_same_or_null
    def op_ne(self,lhs,rhs):
        return Object(intType, int(not (lhs.value is rhs.value)))
    @require_not_empty
    def op_member(self,lhs,rhs):
        # rhs is the member *name* (a string), not a runtime Object.
        if lhs.value == null.value:
            raise error.NullError(lhs)
        if rhs not in self.members:
            raise error.MemberError(lhs,rhs)
        return lhs.value[rhs]
    def __repr__(self):
        ret = "<SmallC Type %s{" %self.name
        ret += ",".join(["%s:%s" %(x,self.members[x].name) for x in self.members])
        ret += "}>"
        return ret
    def alloc_one(self):
        # Allocate a struct instance; members get bare default Objects.
        # NOTE(review): members are created as plain Objects rather than via
        # their type's alloc() — confirm nested defaults are intended.
        ret = Object(self)
        ret.value = {}
        for name in self.members:
            ret.value[name] = Object(self.members[name])
        return ret
class NullType(Type):
    '''Type of the singleton null value.'''
    def __init__(self):
        self.name = "NullType"
    def do_type_trans(self, op_name, arg = None):
        '''Check operand types for op_name; only eq/ne are supported,
        every other operation yields None (no valid result type).'''
        if op_name in ('eq', 'ne'):
            if (arg == self or arg == nullType):
                return intType
            return None
    def asBool(self,obj):
        # null is always falsy.
        return False
    def op_eq(self,lhs,rhs):
        # Equal only when rhs is also null (same type and value object).
        return Object(intType, int(lhs.type == rhs.type and lhs.value is rhs.value))
    def op_ne(self,lhs,rhs):
        return Object(intType, int(lhs.type != rhs.type or lhs.value is not rhs.value))
class Object:
    '''A runtime value: a (type, value) pair.

    All operator dispatch goes through op(), which forwards to the
    op_* methods defined on the value's type.
    '''
    def __init__(self,type,value = None):
        self.type = type
        self.value = value
        #TODO ugly here
        # ints default to 0 instead of None (SmallC default-value rule)
        if value is None and type is intType:
            self.value = 0
    def __nonzero__(self):
        # Python 2 truth hook; delegates to the type when it defines asBool.
        if hasattr(self.type, "asBool"):
            return self.type.asBool(self)
        else:
            #raise Erro (cant't convert to bool value)
            return bool(self.value)
    def __not__(self):
        return not self.__nonzero__()
    def op(self,op,arg = None):
        '''Apply operator `op` (e.g. "add", "assign") with optional rhs.'''
        if hasattr(self.type,"op_"+op):
            func = getattr(self.type,"op_"+op)
            if arg is not None:
                return func(self,arg)
            else:
                return func(self)
        else:
            raise error.UnsupportedOPError(op)
    def to_str(self):
        return self.type.to_str(self)
    def __repr__(self):
        return self.type.repr(self)
        #return "SmallC Object <" + repr(self.type) + " : " + repr(self.value) + ">"
    __str__ = __repr__
class ConstObject(Object):
    '''An immutable Object: every mutating operator is rejected.'''
    _FORBIDDEN = ("assign", "inc", "inc_", "dec", "dec_")
    def op(self,op,arg = None):
        if op in self._FORBIDDEN:
            raise error.UnsupportedOPError(op)
        return Object.op(self,op,arg)
    def __repr__(self):
        return "SmallC Const Object <" + repr(self.value) + " : " + self.type.name+ ">"
    __str__ = __repr__
#some special values
# Canonical singleton type objects shared across the interpreter; types
# compare by name, so one instance per type suffices.
intType = Integer()
void = Void()
nullType = NullType()
# The one shared null value; ConstObject makes it immutable.
null = ConstObject(nullType,"NULL VALUE")
| Python |
#coding=utf8
#$Id: error.py 206 2008-06-05 13:17:28Z Filia.Tao@gmail.com $
class Error(Exception):
    '''An error tied to a source line, rendered "line N: <type>: <msg>".'''
    error_type = "error"  # may be overridden: error, warning, notice
    def __init__(self, lineno, msg):
        self.lineno = lineno
        self.msg = msg
    def __str__(self):
        return "line {0}: {1}: {2}".format(self.lineno, self.error_type, self.msg)
class LangError(Exception):
    '''Base class for runtime errors raised while interpreting a program.'''
    def __init__(self, msg):
        self.msg = msg
    def __str__(self):
        return self.msg
    __repr__ = __str__
#Syntax (parse) errors
class ParseError(Exception):
    '''Syntax error reported by the parser, with token location when known.'''
    def __init__(self,token):
        self.token = token
    def __str__(self):
        if self.token:
            return "Parser error at line %d token '%s'" %(self.token.lineno, self.token.value)
        else:
            # BUGFIX: message typo ("unknow" -> "unknown").
            return "Parse error, unknown location"
#Static semantic errors
class StaticSemanticError(LangError):
    '''Base class for static (compile-time) semantic errors.'''
    pass
class NameError(StaticSemanticError):
    '''Undefined-name error.  NOTE: shadows the builtin NameError within
    this module.'''
    def __init__(self, name):
        self.name = name
        self.msg = "name '%s' is not defined" %(self.name)
class NameReDefineError(StaticSemanticError):
    '''Name redefinition error.'''
    def __init__(self, name):
        self.name = name
        self.msg = "name '%s' is already defined" %(self.name)
class TypeCheckError(StaticSemanticError):
    '''Operand types do not match the given operation.'''
    def __init__(self, op):
        self.op = op
        self.msg = "type not match for operation '%s'" %(self.op)
class MemberError(StaticSemanticError):
    '''Static error: the type has no such member.
    NOTE(review): shadowed by a later `class MemberError(LangError)` in
    this module, so `error.MemberError` resolves to that one instead.'''
    def __init__(self, type, member):
        self.type = type
        self.member = member
        self.msg = "'%s' dont't have '%s' member" %(self.type.name, self.member)
class NoPrivateMemberError(StaticSemanticError):
    '''Static error: the member is not publicly accessible (the message
    text matches MemberError's).'''
    def __init__(self, type, member):
        self.type = type
        self.member = member
        self.msg = "'%s' dont't have '%s' member" %(self.type.name, self.member)
class ClsMmeberError(StaticSemanticError):
    '''Static error: the class has no such class member.
    NOTE(review): class name is misspelled ("Mmeber") but renaming it
    would break any caller referencing it; left as-is.'''
    def __init__(self, type, member):
        self.type = type
        self.member = member
        self.msg = "'%s' dont't have '%s' class member" %(self.type.name, self.member)
class ParamCountNotMatchError(StaticSemanticError):
    '''Static error: call-site argument count differs from the declaration.'''
    def __init__(self, expect_count, real_count):
        self.expect_count = expect_count
        self.real_count = real_count
        self.msg = "param count not match , expect %d , got %d" %(self.expect_count, self.real_count)
class TCastError(LangError):
    '''Runtime error: invalid type cast from lhs to rhs.'''
    def __init__(self,lhs,rhs):
        self.lhs = lhs
        self.rhs = rhs
    def __str__(self):
        # BUGFIX: message typo ("Cant't" -> "Can't").
        return "Can't convert from %s to %s" %(self.lhs, self.rhs)
class TypeError(LangError):
    '''Runtime error: operand type mismatch (expected lhs, got rhs).
    NOTE: shadows the builtin TypeError within this module.'''
    def __init__(self,lhs,rhs):
        self.lhs = lhs
        self.rhs = rhs
    def __str__(self):
        # BUGFIX: message typo ("Except %s , got %s" -> "Expected ...").
        return "Expected %s, got %s" %(self.lhs,self.rhs)
class MultipleError(LangError):
    '''Runtime error: a name was bound more than once in a namespace.'''
    def __init__(self,name):
        self.name = name
    def __str__(self):
        return "name %s already defined" %(self.name)
class UnsupportedOPError(LangError):
    '''Runtime error: the value's type does not implement this operator.'''
    def __init__(self,op):
        self.op = op
    def __str__(self):
        return "unsupported operation '%s'" %(self.op)
class IndexError(LangError):
    '''Runtime error: array index outside the valid range.
    NOTE: shadows the builtin IndexError within this module.'''
    def __init__(self,ind,range):
        self.index = ind
        self.range = range
    def __str__(self):
        return "index %s out of range %s" %(self.index ,repr(self.range))
class ChkFailError(LangError):
    '''Runtime error: a `chk` assertion evaluated to zero.'''
    def __str__(self):
        return "chk failed"
class NullError(LangError):
    '''Runtime error: a null reference was dereferenced.'''
    def __init__(self,obj):
        self.obj = obj
    def __str__(self):
        return "%s is null." %(self.obj)
class MemberError(LangError):
    '''Runtime error: the object has no such member.
    NOTE(review): this redefinition shadows the earlier
    MemberError(StaticSemanticError) declared above in this module.'''
    def __init__(self,obj,member):
        self.obj = obj
        self.member = member
    def __str__(self):
        # BUGFIX: message typos ("don't have ... member ." spacing/grammar).
        return "%s doesn't have '%s' member." %(self.obj,self.member)
class UnimplementedMethodError(LangError):
    '''Runtime error: an abstract method was invoked.'''
    def __init__(self, method, cls):
        self.method = method
        self.cls = cls
    def __str__(self):
        return "method '%s' of class '%s' is unimplemented (abstract)." %(self.method, self.cls)
#coding=utf8
#$Id: function.py 204 2008-06-04 12:56:45Z Filia.Tao@gmail.com $
import copy,sys
from interpretor.ooc import lang
from interpretor.ooc import error
def report_none(func):
    '''Debug decorator for Namespace.get-style methods: warn when a lookup
    returns None (which usually signals a missed name resolution).'''
    def w(self, t):
        r = func(self, t)
        if r is None:
            # BUGFIX: the message was printed with comma-separated print
            # arguments instead of %-interpolation, so the literal "%s"
            # placeholders were emitted; interpolate properly instead.
            sys.stdout.write("get %s from %s got None\n" % (t, self.name))
        return r
    return w
def copy_ns(ns_dict):
    '''Copy a namespace dict one level deep: a new dict whose values are
    each shallow-copied, so per-call locals never alias the template.'''
    return dict((name, copy.copy(value)) for name, value in ns_dict.items())
# Shared I/O channel state used by the built-in read/print/eof functions.
io = {
    'input': sys.stdin,
    'output': sys.stdout,
    'input_buff': "",  # one pre-read input line awaiting consumption
    'is_eof': 0,       # set once the underlying stream is exhausted
}

def set_io(input_f, output_f):
    '''Redirect interpreter I/O to the given streams and reset buffering.'''
    io.update(input=input_f, output=output_f, input_buff="", is_eof=0)
class Namespace:
    '''A name table with optional chaining to an enclosing namespace.

    Only the global namespace is a direct instance; functions and classes
    use subclasses.  Entries are stored as (value, decorate) pairs.
    '''
    def __init__(self, upper=None):
        self.upper = upper  # enclosing namespace, searched on a local miss
        self.ns = {}
        self.name = "global"
    def get(self, name):
        "Resolve name here or, failing that, in an enclosing namespace."
        try:
            return self.ns[name][0]
        except KeyError:
            pass
        if self.upper:
            return self.upper.get(name)
        raise error.NameError(name)
    def set(self, name, value, decorate = None):
        "Bind a new name, rejecting redefinition."
        if name in self.ns:
            raise error.MultipleError(name)
        self.ns[name] = (value, decorate)
    def __repr__(self):
        return "Namespace %s" %(self.name)
    __str__ = __repr__
class Function(Namespace):
    '''A function definition; also a Namespace holding its locals.

    Every OOC function belongs to a class.  Carries its name, owning
    class, return type and decorate flag (public/static/redef/...).
    Parameters are registered via add_param; freeze() snapshots the local
    namespace so each call starts from a clean copy.
    '''
    def __init__(self,name,cls,ret_type, decorate):
        self.name = name
        self.params = []        # parameter names, in declaration order
        self.params_type = []   # matching parameter types
        self.ns = {}            # local names (flat: name -> Object)
        self.cls = cls
        self.ret_type = ret_type
        self.decorate = decorate
        self.obj = None         # object the function was invoked on
        self._bind = None       # receiver set by bind() until the call
    def add_param(self,name,type):
        'Declare one parameter.'
        self.params.append(name)
        self.params_type.append(type)
        self.set(name, lang.Object(type))
    def freeze(self):
        'Freeze the function once fully defined; keep a pristine ns copy.'
        #if self.decorate != 'static':
        #    self.ns['this'] = self.cls.alloc_one()
        self.ns_org = copy_ns(self.ns)
    def set_param(self, name, value):
        '''Assign an argument to a parameter at call time.'''
        if name not in self.ns:
            # BUGFIX: raised the *builtin* NameError; use the project's
            # error.NameError so the interpreter reports it properly.
            raise error.NameError(name)
        else:
            # Using op("assign") keeps OOC assignment semantics correct.
            self.ns[name].op("assign",value)
    def bind(self, obj):
        '''Bind the method to a receiver object (fluent: returns self).'''
        self._bind = obj
        return self
    def unbind(self):
        self._bind = None
    #@report_none
    def get(self, name):
        '''Resolve a name used inside the function body.

        Search order: locals; then (static method) class members, or
        (instance method) `this` and everything reachable through it.
        Global-namespace fallback is handled inside the class lookup.
        '''
        if name in self.ns:
            return self.ns[name]
        else:
            if self.decorate == "static":
                # Static method: class statics / globals only.
                return self.cls.op_get_cls(name)
            else:
                # Non-static: a receiver must exist ('this' or a binding).
                if 'this' in self.ns:
                    obj = self.ns['this']
                elif self._bind is not None:
                    obj = self._bind
                else:
                    raise StandardError()
                if name == 'this':
                    return obj
                # Names reachable through the instance (fields + class vars).
                r = obj.op("get", name)
                if r is None:
                    raise error.NameError(name)
                return r
    def set(self, name, value):
        'Bind a local name (no decorate distinction inside a function).'
        if name in self.ns:
            raise error.MultipleError(name)
        else:
            self.ns[name] = value
    def call(self, args, inter, line_no = None):
        'Invoke the function with args; inter is the interpreter instance.'
        # Push onto the call stack (used for error reports).
        inter.call_stack.append((self, line_no))
        # Save the caller's view: locals dict and interpreter namespace.
        ns_now = self.ns
        # copy_ns gives a one-level-deep copy: fresh locals per activation.
        self.ns = copy_ns(self.ns_org)
        if self._bind is not None:
            self.set('this', self._bind)
            self._bind = None
        old_current = inter.current_ns
        inter.current_ns = self
        for i in range(len(self.params)):
            self.set_param(self.params[i], args[i])
        # BUGFIX: a function with an empty body used to crash with an
        # unbound local `ret`; default to a void result.
        ret = lang.Object(lang.void)
        for st in self.statements:
            ret = inter.on_statement(st)
        # Restore the caller's view.
        self.ns = ns_now
        inter.current_ns = old_current
        inter.call_stack.pop()
        # The value of the last statement, cast to the declared return type.
        return ret.op("tcast", self.ret_type)
    def __repr__(self):
        return "Function %s " %self.name
    __str__ = __repr__
class AbstractFunction(Function):
    '''Declaration-only function: invoking it raises UnimplementedMethodError.'''
    def __init__(self,name,cls,ret_type, decorate):
        self.name = name
        self.params_type = []
        self.cls = cls
        self.ret_type = ret_type
        self.decorate = decorate
    def call(self, args, inter, line_no = None):
        # BUGFIX: the signature had an extra leading `obj` parameter that no
        # caller passes (Function.call takes args/inter/line_no); align it
        # with the base class so positional arguments bind correctly.
        'Attempting to call an abstract function raises an exception.'
        raise error.UnimplementedMethodError(self.name , self.cls.name)
#Built-in functions (effectively primitive operators) follow.
class PrintFunc(Function):
    '''Built-in print: write each argument to the current output stream,
    space-separated, without a trailing newline; returns void.'''
    def __init__(self):
        self.name = "print"
    def call(self,args,inter,line_no):
        for x in args:
            print >>io['output'], x.to_str(),
        return lang.Object(lang.void)
    def __repr__(self):
        return "function %s" %(self.name)
class PrintlnFunc(Function):
    '''Built-in println: like print, plus a trailing newline; returns void.'''
    def __init__(self):
        self.name = "println"
    def call(self,args,inter,line_no):
        for x in args:
            print >>io['output'], x.to_str(),
        print >>io['output']
        return lang.Object(lang.void)
class ReadFunc(Function):
    '''Built-in read(): consume one whitespace-stripped input line and
    return it as an int Object.  A one-line read-ahead buffer is kept in
    io["input_buff"] so eof() can be answered without losing input.'''
    def __init__(self):
        self.name = "read"
        self.input = input  # unused; kept for backward compatibility
    def call(self,args,inter,line_no):
        if io["input_buff"]:
            # Serve the buffered line, then pre-read the next one so the
            # eof flag stays accurate.
            inp = io["input_buff"]
            io["input_buff"] = ""
            line = io['input'].readline()
            if line == "":
                io['is_eof'] = 1
            else:
                io["input_buff"] = line.strip()
        else:
            line = io['input'].readline()
            if line == "":
                # BUGFIX: raised error.EOFError, which does not exist in
                # the error module (AttributeError); raise a LangError so
                # the interpreter's normal error reporting handles it.
                raise error.LangError("read: unexpected end of input")
            else:
                inp = line.strip()
        try:
            inp = int(inp)
        except ValueError:
            # BUGFIX: error.LangError() was constructed without its
            # required msg argument (TypeError); pass a real message.
            raise error.LangError("read: '%s' is not an integer" % inp)
        return lang.Object(lang.intType, inp)
class EofFunc(Function):
    '''Built-in eof(): return 1 once input is exhausted, else 0.
    Pre-reads one line into io["input_buff"] to detect end of stream.'''
    def __init__(self):
        self.name = "eof"
        self.input = input  # unused; kept as-is
    def call(self,args,inter,line_no):
        # Only probe the stream when nothing is buffered and EOF unseen.
        if not io["input_buff"] and not io['is_eof']:
            line = io['input'].readline()
            if line == "":
                io['is_eof'] = 1
            else:
                io["input_buff"] = line.strip()
        return lang.Object(lang.intType, io["is_eof"])
def get_built_in_ns():
    '''Create the root namespace pre-populated with the built-in names.'''
    built_in_ns = Namespace()
    builtins = (
        ('int', lang.intType),
        ('void', lang.void),
        ('null', lang.null),
        ('print', PrintFunc()),
        ('println', PrintlnFunc()),
        ('read', ReadFunc()),
        ('eof', EofFunc()),
        ('Object', lang.rootClass),
        ('String', lang.string),
    )
    for name, value in builtins:
        built_in_ns.set(name, value, "builtin")
    return built_in_ns
| Python |
#coding=utf8
#$Id: lex.py 189 2008-05-27 14:57:58Z Filia.Tao@gmail.com $
#Copyright 2007 Tao Fei (filia.tao@gmail.com)
#Released under GPL V3 (or later)
#see http://www.gnu.org/copyleft/gpl.html for more details
import ply.lex as lex
# Token catalogue for the OOC lexer (ply reads these module globals).
tokens = ('id', 'num',
    'orop','andop','eqop', 'neop', 'ltop', 'gtop', 'leop', 'geop', 'chkop', 'incop', 'decop',
    'kw_class', 'kw_const', 'kw_var', 'kw_end', 'kw_func', 'kw_while', 'kw_if', 'kw_else', 'kw_new',
    "kw_abstract","kw_private","kw_public","kw_redef","kw_static",
    "string"
)
# Single-character tokens are handled directly as ply literals.
literals = ['{', '}', ';', ",", "[", "]", '(', ')', '=', '+', '-', '*', '/', '%' ,'!', '@' ,'.', '?', ':']
#t_assignop = r'='
#
#t_addop = r'+'
#
#t_minusop = r'-'
#
#t_mulop = r'*'
#
#t_divop = r'/'
#
#t_modop = r'%'
#
#t_notop = r'!'
# (the single-character operators above are served via `literals` instead)
# Multi-character operators.  ply sorts simple string rules by decreasing
# regex length, so '<=' / '>=' are tried before '<' / '>'.
t_orop = r'\|\|'
t_andop = r'&&'
t_eqop = r'=='
t_neop = r'!='
t_ltop = r'<'
t_gtop = r'>'
t_leop = r'<='
t_geop = r'>='
t_incop = r'\+\+'
t_decop = r'--'
def t_num(t):
    r'\d+'
    # Decimal integer literal; the token value is converted in place.
    try:
        t.value = int(t.value,10);
    except ValueError:
        # Unreachable for this digits-only regex; kept as a guard.
        print "Number %s is bad!" % t.value
        t.value = 0
    return t
def t_string(t):
    r'".*?"'
    # BUGFIX: the pattern was greedy (".*"), so two string literals on the
    # same source line were merged into one token; non-greedy fixes that.
    # NOTE(review): eval() is used to decode escape sequences — it will
    # execute arbitrary expressions if the source text is untrusted.
    t.value = eval('%s' %(t.value))
    return t
# Keyword table consulted by t_id: identifiers that are reserved words
# get their keyword token type.  Note that `chk` lexes as an operator.
reserved = {
    "class": "kw_class",
    "const": "kw_const",
    "var": "kw_var",
    "end": "kw_end",
    "func": "kw_func",
    "while": "kw_while",
    "if": "kw_if",
    "else": "kw_else",
    "new": "kw_new",
    "static": "kw_static",
    "private": "kw_private",
    "public": "kw_public",
    "abstract":"kw_abstract",
    "redef": "kw_redef",
    "chk": "chkop"
}
def t_id(t):
    r'\b[a-zA-Z_][a-zA-Z_0-9]*'
    # Identifier, unless it is listed as a reserved word.
    t.type = reserved.get(t.value,'id') # Check for reserved words
    return t
def t_newline(t):
    r'\n'
    # Track line numbers for error reporting; no token is emitted.
    t.lexer.lineno += 1
# Characters skipped outright (newlines are handled by t_newline above).
t_ignore = ' \r\t\v'
# Comments
def t_comment1(t):
    r'//.*'
    # Line comment: discard (returning None emits no token).
    pass
def t_comment2(t):
    r'/\*(.|\n)*?\*/'
    # Block comment: discard, but keep the line counter accurate.
    t.lexer.lineno += t.value.count('\n')
def t_error(t):
print "Illegal character '%s' on line %d " % (t.value[0],t.lexer.lineno)
# Compute column.
# input is the input text string
# token is a token instance
def find_column(input,token):
    '''Return the 1-based column of *token* within *input*.'''
    # BUGFIX: the original scan-back loop was off by one on every line
    # after the first (the newline itself was counted into the column).
    # rfind returns -1 on the first line, which yields lexpos + 1 as wanted.
    last_newline = input.rfind('\n', 0, token.lexpos)
    return token.lexpos - last_newline
# The sample OOC program importable as `test` and used by the __main__
# smoke test below.
# BUGFIX(dead code): two earlier eight-queens sample programs were
# assigned to `test` and immediately overwritten before any use; the dead
# assignments were removed.  The module's observable value of `test`
# (this knight's-tour / labelled-backtracking sample) is unchanged.
test = '''
/* class abstract */
/*
mulit line
comment
*/
class abstract Node { public func abstract int target() // comment end line
func abstract int subnodenum() func abstract Node down(int)
func abstract void output(int) }
class LabelBack { private var Link head; int num, bound; Set nodeset end
public func LabelBack constructor(Node p, int b, Set s){
head=new Link.constructor(p, null);
bound=b; nodeset=s; nodeset.add(p); this }
func void depthfirst(){ var Node p, sub; int m, i end
p=head.prob; m=p.subnodenum(); i=0;
if (p.target())( head.output(1); ++num);
while (num<bound && i<m)
if ((sub=p.down(i++))!=null && !nodeset.contains(sub))(
head=new Link.constructor(sub, head); nodeset.add(sub);
depthfirst(); head=head.next; nodeset.remove(sub)) } }
class Link { public var Node prob; Link next end
func Link constructor(Node p, Link n){ prob=p; next=n; this }
func void output(int b){ if (next!=null) next.output(0); prob.output(b) } }
class abstract Set { public func abstract int contains(Object)
func abstract void add(Object) func abstract void remove(Object) }
class abstract PairNode:Node { public func abstract int tag1()
func abstract int tag2() }
class Knight:PairNode { static var int m, n; int[][] move, board end
private var int x, y, level end
public func Knight constructor1(int mm, int nn, int[][] mov){
var int i end m=mm; n=nn; move=mov; level=1;
board=new int[][m]; i=0; while (i<m) board[i++]=new int[m]; this }
func Knight constructor2(int lev, int xx, int yy){ level=lev; x=xx; y=yy; this }
redef func int target(){ level==m*m }
func int subnodenum(){ if (level<m*m) n else 0 }
func Node down(int i){ var int x1, y1 end x1=x+move[i][0]; y1=y+move[i][1];
if (x1>=0 && x1<m && y1>=0 && y1<m)
new Knight.constructor2(level+1, x1, y1) else null }
func void output(int b){ var int i, j end board[x][y]=level;
if (level==m*m)( i=0;
while (i<m)( j=0;
while (j<m)( print(board[i][j]); ++j); println(); ++i)) }
func int tag1(){ x } func int tag2(){ y } }
class Bmatrix:Set { private var int[][] set end
public func Bmatrix constructor(int row, int column) { var int i end
set=new int[][row]; i=0; while (i<row) set[i++]=new int[column]; this }
redef func int contains(Object o){ var int k1, k2 end
k1=o:PairNode.tag1(); k2=o:PairNode.tag2();
k1>=0 && k1<set.length && k2>=0 && k2<set[k1].length && set[k1][k2] }
func void add(Object o){ var int k1, k2 end
k1=o:PairNode.tag1(); k2=o:PairNode.tag2();
if (k1>=0 && k1<set.length && k2>=0 && k2<set[k1].length) set[k1][k2]=1 }
func void remove(Object o){ var int k1, k2 end
k1=o:PairNode.tag1(); k2=o:PairNode.tag2();
if (k1>=0 && k1<set.length && k2>=0 && k2<set[k1].length) set[k1][k2]=0 } }
class Main {
static func void main(){ var int n, i; int[][] move; Set nset end n=5;
nset=new Bmatrix.constructor(n, n); move=new int[][8];
i=0; while (i<8) move[i++]=new int[2]; move[0][0]=2; move[0][1]=1; move[1][0]=1; move[1][1]=2;
move[2][0]=-1; move[2][1]=2; move[3][0]=-2; move[3][1]=1; move[4][0]=-2; move[4][1]=-1;
move[5][0]=-1; move[5][1]=-2; move[6][0]=1; move[6][1]=-2; move[7][0]=2; move[7][1]=-1;
new LabelBack.constructor(new Knight.constructor1(n, 8, move), 1, nset).depthfirst() } }
'''
# Build the module-level lexer as an import side effect.
lex.lex()
if __name__ == '__main__':
    # Smoke test: tokenize the sample program and dump every token.
    # NOTE(review): this builds a second lexer, shadowing the one above.
    lexer = lex.lex()
    lexer.input(test)
    while 1:
        tok = lexer.token()
        if not tok: break
        print tok
| Python |
#coding=utf8
#$Id: interp.py 206 2008-06-05 13:17:28Z Filia.Tao@gmail.com $
'''
ooc 语言解释器
工作在抽象语法树上。
'''
import operator
import copy
import sys
import ply
import interpretor.ooc.lang as lang
from interpretor.ooc.parse import parse
from interpretor.ooc.function import Function,AbstractFunction,get_built_in_ns,copy_ns,set_io
from interpretor.ooc.lex import test
import interpretor.ooc.error as error
from interpretor.ast import Node,Leaf,BaseASTWalker,BaseAnnotateAction
from interpretor.common import CommonOPAnnotate as OPAnnotate
class MoreParser:
    '''Second pass over the AST: resolves class declarations into the
    global namespace (classes, constants, members and methods).'''
    def __init__(self,ast):
        self.ast = ast
        self.global_ns = get_built_in_ns()
        self.current_ns = self.global_ns
        self.errors = []  # collected static-semantic errors
    def add_error(self, e):
        # Attach the line of the most recently visited token.
        self.errors.append(error.Error(self.current_token.lineno, str(e)))
    def parse(self):
        '''Walk the AST and build the global namespace and classes.'''
        # Class declarations are resolved in two passes so that a class may
        # reference another class declared later in the file (mutual nesting).
        for n in self.ast.query("classdecl"):
            name = self.on_token(n.child('id'))
            base_cls = None
            decorate = None
            if n.child('pos_superclass'):
                base = self.on_token(n.child('pos_superclass').child(1))
                base_cls = self.current_ns.get(base)
            if n.child('pos_abstract'):
                decorate = "abstract"
            cls = lang.Class(name, self.global_ns, base_cls, decorate)
            self.global_ns.set(name, cls, decorate)
        for n in self.ast.query("classdecl"):
            name = self.on_token(n.child('id'))
            cls = self.global_ns.get(name)
            # Constants
            for con in n.query("pos_condecl>condecl>condef"):
                self.on_condef(con, cls)
            for mem in n.query("pos_static>member"):
                self.on_member(mem, cls, "static")
            for mem in n.query("pos_private>member"):
                self.on_member(mem, cls, "private")
            for mem in n.query("pos_public>member"):
                self.on_member(mem, cls, "public")
            for cfdef in n.query("pos_redef>cfdef_list>cfdef"):
                self.on_cfdef(cfdef, cls, "redef")
    def on_member(self,node,cls,decorate):
        # A member section holds variable declarations and/or functions.
        for decl in node.query("vdecl>decllist>decl"):
            self.on_decl_inside_class(decl,cls,decorate)
        for afdef in node.query("fdefs>fdef>afdef"):
            self.on_afdef(afdef,cls,decorate)
        for cfdef in node.query("fdefs>fdef>cfdef"):
            self.on_cfdef(cfdef,cls,decorate)
    def on_decl_inside_class(self,node,cls,decorate):
        # One declared type, possibly several variable names.
        type = self.on_type(node.child(0))
        for id in node.child(1):
            cls.add_var(self.on_token(id),type,decorate)
    def on_afdef(self,node,cls,decorate):
        "Abstract function declaration (types only, no body)."
        name = self.on_token(node.child(3))
        fns = AbstractFunction(name,cls,self.on_type(node.child(2)),decorate)
        cls.add_func(name,fns,decorate)
        for type in node.query("type_list>type"):
            fns.params_type.append(self.on_type(type))
    def on_cfdef(self,node,cls,decorate):
        # Concrete function definition: header, parameters, locals, body.
        name = self.on_token(node.child(2).child(0))
        fns = Function(name,cls,self.on_type(node.child(1)),decorate)
        cls.add_func(name,fns,decorate)
        for para in node.query("head>paralist>paradecl"):
            self.on_paradecl(para,fns)
        for decl in node.query("funbody>vdecl>decllist>decl"):#vdecl > decllist > decls
            self.on_decl_in_func(decl,fns)
        fns.statements = node.query("funbody>stlist>st")
        fns.freeze()
    def on_decl_in_func(self,node,ns):
        type = self.on_type(node.child(0))
        for id in node.child(1):
            ns.set(self.on_token(id),lang.Object(type))
    #Formal parameter declaration.
    def on_paradecl(self,node,ns):
        type = self.on_type(node.child(0))
        name = self.on_token(node.child(1))
        ns.add_param(name,type)
    def on_type(self,node):
        # Resolve a type node; a positive dim wraps the base in an Array.
        base = self.on_token(node.child("id"))
        try:
            base_type = self.current_ns.get(base)
        except error.StaticSemanticError, e:
            self.add_error(e)
            return None
        if node.dim > 0:
            return lang.Array(base_type, node.dim)
        else:
            return base_type
    def on_condef(self,node,cls):
        # Constant definition; a longer node means a leading unary minus.
        name = self.on_token(node.child(0))
        value = self.on_token(node.child(-1))
        if len(node) > 3:
            value = -value
        cls.add_var(name,lang.ConstObject(lang.intType,value),'const') # type use lang.intType
    def on_token(self,node):
        # Remember the token for error reporting, return its value.
        self.current_token = node
        return node.value
class Interpreter:
def __init__(self,ast,global_ns):
    self.ast = ast
    self.global_ns = global_ns
    self.current_ns = None     # namespace of the function being executed
    self.call_stack = []       # (Function, call-site line) pairs
    self.current_token = None  # last token visited, for error reports
def run(self):
    '''Execute the program: invoke Main.main() and report any
    interpreter-level failure (LangError) with source location and the
    current call stack on stderr.'''
    self.current_ns = self.global_ns
    try:
        main_cls = self.current_ns.get("Main")
        main = main_cls.op_member_cls("main")
        main.call([],self)
    #except Exception,e:
    except error.LangError,e:
        if self.current_token is None:
            print >>sys.stderr,e
        else:
            print >>sys.stderr,"error at line %d near token '%s': %s" %(
                self.current_token.lineno,
                self.current_token.value,
                str(e)
            )
        print >>sys.stderr, "calling stack "
        for x in self.call_stack:
            if x[1]:
                print >>sys.stderr, "call %s at line %s" %(x[0], x[1])
            else:
                print >>sys.stderr, "call %s" % (x[0])
    #except StandardError,e:
    #    print >>sys.stderr, "Interpretor inner error "
    #    raise e
def on_statement(self,node):
    '''Execute one statement node; returns its value (or None for an
    unrecognized statement kind).'''
    inner = node.child(0)
    dispatch = {"cond": self.on_cond, "loop": self.on_loop, "exp": self.on_exp}
    handler = dispatch.get(inner.type)
    if handler is not None:
        return handler(inner)
def on_cond(self,node):
    '''cond : if ( exp ) st [ else st ] — returns the taken branch's
    value, or void when no branch runs.'''
    if self.on_exp(node.child(2)):
        return self.on_statement(node.child(4))
    if len(node) > 6:
        return self.on_statement(node.child(6))
    return lang.Object(lang.void)
def on_loop(self,node):
    '''loop : while ( exp ) [ st ] — returns the last body value, or
    void when the body never runs (or is absent).'''
    result = lang.Object(lang.void)
    cond = node.child(2)
    has_body = len(node) > 4
    while self.on_exp(cond):
        if has_body:
            result = self.on_statement(node.child(4))
    return result
def on_exp(self,node):
    '''exp : orexp [ '=' orexp ] — the assignment target is evaluated
    before the assigned value.'''
    if len(node) <= 1:
        return self.on_orexp(node.child(0))
    target = self.on_orexp(node.child(0))
    self.on_token(node.child(1))
    value = self.on_orexp(node.child(2))
    return target.op("assign",value)
def on_orexp(self,node):
    # Short-circuit evaluation: a truthy lhs skips the rhs entirely.
    # NOTE(review): the short-circuit path returns the raw lhs Object
    # rather than the int 0/1 that op_or would produce — confirm this is
    # the intended semantics when `a || b` is used as a value.
    if len(node) > 1:
        lhs = self.on_orexp(node.child(0))
        if lhs:
            return lhs
        self.on_token(node.child(1))
        rhs = self.on_andexp(node.child(2))
        return lhs.op("or",rhs)
    else:
        return self.on_andexp(node.child(0))
def on_andexp(self,node):
    # Short-circuit evaluation: a falsy lhs skips the rhs entirely.
    # NOTE(review): the short-circuit path returns the raw lhs Object
    # (possibly a null Object) rather than an int 0 — confirm intended.
    if len(node) > 1:
        lhs = self.on_andexp(node.child(0))
        if not lhs:
            return lhs
        self.on_token(node.child(1))
        rhs = self.on_relexp(node.child(2))
        return lhs.op("and",rhs)
    else:
        return self.on_relexp(node.child(0))
def on_relexp(self,node):
    '''relexp : relexp relop term | term'''
    if len(node) <= 1:
        return self.on_term(node.child(0))
    left = self.on_relexp(node.child(0))
    op_names = {
        '==': 'eq',
        '!=': 'ne',
        '<': 'lt',
        '>': 'gt',
        '<=': 'le',
        '>=': 'ge',
    }
    op_name = op_names[self.on_token(node.child(1).child(0))]
    right = self.on_term(node.child(2))
    return left.op(op_name,right)
def on_term(self,node):
    '''term : term addop factor | factor'''
    if len(node) <= 1:
        return self.on_factor(node.child(0))
    left = self.on_term(node.child(0))
    op_name = {'+':'add','-':'minus'}[self.on_token(node.child(1).child(0))]
    right = self.on_factor(node.child(2))
    return left.op(op_name,right)
def on_factor(self,node):
    '''factor : factor mulop uniexp | uniexp'''
    if len(node) <= 1:
        return self.on_uniexp(node.child(0))
    left = self.on_factor(node.child(0))
    op_name = {'*':'mul','/':'div','%':'mod'}[self.on_token(node.child(1).child(0))]
    right = self.on_uniexp(node.child(2))
    return left.op(op_name,right)
def on_uniexp(self, node):
    """Unary expression: ++/--/-/!/chk applied to a sub-expression."""
    if len(node) > 1:
        op_name = {'++': 'inc', '--': 'dec', '-': 'minus_',
                   '!': 'not', 'chk': 'chk'}[self.on_token(node.child(0).child(0))]
        return self.on_uniexp(node.child(1)).op(op_name)
    return self.on_postexp(node.child(0))
def on_postexp(self, node):
    # Evaluate a postfix expression: function call, array indexing,
    # member access, type cast, and postfix ++/--.
    if len(node) > 1:
        postexp = self.on_postexp(node.child(0))
        postfix = node.child(1).child(0)
        if postfix.type == 'apara':
            #TODO should check postexp is a (func obj) pair
            line_no = self.current_token.lineno
            if len(postfix) == 2:
                # empty '()' - call with no arguments
                ret = postexp.call([],self,line_no)
            else:
                ret = postexp.call(self.on_apara(postfix),self,line_no)
            # read the ')', to set the current_token right
            self.on_token(postfix.child(-1))
            return ret
        elif postfix.type =='index':
            return postexp.op("index",self.on_exp(postfix.child(1)))
        elif postfix.type == 'aselect':
            if isinstance(postexp, lang.Object):
                # check whether the enclosing function belongs to postexp's
                # class: if so, private members are accessible;
                # otherwise only public ones are
                if self.current_ns.cls == postexp.type:
                    return postexp.op("member",self.on_token(postfix.child(1)))
                else:
                    return postexp.op("member_no_private",self.on_token(postfix.child(1)))
            elif isinstance(postexp, lang.RootClass):
                # ClassName.member style access
                return postexp.op_member_cls(self.on_token(postfix.child(1)))
            else:
                raise error.UnsupportedOPError("member")
        elif postfix.type == 'tcast':
            return postexp.op("tcast",self.on_type(postfix.child(1)))
        if isinstance(postfix,Leaf):
            value = self.on_token(postfix)
            if value == '++':
                return postexp.op("inc_")
            elif value == '--':
                return postexp.op("dec_")
        # NOTE(review): falls through returning None for any other postfix;
        # the grammar should make this unreachable -- confirm
    else:
        return self.on_entity(node.child(0))
def on_type(self, node):
    """Resolve a type node to its Type object, wrapping in Array per dimension."""
    base_type = self.current_ns.get(self.on_token(node.child("id")))
    if node.dim > 0:
        return lang.Array(base_type, node.dim)
    return base_type
def on_entity(self, node):
    # Evaluate a primary entity: a cast, an allocation, a string or int
    # literal, or an identifier looked up in the current namespace.
    entity = node.child(0)
    if entity.type == "cast":
        return self.on_cast(entity)
    elif entity.type == "alloc":
        return self.on_alloc(entity)
    elif entity.type == 'string':
        return lang.Object(lang.string, entity.value)
    elif isinstance(entity,Leaf):
        entity = self.on_token(entity)
        if isinstance(entity,str):
            # identifier: resolve through the current namespace
            #if entity == '?': #input
            #    return self.current_ns.get("read").call([],self)
            #else:
            #print self.current_ns, "entity......", entity,self.current_ns.get(entity)
            #print entity,self.current_ns.name,self.current_ns.ns['this']
            return self.current_ns.get(entity)
        elif isinstance(entity,int):
            # integer literal
            return lang.Object(lang.intType, entity)
def on_cast(self, node):
    """Evaluate a '(stlist)' cast expression.

    Semantics: execute every statement in the list; the value of the
    cast is the value of the LAST statement.  Bug fix: an empty
    statement list used to raise NameError on the unbound result
    variable -- it now yields void.
    """
    ret = lang.Object(lang.void)
    for st in node.query("stlist>st"):
        ret = self.on_statement(st)
    return ret
def on_alloc(self, node):
    """Evaluate 'new T' or 'new T[exp]'; returns the freshly allocated object."""
    alloc_type = self.on_type(node.child(1))
    if len(node) == 2:
        return alloc_type.alloc()
    # array form: the size expression sits at child 3 between the brackets
    return alloc_type.alloc(self.on_exp(node.child(3)))
def on_apara(self, node):
    """Evaluate an actual-parameter list into a list of argument values."""
    values = []
    for exp in node.query("explist>exp"):
        values.append(self.on_exp(exp))
    return values
def on_token(self, node):
    """Record *node* as the most recently visited token and return its text value."""
    self.current_token = node
    value = node.value
    return value
class StaticTypeChecker(BaseAnnotateAction):
    '''Static type checking and evaluation: annotates AST nodes with types.'''
    # name of the node attribute this pass writes
    annotate_attr_name = 'type'
    def __init__(self, ns):
        self.global_ns = ns
        self.current_ns = ns
        self.errors = []
    def add_error(self, e):
        # collect the error (tagged with the current token's line) instead of aborting
        #print "add error " , e
        #raise
        self.errors.append(error.Error(self.current_token.lineno, str(e)))
        #print >>sys.stderr, error.Error(self.current_token.lineno, str(e))
        #sys.exit(-1)
    def _do_type_trans(self, node, op, *operands):
        # annotate node with the result type of applying op to the operand types
        node.set_attr(self.annotate_attr_name, self._check_type(op, *operands))
    def _check_type(self, op, *operands):
        # returns the result type, or a falsy value (recorded as an error) on mismatch
        main_type = operands[0]
        if len(operands) > 1:
            arg = operands[1]
        else:
            arg = None
        is_type_match = lang.do_type_trans(main_type, op, arg)
        if not is_type_match:
            if op in ('member', 'member_no_private'):
                self.add_error(error.MemberError(operands[0], operands[1]))
            else:
                self.add_error(error.TypeCheckError(op))
        return is_type_match
    def _on_bin_exp(self, node):
        # generic binary expression: result type from lhs-type op rhs-type
        if len(node) >1:
            self._do_type_trans(node,
                node.child(1).get_attr('op_name'),
                node.child(0).get_attr('type'),
                node.child(2).get_attr('type')
            )
        else:
            self._copy_from_first_child(node)
    def before_classdecl(self, node):
        class_name = node.child('id').value
        self.current_class = self.global_ns.get(class_name)
        #print "enter class" , self.current_class
    def on_classdecl(self, node):
        self.current_class = None
    def before_funbody(self, node):
        '''Enter the function's namespace before walking the funbody children.'''
        func_name = node.prev("head").child("id").value
        #print "enter func " , func_name
        self.current_ns = self.current_class.get_cls_member(func_name)
        if self.current_ns.decorate != "static":
            # non-static methods get a fresh instance bound for checking
            self.current_ns.bind(self.current_class.alloc_one())
        #print self.current_ns
    def on_funbody(self, node):
        self.current_ns.unbind()
        self.current_ns = self.global_ns
    on_st = BaseAnnotateAction._copy_from_first_child
    def on_cond(self, node):
        # an if-statement as a whole has type void
        node.set_attr(self.annotate_attr_name, lang.void)
    def on_loop(self, node):
        # a while-statement as a whole has type void
        node.set_attr(self.annotate_attr_name, lang.void)
    on_exp = on_orexp = on_andexp = _on_bin_exp
    on_relexp = on_term = on_factor = _on_bin_exp
    def on_uniexp(self, node):
        if len(node) > 1:
            self._do_type_trans(node,
                node.child(0).get_attr('op_name'),
                node.child(1).get_attr('type')
            )
        else:
            self._copy_from_first_child(node)
    def on_postexp(self, node):
        postexp = node.child(0)
        if len(node) > 1:
            postfix = node.child(1).child(0)
            if isinstance(postfix,Leaf): # '++' or '--'
                self._do_type_trans(node, postfix.get_attr('op_name'),postexp.get_attr('type'))
            else:
                # dispatch to the matching _on_postexp_* helper below
                getattr(self, "_on_postexp_" + postfix.type)(node)
        else:
            self._copy_from_first_child(node)
            node.set_attr('id_type', node.child(0).get_attr('id_type'))
    ## these helpers handle postfix kinds that have no dedicated AST node visit
    def _on_postexp_apara(self, node):
        '''Function call: check argument count and types.'''
        #print "_on_postexp_apara", node
        postexp = node.child(0)
        postfix = node.child(1).child(0)
        func = postexp.get_attr('type')
        #print "function call " , func
        args = postfix.query("explist>exp")
        #print node, func,node.child(0),node.child(0)._attr
        if func.name not in ("eof", "read", "print", "println"):
            # built-in functions are exempt from arity/type checking
            if len(func.params_type) != len(args):
                self.add_error(error.ParamCountNotMatchError(len(func.params_type), len(args)))
            else:
                for i in range(len(func.params_type)):
                    self._check_type('argument_pass', func.params_type[i], args[i].get_attr('type'))
        node.set_attr('type', func.ret_type)
        node.set_attr('id_type', 'obj')
    def _on_postexp_index(self, node):
        '''Array subscript operation.'''
        postexp = node.child(0)
        postfix = node.child(1).child(0)
        self._do_type_trans(node, 'index', postexp.get_attr('type'), postfix.child(1).get_attr('type'))
        node.set_attr('id_type', 'obj')
    def _on_postexp_aselect(self, node):
        '''Class member access.'''
        postexp = node.child(0)
        postfix = node.child(1).child(0)
        member = postfix.child(1).value
        #print "get member %s from %s" %(member, str(postexp))
        #print postexp._attr
        op_name = None
        if postexp.get_attr('id_type') == 'obj':
            # check whether the enclosing function belongs to postexp's
            # class: if so, private members are accessible;
            # otherwise only public ones are
            if self.current_ns.cls == postexp.get_attr('type'):
                op_name = "member"
            else:
                op_name = "member_no_private"
        elif postexp.get_attr('id_type') == 'class':
            op_name = "member_cls"
        if op_name:
            self._do_type_trans(node, op_name, postexp.get_attr('type'), member)
            v = node.get_attr('type')
            if isinstance(v, Function):
                node.set_attr('id_type', 'func')
            else:
                node.set_attr('id_type', 'obj')
        else:
            self.add_error(error.MemberError(postexp, member))
    def _on_postexp_tcast(self, node):
        postexp = node.child(0)
        postfix = node.child(1).child(0)
        self._do_type_trans(node, 'tcast', postexp.get_attr('type'), postfix.child(1).get_attr('type'))
        node.set_attr('id_type', 'obj')
    def on_entity(self, node):
        node.set_attr('type', node.child(0).get_attr('type'))
        node.set_attr('id_type', node.child(0).get_attr('id_type'))
        #BaseAnnotateAction._copy_from_first_child
    def on_cast(self, node):
        node.set_attr('type', node.child('stlist').get_attr('type'))
    def on_stlist(self, node):
        # a statement list has the type of its last statement
        node.set_attr('type', node.query("st")[-1].get_attr('type'))
    def on_alloc(self,node):
        if node.query('['):
            node.set_attr('type', lang.Array(node.child("type").get_attr('type')))
        else:
            node.set_attr('type', node.child("type").get_attr('type'))
        node.set_attr('id_type', 'obj')
    def on_type(self, node):
        base = node.child(0).value
        try:
            base_type = self.current_ns.get(base)
        except error.StaticSemanticError, e:
            self.add_error(e)
            return None
        if node.dim > 0:
            node.set_attr('type', lang.Array(base_type, node.dim))
        else:
            node.set_attr('type', base_type)
        node.set_attr('id_type', 'class')
    def _on_token(self, node):
        if node.type == "num" or node.type == '?':
            node.set_attr('type', lang.intType)
        elif node.type == "id":
            try:
                #print "get " , node.value , "from ns ", self.current_ns
                #print "is in funbody" , node.ancestor("funbody")
                #print "is in aselect" , node.ancestor("aselect")
                # only annotate ids inside a function body that are not the
                # member part of an 'a.b' selection
                if self.current_ns == self.global_ns or node.parent.type == "aselect":
                    #print "sss",node, node.lineno
                    return
                v = self.current_ns.get(node.value)
                #print "get " , node.value , "from ns ",self.current_ns
                if isinstance(v, lang.Object):
                    node.set_attr('type', v.type)
                    node.set_attr('id_type', 'obj')
                elif isinstance(v, Function):
                    node.set_attr('type', v)
                    node.set_attr('id_type', 'func')
                # elif isinstance(v, lang.Type):
                #     node.set_attr('type', v)
                #     node.set_attr('id_type', 'class')
            except error.NameError, e :
                self.add_error(e)
        #if node.get_attr('type') is None:
        #    print "sssssss", node, node.lineno
        self.current_token = node
def do_op_annotate(ast):
    """Annotate operator nodes in *ast* in place and return it."""
    walker = BaseASTWalker(ast, OPAnnotate())
    walker.run()
    return ast
def do_namespace_parse(ast):
    """Build the global namespace from *ast*; print errors and return None on failure."""
    ns_parser = MoreParser(ast)
    ns_parser.parse()
    if ns_parser.errors:
        for err in ns_parser.errors:
            print >>sys.stderr, err
        return None
    return ns_parser.global_ns
#===============================================================================
# class StaticSemWalker(BaseASTWalker):
#
# def walk_classdecl(self, node):
# self._do_action(node, 'before')
# for x in node:
# self._walk_node(x)
# return self._do_action(node)
#===============================================================================
def check_static_semtanic(ast, global_ns):
    """Run the static type checker over *ast*; report errors to stderr.

    Returns True when no static-semantic errors were found.
    (Name kept as-is, misspelling included, since callers use it.)
    """
    checker = StaticTypeChecker(global_ns)
    walker = BaseASTWalker(ast, checker)
    walker.run()
    if not checker.errors:
        return True
    print >>sys.stderr, "found error ", len(checker.errors)
    for e in checker.errors:
        print >>sys.stderr, e
    return False
def old_run(data, input_file = sys.stdin, output_file = sys.stdout):
    """Legacy entry point: parse and interpret *data* without static checking."""
    set_io(input_file, output_file)
    syntax_tree = parse(data)
    ns_parser = MoreParser(syntax_tree)
    ns_parser.parse()
    #print ns_parser.global_ns.ns
    interpreter = Interpreter(syntax_tree, ns_parser.global_ns)
    interpreter.run()
def run(data, input_file = sys.stdin, output_file = sys.stdout):
    # Main entry point: parse, annotate, namespace-check, static-check,
    # then interpret.  Parse/lex errors are reported; other errors are
    # swallowed with a generic message.
    #print >>sys.stderr, data
    try:
        set_io(input_file, output_file)
        ast = parse(data)
        do_op_annotate(ast)
        global_ns = do_namespace_parse(ast)
        if global_ns:
            # NOTE(review): the static-check result is ignored -- the
            # interpreter runs even when check_static_semtanic returned
            # False.  Confirm whether that is intended.
            if check_static_semtanic(ast, global_ns):
                pass
            inter = Interpreter(ast, global_ns)
            inter.run()
    except error.ParseError, e:
        print >>sys.stderr,e
    except ply.lex.LexError,e:
        pass
    except StandardError,e:
        print >>sys.stderr, "Interpretor inner error "
if __name__ == '__main__':
    # manual smoke test: interpret a sample program from the test tree
    test = open('../../test/ooc/static_sem_test.ooc').read()
    run(test)
| Python |
#coding=utf8
#$Id: __init__.py 201 2008-06-03 03:03:11Z Filia.Tao@gmail.com $
# registration metadata for this language plugin (display name,
# package path and source-file suffix)
lang_info = {
    'name' : 'L2',
    'path' : 'ooc',
    'suffix' : 'ooc',
}
| Python |
#coding=utf8
#$Id: parse.py 206 2008-06-05 13:17:28Z Filia.Tao@gmail.com $
import sys
from ply import yacc
from interpretor.ooc.lex import *
from interpretor.ast import Node,all_to_node,to_graph
from interpretor.ooc import error
start = 'prog'
def p_empty(p):
    "empty : "
    # matches nothing; leaves p[0] as None
    pass
# program
def p_prog(p):
    '''prog : prog classdecl
            | classdecl
    '''
    # flatten the left-recursive list into one "prog" node
    if len(p) > 2:
        p[0] = Node("prog",p[1].getChildren() + [p[2]])
    else:
        p[0] = Node("prog",[p[1]])
def p_classdecl(p):
    "classdecl : kw_class pos_abstract id pos_superclass '{' pos_condecl pos_static pos_private pos_public pos_redef '}'"
    # class declaration with optional sections in fixed order
    all_to_node(p)
    p[0] = Node("classdecl",p[1:])
def p_pos_abstract(p):
    '''pos_abstract : kw_abstract
                    | empty
    '''
    # optional 'abstract' keyword; None when absent
    all_to_node(p)
    if p[1] != None:
        p[0] = Node("pos_abstract",p[1:])
    else:
        p[0] = p[1]
def p_pos_superclass(p):
    '''pos_superclass : ':' id
                      | empty
    '''
    # optional ': BaseClass' clause; None when absent
    all_to_node(p)
    if len(p) > 2:
        p[0] = Node("pos_superclass",p[1:])
    else:
        p[0] = p[1]
# optional constant declaration
def p_pos_condecl(p):
    '''pos_condecl : condecl ';'
                   | empty
    '''
    all_to_node(p)
    if p[1] != None:
        p[0] = Node("pos_condecl",p[1:])
    else:
        p[0] = p[1]
def p_condecl(p):
    '''condecl : condecl ',' condef
               | kw_const condef
    '''
    # flatten the comma-separated list of constant definitions
    all_to_node(p)
    if len(p) > 3:
        p[0] = Node("condecl",p[1].getChildren() + p[3:])
    else:
        p[0] = Node("condecl",p[1:])
# constant definition
def p_condef(p):
    '''condef : id '=' num
              | id '=' '-' num
    '''
    all_to_node(p)
    p[0] = Node("condef",p[1:])
def p_pos_static(p):
    '''pos_static : kw_static member
                  | empty
    '''
    # optional 'static' member section; None when absent
    all_to_node(p)
    if p[1] != None:
        p[0] = Node("pos_static",p[1:])
    else:
        p[0] = p[1]
def p_pos_private(p):
    '''pos_private : kw_private member
                   | empty
    '''
    # optional 'private' member section; None when absent
    all_to_node(p)
    if p[1] != None:
        p[0] = Node("pos_private",p[1:])
    else:
        p[0] = p[1]
def p_pos_public(p):
    '''pos_public : kw_public member
                  | empty
    '''
    # optional 'public' member section; None when absent
    all_to_node(p)
    if p[1] != None:
        p[0] = Node("pos_public",p[1:])
    else:
        p[0] = p[1]
def p_pos_redef(p):
    '''pos_redef : kw_redef cfdef_list
                 | kw_redef
                 | empty
    '''
    # optional 'redef' section (possibly empty); None when absent
    all_to_node(p)
    if p[1] != None:
        p[0] = Node("pos_redef",p[1:])
    else:
        p[0] = p[1]
def p_cfdefs(p):
    '''cfdef_list : cfdef_list cfdef
                  | cfdef
    '''
    # flatten the left-recursive list of concrete function definitions
    if len(p) > 2:
        p[0] = Node("cfdef_list",p[1].getChildren() + [p[2]])
    else:
        p[0] = Node("cfdef_list",p[1:])
def p_member(p):
    '''member : vdecl fdefs
              | vdecl
              | fdefs
              | empty
    '''
    # a member section: variable declarations and/or function definitions
    p[0] = Node("member",p[1:])
def p_vdecl(p):
    "vdecl : kw_var decllist kw_end"
    # 'var ... end' variable declaration block
    all_to_node(p)
    p[0] = Node("vdecl",p[1:])
def p_decllist(p):
    '''decllist : decl ';' decllist
                | decl
    '''
    # flatten the right-recursive, semicolon-separated declaration list
    all_to_node(p)
    if len(p) > 2:
        p[0] = Node("decllist",[p[1]] + p[3].getChildren())
    else:
        p[0] = Node("decllist",p[1:])
def p_decl(p):
    "decl : type idlist"
    # one declaration: a type followed by one or more names
    p[0] = Node("decl",p[1:])
# type
def p_type(p):
    '''type : type '[' ']'
            | id
    '''
    all_to_node(p)
    #FIXME make a ast not a ast ???
    if len(p) > 2:
        # each '[]' suffix bumps the dimension count on the same node
        p[0] = p[1]
        p[0].dim = p[0].dim + 1
    else:
        p[0] = Node("type", p[1:])
        p[0].dim = 0
def p_idlist(p):
    '''idlist : id ',' idlist
              | id
    '''
    # flatten the right-recursive, comma-separated name list
    all_to_node(p)
    if len(p) > 2:
        p[0] = Node("idlist",[p[1]] + p[3].getChildren())
    else:
        p[0] = Node("idlist",p[1:])
# function definitions
def p_fdefs(p):
    ''' fdefs : fdef fdefs
              | fdef
    '''
    # flatten the right-recursive list of function definitions
    if len(p) > 2:
        p[0] = Node("fdefs",[p[1]] + p[2].getChildren())
    else:
        p[0] = Node("fdefs",p[1:])
def p_fdef(p):
    '''fdef : afdef
            | cfdef
    '''
    # a function definition is either abstract (afdef) or concrete (cfdef)
    p[0] = Node("fdef",p[1:])
def p_afdef(p):
    '''afdef : kw_func kw_abstract type id '(' type_list ')'
             | kw_func kw_abstract type id '(' ')'
    '''
    # abstract function: signature only, no body
    all_to_node(p)
    p[0] = Node("afdef", p[1:])
def p_typelist(p):
    '''type_list : type_list ',' type
                 | type
    '''
    # flatten the left-recursive, comma-separated type list
    if len(p) > 2:
        p[0] = Node("type_list",p[1].getChildren() + [p[3]])
    else:
        p[0] = Node("type_list",p[1:])
def p_cfdef(p):
    "cfdef : kw_func type head '{' funbody '}'"
    # concrete function: return type, header and brace-delimited body
    all_to_node(p)
    p[0] = Node("cfdef", p[1:])
def p_head(p):
    '''head : id '(' ')'
            | id '(' paralist ')'
    '''
    # function header: name plus optional formal parameter list
    all_to_node(p)
    p[0] = Node("head", p[1:])
def p_paralist(p):
    '''paralist : paradecl
                | paradecl ',' paralist
    '''
    # flatten the right-recursive, comma-separated parameter list
    all_to_node(p)
    if len(p) > 2:
        p[0] = Node("paralist",[p[1]] + p[3].getChildren())
    else:
        p[0] = Node("paralist",p[1:])
def p_paradecl(p):
    "paradecl : type id "
    # one formal parameter: type followed by name
    all_to_node(p)
    p[0] = Node("paradecl", p[1:])
def p_funbody(p):
    '''funbody : vdecl stlist
               | stlist
    '''
    # function body: optional local-variable block plus a statement list
    p[0] = Node("funbody", p[1:])
def p_stlist(p):
    '''stlist : st ';' stlist
              | st
              | st ';'
    '''
    # flatten the statement list; a trailing ';' is allowed and dropped
    all_to_node(p)
    if len(p) == 4:
        p[0] = Node("stlist",[p[1]] + p[3].getChildren())
    else:
        p[0] = Node("stlist",[p[1]])
def p_st(p):
    '''st : exp
          | cond
          | loop
    '''
    # a statement: expression, conditional or loop
    p[0] = Node("st",p[1:])
def p_cond(p):
    '''cond : kw_if '(' exp ')' st
            | kw_if '(' exp ')' st kw_else st
    '''
    # if-statement with optional else branch
    all_to_node(p)
    p[0] = Node("cond",p[1:])
def p_loop(p):
    '''loop : kw_while '(' exp ')'
            | kw_while '(' exp ')' st
    '''
    # while-loop; the body statement is optional
    all_to_node(p)
    p[0] = Node("loop",p[1:])
def p_exp(p):
    '''exp : orexp
           | orexp '=' orexp
    '''
    # expression: a bare orexp or an assignment
    all_to_node(p)
    p[0] = Node("exp",p[1:])
def p_orexp(p):
    '''orexp : andexp
             | orexp orop andexp
    '''
    # logical-or expression (left associative)
    all_to_node(p)
    p[0] = Node("orexp",p[1:])
def p_andexp(p):
    '''andexp : relexp
              | andexp andop relexp
    '''
    # logical-and expression (left associative)
    all_to_node(p)
    p[0] = Node("andexp",p[1:])
def p_relexp(p):
    '''relexp : term
              | relexp relop term
    '''
    # relational expression (left associative)
    all_to_node(p)
    p[0] = Node("relexp",p[1:])
def p_relop(p):
    '''relop : eqop
             | neop
             | ltop
             | gtop
             | leop
             | geop
    '''
    # one of the six relational operator tokens
    all_to_node(p)
    p[0] = Node("relop",p[1:])
def p_term(p):
    '''term : factor
            | term addop factor
    '''
    # additive expression (left associative)
    p[0] = Node("term",p[1:])
def p_addop(p):
    '''addop : '+'
             | '-'
    '''
    # additive operator token
    all_to_node(p)
    p[0] = Node("addop",p[1:])
def p_factor(p):
    '''factor : uniexp
              | factor multop uniexp
    '''
    # multiplicative expression (left associative)
    p[0] = Node("factor",p[1:])
def p_mulop(p):
    '''multop : '*'
              | '/'
              | '%'
    '''
    # multiplicative operator token
    all_to_node(p)
    p[0] = Node("multop",p[1:])
def p_uniexp(p):
    '''uniexp : uniop uniexp
              | postexp
    '''
    # unary expression: prefix operator applied recursively
    p[0] = Node("uniexp",p[1:])
def p_uniop(p):
    '''uniop : '-'
             | '!'
             | incop
             | decop
             | chkop
    '''
    # unary prefix operator token
    all_to_node(p)
    p[0] = Node("uniop",p[1:])
def p_postexp(p):
    '''postexp : entity
               | postexp postfix
    '''
    # postfix expression: an entity followed by zero or more postfixes
    p[0] = Node("postexp",p[1:])
def p_postfix(p):
    '''postfix : incop
               | decop
               | apara
               | index
               | aselect
               | tcast
    '''
    # postfix: ++/--, call arguments, subscript, member select, or cast
    all_to_node(p)
    p[0] = Node("postfix",p[1:])
def p_apara(p):
    '''apara : '(' explist ')'
             | '(' ')'
    '''
    # actual-parameter list of a call (possibly empty)
    all_to_node(p)
    p[0] = Node("apara",p[1:])
def p_explist(p):
    '''explist : exp
               | exp ',' explist
    '''
    # flatten the right-recursive, comma-separated expression list
    all_to_node(p)
    if len(p) > 2:
        p[0] = Node("explist",[p[1]] + p[3].getChildren())
    else:
        p[0] = Node("explist",p[1:])
def p_index(p):
    "index : '[' exp ']'"
    # array subscript
    all_to_node(p)
    p[0] = Node("index",p[1:])
def p_aselect(p):
    "aselect : '.' id"
    # member selection
    all_to_node(p)
    p[0] = Node("aselect",p[1:])
def p_tcast(p):
    "tcast : ':' type"
    # postfix type cast
    all_to_node(p)
    p[0] = Node("tcast",p[1:])
def p_entity(p):
    '''entity : id
              | num
              | string
              | cast
              | alloc
    '''
    # primary entity: identifier, literal, parenthesized cast or allocation
    all_to_node(p)
    p[0] = Node("entity",p[1:])
def p_cast(p):
    "cast : '(' stlist ')'"
    # parenthesized statement list; its value is the last statement's value
    all_to_node(p)
    p[0] = Node("cast",p[1:])
def p_alloc(p):
    '''alloc : kw_new type
             | kw_new type '[' exp ']'
    '''
    # allocation: 'new T' or 'new T[size]'
    all_to_node(p)
    p[0] = Node("alloc",p[1:])
def p_error(p):
    # yacc error hook: report the offending token and abort parsing
    print >>sys.stderr,p
    raise error.ParseError(p)
# build the LALR parser tables once at import time
parser = yacc.yacc()
def parse(data):
    """Parse *data* and return the AST root node."""
    return parser.parse(data, debug=0)
if __name__ == '__main__':
    # manual smoke test: parse a sample program from the test tree.
    # Bug fix: 'test' was never defined because the open() line was
    # commented out, so running this module raised NameError.
    test = open('../../test/ooc/sp.ooc').read()
    n = parse(test)
    #to_graph(n, "test_ooc")
| Python |
#coding=utf8
#$Id: lang.py 204 2008-06-04 12:56:45Z Filia.Tao@gmail.com $
'''
OOC 语言有四种类型。
1. 整形
2. Void
3. 数组
4. 类
注意这个里面变量名是类似java 的引用机制。
null 表示空引用。
怎样处理特殊的null 值? (用Object(nullType,None) 来表示。
从程序中可以看到 null 似乎可以赋值给任何类型的对象。
'''
from interpretor.ooc import error
#class Singleton(type):
# def __call__(cls, *args):
# if not hasattr(cls, 'instance'):
# cls.instance = super(Singleton, cls).__call__(*args)
# return cls.instance
def do_type_trans(main_type, op_name, arg = None):
    """Type-transition dispatch: argument passing is checked like assignment."""
    #print "do_type_trans", main_type, op_name , arg
    effective_op = "assign" if op_name == "argument_pass" else op_name
    return main_type.do_type_trans(effective_op, arg)
#修饰符函数
def require_same(func):
    """Decorator: require rhs to have exactly the receiver's type before calling *func*."""
    def wrapped(self, lhs, rhs):
        if rhs.type == self:
            return func(self, lhs, rhs)
        raise error.TypeError(self, rhs.type)
    return wrapped
def require_same_or_null(func):
    """Decorator: accept an rhs of the receiver's own type or of the null type."""
    def wrapped(self, lhs, rhs):
        acceptable = (rhs.type == self) or (rhs.type == nullType)
        if not acceptable:
            raise error.TypeError(self, rhs.type)
        return func(self, lhs, rhs)
    return wrapped
def require_same_base_or_null(func):
    '''Decorator: require rhs to be null, the same type, or type-compatible
    through lhs's declared-type base chain.

    (The compatibility logic here is fairly involved.)
    '''
    def wrapped(self, lhs, rhs):
        base = rhs.org_type
        if base == nullType:
            return func(self, lhs, rhs)
        if lhs.type == rhs.type or lhs.org_type == rhs.type:
            return func(self, lhs, rhs)
        # walk rhs's declared-type base chain looking for lhs's declared type
        while(base):
            if base == lhs.org_type:
                break
            else:
                base = base.base
        else:
            #print "tttt", lhs.org_type , rhs.type
            raise error.TypeError(lhs.org_type,rhs.org_type)
        return func(self,lhs,rhs)
    return wrapped
def is_type_castable(obj, type):
    """Return True when *obj* can be cast to *type* (same type or a base of it)."""
    candidate = obj.type
    # walk the base chain until we hit the target type or run out
    while candidate is not None and candidate != type:
        candidate = candidate.base
    return candidate is not None
class Type(object):
    '''Base class of all OOC types; implements assignment, equality and cast checks.'''
    def __init__(self):
        self.name = "type"
        self.base = None
    def to_str(self,obj):
        # default rendering: str() of the wrapped value
        return str(obj.value)
    def do_type_trans(self, op_name, arg = None):
        # static-check hook: return the result type of applying op_name,
        # or None when the operand types do not match
        if op_name == 'assign':
            if arg == self:
                return self
            else:
                return None
        elif op_name in ('eq','ne'):
            if arg == self:
                return intType
            else:
                return None
        elif op_name == 'tcast':
            if self == arg or arg == void:
                return arg
            else:
                return None
        else:
            return None
    @require_same
    def op_assign(self,lhs,rhs):
        lhs.value = rhs.value
        return lhs
    @require_same
    def op_eq(self,lhs,rhs):
        # NOTE(review): identity ('is') comparison, not value equality -- confirm intended
        return Object(intType, int(lhs.value is rhs.value))
    @require_same
    def op_ne(self,lhs,rhs):
        ret = self.op_eq(lhs,rhs)
        # invert the 0/1 result of op_eq
        ret.value = [1,0][ret.value]
        return ret
    def op_tcast(self,obj,type):
        '''Explicit type cast.
        1. a type can always be cast to itself
        2. a subclass can be cast to its base class
        3. any type can be cast to void
        After the cast the value satisfies:
        obj.org_type = type
        #obj.type = type
        '''
        if obj.type == type:
            return obj
        elif type == void:
            return Object(void)
        else:
            if is_type_castable(obj,type):
                obj.org_type = type
                #obj.type = type
                return obj
            else:
                raise error.TCastError(obj,type)
    def alloc(self,size = None):
        # with a size: allocate an array of default-initialized elements
        if size:
            ret = Object(Array(self))
            ret.value = [self.alloc() for i in range(size.value)]
            return ret
        else:
            return self.alloc_one()
    def __repr__(self):
        return "<ooc Type %s>" %self.name
    def __eq__(self,rhs):
        # types compare equal by name
        return isinstance(rhs, Type) and self.name == rhs.name
    def __ne__(self,rhs):
        return not self.__eq__(rhs)
    __str__ = __repr__
class Void(Type):
    '''The void type: the type of statements that produce no value.'''
    def __init__(self):
        self.base = None
        self.name = "void"
class Integer(Type):
    '''The integer type (Small C heritage).'''
    def __init__(self):
        self.name = "int"
        self.base = None
    def do_type_trans(self, op_name, arg = None):
        '''Check type compatibility and return the operator's result type; None on mismatch.'''
        #print "interger do_type_trans" , op_name, arg
        if op_name in ('and', 'or', 'lt', 'gt', 'le',
            'ge','add', 'minus', 'mul', 'div' , 'mod',
            'assign', 'eq', 'ne'):
            if arg and arg == self:
                return self
            else:
                return None
        elif hasattr(self, "op_" + op_name):
            # unary operators: the result is int as well
            return self
        else:
            return super(Integer, self).do_type_trans(op_name, arg)
    def asBool(self,obj):
        return bool(obj.value)
    @require_same
    def op_or(self,lhs,rhs):
        return Object(intType, int(bool(lhs.value or rhs.value)))
    @require_same
    def op_and(self,lhs,rhs):
        return Object(intType, int(bool(lhs.value and rhs.value)))
    @require_same
    def op_eq(self,lhs,rhs):
        return Object(intType, int(lhs.value == rhs.value))
    @require_same
    def op_ne(self,lhs,rhs):
        return Object(intType, int(lhs.value != rhs.value))
    @require_same
    def op_lt(self,lhs,rhs):
        return Object(intType, int(lhs.value < rhs.value))
    @require_same
    def op_gt(self,lhs,rhs):
        return Object(intType, int(lhs.value > rhs.value))
    @require_same
    def op_le(self,lhs,rhs):
        return Object(intType, int(lhs.value <= rhs.value))
    @require_same
    def op_ge(self,lhs,rhs):
        return Object(intType, int(lhs.value >= rhs.value))
    @require_same
    def op_add(self,lhs,rhs):
        return Object(intType, lhs.value + rhs.value)
    @require_same
    def op_minus(self,lhs,rhs):
        return Object(intType, lhs.value - rhs.value)
    @require_same
    def op_mul(self,lhs,rhs):
        return Object(intType, lhs.value * rhs.value)
    @require_same
    def op_div(self,lhs,rhs):
        return Object(intType, lhs.value / rhs.value)
    @require_same
    def op_mod(self,lhs,rhs):
        return Object(intType, lhs.value % rhs.value)
    def op_minus_(self,rhs):
        # unary minus
        return Object(intType, - rhs.value)
    def op_not(self,rhs):
        return Object(intType, int(not rhs.value) )
    def op_inc(self,rhs):
        # prefix ++: mutate in place and return the operand
        rhs.value += 1
        return rhs
    def op_dec(self,rhs):
        # prefix --: mutate in place and return the operand
        rhs.value -= 1
        return rhs
    def op_chk(self,rhs):
        # 'chk' operator: assert the value is non-zero
        if rhs.value == 0:
            raise error.ChkFailError()
        return rhs
    def op_inc_(self,lhs):
        # postfix ++: return the old value, then increment
        ret = Object(intType, lhs.value)
        lhs.value += 1
        return ret
    def op_dec_(self,lhs):
        # postfix --: return the old value, then decrement
        ret = Object(intType, lhs.value)
        lhs.value -= 1
        return ret
    def alloc_one(self):
        return Object(self,0)
class Array(Type):
    '''Array type; base_type is the element type (nested Arrays for multi-dim).
    '''
    def __init__(self, base, dim = 1):
        if dim > 1:
            self.base_type = Array(base, dim-1)
        else:
            self.base_type = base
        self.name = self.base_type.name + "[]"
        self.base = None
    def do_type_trans(self, op_name, arg = None):
        '''Check type compatibility and return the operator's result type; None on mismatch.'''
        #print "array do_type_trans" , op_name, arg
        if op_name in ('eq', 'ne'):
            if (arg == self or arg == nullType):
                return intType
            else:
                return None
        elif op_name == "assign":
            if (arg == self or arg == nullType):
                return self
            else:
                return None
        elif op_name == "index":
            if arg == intType:
                return self.base_type
            else:
                return None
        elif op_name in ("member_no_private", "member"):
            # arrays expose a single member: length
            if arg == "length":
                return intType
            else:
                return None
        return super(Array, self).do_type_trans(op_name, arg)
    def to_str(self, obj):
        return '[' + ",".join([x.to_str() for x in obj.value]) + ']'
    @require_same
    def op_assign(self, lhs, rhs):
        # reference assignment: both names share the same backing list
        lhs.value = rhs.value
        return lhs
    @require_same
    def op_eq(self, lhs, rhs):
        return Object(intType, int(lhs.value is rhs.value))
    @require_same
    def op_ne(self, lhs, rhs):
        return Object(intType, int(not (lhs.value is rhs.value)))
    def op_index(self, lhs, rhs):
        # bounds-checked subscript of a non-null array by an int index
        if rhs.type != intType or lhs.value is None:
            raise error.TypeError(lhs,rhs)
        ind = rhs.value
        if ind < 0 or ind >= len(lhs.value):
            raise error.IndexError(lhs.value,(0,len(lhs.value)))
        return lhs.value[ind]
    def alloc_one(self):
        return Object(self)
    def op_member_no_private(self, lhs, rhs):
        '''Arrays support exactly one member: length.
        '''
        if rhs != "length":
            raise error.MemberError(lhs, rhs)
        else:
            return Object(intType, len(lhs.value))
class RootClass(Type):
    '''The language is single-rooted like Java; this is the base of every class.'''
    def __init__(self):
        self.name = "Object"
        self.base = None
    def is_base_of(self, arg):
        # walk arg's base chain looking for self
        while(arg):
            if arg == self:
                break
            else:
                arg = arg.base
        else:
            return False
        return True
    def do_type_trans(self, op_name, arg = None):
        '''Check type compatibility and return the operator's result type; None on mismatch.'''
        if op_name in ('eq', 'ne'):
            if arg == nullType or self.is_base_of(arg) or arg.do_type_trans(op_name, self):
                return intType
            else:
                return None
        elif op_name == "assign":
            if arg == nullType or self.is_base_of(arg):
                return self
            else:
                return None
        elif op_name == 'tcast':
            # tcast may also be an up-cast
            if self == arg or arg == void or self.is_base_of(arg) or arg.is_base_of(self):
                return arg
            else:
                return None
        elif op_name in("member", "op_member_no_private", "op_member_cls"):
            # NOTE(review): the last two strings look like they should be
            # "member_no_private" / "member_cls" (no op_ prefix) -- confirm
            return None
        return super(RootClass, self).do_type_trans(op_name, arg)
    @require_same_base_or_null
    def op_assign(self, lhs, rhs):
        lhs.type = rhs.type
        lhs.value = rhs.value
        return lhs
    @require_same_base_or_null
    def op_eq(self, lhs, rhs):
        return Object(intType, int(lhs.value is rhs.value))
    @require_same_base_or_null
    def op_ne(self,lhs,rhs):
        return Object(intType, int(not (lhs.value is rhs.value)))
    def insert_public(self,value):
        # the root class contributes no public instance variables
        pass
    def get_cls_member(self, name, no_private = False):
        '''Per-class members (static/const variables and all methods).
        The root class has none, so any lookup fails.
        '''
        raise error.MemberError(self,name)
    def op_member(self,lhs,rhs):
        '''
        Reference of the form ins.var.
        May access the class's own private/public members and base-class
        public members; the root class has none.
        '''
        #print "get %s from %s" %(rhs,lhs)
        if not isinstance(rhs,str):
            raise error.TypeError("id",lhs)
        raise error.MemberError(lhs,rhs)
    def op_member_no_private(self,lhs,rhs):
        '''
        Reference of the form ins.var.
        Only public members of the class or its bases are reachable;
        the root class has none.
        '''
        if not isinstance(rhs,str):
            raise error.TypeError("id",lhs)
        raise error.MemberError(lhs,rhs)
    def op_member_cls(self,name):
        raise error.MemberError(self, name)
class String(RootClass):
    '''The built-in string class; instances render as their raw value.'''
    def __init__(self):
        self.base = rootClass
        self.name = "String"
    def to_str(self, obj):
        return obj.value
class Class(RootClass):
    '''A user-defined OOC class.

    Visibility rules: all 'redef' members are public; 'public' and
    'const' members are public; only 'private' members are private.
    '''
    def __init__(self,name,global_ns,base = None,decorate = None):
        self.name = name
        if base is None:
            self.base = rootClass
        else:
            self.base = base
        self.global_ns = global_ns
        self.decorate = decorate
        # member name -> (value, decorator)
        self.members = {}
        # member names grouped by declaration decorator
        # (bug fix: the literal used to list the 'static' key twice)
        self.by_decorate = {
            'static':[],
            'private':[],
            'public':[],
            'redef':[],
            'const':[]
        }
        # member names grouped by kind
        self.by_type = {
            'var':[],
            'func':[]
        }
        # per-class (static/const) variable storage
        self.cls_var = {}
    def do_type_trans(self, op_name, arg = None):
        '''Check type compatibility and return the operator's result type; None on mismatch.'''
        if op_name in ('eq', 'ne'):
            if arg == nullType or self.is_base_of(arg) or arg.do_type_trans(op_name, self):
                return intType
            else:
                return None
        if op_name in ("member","member_no_private"):# "member_cls"):
            try:
                # probe member access on a throwaway instance
                t = self.alloc_one()
                ret = getattr(self, "op_" + op_name)(t, arg)
                #It is a member function
                if not isinstance(ret, Object):
                    return ret
                else:
                    return ret.type
            except error.MemberError:
                return None
        elif op_name in ("member_cls"):
            #print "member_cls"
            try:
                ret = getattr(self, "op_" + op_name)(arg)
                if not isinstance(ret, Object):
                    return ret
                else:
                    return ret.type
            except error.MemberError:
                return None
        return super(Class, self).do_type_trans(op_name, arg)
    def add_var(self,name,value,decorate):
        # register a variable member under its decorator group
        self.members[name] = (value,decorate)
        self.by_type['var'].append(name)
        self.by_decorate[decorate].append(name)
        if decorate == "static":
            self.cls_var[name] = Object(value)
        elif decorate == "const":
            self.cls_var[name] = value
    def is_var(self, member):
        pass
    def is_func(self, member):
        pass
    def add_func(self,name,value,decorate):
        # register a method member under its decorator group
        self.members[name] = (value,decorate)
        self.by_type['func'].append(name)
        self.by_decorate[decorate].append(name)
    def op_get(self,lhs,rhs):
        # name lookup from inside a method: members first, then globals
        #print "get %s from %s" %(rhs,lhs)
        try:
            return self.op_member(lhs,rhs)
        except error.MemberError:
            return self.global_ns.get(rhs)
    def op_member(self, lhs, rhs):
        '''
        Reference of the form ins.var.
        May access the class's own private/public members and the base
        class's public members.
        '''
        #print "get %s from %s" %(rhs,lhs)
        if not isinstance(rhs,str):
            raise error.TypeError("id",lhs)
        if rhs in lhs.value:
            # instance variable (own or inherited)
            return lhs.value[rhs]
        else:
            # class variable / method; methods get bound to the instance
            ret = self.get_cls_member(rhs)
            if not isinstance(ret, Object):
                ret = ret.bind(lhs)
            return ret
    def op_member_no_private(self, lhs, rhs):
        '''
        Reference of the form ins.var.
        Only public members of the class or its bases are reachable.
        '''
        if not isinstance(rhs,str):
            raise error.TypeError("id",lhs)
        if rhs in self.by_decorate['private']:
            raise error.MemberError(lhs, rhs)
        if rhs in lhs.value:
            # instance variable (own or inherited)
            return lhs.value[rhs]
        else:
            # class variable / method; methods get bound to the instance
            ret = self.get_cls_member(rhs, True)
            if not isinstance(ret, Object):
                ret = ret.bind(lhs)
            return ret
    def insert_public(self,value):
        # recursively seed 'value' with default objects for public vars
        if self.base:
            self.base.insert_public(value)
        for name in self.by_type['var']:
            if self.members[name][1] in ("public", "redef"):
                value[name] = Object(self.members[name][0])
        return value
    def get_cls_member(self, name, no_private = False):
        '''Per-class members: static and const variables plus all methods.
        '''
        if no_private and name in self.by_decorate['private']:
            #TODO a better error message would help here
            raise error.MemberError(self,name)
        if name in self.cls_var: # class variables: static and const
            return self.cls_var[name]
        elif name in self.by_type["func"]: # methods
            return self.members[name][0]
        else:
            return self.base.get_cls_member(name, True) # base-class private members are never accessible
    def op_get_cls(self,name):
        '''Namespace visible inside a static method.
        '''
        try:
            return self.op_member_cls(name)
        except error.MemberError:
            return self.global_ns.get(name)
    def op_member_cls(self,name):
        '''
        Executed for a ClassA.var style reference.
        TODO: should inheritance be considered here?
        '''
        if name in self.cls_var: # static and const variables
            return self.cls_var[name]
        elif name in self.by_type['func'] and name in self.by_decorate["static"]: # static methods
            return self.members[name][0]
        else: # fall back to the base class
            return self.base.op_member_cls(name)
    def alloc_one(self):
        # allocate a default-initialized instance (a dict of member Objects)
        ret = Object(self)
        ret.value = {}
        self.base.insert_public(ret.value)
        for name in self.by_type['var']:
            if self.members[name][1] not in ['const','static']:
                ret.value[name] = Object(self.members[name][0])
        return ret
    def __repr__(self):
        ret = "<OOC Type %s{" %self.name
        ret += "}>"
        return ret
class NullType(Type):
    '''The type of the null literal; comparable and assignable with class types.'''
    def __init__(self):
        self.name = "NullType"
        self.base = None
    def asBool(self,obj):
        # null is always falsy
        return False
    @require_same_base_or_null
    def op_assign(self,lhs,rhs):
        lhs.type = rhs.type
        lhs.value = rhs.value
        return lhs
    def op_eq(self,lhs,rhs):
        if isinstance(rhs.type,(RootClass,NullType)):
            return Object(intType, int(lhs.value is rhs.value))
        else:
            raise error.TypeError(lhs,rhs)
    def op_ne(self,lhs,rhs):
        if isinstance(rhs.type,(RootClass,NullType)):
            return Object(intType, int(lhs.value is not rhs.value))
        else:
            raise error.TypeError(lhs,rhs)
    def op_tcast(self,obj,type):
        '''Type cast: null may be cast to any RootClass/Class or NullType type.
        '''
        if isinstance(type,(RootClass,NullType)):
            # bug fix: was 'obj.ort_type = type' (typo), which left
            # org_type stale and created a bogus 'ort_type' attribute
            obj.org_type = type
            obj.type = type
            return obj
        else:
            raise error.TCastError(obj,type)
    def __repr__(self):
        return "<ooc Type Null>"
    __str__ = __repr__
class Object:
    # A runtime value: a (type, value) pair; operators dispatch to the type.
    def __init__(self,type,value = None):
        self.type = type
        # org_type tracks the declared type across casts
        self.org_type = type
        self.value = value
        #TODO ugly here
        if value is None and type is intType:
            self.value = 0
    def __nonzero__(self):
        # Python 2 truthiness hook; delegates to the type when it defines asBool
        if hasattr(self.type, "asBool"):
            return self.type.asBool(self)
        else:
            #raise Erro (cant't convert to bool value)
            return bool(self.value)
    def __not__(self):
        return not self.__nonzero__()
    def op(self,op,arg = None):
        # dispatch 'op' to the type's op_<name> method (unary or binary)
        if hasattr(self.type,"op_"+op):
            func = getattr(self.type,"op_"+op)
            if arg is not None:
                return func(self,arg)
            else:
                return func(self)
        else:
            raise error.UnsupportedOPError(op)
    def to_str(self):
        return self.type.to_str(self)
    def __repr__(self):
        return "OOC Object <" + repr(self.type) + " : " + repr(self.value) + ">"
    __str__ = __repr__
class ConstObject(Object):
    """An Object whose value may not change: assignment and the four
    increment/decrement operators are rejected."""
    _MUTATING_OPS = ("assign", "inc", "inc_", "dec", "dec_")
    def op(self,op,arg = None):
        if op in self._MUTATING_OPS:
            raise error.UnsupportedOPError(op)
        return Object.op(self,op,arg)
    def __repr__(self):
        return "OOC Const Object <" + repr(self.value) + " : " + self.type.name+ ">"
    __str__ = __repr__
#some special values (shared singletons used throughout the interpreter)
intType = Integer()     # the int type
void = Void()           # the void type
rootClass = RootClass() # implicit root of the class hierarchy
string = String()       # the string type
nullType = NullType()   # type of the null constant
null = ConstObject(nullType,None)   # the immutable null singleton
| Python |
#coding=utf8
#$Id: error.py 84 2008-04-20 07:07:11Z Filia.Tao@gmail.com $
class ParseError(Exception):
    """Raised by the parser when it encounters an unexpected token."""
    def __init__(self,token):
        self.token = token
    def __str__(self):
        tok = self.token
        return "Parser error at line %d token '%s'" % (tok.lineno, tok.value)
class LangError(Exception):
    """Base class for errors raised while running a program."""
    def __init__(self, msg = ""):
        self.msg = msg
    def __str__(self):
        return "Language Error : %s" % self.msg
class NotLeftValueError(LangError):
    """Raised when the target of an assignment is not a left value."""
class ChkFailError(LangError):
    """Raised when a ``chk`` assertion evaluates to zero."""
    def __str__(self):
        return "Chk failed"
class UnsupportedOPError(LangError):
    """Raised when a value's type does not implement an operator."""
    def __init__(self,op):
        self.op = op
    def __str__(self):
        return "unsupported operation '%s'" % self.op
#coding=utf8
#$Id: function.py 205 2008-06-05 04:46:30Z Filia.Tao@gmail.com $
'''Kernel C 函数
'''
import sys
from interpretor.kernelc import lang
from interpretor.kernelc import error
class Namespace(dict):
    """Global name space of a Kernel C program.

    Keys are either function names (str) or memory-cell numbers (int).
    Reading an unknown int key lazily allocates a zero-initialised left
    value, so every numeric cell implicitly exists; unknown string keys
    yield None.
    """
    def __getitem__(self, key):
        # BUG FIX: was ``self.has_key(key)`` -- deprecated in Python 2
        # and removed in Python 3; ``in`` is the equivalent spelling.
        if key not in self:
            if type(key) is int:
                self[key] = lang.Object(lang.intType, 0, True) # is left value
            else:
                return None #FIXME raise Error?
        return dict.__getitem__(self ,key)
class Function:
    """A user-defined Kernel C function: its name plus a statement list."""
    def __init__(self, name, statements):
        self.name = name
        self.statements = statements
    def call(self, inter, line_no = None):
        """Execute every statement through the interpreter *inter*; the
        value of the last statement (void for an empty body) is the
        function's result."""
        result = lang.Object(lang.void)
        for statement in self.statements:
            result = inter.on_statement(statement)
        return result
    def __str__(self):
        return "\n".join(str(statement) for statement in self.statements)
    __repr__ = __str__
# Module-wide I/O channels shared by the built-in functions.
#   input / output : current streams (replaced via set_io)
#   input_buff     : one already-read, stripped input line (look-ahead)
#   is_eof         : 1 once the input stream has been exhausted
io = {
    'input' : sys.stdin,
    'output' : sys.stdout,
    'input_buff' : "",
    'is_eof' : 0
}
def set_io(input_f,output_f):
    """Redirect the interpreter's I/O channels and reset stream state."""
    io.update({
        'input': input_f,
        'output': output_f,
        'input_buff': "",
        'is_eof': 0,
    })
class PrintlnFunc(Function):
    'The built-in ``println`` function: write a newline and flush.'
    def __init__(self):
        self.name = "println"
    def call(self,inter, line_no = None):
        # A bare ``print >>f`` emits just '\n' (and resets the stream's
        # softspace flag, which a plain write() would not do).
        print >>io['output']
        io['output'].flush()
class ReadFunc(Function):
    '''The built-in ``read`` function: return the next integer token.

    Input is consumed line by line; blank lines are skipped and one
    stripped line of look-ahead is kept in io['input_buff'] (filled by
    eof()).  NOTE(review): ``error.EOFError`` is raised below but no
    such class is visible in the kernelc error module -- confirm it
    exists, otherwise end-of-input raises AttributeError instead.
    '''
    def __init__(self):
        self.name = "read"
    def call(self, inter, line_no = None):
        if io["input_buff"]:
            # A token was buffered by a prior eof(): consume it and
            # refill the look-ahead buffer for the next call.
            inp = io["input_buff"]
            io["input_buff"] = ""
            while io["input_buff"] == "":
                line = io['input'].readline()
                if line == "":
                    io['is_eof'] = 1
                    break;
                else:
                    io["input_buff"] = line.strip()
        else:
            # No look-ahead: read lines, skipping blank ones, until a
            # token is found.
            while True:
                line = io['input'].readline()
                if line == "":
                    raise error.EOFError()
                else:
                    inp = line.strip()
                if inp != "":
                    break;
        try:
            inp = int(inp)
        except ValueError,e:
            raise error.LangError("Invalid Input")
        return lang.Object(lang.intType, inp)
    def __str__(self):
        return "read"
    __repr__ = __str__
class EofFunc(Function):
    '''The built-in ``eof`` function: 1 if input is exhausted, else 0.

    Reads ahead (skipping blank lines) into io['input_buff'] so that
    the answer is accurate before the next read() call.
    '''
    def __init__(self):
        self.name = "eof"
    def call(self, inter, line_no = None):
        while not io["input_buff"] and not io['is_eof']:
            line = io['input'].readline()
            if line == "":
                io['is_eof'] = 1
            else:
                io["input_buff"] = line.strip()
        return lang.Object(lang.intType, io["is_eof"])
    def __str__(self):
        return "eof"
    __repr__ = __str__
def get_built_in_ns():
    """Return a fresh Namespace pre-loaded with the built-in functions."""
    ns = Namespace()
    ns['read'] = ReadFunc()
    ns['eof'] = EofFunc()
    ns['println'] = PrintlnFunc()
    return ns
| Python |
#coding=utf8
#$Id: lex.py 205 2008-06-05 04:46:30Z Filia.Tao@gmail.com $
#Copyright 2007 Tao Fei (filia.tao@gmail.com)
#Released under GPL V3 (or later)
#see http://www.gnu.org/copyleft/gpl.html for more details
import ply.lex as lex
# Token names for ply.lex; single-character tokens are declared through
# ``literals`` instead.
tokens = ('id', 'num',
        'orop','andop','eqop', 'neop', 'ltop', 'gtop', 'leop', 'geop', 'chkop', 'incop', 'decop',
        'kw_func', 'kw_while', 'kw_if', 'kw_else',
        'io_print',
        #'io_println'
        )
literals = ['(', ')', '{', '}', ';', '?', '#', '=', '+', '-', '*', '/', '%' ,'!', '@' ]
# Simple (string-regex) token rules.  ply sorts these by decreasing
# regex length, so '<=' / '>=' still win over '<' / '>'.
t_orop = r'\|\|'
t_andop = r'&&'
t_eqop = r'=='
t_neop = r'!='
t_ltop = r'<'
t_gtop = r'>'
t_leop = r'<='
t_geop = r'>='
t_incop = r'\+\+'
t_decop = r'--'
def t_num(t):
    r'\d+'
    # Decimal integer literal: the token value is converted in place.
    # (The int() call cannot actually fail for a \d+ match.)
    try:
        t.value = int(t.value,10);
    except ValueError:
        print "Number %s is bad!" % t.value
        t.value = 0
    return t
# Keyword map: t_id matches a generic identifier first, then this table
# re-types the token, keeping the keyword regexes out of the master
# lexer regex (the recommended ply idiom).
reserved = {
    "func": "kw_func",
    "while": "kw_while",
    "if": "kw_if",
    "else": "kw_else",
    "chk": "chkop",
    "print": "io_print",
    #"println": "io_println",
    #"read": "io_read",
    #"eof": "io_eof"
}
def t_id(t):
    r'\b[a-zA-Z_][a-zA-Z_0-9]*'
    t.type = reserved.get(t.value,'id') # Check for reserved words
    return t
def t_newline(t):
    r'\n'
    # Track line numbers for error reporting.
    t.lexer.lineno += 1
t_ignore = ' \r\t\v'
def t_error(t):
    # Report the bad character, skip it, and flag the failure on the
    # lex module so callers can notice lexing went wrong.
    print "Illegal character '%s' on line %d " % (t.value[0],t.lexer.lineno)
    t.lexer.skip(1)
    lex.has_error = True
# Compute column.
#     input is the input text string
#     token is a token instance
def find_column(input,token):
    """Return the 1-based column of *token* within *input*.

    BUG FIX: the old backward-walking loop stopped *on* the newline
    character, so every token on line 2 onward was reported one column
    too far to the right (line 1 happened to be correct).  Locating the
    start of the line with rfind() gives a consistent 1-based column,
    matching the ply documentation's reference implementation.
    """
    line_start = input.rfind('\n', 0, token.lexpos) + 1
    return (token.lexpos - line_start) + 1
test = '''
func gcd {
chk (*1>1 && *2>1);
while (*2!=0)
(
3=*1%*2;
1=*2;
2=*3
);
*1
}
func main {
while (!eof())
(
1=read();
if (!eof())(
2=read();
print(gcd())
)
)
}
'''
test1 = '''
func gcd {
1=*1+*2;
*1=*2;
2=4;
chk (*(*1+1)>1 && *(*1+2)>1);
while (*(*1+2)!=0)(
*1+3=*(*1+1)%*(*1+2);
*1+1=*(*1+2);
*1+2=*(*1+3)
);
3=*(*1+1);
2=**1;
1=*1-*2;
*3
}
func main {
1=10;
2=2;
while (!eof())(
*1+1=read();
if (!eof())(
*1+*2+1=*(*1+1);
*1+*2+2=read();
print(gcd());
println()
)
)
}
'''
# Build the master lexer at import time so importing this module is
# enough to register the token rules with ply.
lex.lex()
if __name__ == '__main__':
    # Smoke test: dump the token stream of the first sample program.
    lexer = lex.lex()
    lexer.input(test)
    while 1:
        tok = lexer.token()
        if not tok: break
        print tok
| Python |
#coding=utf8
#$Id: interp.py 205 2008-06-05 04:46:30Z Filia.Tao@gmail.com $
'''
KernelC 语言解释器
工作在抽象语法树上。
由于KernelC 语言极端简单。没有作用域等等概念。
只有一个全局名字空间
* 所有函数
* 所有数字变量
因此简单的使用字典就可以记录所有的信息了。
但是有一个问题:
如何区分普通的数字变量 和 引用意义上的数字.
执行 = 操作的语义如何处理.
'''
import operator
import sys
from interpretor.kernelc import lang
from interpretor.kernelc import error
from interpretor.kernelc.function import Namespace,Function,get_built_in_ns
from interpretor.kernelc.parse import parse
from interpretor.kernelc.lex import test
from interpretor.ast import Node,Leaf,BaseASTWalker,BaseAnnotateAction
from interpretor.common import CommonOPAnnotate as OPAnnotate
class MoreParser:
    '''Second pass over the AST: collect every function definition into
    a global namespace that starts from the built-ins.'''
    def __init__(self,ast):
        self.ast = ast
        self.ns = get_built_in_ns()
    def parse(self):
        '''Walk the AST and register each ``fdef`` node in the global
        namespace.'''
        for fdef in self.ast.query("fdef"):
            self.on_fdef(fdef, self.ns)
    def on_fdef(self,node,ns):
        'Function definition: name is child 1, body is the statement list.'
        name = self.on_token(node.child(1))
        self.ns[name] = Function(name, node.query("stlist>st"))
    def on_token(self,node):
        'Terminal: remember it for diagnostics and return its value.'
        self.current_token = node
        return node.value
class Interpreter:
    """Tree-walking evaluator for Kernel C.

    One ``on_<rule>`` method per grammar rule; every method returns a
    lang.Object (or None).  The single namespace *ns* maps both function
    names (str) and numeric memory cells (int).
    """
    def __init__(self, ast, ns):
        self.ast = ast
        self.ns = ns
        self.current_token = None   # last terminal seen, for error reports
        self.call_stack = []        # NOTE(review): never pushed to in this file
    def run(self):
        # Execute main(); language errors are reported with the position
        # of the last token visited plus the (currently empty) call stack.
        try:
            self.ns["main"].call(self)
        except (error.LangError ),e:
            if self.current_token is None:
                print >>sys.stderr,e
            else:
                print >>sys.stderr, "error at line %d near token '%s': %s" %(self.current_token.lineno,self.current_token.value,str(e))
            print >>sys.stderr, "calling stack "
            for x in self.call_stack:
                if x[1]:
                    print >>sys.stderr, "call %s at line %s" %(x[0], x[1])
                else:
                    print >>sys.stderr, "call %s" % (x[0])
        except StandardError,e:
            print >>sys.stderr, "Interpretor inner error "
            raise
    def on_node(self, node):
        # Generic dispatch: leaves go to on_token, interior nodes to
        # on_<type> when defined; single-child nodes fall through to
        # their only child.
        if isinstance(node, Leaf):
            return self.on_token(node)
        else:
            if hasattr(self, 'on_' + node.type):
                return getattr(self, 'on_' + node.type)(node)
            else:
                if len(node) == 1:
                    return self.on_node(node.child(0))
                else:
                    print >>sys.stderr, "not such node ", node.type, node
    on_statement = on_node
    def on_cond(self,node):
        # cond : kw_if '(' exp ')' st [ kw_else st ]
        exp = node.child(2)
        if self.on_exp(exp):
            return self.on_statement(node.child(4))
        elif len(node) > 6:
            return self.on_statement(node.child(6))
        return None
    def on_loop(self,node):
        # loop : kw_while '(' exp ')' [ st ] -- result is the last body value
        exp = node.child(2)
        ret = None
        while self.on_exp(exp):
            if len(node) > 4:
                ret = self.on_statement(node.child(4))
        return ret
    def on_exp(self,node):
        #print node
        if len(node) > 1:
            #print "assing " , node
            lhs = self.on_orexp(node.child(0))
            self.on_token(node.child(1))
            rhs = self.on_orexp(node.child(2))
            # An assignment has the form  n1 = n2
            # meaning  self.ns[n1] = n2  (n1's value is the cell number)
            return self.ns[lhs.value].op("assign", rhs)
        else:
            return self.on_orexp(node.child(0))
    def on_orexp(self,node):
        # short-circuit: a truthy left operand is returned unevaluated
        if len(node) > 1:
            lhs = self.on_orexp(node.child(0))
            if lhs:
                return lhs
            self.on_token(node.child(1))
            rhs = self.on_andexp(node.child(2))
            return lhs.op("or",rhs)
        else:
            return self.on_andexp(node.child(0))
    def on_andexp(self,node):
        # short-circuit: a falsy left operand is returned unevaluated
        if len(node) > 1:
            lhs = self.on_andexp(node.child(0))
            if not lhs:
                return lhs
            self.on_token(node.child(1))
            rhs = self.on_relexp(node.child(2))
            return lhs.op("and",rhs)
        else:
            return self.on_relexp(node.child(0))
    def on_relexp(self,node):
        if len(node) > 1:
            lhs = self.on_relexp(node.child(0))
            # map the operator token to the op_<name> suffix
            m = {
                '==':'eq',
                '!=':'ne',
                '<':'lt',
                '>':'gt',
                '<=':'le',
                '>=':'ge'
            }
            relop = m[self.on_token(node.child(1).child(0))]
            rhs = self.on_term(node.child(2))
            return lhs.op(relop, rhs)
        else:
            return self.on_term(node.child(0))
    def on_term(self,node):
        if len(node) > 1:
            lhs = self.on_term(node.child(0))
            op = {'+':'add','-':'minus'}[self.on_token(node.child(1).child(0))]
            rhs = self.on_factor(node.child(2))
            return lhs.op(op,rhs)
        else:
            return self.on_factor(node.child(0))
    def on_factor(self,node):
        if len(node) > 1:
            lhs = self.on_factor(node.child(0))
            op = {'*':'mul','/':'div','%':'mod'}[self.on_token(node.child(1).child(0))]
            rhs = self.on_uniexp(node.child(2))
            return lhs.op(op,rhs)
        else:
            return self.on_uniexp(node.child(0))
    def on_uniexp(self,node):
        if len(node) > 1:
            op = self.on_token(node.child(0).child(0))
            uniexp = self.on_uniexp(node.child(1))
            if op == '*':
                # dereference: *n looks the value up as a cell number
                return self.ns[uniexp.value]
            else:
                uniop = {'++':'inc','--':'dec','-':'minus_',
                        '!':'not','chk':'chk',
                        '@':'print', 'print':'print', 'println': 'println'}[op]
                return uniexp.op(uniop)
        else:
            return self.on_postexp(node.child(0))
    def on_postexp(self,node):
        # postfix ++/-- ; NOTE(review): returns None when the postfix
        # child is not a Leaf -- confirm that case cannot occur.
        if len(node) > 1:
            postexp = self.on_postexp(node.child(0))
            postfix = node.child(1).child(0)
            if isinstance(postfix,Leaf):
                value = self.on_token(postfix)
                if value == '++':
                    return postexp.op("inc_")
                elif value == '--':
                    return postexp.op("dec_")
        else:
            return self.on_entity(node.child(0))
    def on_entity(self,node):
        '''An entity: a function call, a number literal, or a cast.'''
        if len(node) > 1: # function call
            func = self.ns[self.on_token(node.child(0))]
            return func.call(self)
        else:
            entity = node.child(0)
            if entity.type == "cast":
                return self.on_cast(entity)
            elif isinstance(entity,Leaf):
                entity = self.on_token(entity)
                if isinstance(entity,str):
                    if entity == '?': #input
                        return self.ns['read'].call()
                    elif entity == '#' or entity == 'println':
                        print >>sys.stderr, "error entity"
                        pass #do print
                    else:
                        print >>sys.stderr, "error entity"
                        pass #TODO raise ERROR
                elif isinstance(entity,int): # number literal
                    return lang.Object(lang.intType,entity)
                else:
                    print >>sys.stderr, "error entity"
                    pass #TODO raise error
    def on_cast(self,node):
        '''Semantics of a cast block: the value of its last statement.'''
        for x in node.query("stlist>st"):
            ret = self.on_statement(x)
        return ret
    def on_token(self,node):
        # Remember the terminal for error reporting, return its value.
        self.current_token = node
        return node.value
def run(data, input_file = sys.stdin, output_file = sys.stdout):
    """Parse, annotate and execute a Kernel C program given as text.

    NOTE(review): set_io is commented out, so input_file/output_file
    are currently ignored -- confirm whether redirection should be
    restored (the unit tests pass StringIO streams here).
    """
    #set_io(input_file, output_file)
    try:
        ast = parse(data)
        ast = do_op_annotate(ast)
        parser = MoreParser(ast)
        parser.parse()
        #print parser.ns
        inter = Interpreter(ast, parser.ns)
        inter.run()
    except error.ParseError,e:
        print >>sys.stderr,e
    #print inter.global_ns.ns
def do_op_annotate(ast):
    # Annotate operator tokens with their canonical operation names
    # ('+' -> 'add', ...) before interpretation; the tree is modified
    # in place and returned for convenience.
    annotate_action = OPAnnotate()
    ast_walker = BaseASTWalker(ast, annotate_action)
    ast_walker.run()
    return ast
if __name__ == '__main__':
    # Run the bundled sample program from the lexer module.
    run(test)
| Python |
#coding=utf8
#$Id: __init__.py 201 2008-06-03 03:03:11Z Filia.Tao@gmail.com $
# Metadata used by the front-end to register this language.
lang_info = {
    'name' : 'L0',          # display name
    'path' : 'kernelc',     # package directory
    'suffix' : 'kec',       # source-file extension
}
| Python |
#coding=utf8
#$Id: parse.py 84 2008-04-20 07:07:11Z Filia.Tao@gmail.com $
from ply import yacc
from interpretor.kernelc.lex import *
from interpretor.ast import Node,all_to_node
import interpretor.kernelc.error as error
start = 'prog'  # grammar start symbol for yacc
# NOTE: the docstrings of the p_* functions below ARE the ply grammar
# productions -- they are data, not documentation, and must not be
# reworded.  Every rule builds a generic AST Node; left-recursive list
# rules splice the new child onto the existing node's children.
def p_prog(p):
    '''prog : prog fdef
        | fdef
    '''
    if len(p) > 2 :
        p[0] = Node("prog", p[1].getChildren() + [p[2]])
    else:
        p[0] = Node("prog", p[1:])
def p_fdef(p):
    "fdef : kw_func id '{' stlist '}'"
    all_to_node(p)
    p[0] = Node("fdef", p[1:])
def p_stlist(p):
    '''stlist : stlist ';' st
        | st
    '''
    all_to_node(p)
    if len(p) > 3 :
        p[0] = Node("stlist", p[1].getChildren() + [p[3]])
    else:
        p[0] = Node("stlist", p[1:])
def p_st(p):
    '''st : exp
        | cond
        | loop
    '''
    p[0] = Node("st",p[1:])
#def p_print_st(p):
# ''' print_st : io_print '(' exp ')'
# | io_print '(' ')'
# '''
# all_to_node(p)
# p[0] = Node("print_st",p[1:])
#
#def p_println_st(p):
# ''' println_st : io_println '(' exp ')'
# | io_println '(' ')'
# '''
# all_to_node(p)
# p[0] = Node("println_st",p[1:])
# if / while statements (the docstrings are ply productions -- do not edit)
def p_cond(p):
    '''cond : kw_if '(' exp ')' st
        | kw_if '(' exp ')' st kw_else st
    '''
    all_to_node(p)
    p[0] = Node("cond",p[1:])
def p_loop(p):
    '''loop : kw_while '(' exp ')'
        | kw_while '(' exp ')' st
    '''
    all_to_node(p)
    p[0] = Node("loop",p[1:])
# Expression grammar, from lowest to highest precedence:
# assignment < or < and < relational < additive < multiplicative < unary
def p_exp(p):
    '''exp : orexp
        | orexp '=' orexp
    '''
    all_to_node(p)
    p[0] = Node("exp",p[1:])
def p_orexp(p):
    '''orexp : andexp
        | orexp orop andexp
    '''
    all_to_node(p)
    p[0] = Node("orexp",p[1:])
def p_andexp(p):
    '''andexp : relexp
        | andexp andop relexp
    '''
    all_to_node(p)
    p[0] = Node("andexp",p[1:])
def p_relexp(p):
    '''relexp : term
        | relexp relop term
    '''
    all_to_node(p)
    p[0] = Node("relexp",p[1:])
def p_relop(p):
    '''relop : eqop
        | neop
        | ltop
        | gtop
        | leop
        | geop
    '''
    all_to_node(p)
    p[0] = Node("relop",p[1:])
def p_term(p):
    '''term : factor
        | term addop factor
    '''
    p[0] = Node("term",p[1:])
def p_addop(p):
    '''addop : '+'
        | '-'
    '''
    all_to_node(p)
    p[0] = Node("addop",p[1:])
def p_factor(p):
    '''factor : uniexp
        | factor multop uniexp
    '''
    p[0] = Node("factor",p[1:])
def p_mulop(p):
    '''multop : '*'
        | '/'
        | '%'
    '''
    all_to_node(p)
    p[0] = Node("multop",p[1:])
# Unary / postfix operators and atomic entities (docstrings are ply
# productions -- do not edit them as documentation).
def p_uniexp(p):
    '''uniexp : uniop uniexp
        | postexp
    '''
    p[0] = Node("uniexp",p[1:])
def p_uniop(p):
    '''uniop : '-'
        | '!'
        | incop
        | decop
        | chkop
        | '*'
        | '@'
        | '#'
        | io_print
    '''
    all_to_node(p)
    p[0] = Node("uniop",p[1:])
def p_postexp(p):
    '''postexp : entity
        | postexp postfix
    '''
    p[0] = Node("postexp",p[1:])
def p_postfix(p):
    '''postfix : incop
        | decop
    '''
    all_to_node(p)
    p[0] = Node("postfix",p[1:])
def p_entity(p):
    '''entity : num
        | id '(' ')'
        | '?'
        | '#'
        | cast
    '''
    all_to_node(p)
    p[0] = Node("entity",p[1:])
def p_cast(p):
    "cast : '(' stlist ')'"
    all_to_node(p)
    p[0] = Node("cast",p[1:])
def p_error(p):
    # Any syntax error aborts the parse with the offending token.
    #print p , "at line " , p.lineno
    raise error.ParseError(p)
# Build the LALR parser table at import time.
parser = yacc.yacc()
def parse(data):
    # Parse *data* and return the AST root (a 'prog' Node).
    p = parser.parse(data)
    return p
if __name__ == '__main__':
    # Smoke test: count the function definitions in the sample program.
    n = parse(test)
    print len(n.query("fdef"))
| Python |
#coding=utf8
#$Id: lang.py 84 2008-04-20 07:07:11Z Filia.Tao@gmail.com $
'''
KernelC 只有一个 int 类型。
同时数字有可以作为变量名。 使用* 操作符。
'''
import interpretor.kernelc.error as error
class Type:
    # Base class of the Kernel C runtime types; supplies the printing
    # operators shared by all of them.
    def op_print(self, obj):
        # trailing comma: value followed by a space, no newline
        print obj.value,
    def op_println(self, obj):
        print obj.value
class Void(Type):
    """Result type of statements that produce no value."""
    def __init__(self):
        self.name = "void"
class Integer(Type):
    '''The single numeric type of Kernel C.

    Every operator builds and returns a fresh int Object; logical and
    relational operators encode truth as 1/0.
    '''
    def __init__(self):
        self.name = "int"
    def asBool(self,obj):
        return bool(obj.value)
    def op_assign(self, lhs, rhs):
        lhs.value = rhs.value
        return lhs
    def op_chk(self, obj):
        # ``chk`` aborts the program when its operand is zero.
        if obj.value != 0:
            return obj
        raise error.ChkFailError()
    # -- binary operators -------------------------------------------------
    def op_or(self,lhs,rhs):
        return Object(intType, 1 if (lhs.value or rhs.value) else 0)
    def op_and(self,lhs,rhs):
        return Object(intType, 1 if (lhs.value and rhs.value) else 0)
    def op_eq(self,lhs,rhs):
        return Object(intType, 1 if lhs.value == rhs.value else 0)
    def op_ne(self,lhs,rhs):
        return Object(intType, 1 if lhs.value != rhs.value else 0)
    def op_lt(self,lhs,rhs):
        return Object(intType, 1 if lhs.value < rhs.value else 0)
    def op_gt(self,lhs,rhs):
        return Object(intType, 1 if lhs.value > rhs.value else 0)
    def op_le(self,lhs,rhs):
        return Object(intType, 1 if lhs.value <= rhs.value else 0)
    def op_ge(self,lhs,rhs):
        return Object(intType, 1 if lhs.value >= rhs.value else 0)
    def op_add(self,lhs,rhs):
        return Object(intType, lhs.value + rhs.value)
    def op_minus(self,lhs,rhs):
        return Object(intType, lhs.value - rhs.value)
    def op_mul(self,lhs,rhs):
        return Object(intType, lhs.value * rhs.value)
    def op_div(self,lhs,rhs):
        # '/' keeps the host interpreter's int-division semantics
        return Object(intType, lhs.value / rhs.value)
    def op_mod(self,lhs,rhs):
        return Object(intType, lhs.value % rhs.value)
    # -- unary operators --------------------------------------------------
    def op_minus_(self,rhs):
        return Object(intType, - rhs.value)
    def op_not(self,rhs):
        return Object(intType, 0 if rhs.value else 1)
    def op_inc(self,rhs):
        # prefix ++ : mutate in place, yield the same object
        rhs.value += 1
        return rhs
    def op_dec(self,rhs):
        rhs.value -= 1
        return rhs
    def op_inc_(self,lhs):
        # postfix ++ : yield a copy holding the old value
        old = Object(intType, lhs.value)
        lhs.value += 1
        return old
    def op_dec_(self,lhs):
        old = Object(intType, lhs.value)
        lhs.value -= 1
        return old
class Object:
    """A runtime value; only left values may be assigned to."""
    def __init__(self, type, value = None, is_left_value = False):
        self.type = type
        self.value = value
        self.is_left_value = is_left_value
        #TODO ugly here -- integers default to zero
        if value is None and type is intType:
            self.value = 0
    def __nonzero__(self):
        # Delegate truthiness to the type when it knows how to answer.
        if hasattr(self.type, "asBool"):
            return self.type.asBool(self)
        return bool(self.value)
    def __not__(self):
        return not self.__nonzero__()
    def op(self,op,arg = None):
        """Dispatch operator *op* to the value's type; assignment
        additionally requires this object to be a left value."""
        handler = getattr(self.type, "op_" + op, None)
        if handler is None:
            #pass
            raise error.UnsupportedOPError(op)
        if op == "assign" and not self.is_left_value:
            raise error.NotLeftValueError()
        if arg is None:
            return handler(self)
        return handler(self, arg)
    def __repr__(self):
        return str((self.type ,self.value))
    __str__ = __repr__
# Shared singleton type instances.
intType = Integer()
void = Void()
#coding=utf8
#$Id: ast.py 203 2008-06-04 11:55:03Z Filia.Tao@gmail.com $
'''
AST Moudle
抽象语法树模块,提供
# 节结点
# 叶结点
# 子/父结点查询
# 导出成图片
# 通用遍历算法
# AST 线性化?
'''
class Node:
    """An interior node of the abstract syntax tree.

    Children are kept in order (None entries are dropped) and each
    child's ``parent`` pointer is set on construction.  Arbitrary
    analysis results can be attached via set_attr()/get_attr().
    """
    def __init__(self, type, children=None, prod=None):
        # BUG FIX: the default used to be the mutable literal ``[]``
        # (the classic shared-default-argument trap); ``None`` plus a
        # fresh list is the safe spelling.  *prod* is accepted for
        # backward compatibility but unused.
        self.type = type
        if children is None:
            children = []
        self.children = [x for x in children if x is not None]
        self._attr = {}
        self.parent = None
        for child in self.children:
            child.parent = self
    def getChildren(self):
        return self.children
    def __iter__(self):
        for n in self.children:
            yield n
    def __getitem__(self, ind):
        return self.children[ind]
    def __len__(self):
        return len(self.children)
    def child(self, q):
        """Child lookup: by index (int), or by a query string that must
        match exactly one child (returns None otherwise)."""
        if isinstance(q, int):
            return self.children[q]
        elif isinstance(q, str):
            result = self.query(q)
            if len(result) == 1:
                return result[0]
    def ancestor(self, type):
        """Nearest enclosing node of the given type, or None."""
        p = self.parent
        while(p):
            if p.type == type:
                break
            else:
                p = p.parent
        return p
    def prev_all(self, node_type):
        """All earlier siblings of the given type, in document order."""
        siblings = self.parent.getChildren()
        ret = []
        for sibling in siblings:
            if sibling is self:
                break
            elif sibling.type == node_type:
                ret.append(sibling)
        return ret
    def prev(self, type):
        """First earlier sibling of the given type, or None."""
        r = self.prev_all(type)
        if len(r) >= 1:
            return r[0]
        else:
            return None
    def query(self,q="*"):
        '''Query syntax: [type|*] {>[type|*]}
        e.g.  fdef         children of type fdef
              fdef>vdecl   vdecl children under fdef children
              *>exp        exp nodes on the second level
              **>exp       every exp node, at any depth
              **>?         every leaf node
        where ? matches a leaf node.
        '''
        ret = []
        qs = q.split(">")
        for child in self.children:
            if child is None:
                continue
            if child.is_type_match(qs[0]) or qs[0] == '*':
                if len(qs) > 1:
                    ret.extend(child.query(">".join(qs[1:])))
                else:
                    ret.append(child)
            elif qs[0] == '**':
                if len(qs) == 2:
                    if child.is_type_match(qs[1]):
                        ret.append(child)
                ret.extend(child.query(q))
        return ret
    def is_type_match(self, type):
        #TODO ? is no longer correct once a real '?' node type exists
        if isinstance(self, Leaf) and type == '?':
            return True
        if self.type == type:
            return True
        return False
    def set_attr(self, name, value):
        self._attr[name] = value
    def get_attr(self, name):
        # Annotation lookup; returns None when the attribute is unset.
        return self._attr.get(name, None)
    def get_postion(self):
        # (name keeps its historical typo for existing callers)
        # Return the (first, last) source line covered by this subtree.
        lextokens = self.query("**>?")
        return (lextokens[0].lineno, lextokens[-1].lineno)
    def __repr__(self):
        return " ".join([repr(x) for x in self.query("**>?")])
    __str__ = __repr__
class Leaf(Node):
    """A terminal AST node wrapping a single lexer token."""
    def __init__(self, type, value, lineno, lexpos):
        self.type = type
        self.value = value
        self.lineno = lineno    # source line of the token
        self.lexpos = lexpos    # absolute offset in the input text
        self.children = []
        # the token's value is always available as an annotation too
        self._attr = {'value': value}
    def __len__(self):
        return 0
    def query(self,qs):
        # A leaf has no children, so every query yields nothing.
        return []
    def __repr__(self):
        return str(self.value)
    __str__ = __repr__
def all_to_node(p):
    # Wrap every raw lexer token on the production *p*'s right-hand side
    # in a Leaf, so the whole production consists of AST nodes.
    for i in range(len(p)):
        if p[i] is not None and not isinstance(p[i], Node):
            token = p.slice[i]
            p[i] = Leaf(token.type, token.value, token.lineno, token.lexpos)
# Export the tree as a picture
# (requires pydot and graphviz; falls back to a stub without them)
try:
    import pydot
    def node_to_graph(node, graph):
        # Recursively add *node* and its subtree to *graph*; returns the
        # created pydot node so the caller can draw the connecting edge.
        if node is None:
            return None
        parent = pydot.Node(
            id(node)
        )
        if isinstance(node, Leaf):
            parent.set_label('%s' %(str(node.value)))
        else:
            parent.set_label(node.type)
        graph.add_node(parent)
        #print parent.to_string()
        for n in node:
            child = node_to_graph(n, graph)
            if child:
                graph.add_edge(pydot.Edge(parent, child))
        return parent
    def to_graph(root, graph_name):
        # Render the AST rooted at *root* to <graph_name>.png via dot.
        graph = pydot.Dot()
        node_to_graph(root, graph)
        #print graph.to_string()
        graph.write_png(graph_name + '.png', prog='dot')
except ImportError:
    def to_graph(root, graph_name):
        # graceful degradation when pydot is not installed
        print "pydot is not installed.to_graphp will not work"
class BaseASTWalker:
    '''Depth-first, post-order traversal driving an action object.

    For each node the action's ``before_<type>`` hook runs before the
    children are walked and ``on_<type>`` afterwards; leaves may instead
    be handled by ``_before_token`` / ``_on_token`` hooks.
    '''
    def __init__(self, root, action):
        self.root = root
        self.action = action
        self.current_token = None
    def _walk_node(self, node):
        '''Proxy method: use walk_<type> when the subclass defines one,
        otherwise fall back to the default walk (leaves and interior
        nodes share that path).'''
        walker = getattr(self, 'walk_' + node.type, None)
        if walker is not None:
            return walker(node)
        return self._default_walk(node)
    def _default_walk(self, node):
        self._do_action(node, 'before')
        for child in node:
            self._walk_node(child)
        return self._do_action(node)
    def _do_action(self, node, when = "on"):
        # *when* must be one of ('before', 'on')
        assert when in ('before', 'on')
        typed_hook = '%s_%s' % (when, node.type)
        token_hook = '_%s_token' % (when,)
        if hasattr(self.action, typed_hook):
            getattr(self.action, typed_hook)(node)
        elif isinstance(node, Leaf) and hasattr(self.action, token_hook):
            return getattr(self.action, token_hook)(node)
        else:
            return self._default_action(node)
    def _default_action(self, node):
        pass
    def _walk_token(self, node):
        self.current_token = node
        self._do_action(node)
        return node.value
    def run(self):
        return self._walk_node(self.root)
class WFWalker(BaseASTWalker):
    '''Breadth-first traversal variant.

    BUG FIXES relative to the original:
      * ``super(WFWalker, "__init__")(root, action)`` was not a valid
        super() call (and would fail on an old-style base class anyway);
        the base __init__ is now invoked directly.
      * the non-Leaf branch of _walk_node recursed into _walk_node
        itself, looping forever; it must fall through to _default_walk.
      * ``self._to_visist`` was a typo for ``self._to_visit``.
      * popping from the queue without checking raised IndexError once
        the walk was complete; the queue is tested first now.
    '''
    def __init__(self, root, action):
        self._to_visit = []
        BaseASTWalker.__init__(self, root, action)
    def _walk_node(self, node):
        '''Proxy method: use walk_<type> when defined, otherwise the
        token / default walk.'''
        if hasattr(self, 'walk_' + node.type):
            getattr(self, 'walk_' + node.type)(node)
        else:
            if isinstance(node, Leaf):
                self._walk_token(node)
            else:
                self._default_walk(node)
    def _default_walk(self, node):
        self._do_action(node)
        # enqueue the children, then visit the next queued node (if any)
        self._to_visit.extend(node.getChildren())
        if self._to_visit:
            self._walk_node(self._to_visit.pop(0))
class ScopeWalker(BaseASTWalker):
    '''A traversal that tracks the current (nested) namespace.'''
    def set_ns(self, ns):
        self.ns = ns
    def get_ns(self):
        return self.ns
    def walk_fdef(self, node):
        """Enter the function's namespace while its subtree is walked.

        BUG FIX: this used to call ``self._walk_node(node)``, which
        dispatches straight back to walk_fdef (the node's type is still
        'fdef') and recursed forever; the body must be walked with
        _default_walk instead.
        """
        old_ns = self.ns
        func_name = node.query("head>id")[0].value
        self.ns = self.ns[func_name]
        self._default_walk(node)
        self.ns = old_ns
class BaseAnnotateAction:
    '''Base class for actions that annotate AST nodes with one attribute.'''
    # name of the attribute being annotated; subclasses must override
    annotate_attr_name = 'FIXME'
    def _copy_from_child(self, node, index):
        # propagate the chosen child's annotation up onto this node
        value = node.child(index).get_attr(self.annotate_attr_name)
        node.set_attr(self.annotate_attr_name, value)
    def _copy_from_first_child(self, node):
        self._copy_from_child(node, 0)
    def _copy_from_parent(self, node):
        # propagate the parent's annotation down (no-op at the root)
        if node.parent:
            node.set_attr(self.annotate_attr_name,
                          node.parent.get_attr(self.annotate_attr_name))
    def get_node_attr(self, node):
        return node.get_attr(self.annotate_attr_name)
    def set_node_attr(self, node, value):
        return node.set_attr(self.annotate_attr_name, value)
# Generate an AST-walker skeleton from the contents of a parse.py module.
def gen_action(lang):
    # Produce source code for a BaseASTAction class containing one empty
    # on_<rule> method per p_<rule> function (in definition order),
    # skipping the rules listed in the parse module's ast_ommit.
    import types
    p = __import__('interpretor.%s.parse' %(lang,), fromlist = ['interpretor' ,lang])
    walker_src= '''#coding=utf8
# This file is automatically generated. Do not edit.
class BaseASTAction:
    '''
    ldict = p.__dict__
    # collect the p_* grammar functions, ordered by source line
    symbols = [ldict[f] for f in ldict.keys()
            if (type(ldict[f]) in (types.FunctionType, types.MethodType) and ldict[f].__name__[:2] == 'p_'
            and ldict[f].__name__[2:] not in p.ast_ommit)]
    symbols.sort(lambda x,y: cmp(x.func_code.co_firstlineno,y.func_code.co_firstlineno))
    for x in symbols:
        walker_src += '''
    def on_%s(self, node):
        pass
    ''' %(x.__name__[2:])
    walker_src += '''
    def _on_token(self, node):
        pass
    '''
    return walker_src
if __name__ == "__main__":
    # regenerate the smallc action skeleton in place
    outf = open("smallc/astaction.py", 'w')
    print >>outf, gen_action("smallc")
| Python |
#coding=utf8
#$Id: common.py 199 2008-05-30 13:53:45Z Filia.Tao@gmail.com $
from interpretor.ast import Node,Leaf,BaseASTWalker,BaseAnnotateAction
class CommonOPAnnotate(BaseAnnotateAction):
    '''Annotate operator tokens with a canonical operation name,
    e.g. '+' -> 'add', '-' -> 'minus'.  This pass is identical for the
    L1 and L2 interpreters.'''
    annotate_attr_name = 'op_name'
    op_map = {
        '=' : 'assign',
        '&&' : 'and',
        '||' : 'or',
        '==' : 'eq',
        '!=' : 'ne',
        '<' : 'lt',
        '>' : 'gt',
        '<=' : 'le',
        '>=' : 'ge',
        '+' : 'add',
        '-' : 'minus',
        '*' : 'mul',
        '/' : 'div',
        '%' : 'mod',
        '++' : 'inc',
        '--' : 'dec',
        '!' : 'not',
        'chk': 'chk',
    }
    # operators whose meaning depends on context: unary minus and
    # prefix/postfix increment & decrement (they get a '_' suffix)
    multi_op = ('-', '++', '--')
    def _on_token(self, node):
        # leading underscore just avoids clashing with an on_token hook
        if node.value not in self.op_map:
            return
        op_name = self.op_map[node.value]
        if node.value in self.multi_op:
            parent_type = node.parent.type
            if node.value == '-' and parent_type == 'uniop':
                op_name = op_name + '_'     # unary minus
            elif node.value in ('++', '--') and parent_type in ('postfix', 'postexp'): #FIXME
                op_name = op_name + '_'     # postfix inc/dec
        node.set_attr(self.annotate_attr_name, op_name)
    on_relop = BaseAnnotateAction._copy_from_first_child
    on_addop = on_multop = on_uniop = BaseAnnotateAction._copy_from_first_child
#===============================================================================
# class TypeConstraint:
# '''类型约束,用于静态类型检查'''
# def __init__(self):
# self._rules = {}
#
#
# @staticmethod
# def is_same(*operands):
# if len(operands) != 2:
# return False
# else:
# return operands[0] == operands[1]
#
#
# @staticmethod
# def is_a(type, which = None):
# '''是否是类型....
# 注意type 是语言类型对应的Python 类。(比如Integer, Array, Struct 等等 而不是
# intType , Void 等等
# '''
# def wrapped(*operands):
# if which is None:
# for x in operands:
# if not isinstance(x, type) :
# return False
# return True
# else:
# return isinstance(operands[which], type)
# return wrapped
#
# @staticmethod
# def is_in(type_set, which = None):
# ''' type_set 的含义见上面(is_a)的说明'''
# def wrapped(*operands):
# if which is None:
# for x in operands:
# if not x.__class__ in type_set :
# return False
# return True
# else:
# return operands[which].__class__ in type_set
# return wrapped
#
#
# @staticmethod
# def has_op(op_name, operand):
# return hasattr(operand, "op_" + op_name)
#
#
# def add(self, op_name, cons, for_type = 'all'):
#
# if op_name not in self._rules:
# self._rules[op_name] = []
# self._rules[op_name].append((for_type, cons))
#
#
# def check(self, op_name, *operands):
# '''根据操作名和参数检查是否满足类型约束'''
# assert len(operands) >= 1 #操作数总至少有一个吧?
# #print operands
# #首先我们需要类型是否支持该操作符
# if not self.has_op(op_name, operands[0]):
# print "operation %s is supported by the %s " %(op_name, operands[0])
# return False
# if op_name in self._rules:
# for (for_type, func) in self._rules[op_name]:
# if for_type != 'all' and not isinstance(operands[0], for_type):
# continue
# if not func(*operands):
# print "check type failed on " , func, "for" , op_name , " with " , operands
# return False
# return True
#===============================================================================
| Python |
#coding=utf8
#$Id: __init__.py 95 2008-04-23 05:35:01Z Filia.Tao@gmail.com $
'''
几个语言的解释器
'''
# Package metadata.
version = '0.2'
author = 'Tao Fei (Filia.Tao@gmail.com)'
| Python |
#coding=utf8
#$Id: smallctest.py 199 2008-05-30 13:53:45Z Filia.Tao@gmail.com $
'''Unit Test For interpretor.smallc package'''
import unittest
from test import BaseTestCase, build_test_suit
def filter(f):
    # Source-file selector for the suite (shadows the builtin on purpose,
    # it is only passed to build_test_suit).  NOTE(review): the early
    # ``return True`` makes the name check unreachable -- it looks like a
    # debugging toggle; swap the two lines to run a single test.
    return True
    return f.find("quicksort") != -1
if __name__ == '__main__':
    unittest.TextTestRunner(verbosity = 2).run(build_test_suit('smallc', filter))
#coding=utf8
#$Id: kernelctest.py 205 2008-06-05 04:46:30Z Filia.Tao@gmail.com $
'''Unit Test For interpretor.ooc package'''
import unittest
from test import BaseTestCase, build_test_suit
def filter(f):
    # accept every source file (the commented line selects a single test)
    return True
    #return f.find("sp") != -1
if __name__ == "__main__":
    # NOTE(review): this is kernelctest.py yet it builds the 'ooc' suite
    # (and the module docstring says ooc too) -- confirm whether
    # 'kernelc' was intended here.
    unittest.TextTestRunner(verbosity = 2).run(build_test_suit('ooc', filter))
#coding=utf8
#$Id: ooctest.py 204 2008-06-04 12:56:45Z Filia.Tao@gmail.com $
'''Unit Test For interpretor.ooc package'''
import unittest
from test import BaseTestCase, build_test_suit
def filter(f):
    # only run source files whose name contains "sp"
    # (uncomment the next line to run the full suite instead)
    #return True
    return f.find("sp") != -1
if __name__ == "__main__":
    unittest.TextTestRunner(verbosity = 2).run(build_test_suit('ooc', filter))
#coding=utf8
#$Id: __init__.py 95 2008-04-23 05:35:01Z Filia.Tao@gmail.com $
'''
测试用公共函数
'''
import re
import StringIO
import unittest
import glob
import os
import sys
class BaseTestCase(unittest.TestCase):
def __init__(self, engine, source, input, expect):
'''source , input, expect 可以是类文件对象或者字符串'''
self.engine = engine
self.source = source
self.input = input
self.expect = expect
super(BaseTestCase, self).__init__()
def assert_num_value_same(self, result , expect):
'所有数据值得比较,忽略其他'
#'忽略分割符(空格,逗号,回车)的比较'
tidy_result = re.findall('\d+', result, re.MULTILINE)
tidy_except = re.findall('\d+', expect, re.MULTILINE)
self.assertEqual(tidy_result, tidy_except)
def runTest(self):
'''测试输出结果是否和预期相同'''
if type(self.source) is str:
code = self.source
else:
code = self.source.read()
if type(self.input) is str:
input_stream = StringIO.StringIO(self.input)
else:
input_stream = self.input
if type(self.expect) is str:
expect = self.expect
else:
expect = self.expect.read()
output_stream = StringIO.StringIO()
self.engine.run(code, input_stream, output_stream)
self.assert_num_value_same(output_stream.getvalue(), expect)
def build_test_suit(lang, filter_func = None):
    '''Build a TestSuite for *lang*: one BaseTestCase per source file
    found under ./<lang>/*.<suffix>, paired with its .in / .out files.
    *filter_func*, when given, selects which source files to include.'''
    interp = __import__('interpretor.%s.interp' %(lang), fromlist = ['interpretor' ,lang])
    suite = unittest.TestSuite()
    # derive the file suffix from the language name, e.g. kec smc ooc
    subfix = lang[0:2] + lang[-1] #kec smc ooc
    source_file_list = glob.glob('./%s/*.%s' %(lang,subfix))
    for src_file in source_file_list:
        if filter_func is not None:
            if not filter_func(src_file):
                continue
        if os.path.isfile(src_file):
            code = open(src_file).read()
            try:
                input_file = open(src_file[:src_file.rfind('.')] + '.in')
            except IOError,e:
                # no .in file: fall back to the real stdin
                input_file = sys.stdin
            try:
                expect_out = open(src_file[:src_file.rfind('.')] + '.out').read()
            except IOError,e:
                # no .out file: expect no (numeric) output
                expect_out = ""
            #print >>sys.stderr, "test file " , input_file
            test = BaseTestCase(interp, code, input_file, expect_out)
            #test.config(interp, code, input_file, expect_out)
            suite.addTest(test)
    return suite
| Python |
#!/usr/bin/env python
__author__ = 'Ramesh Balasubramanian <ramesh@finpy.org>'
__version__ = "$Revision: 1.15 $"
__credits__ = 'functions in the datetools interface have a high degree of Matlab(TM) compatibility'
import datetime
import time
import math
import sys
import calendar
# Start calendar weeks on Sunday (index 6); lweekdate() below relies on
# this module-wide setting when indexing monthcalendar() columns.
calendar.setfirstweekday(6)
# Table mapping datetime.weekday() indices to MATLAB-style
# (number, abbreviation) pairs where 1=Sun .. 7=Sat; weekday() indexes it.
# NOTE(review): the alternate ordering is assumed to match IronPython's
# weekday numbering -- confirm on a .NET runtime.
if sys.version.find('.NET') != -1:
    __weekday = ((1, 'Sun'),(2, 'Mon'), (3, 'Tue'), (4, 'Wed'), (5, 'Thu'), (6, 'Fri'), (7, 'Sat'))
else:
    __weekday = ((2, 'Mon'), (3, 'Tue'), (4, 'Wed'), (5, 'Thu'), (6, 'Fri'), (7, 'Sat'), (1, 'Sun'))
def __fromordinal(gdays):
    """Turn a proleptic-Gregorian ordinal day count into a date object.

    CPython branch returns a datetime.date; the IronPython (.NET) branch
    goes through System.DateTime and returns a datetime.datetime.
    """
    if sys.version.find('.NET') == -1:
        return datetime.date.fromordinal(int(gdays))
    import System
    delta_days = gdays - datetime.date.today().toordinal()
    net_date = System.DateTime.Today.AddDays(delta_days)
    return datetime.datetime(net_date.Year, net_date.Month, net_date.Day, 0, 0, 0)
# Days per month for common / leap years, 1-indexed so __days[month]
# works directly (index 0 is an unused placeholder).
__days = (0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31)
__leapDays = (0, 31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31)
# strftime() pattern for each MATLAB dateForm code (tuple index = code,
# 0-29; see the table in datestr()'s docstring).  Empty strings mark
# dateForms that are not implemented -- datestr() raises
# NotImplementedError for those.
__dateFormat = ('%d-%b-%Y %H:%M:%S', \
                '%d-%b-%Y', \
                '%m/%d/%y', \
                '%b', \
                '', \
                '%m', \
                '%m/%d', \
                '%d', \
                '%a', \
                '', \
                '%Y', \
                '%y', \
                '%b%y', \
                '%H:%M:%S', \
                '%I:%M:%S %p', \
                '%H:%M', \
                '%I:%M %p', \
                '', \
                '', \
                '%d/%m', \
                '%d/%m/%y', \
                '%b.%d.%Y %H:%M:%S', \
                '%b.%d.%Y', \
                '%m/%d/%Y', \
                '%d/%m/%Y', \
                '%y/%m/%d', \
                '%Y/%m/%d', \
                '', \
                '%b%Y', \
                '%d-%b-%Y %H:%M')
def now():
    """Return the current date and time as a MATLAB-style serial number.

    The integer part counts days (day 1 = 1-Jan-0000, i.e. the proleptic
    ordinal plus 366); the fractional part encodes the time of day.

    Example:
        >>> datetools.now()        # doctest: +SKIP
        733042.99
    """
    hour, minute, second = datetime.datetime.now().timetuple()[3:6]
    day_fraction = ((hour * 3600) + (minute * 60) + second) / 86400.0
    return 366 + datetime.date.today().toordinal() + day_fraction
def today():
    """Return the current date as a MATLAB-style serial date number.

    Equals today's proleptic-Gregorian ordinal plus the 366-day offset
    that makes day 1 correspond to 1-Jan-0000.
    """
    return datetime.date.today().toordinal() + 366
def datenum(p, *args):
    """Convert a date to a serial date number (MATLAB datenum compatible;
    day 1 = 1-Jan-0000).

    Forms:
        datenum(dateString)  -- parse one of the supported string formats,
            e.g. '19-may-1999', '5/19/99', '19-may-1999, 6:37 pm',
            '5/19/99/18:37', '5/19' (current year assumed).
        datenum(number)      -- validate an existing serial number and
            return it rounded to 4 decimals (must be >= 693962).
        datenum(year, month, day[, hour[, minute[, second]]])
                             -- build from integer components (time parts
            default to 0).

    Raises Exception for unsupported argument counts or types, or for an
    out-of-range serial number.
    """
    if len(args) == 0:
        if type(p) == str:
            return __string2num(p)
        if type(p) == int or type(p) == float:
            try:
                # Round-trip through datestr to reject too-small numbers.
                datenum(datestr(p, 29))
                return round(p, 4)
            except ValueError:
                raise Exception("Invalid datenum. must be >= 693962")
        raise Exception("When invoked with 1 argument, datenum expects a string, int or float")
    if len(args) <= 5:
        return __dateints2num(p, *args)
    raise Exception("datenum accepts 1, 3, 4, 5 or 6 arguments only")
def datestr(dateNumber, dateForm = -1):
    """
    -------------------------------------------------------------------------------
    Usage
        dateString = datestr(dateNumber, dateForm)
        dateString = datestr(dateNumber)
    Notes
        dateString = datestr(dateNumber, dateForm) converts a date number or a date
        string to a date string. DateForm specifies the format of DateString.
        dateString = datestr(dateNumber) assumes dateForm is 1, 16, or 0 depending
        on whether the date number contains a date, time, or both, respectively.
        If date is a date string, the function assumes dateForm is 1.
        DateForm            Format                    Example
            0               'dd-mmm-yyyy HH:MM:SS'    01-Mar-2000 15:45:17
            1               'dd-mmm-yyyy'             01-Mar-2000
            2               'mm/dd/yy'                03/01/00
            3               'mmm'                     Mar
            4               'm'                       M
            5               'mm'                      03
            6               'mm/dd'                   03/01
            7               'dd'                      01
            8               'ddd'                     Wed
            9               'd'                       W
            10              'yyyy'                    2000
            11              'yy'                      00
            12              'mmmyy'                   Mar00
            13              'HH:MM:SS'                15:45:17
            14              'HH:MM:SS PM'             3:45:17 PM
            15              'HH:MM'                   15:45
            16              'HH:MM PM'                3:45 PM
            17              'QQ-YY'                   Q1 01
            18              'QQ'                      Q1
            19              'dd/mm'                   01/03
            20              'dd/mm/yy'                01/03/00
            21              'mmm.dd.yyyy HH:MM:SS'    Mar.01,2000 15:45:17
            22              'mmm.dd.yyyy'             Mar.01.2000
            23              'mm/dd/yyyy'              03/01/2000
            24              'dd/mm/yyyy'              01/03/2000
            25              'yy/mm/dd'                00/03/01
            26              'yyyy/mm/dd'              2000/03/01
            27              'QQ-YYYY'                 Q1-2001
            28              'mmmyyyy'                 Mar2000
            29              'dd-mmm-yyyy HH:MM'       01-Mar-2000 15:45
        Raises NotImplementedError for dateForms whose table entry is
        empty (4, 9, 17, 18, 27).
    Examples
        In [1]: import datetools
        In [2]: datetools.datestr(730123, 1)
        Out[2]: '03-Jan-1999'
        In [3]: datetools.datestr(730123, 2)
        Out[3]: '01/03/99'
        In [4]: datetools.datestr(730123.776, 0)
        Out[4]: '03-Jan-1999 18:37:26'
        In [5]: datetools.datestr(730123)
        Out[5]: '03-Jan-1999'
        In [6]: datetools.datestr(730123.776)
        Out[6]: '03-Jan-1999 18:37:26'
        In [7]: datetools.datestr(730123)
        Out[7]: '03-Jan-1999'
        In [8]: datetools.datestr(.776)
        Out[8]: '06:37 PM'
    Known Bug(s)
        1. does not handle datenums 0 to 693961
    -------------------------------------------------------------------------------
    """
    if dateForm == -1:
        # dateForm should be 0, 1 or 16 depending on datenum
        if __isFloat(dateNumber):
            if math.floor(dateNumber) == 0:
                # only time: integer part is 0, so format just HH:MM PM
                dateForm = 16
                timeObject = datetime.time(*__timeTuple(dateNumber))
                return timeObject.strftime(__dateFormat[dateForm])
            else:
                # date and time
                dateForm = 0
                dateTimeObject = __dateTime(dateNumber)
                return dateTimeObject.strftime(__dateFormat[dateForm])
        elif __isInteger(dateNumber):
            # whole number: date only, default dateForm 1
            datePart = __fromordinal(dateNumber-366)
            dateForm = 1
            return datePart.strftime(__dateFormat[dateForm])
    else:
        # explicit dateForm requested by the caller
        dateTimeObject = __dateTime(dateNumber)
        if __dateFormat[dateForm] == '':
            # blank table entries are unimplemented dateForms
            raise NotImplementedError('dateForm ' + repr(dateForm))
        return dateTimeObject.strftime(__dateFormat[dateForm])
def datevec(date):
    """Split a date into its components.

    *date* may be a serial date number (int/float), a date string in any
    format accepted by datenum(), or a list/tuple of either.  Returns a
    (year, month, day, hour, minute, second) tuple of integers -- or, for
    sequence input, a tuple of such tuples.

    Examples:
        datevec('28-Jul-00')   -> (2000, 7, 28, 0, 0, 0)
        datevec(730695.776)    -> (2000, 7, 28, 18, 37, 26)

    Raises Exception for unsupported argument types.
    """
    if type(date) in (list, tuple):
        return tuple(datevec(element) for element in date)
    if type(date) == str:
        parsed = __dateParse(date)
    elif type(date) in (int, float):
        parsed = __dateTime(date)
    else:
        raise Exception("Argument for datevec must be a datenum/datestr or a list/tuple of datenum/datestr")
    return parsed.timetuple()[0:6]
# Component accessors.  Real defs instead of lambda assignments (PEP 8):
# they get proper names in tracebacks and carry their docstrings directly.
def year(date):
    """Return the year of a serial date number or a date string."""
    return datevec(date)[0]
def month(date):
    """Return the month of a serial date number or a date string."""
    return datevec(date)[1]
def day(date):
    """Return the day of a serial date number or a date string."""
    return datevec(date)[2]
def hour(date):
    """Return the hour of the day given a serial date number or a date string."""
    return datevec(date)[3]
def minute(date):
    """Return the minute of the day given a serial date number or a date string."""
    return datevec(date)[4]
def second(date):
    """Return the second of the day given a serial date number or a date string."""
    return datevec(date)[5]
def eomdate(year, month):
    """Serial date number of the last day of the given month.

    *year* is a four-digit integer; *month* is 1-12.  Use datestr() to
    turn the result into a formatted string.
    """
    last_day = eomday(year, month)
    return datenum(year, month, last_day)
def eomday(year, month):
    """Number of days in a month (MATLAB eomday).

    year: four-digit int, or a list/tuple of them.
    month: int 1-12, or a list/tuple of them.
    When both are sequences they must have equal length and are paired
    element-wise; a scalar on either side applies to every element of the
    other.  Sequence input yields a tuple of day counts.

    Examples:
        eomday(2000, 2)              -> 29
        eomday([2000, 2001], 2)      -> (29, 28)
        eomday(2000, [2, 7, 8, 12])  -> (29, 31, 31, 31)

    Raises Exception for invalid type combinations.
    """
    def last_day(y, m):
        # Conditional expression replaces the old 'cond and a or b' hack.
        return __leapDays[m] if __isLeap(y) else __days[m]
    year_is_seq = type(year) in (list, tuple)
    month_is_seq = type(month) in (list, tuple)
    if year_is_seq and month_is_seq and len(year) == len(month):
        return tuple(last_day(y, m) for y, m in zip(year, month))
    elif year_is_seq and type(month) == int:
        return tuple(last_day(y, month) for y in year)
    elif type(year) == int and month_is_seq:
        return tuple(last_day(year, m) for m in month)
    elif type(year) == int and type(month) == int:
        return last_day(year, month)
    else:
        raise Exception("Argument Exception: Invalid type(s) or combination of type(s)")
def weekday(date):
    """Return (dayNumber, dayAbbreviation) for a date.

    *date* is a serial date number or date string.  dayNumber follows the
    MATLAB convention:
        1 Sun, 2 Mon, 3 Tue, 4 Wed, 5 Thu, 6 Fri, 7 Sat
    and dayAbbreviation is the matching three-letter name.
    """
    components = datevec(date)
    index = datetime.datetime(*components).weekday()
    return __weekday[index]
def daysact(startDate, endDate = None):
    """Actual number of days between dates (MATLAB daysact).

    startDate, endDate: serial date numbers or date strings.  Either may
    be a list/tuple; the other must then be a scalar or a sequence of the
    same length (paired element-wise).

    With endDate omitted, returns the day count from the base date
    (1-Jan-0000) to startDate, i.e. datenum(startDate).  Otherwise returns
    datenum(endDate) - datenum(startDate); the result is negative when
    endDate is earlier than startDate.  Sequence input yields a tuple.

    Examples:
        daysact('7-sep-2002', '25-dec-2002')                  -> 109.0
        daysact(['09/07/2002', '10/22/2002'], '12/25/2002')   -> (109.0, 64.0)
    """
    if endDate is None:
        if type(startDate) in (list, tuple):
            return tuple(daysact(d) for d in startDate)
        return datenum(startDate)
    start_is_seq = type(startDate) in (list, tuple)
    end_is_seq = type(endDate) in (list, tuple)
    if start_is_seq and end_is_seq:
        assert len(startDate) == len(endDate), "len(startDate) != len(endDate)"
        return tuple(daysact(s, e) for s, e in zip(startDate, endDate))
    if start_is_seq:
        return tuple(daysact(s, endDate) for s in startDate)
    if end_is_seq:
        return tuple(daysact(startDate, e) for e in endDate)
    return datenum(endDate) - datenum(startDate)
def lweekdate(weekday, year, month, nextDay=0):
    """
    Usage
        lastDate = lweekdate(weekday, year, month, nextDay)
    Notes
        Date of last occurrence of weekday in month.
        Returns the serial date number for the last occurrence of Weekday in the
        given year and month.
        Weekday  Weekday whose date you seek. Enter as an integer from 1 through 7:
            1 Sunday
            2 Monday
            3 Tuesday
            4 Wednesday
            5 Thursday
            6 Friday
            7 Saturday
        Year  Year. Enter as a four-digit integer.
        Month  Month. Enter as an integer from 1 through 12.
    Not Implemented
        NextDay  (Optional) Weekday that must occur after Weekday in the same
        week.  The argument is range-checked but otherwise ignored.
    See Also
        Use the function datestr to convert serial date numbers to formatted
        date strings.
    """
    # Range checks only; note asserts disappear under 'python -O'.
    assert weekday in range(1,8), "weekday must be in range(1,8)"
    assert month in range(1,13), "month must be in range(1,13)"
    assert year in range(0, 10000), "year must be in range(0,10000)"
    assert nextDay in range(0,8), "weekday must be in range(0,8)"
    # calendar.setfirstweekday(6) at module import makes monthcalendar()
    # columns run Sun..Sat, so column weekday-1 matches 1=Sun .. 7=Sat.
    day = calendar.monthcalendar(year,month)[-1][weekday-1]
    if day == 0:
        # 0 means that weekday falls outside the month in the final week;
        # the last occurrence is then in the previous week.
        day = calendar.monthcalendar(year,month)[-2][weekday-1]
    return datenum(year, month, day)
# -----------------------------------------------------------------------------
# private functions
# -----------------------------------------------------------------------------
def __string2num(dateString):
    """Parse a date string and return its serial date number."""
    return __datenum(__dateParse(dateString))
def __dateints2num(year, month, day, hour = 0, minute = 0, second = 0):
    """
    Build a serial date number from integer date/time components.
    Components are range-checked with asserts (note: stripped under
    'python -O'); datetime itself re-validates on construction, e.g.
    rejecting day 31 in a 30-day month.
    """
    assert year >= 0
    assert month >= 1 and month <= 12
    assert day >= 1 and day <= 31
    assert hour >= 0 and hour <= 23
    assert minute >= 0 and minute <= 59
    assert second >= 0 and second <= 59
    dateTime = datetime.datetime(year=year, month=month, day=day,
            hour=hour, minute=minute, second=second)
    return __datenum(dateTime)
def __datenum(dateTime):
    """Serial date number (rounded to 4 decimals) for a datetime.datetime."""
    ordinal_days = datetime.date(dateTime.year, dateTime.month, dateTime.day).toordinal()
    seconds_into_day = (dateTime.hour * 3600) + (dateTime.minute * 60) + dateTime.second
    return round(366 + ordinal_days + seconds_into_day / 86400.0, 4)
def __isLeap(year):
    """True for proleptic-Gregorian leap years (divisible by 4, except
    centuries not divisible by 400)."""
    return year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)
def __ndays(year, months, day):
    """Clamp *day* to the length of month *months* in *year*.

    NOTE(review): no callers are visible in this file.
    """
    month_length = __leapDays[months] if __isLeap(year) else __days[months]
    return min(month_length, day)
# Recognized strptime() formats, tried in order; earlier entries win for
# ambiguous strings, mirroring the order of the original try/except cascade.
__parseFormats = (
    "%d-%b-%Y %H:%M:%S",     # '19-may-1999 18:37:26'
    "%d-%b-%Y, %H:%M:%S",
    "%d-%b-%Y %I:%M:%S %p",  # '19-may-1999 6:37:26 pm'
    "%d-%b-%Y, %I:%M:%S %p",
    "%m-%d-%Y %H:%M:%S",
    "%m-%d-%Y, %H:%M:%S",
    "%m-%d-%Y %I:%M:%S %p",
    "%m-%d-%Y, %I:%M:%S %p",
    "%m/%d/%y/%I:%M:%S %p",
    "%m/%d/%y %H:%M:%S",
    "%m-%d-%y %I:%M:%S %p",
    "%m-%d-%y %H:%M:%S",
    "%m-%d-%y, %I:%M:%S %p",
    "%m-%d-%y, %H:%M:%S",
    "%d-%b-%Y %I:%M %p",
    "%d-%b-%Y, %I:%M %p",
    "%d-%b-%Y %H:%M",
    "%d-%b-%Y, %H:%M",
    "%m-%d-%Y %I:%M %p",
    "%m-%d-%Y, %I:%M %p",
    "%m-%d-%Y %H:%M",
    "%m-%d-%Y, %H:%M",
    "%m/%d/%y/%I:%M %p",
    "%m-%d-%y %I:%M %p",
    "%m/%d/%y/%H:%M",
    "%m-%d-%y %H:%M",
    "%m/%d/%y",
    "%m-%d-%y",
    "%m/%d/%Y",
    "%m-%d-%Y",
    "%d-%b-%y",
    "%b %d, %Y",
    "%d-%b-%Y",
)
# Formats without a year; the current year is substituted.
__parseFormatsNoYear = ("%m/%d", "%d-%b")
def __dateParse(dateString):
    """Parse *dateString* with the first matching format and return a
    datetime.datetime.

    Time components missing from the matched format default to 0, and
    year-less formats ('5/19', '19-may') assume the current year.
    Raises ValueError when no format matches.

    Fixes two bugs in the previous try/except cascade: the '%m/%d' branch
    built a datetime (then called list.insert on it), so '5/19' never
    parsed; and the '%d-%b' branch parsed the hardcoded literal '19-may'
    instead of its argument, so any unparseable input silently became
    May 19 of the current year.
    """
    lowered = dateString.lower()
    for fmt in __parseFormats:
        try:
            # strptime defaults unspecified time fields to zero, so the
            # first six fields are always (year, month, day, H, M, S).
            return datetime.datetime(*time.strptime(lowered, fmt)[0:6])
        except ValueError:
            pass
    for fmt in __parseFormatsNoYear:
        try:
            month, day = time.strptime(lowered, fmt)[1:3]
        except ValueError:
            continue
        return datetime.datetime(datetime.date.today().year, month, day)
    raise ValueError ('Unable to parse dateString. Format Error ' + dateString)
# True when n has a fractional part / is a whole number, respectively.
__isFloat = lambda n: round(n) - n != 0
__isInteger = lambda n: round(n) - n == 0
# Split n into (integer part, fractional part).
__numberSplit = lambda n: (math.floor(n), n - math.floor(n))
# (hour, minute, second) decoded from the fractional part of a serial number.
# NOTE(review): int(round(...)) can yield second == 60 just below a minute
# boundary, which datetime.time/datetime would reject -- confirm.
__timeTuple = lambda n: \
    (int(__numberSplit(__numberSplit(n)[1] * 24)[0]), \
    int(__numberSplit(__numberSplit(__numberSplit(n)[1] * 24)[1] * 60)[0]), \
    int(round(__numberSplit(__numberSplit(__numberSplit(n)[1] * 24)[1] * 60)[1] * 60)))
# (year, month, day) from the integer part; 366 undoes the MATLAB offset.
__dateTuple = lambda n: __fromordinal(int(math.floor(n-366))).timetuple()[0:3]
# Full datetime from a serial date number.
__dateTime = lambda n: datetime.datetime(*(__dateTuple(n) + __timeTuple(n)))
| Python |
#!/usr/bin/env python
# test_datetools.py
__version__ = "$Revision: 1.12 $"
__author__ = 'Ramesh Balasubramanian <ramesh@finpy.org>'
import support
from support import TODO, TestCase
# When run directly, make the parent directory importable before the
# 'import datetools' below.
if __name__ == '__main__':
    support.adjust_path()
import datetools
import datetime
import time
class DateToolsTestCase(TestCase):
    """Regression tests for the datetools module.

    Expected serial numbers follow the MATLAB convention (day 1 =
    1-Jan-0000, i.e. proleptic ordinal + 366); e.g. 730259 == 19-May-1999.
    """
    # --- now() / today() -------------------------------------------------
    def test_today(self):
        assert datetools.today() == datetime.date.today().toordinal()+366
    def test_now(self):
        # NOTE(review): re-reads the clock; could race across a second
        # boundary and fail spuriously.
        assert datetools.now() == 366 + \
               datetime.date.today().toordinal() + \
               (((time.localtime()[3] * 3600) + \
               (time.localtime()[4] * 60) + \
               time.localtime()[5])/86400.0)
    # --- datenum() -------------------------------------------------------
    def test_datenum1(self):
        assert datetools.datenum('19-may-1999') == 730259
    def test_datenum2(self):
        assert datetools.datenum('5/19/99') == 730259
    def test_datenum3(self):
        assert datetools.datenum('19-may-1999, 6:37 pm') == 730259.7757
    def test_datenum4(self):
        assert datetools.datenum('5/19/99/6:37 pm') == 730259.7757
    def test_datenum5(self):
        assert datetools.datenum('7/11/2000') == datetools.datenum('11-JUL-2000')
    def test_datenum6(self):
        assert datetools.datenum(1999,5,19) == 730259
    def test_datenum7(self):
        assert datetools.datenum(1999,5,19,18,37) == 730259.7757
    def test_datenum8(self):
        assert datetools.datenum(730259) == 730259
    def test_datenum9(self):
        assert datetools.datenum(730259.78) == 730259.78
    # --- datestr() -------------------------------------------------------
    def test_datestr1(self):
        assert datetools.datestr(730259) == '19-May-1999'
    def test_datestr2(self):
        assert datetools.datestr(730123, 1) == '03-Jan-1999'
    def test_datestr3(self):
        assert datetools.datestr(730123, 2) == '01/03/99'
    def test_datestr4(self):
        assert datetools.datestr(730123.776, 0) == '03-Jan-1999 18:37:26'
    def test_datestr5(self):
        assert datetools.datestr(730123) == '03-Jan-1999'
    def test_datestr6(self):
        assert datetools.datestr(730123.776) == '03-Jan-1999 18:37:26'
    def test_datestr7(self):
        assert datetools.datestr(730123) == '03-Jan-1999'
    def test_datestr8(self):
        assert datetools.datestr(.776) == '06:37 PM' \
            or datetools.datestr(.776) == '06:37 P' # IronPython 1.0.1
    # --- datevec() -------------------------------------------------------
    def test_datevec1(self):
        assert datetools.datevec('28-Jul-00') == (2000, 7, 28, 0, 0, 0)
    def test_datevec2(self):
        assert datetools.datevec(730695) == (2000, 7, 28, 0, 0, 0)
    def test_datevec3(self):
        assert datetools.datevec(730695.776) == (2000, 7, 28, 18, 37, 26)
    def test_datevec4(self):
        assert datetools.datevec('10-Jul-2000 19:23:12') == (2000, 7, 10, 19, 23, 12)
    def test_datevec5(self):
        assert datetools.datevec(range(730695,730698)) == \
               ((2000, 7, 28, 0, 0, 0), (2000, 7, 29, 0, 0, 0), (2000, 7, 30, 0, 0, 0))
    def test_datevec6(self):
        assert datetools.datevec([730695,730696,730698]) == \
               ((2000, 7, 28, 0, 0, 0), (2000, 7, 29, 0, 0, 0), (2000, 7, 31, 0, 0, 0))
    def test_datevec7(self):
        assert datetools.datevec((730695,730696,730698)) == \
               ((2000, 7, 28, 0, 0, 0), (2000, 7, 29, 0, 0, 0), (2000, 7, 31, 0, 0, 0))
    def test_datevec8(self):
        assert (1999,5,19,0,0,0) == datetools.datevec('19-may-1999')
    def test_datevec9(self):
        assert (1999,5,19,0,0,0) == datetools.datevec('may 19, 1999')
    def test_datevec10(self):
        assert (1999,5,19,0,0,0) == datetools.datevec('19-may-99')
    def test_datevec11(self):
        assert (datetime.date.today().year,5,19,0,0,0) == datetools.datevec('19-may')
    def test_datevec12(self):
        assert (1999,5,19,0,0,0) == datetools.datevec('5/19/99')
    def test_datevec13(self):
        assert (datetime.date.today().year,5,19,0,0,0) == datetools.datevec('5/19')
    def test_datevec14(self):
        assert (1999,5,19,18,37,0) == datetools.datevec('19-may-1999, 18:37')
    def test_datevec15(self):
        assert (1999,5,19,18,37,0) == datetools.datevec('19-may-1999, 6:37 pm')
    def test_datevec16(self):
        assert (1999,5,19,18,37,0) == datetools.datevec('5/19/99/18:37')
    def test_datevec17(self):
        assert (1999,5,19,18,37,0) == datetools.datevec('5/19/99/6:37 pm')
    # --- component accessors ---------------------------------------------
    def test_year1(self):
        assert datetools.year('07-28-00 18:49:12') == 2000
    def test_year2(self):
        assert datetools.year('28-Jul-2000 18:49') == 2000
    def test_year3(self):
        assert datetools.year('7/28/00') == 2000
    def test_year4(self):
        assert datetools.year(730259) == 1999
    def test_month1(self):
        assert datetools.month('07-28-00 18:49:12') == 7
    def test_month2(self):
        assert datetools.month('28-Jul-2000 18:49') == 7
    def test_month3(self):
        assert datetools.month('7/28/00') == 7
    def test_month4(self):
        assert datetools.month(730259) == 5
    def test_day1(self):
        assert datetools.day('07-28-00 18:49:12') == 28
    def test_day2(self):
        assert datetools.day('28-Jul-2000 18:49') == 28
    def test_day3(self):
        assert datetools.day('7/28/00') == 28
    def test_day4(self):
        assert datetools.day(730259) == 19
    def test_minute1(self):
        assert datetools.minute('28-Jul-2000 18:49:12') == 49
    def test_minute2(self):
        assert datetools.minute('28-Jul-2000 18:49') == 49
    def test_minute3(self):
        assert datetools.minute('28-Jul-2000') == 0
    def test_hour1(self):
        assert datetools.hour('28-Jul-2000 18:49:12') == 18
    def test_hour2(self):
        assert datetools.hour('28-Jul-2000') == 0
    def test_hour3(self):
        assert datetools.hour('28-Jul-2000 6:49:12 am') == 6
    def test_hour4(self):
        assert datetools.hour('28-Jul-2000 6:49:12 pm') == 18
    def test_second1(self):
        assert datetools.second('28-Jul-2000 18:49:00') == 0
    def test_second2(self):
        assert datetools.second('28-Jul-2000 18:49:59') == 59
    def test_second3(self):
        assert datetools.second('28-Jul-2000 18:49:01') == 1
    def test_second4(self):
        assert datetools.second('28-Jul-2000') == 0
    # --- end-of-month helpers --------------------------------------------
    def test_eomdate1(self):
        assert datetools.eomdate(2001, 2) == 730910
    def test_eomdate2(self):
        assert datetools.datestr(730910) == '28-Feb-2001'
    def test_eomdate3(self):
        assert [datetools.eomdate(year, 2) for year in (2002,2003,2004,2005)] == \
               [731275,731640,732006,732371]
    def test_eomday1(self):
        assert datetools.eomday(2000, 2) == 29
    def test_eomday2(self):
        assert datetools.eomday(2002, 2) == 28
    def test_eomday3(self):
        assert datetools.eomday(2001, 12) == 31
    def test_eomday4(self):
        assert datetools.eomday([2000,2001,2002,2003,2004],2) == (29,28,28,28,29)
    def test_eomday5(self):
        assert datetools.eomday(2000,[2,7,8,12]) == (29,31,31,31)
    def test_eomday6(self):
        assert datetools.eomday([2000,2001,2002,2003], [2,7,8,12]) == (29,31,31,31)
    def test_eomday7(self):
        assert datetools.eomday((2000,2001,2002,2003,2004),2) == (29,28,28,28,29)
    def test_eomday8(self):
        assert datetools.eomday(2000,(2,7,8,12)) == (29,31,31,31)
    def test_eomday9(self):
        assert datetools.eomday((2000,2001,2002,2003), [2,7,8,12]) == (29,31,31,31)
    def test_eomday10(self):
        assert datetools.eomday([2000,2001,2002,2003], (2,7,8,12)) == (29,31,31,31)
    def test_eomday11(self):
        assert datetools.eomday((2000,2001,2002,2003), (2,7,8,12)) == (29,31,31,31)
    # --- weekday() (25-Dec-2000 was a Monday) -----------------------------
    def test_weekday1(self):
        assert datetools.weekday(730845) == (2, 'Mon')
    def test_weekday2(self):
        assert datetools.weekday(730846) == (3, 'Tue')
    def test_weekday3(self):
        assert datetools.weekday(730847) == (4, 'Wed')
    def test_weekday4(self):
        assert datetools.weekday(730848) == (5, 'Thu')
    def test_weekday5(self):
        assert datetools.weekday(730849) == (6, 'Fri')
    def test_weekday6(self):
        assert datetools.weekday(730850) == (7, 'Sat')
    def test_weekday7(self):
        assert datetools.weekday(730851) == (1, 'Sun')
    def test_weekday8(self):
        assert datetools.weekday('25-Dec-2000') == (2, 'Mon')
    def test_weekday9(self):
        assert datetools.weekday('26-Dec-2000') == (3, 'Tue')
    def test_weekday10(self):
        assert datetools.weekday('27-Dec-2000') == (4, 'Wed')
    def test_weekday11(self):
        assert datetools.weekday('28-Dec-2000') == (5, 'Thu')
    def test_weekday12(self):
        assert datetools.weekday('29-Dec-2000') == (6, 'Fri')
    def test_weekday13(self):
        assert datetools.weekday('30-Dec-2000') == (7, 'Sat')
    def test_weekday14(self):
        assert datetools.weekday('31-Dec-2000') == (1, 'Sun')
    # --- daysact() --------------------------------------------------------
    def test_daysact1(self):
        assert datetools.daysact('9/7/2002') == 731466
    def test_daysact2(self):
        assert (731466, 731511, 731525) == \
               datetools.daysact(['09/07/2002', '10/22/2002', '11/05/2002'])
    def test_daysact3(self):
        assert (731466, 731511, 731525) == \
               datetools.daysact(('09/07/2002', '10/22/2002', '11/05/2002'))
    def test_daysact4(self):
        assert datetools.daysact('7-sep-2002', '25-dec-2002') == 109
    def test_daysact5(self):
        assert (109,64,50) == \
               datetools.daysact(['09/07/2002', '10/22/2002', '11/05/2002'], '12/25/2002')
    def test_daysact6(self):
        assert (109,64,50) == \
               datetools.daysact(('09/07/2002', '10/22/2002', '11/05/2002'), '12/25/2002')
    def test_daysact7(self):
        assert (-109,-64,-50) == \
               datetools.daysact('12/25/2002', ['09/07/2002', '10/22/2002', '11/05/2002'])
    def test_daysact8(self):
        assert (-109,-64,-50) == \
               datetools.daysact('12/25/2002', ('09/07/2002', '10/22/2002', '11/05/2002'))
    # --- lweekdate() ------------------------------------------------------
    def test_lweekdate1(self):
        assert datetools.datestr(datetools.lweekdate(2,2001,6)) == '25-Jun-2001'
    def test_lweekdate2(self):
        assert datetools.datestr(datetools.lweekdate(1,2007,1)) == '28-Jan-2007'
    def test_lweekdate3(self):
        assert datetools.datestr(datetools.lweekdate(2,2007,1)) == '29-Jan-2007'
    def test_lweekdate4(self):
        assert datetools.datestr(datetools.lweekdate(3,2007,1)) == '30-Jan-2007'
    def test_lweekdate5(self):
        assert datetools.datestr(datetools.lweekdate(4,2007,1)) == '31-Jan-2007'
    def test_lweekdate6(self):
        assert datetools.datestr(datetools.lweekdate(5,2007,1)) == '25-Jan-2007'
    def test_lweekdate7(self):
        assert datetools.datestr(datetools.lweekdate(6,2007,1)) == '26-Jan-2007'
    def test_lweekdate8(self):
        assert datetools.datestr(datetools.lweekdate(7,2007,1)) == '27-Jan-2007'
if __name__ == '__main__':
    # Run every TestCase in this module through the project's TODO-aware
    # runner.  NOTE(review): `support` must be imported above this chunk;
    # no import for it is visible here -- confirm.
    import __main__
    support.run_all_tests(__main__)
import types
import unittest
import sys
import os.path
import time
from unittest import _strclass
# Backwards compatibility for Python 2.3
#############################################################
try:
    # Probe: iterate an empty TestSuite.  On Python 2.3 TestSuite is not
    # iterable and this raises TypeError.
    for t in unittest.TestSuite():
        pass
except TypeError:
    def TestSuite_iter(self):
        # Yield the suite's direct children, matching 2.4+ behavior.
        return iter(self._tests)
    # Monkey-patch iteration support onto TestSuite.
    unittest.TestSuite.__iter__ = TestSuite_iter
#############################################################
# /Backwards compatibility for Python 2.3
def run_all_tests(test_mod, moretests=None):
    """Run every unittest test found in *test_mod*.

    test_mod  -- module to scan with unittest.TestLoader
    moretests -- optional extra tests/suites appended before running

    Results are printed by a verbose TodoTextRunner (reports TODO-marked
    tests separately).  Fix: `!= None` replaced with the idiomatic
    `is not None` identity check.
    """
    alltests = unittest.TestLoader().loadTestsFromModule(test_mod)
    if moretests is not None:
        alltests.addTests(moretests)
    TodoTextRunner(verbosity=2).run(alltests)
def adjust_path():
    """Prepend the parent of the script's directory to sys.path.

    Lets a test script in a subdirectory import the package it tests.
    """
    parent_dir = os.path.dirname(sys.path[0])
    sys.path = [parent_dir] + sys.path
class _Todo_Exception(Exception):
    """Base class for the TODO control-flow exceptions below."""
    def __init__(self, message):
        Exception.__init__(self, message)
        # Kept as an attribute for direct access by reporting code.
        self.message = message
class Todo_Failed(_Todo_Exception):
    """Raised when a @TODO-wrapped test fails (the expected outcome)."""
    pass
class Todo_Passed(_Todo_Exception):
    """Raised when a @TODO-wrapped test unexpectedly succeeds."""
    pass
def TODO(message="TODO"):
    """Decorator marking a test as not-yet-expected-to-pass.

    The wrapped test raises Todo_Failed when it fails and Todo_Passed when
    it unexpectedly succeeds, so TodoCase/TodoResult can report it apart
    from real failures.

    Fix: the Python-2-only `except Exception, e` (which bound an unused
    name) is now `except Exception:`, valid on Python 2 and 3; the unused
    `ret_val` binding is dropped.
    """
    def decorator(func):
        def __todo_func(*args, **kwargs):
            try:
                func(*args, **kwargs)
            except Exception:
                # Any failure is the expected outcome for a TODO test.
                raise Todo_Failed(message)
            # The test passed even though it is marked TODO.
            raise Todo_Passed(message)
        # Preserve identity for unittest reporting (pre-functools.wraps style,
        # kept consistent with the rest of this module).
        __todo_func.__name__ = func.__name__
        __todo_func.__doc__ = func.__doc__
        __todo_func.__module__ = func.__module__
        return __todo_func
    return decorator
class TodoResult(unittest.TestResult):
    """TestResult that tracks TODO-marked outcomes beside pass/fail."""
    def __init__(self):
        unittest.TestResult.__init__(self)
        # Lists of (test, formatted_traceback) pairs.
        self.todo_failed = []
        self.todo_passed = []
    def addTodoFailed(self, test, err):
        """Record a TODO test that (expectedly) failed."""
        self.todo_failed.append((test, self._exc_info_to_string(err, test)))
    def addTodoPassed(self, test, err):
        """Record a TODO test that unexpectedly passed."""
        self.todo_passed.append((test, self._exc_info_to_string(err, test)))
    def stillTodo(self):
        """Truthy when any TODO outcome has been recorded."""
        return self.todo_failed or self.todo_passed
    def wasSuccessful(self):
        """Successful only if the base result is clean and nothing is TODO."""
        return unittest.TestResult.wasSuccessful(self) and not self.stillTodo()
class TodoTextResult(unittest._TextTestResult, TodoResult):
    """Text-mode TodoResult: prints TODO outcomes the way _TextTestResult
    prints failures/errors.  NOTE(review): depends on the private, Python-2
    era unittest._TextTestResult API."""
    def __init__(self, *vargs, **kwargs):
        # Initialize both bases explicitly; positional/keyword args go to
        # the text result (stream, descriptions, verbosity).
        TodoResult.__init__(self)
        unittest._TextTestResult.__init__(self, *vargs, **kwargs)
    def addTodoFailed(self, test, err):
        # Record, then echo in the style chosen by verbosity
        # (showAll = long lines, dots = single characters).
        TodoResult.addTodoFailed(self, test, err)
        if self.showAll:
            self.stream.writeln("TODO FAIL")
        elif self.dots:
            self.stream.write('TF')
    def addTodoPassed(self, test, err):
        TodoResult.addTodoPassed(self, test, err)
        if self.showAll:
            self.stream.writeln("TODO PASS")
        elif self.dots:
            self.stream.write('TP')
    def printErrors(self):
        # TODO sections first, then the standard failure/error sections.
        self.printErrorList('TODO(PASS)', self.todo_passed)
        self.printErrorList('TODO(FAIL)', self.todo_failed)
        unittest._TextTestResult.printErrors(self)
class TodoCase(unittest.TestCase):
    """TestCase whose run() understands the Todo_Failed/Todo_Passed
    exceptions raised by @TODO-decorated methods and routes them to the
    TodoResult.addTodoFailed/addTodoPassed callbacks."""
    def __init__(self, methodName='runTest'):
        """ Create an instance of the class that will use the named test
        method when executed. Raises a ValueError if the instance does
        not have a method with the specified name.
        """
        unittest.TestCase.__init__(self, methodName)
        try:
            # Private (name-mangled) copies of the method name/doc; the
            # base class stores its own under different private names.
            self.__testMethodName = methodName
            testMethod = getattr(self, methodName)
            self.__testMethodDoc = testMethod.__doc__
        except AttributeError:
            raise ValueError, "no such test method in %s: %s" % \
                  (self.__class__, methodName)
    def shortDescription(self):
        """Returns a one-line description of the test, or None if no
        description has been provided.
        The default implementation of this method returns the first line of
        the specified test method's docstring.
        """
        doc = self.__testMethodDoc
        # and-or idiom: first docstring line, or None when there is no doc.
        return doc and doc.split("\n")[0].strip() or None
    def __str__(self):
        # _strclass comes from `from unittest import _strclass` at module
        # top (private Python 2 API).
        return "%s (%s)" % (self.__testMethodName, _strclass(self.__class__))
    def __repr__(self):
        return "<%s testMethod=%s>" % \
               (_strclass(self.__class__), self.__testMethodName)
    def __exc_info(self):
        """Return a version of sys.exc_info() with the traceback frame
        minimised; usually the top level of the traceback frame is not
        needed.
        """
        exctype, excvalue, tb = sys.exc_info()
        if sys.platform[:4] == 'java': ## tracebacks look different in Jython
            return (exctype, excvalue, tb)
        # NOTE(review): both branches return the same tuple -- the frame
        # minimisation described above is not actually performed.
        return (exctype, excvalue, tb)
    def run(self, result):
        # Mirrors unittest.TestCase.run, with extra TODO exception routing.
        result.startTest(self)
        testMethod = getattr(self, self.__testMethodName)
        try:
            try:
                self.setUp()
            except KeyboardInterrupt:
                raise
            except:
                # setUp failure: report and skip the test body entirely.
                result.addError(self, self.__exc_info())
                return
            ok = False
            try:
                testMethod()
                ok = True
            except Todo_Failed:
                # Expected failure of a @TODO test.
                result.addTodoFailed(self, self.__exc_info())
            except Todo_Passed:
                # @TODO test unexpectedly passed.
                result.addTodoPassed(self, self.__exc_info())
            except self.failureException:
                result.addFailure(self, self.__exc_info())
            except KeyboardInterrupt:
                raise
            except:
                result.addError(self, self.__exc_info())
            try:
                self.tearDown()
            except KeyboardInterrupt:
                raise
            except:
                # tearDown failure turns a pass into an error.
                result.addError(self, self.__exc_info())
                ok = False
            if ok: result.addSuccess(self)
        finally:
            result.stopTest(self)
class TodoTextRunner(unittest.TextTestRunner):
    """TextTestRunner using TodoTextResult so TODO counts are reported."""
    def run(self, test):
        "Run the given test case or test suite."
        # Build the TODO-aware result directly instead of _makeResult().
        result = TodoTextResult(self.stream, self.descriptions, self.verbosity)
        startTime = time.time()
        test.run(result)
        stopTime = time.time()
        timeTaken = stopTime - startTime
        result.printErrors()
        self.stream.writeln(result.separator2)
        run = result.testsRun
        # and-or idiom: pluralize "test" when run != 1.
        self.stream.writeln("Ran %d test%s in %.3fs" %
                            (run, run != 1 and "s" or "", timeTaken))
        self.stream.writeln()
        if not result.wasSuccessful():
            # A run that is clean apart from TODO items is labeled TODO,
            # not FAILED.
            if result.stillTodo():
                self.stream.write("TODO (")
            else:
                self.stream.write("FAILED (")
            status = ("failures", "errors", "todo_passed", "todo_failed")
            self.stream.write(", ".join(["%s=%d" % (s, len(getattr(result, s))) for s in status]))
            self.stream.writeln(")")
        else:
            self.stream.writeln("OK")
        return result
# Default TestCase for test modules: TodoCase layers @TODO handling on
# top of unittest.TestCase.
TestCase = TodoCase #unittest.TestCase
### The following are some convenience functions used throughout the test
### suite
def test_equality(eq_tests, ne_tests, repeats=10):
eq_error = "Problem with __eq__ with %s and %s"
ne_error = "Problem with __ne__ with %s and %s"
# We run this multiple times to try and shake out any errors
# related to differences in set/dict/etc ordering
for _ in xrange(0, repeats):
for (left, right) in eq_tests:
try:
assert left == right
except AssertionError:
raise AssertionError(eq_error % (left, right))
try:
assert not left != right
except AssertionError:
raise AssertionError(ne_error % (left, right))
for (left, right) in ne_tests:
try:
assert left != right
except AssertionError:
raise AssertionError(ne_error % (left, right))
try:
assert not left == right
except AssertionError:
raise AssertionError(eq_error % (left, right))
def test_hash(eq_tests, ne_tests, repeats=10):
hash_error = "Problem with hash() with %s and %s"
# We run this multiple times to try and shake out any errors
# related to differences in set/dict/etc ordering
for _ in xrange(0, repeats):
for (left, right) in eq_tests:
try:
assert hash(left) == hash(right)
except AssertionError:
raise AssertionError(hash_error % (left, right))
for (left, right) in ne_tests:
try:
assert hash(left) != hash(right)
except AssertionError:
raise AssertionError(hash_error % (left, right))
| Python |
#!/usr/bin/env python
__author__ = 'Ramesh Balasubramanian <ramesh@finpy.org>'
__version__ = "$Revision: 1.15 $"
__credits__ = 'functions in the datetools interface have a high degree of Matlab(TM) compatibility'
import datetime
import time
import math
import sys
import calendar
# Calendar weeks start on Sunday (6), matching the MATLAB-style
# 1='Sun' .. 7='Sat' numbering used throughout this module.
calendar.setfirstweekday(6)
# Lookup table: datetime.weekday() index -> (MATLAB dayNum, name).
# NOTE(review): the IronPython (.NET) branch uses a different ordering --
# presumably its weekday numbering differs; confirm on that platform.
if sys.version.find('.NET') != -1:
    __weekday = ((1, 'Sun'),(2, 'Mon'), (3, 'Tue'), (4, 'Wed'), (5, 'Thu'), (6, 'Fri'), (7, 'Sat'))
else:
    __weekday = ((2, 'Mon'), (3, 'Tue'), (4, 'Wed'), (5, 'Thu'), (6, 'Fri'), (7, 'Sat'), (1, 'Sun'))
def __fromordinal(gdays):
    """Return the calendar date for Python ordinal day count *gdays*.

    On IronPython (.NET) the date is reached by adding a day delta to
    System.DateTime.Today -- presumably because date.fromordinal is not
    usable there (TODO confirm); elsewhere date.fromordinal is used.
    """
    if sys.version.find('.NET') != -1:
        import System
        # Days between the requested ordinal and today.
        add_days = gdays - datetime.date.today().toordinal()
        dt = System.DateTime.Today
        ret_dt = dt.AddDays(add_days)
        # Normalize to a naive datetime at midnight.
        return datetime.datetime(ret_dt.Year, ret_dt.Month, ret_dt.Day, 0, 0, 0)
    else:
        return datetime.date.fromordinal(int(gdays))
# Per-month day counts indexed 1-12 (index 0 unused) for common and leap
# years.
__days = (0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31)
__leapDays = (0, 31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31)
# strftime formats for datestr()'s dateForm codes 0-29.  Empty strings
# mark MATLAB dateForms that are not implemented (4, 9, 17, 18, 27).
__dateFormat = (
    '%d-%b-%Y %H:%M:%S',  # 0  dd-mmm-yyyy HH:MM:SS
    '%d-%b-%Y',           # 1  dd-mmm-yyyy
    '%m/%d/%y',           # 2  mm/dd/yy
    '%b',                 # 3  mmm
    '',                   # 4  (m -- not implemented)
    '%m',                 # 5  mm
    '%m/%d',              # 6  mm/dd
    '%d',                 # 7  dd
    '%a',                 # 8  ddd
    '',                   # 9  (d -- not implemented)
    '%Y',                 # 10 yyyy
    '%y',                 # 11 yy
    '%b%y',               # 12 mmmyy
    '%H:%M:%S',           # 13 HH:MM:SS
    '%I:%M:%S %p',        # 14 HH:MM:SS PM
    '%H:%M',              # 15 HH:MM
    '%I:%M %p',           # 16 HH:MM PM
    '',                   # 17 (QQ-YY -- not implemented)
    '',                   # 18 (QQ -- not implemented)
    '%d/%m',              # 19 dd/mm
    '%d/%m/%y',           # 20 dd/mm/yy
    '%b.%d.%Y %H:%M:%S',  # 21 mmm.dd.yyyy HH:MM:SS
    '%b.%d.%Y',           # 22 mmm.dd.yyyy
    '%m/%d/%Y',           # 23 mm/dd/yyyy
    '%d/%m/%Y',           # 24 dd/mm/yyyy
    '%y/%m/%d',           # 25 yy/mm/dd
    '%Y/%m/%d',           # 26 yyyy/mm/dd
    '',                   # 27 (QQ-YYYY -- not implemented)
    '%b%Y',               # 28 mmmyyyy
    '%d-%b-%Y %H:%M',     # 29 dd-mmm-yyyy HH:MM
)
def now():
    """Return the current date and time as a serial date number.

    The integer part counts days since the MATLAB base date
    (1-Jan-0000 = day 1, hence the +366 over Python's ordinal); the
    fraction encodes the time of day.
    """
    current = datetime.datetime.now()
    elapsed_seconds = (current.hour * 3600) + (current.minute * 60) + current.second
    return 366 + current.date().toordinal() + (elapsed_seconds / 86400.0)
def today():
    """Return the current date as an integer serial date number.

    Day 1 is 1-Jan-0000 (MATLAB convention), hence the +366 offset over
    Python's date ordinal.
    """
    return datetime.date.today().toordinal() + 366
def datenum(p, *args):
    """Serial date number (day 1 = 1-Jan-0000, MATLAB convention).

    datenum(dateString)                       -- parse a date string
    datenum(serial)                           -- validate/round a number
    datenum(year, month, day[, h[, m[, s]]])  -- build from components

    Strings accept the module's parser formats, e.g. '19-may-1999',
    '5/19/99', '19-may-1999, 6:37 pm', '5/19/99/18:37'.  A fractional
    part encodes the time of day.  Known limitation: serial numbers
    below 693962 are rejected.
    """
    if len(args) == 0:
        if type(p) == str:
            return __string2num(p)
        if type(p) == int or type(p) == float:
            try:
                # Round-trip through datestr to reject out-of-range serials.
                datenum(datestr(p, 29))
                return round(p, 4)
            except ValueError:
                raise Exception("Invalid datenum. must be >= 693962")
        raise Exception("When invoked with 1 argument, datenum expects a string, int or float")
    if len(args) <= 5:
        # Components: p is the year, args supply month..second.
        return __dateints2num(p, *args)
    raise Exception("datenum accepts 1, 3, 4, 5 or 6 arguments only")
def datestr(dateNumber, dateForm = -1):
    """Format a serial date number as a date/time string.

    dateForm selects one of the formats below; the default (-1) picks
    1, 16 or 0 automatically depending on whether dateNumber carries
    only a date, only a time, or both.

        0  dd-mmm-yyyy HH:MM:SS   1  dd-mmm-yyyy    2  mm/dd/yy
        3  mmm                    5  mm             6  mm/dd
        7  dd                     8  ddd           10  yyyy
       11  yy                    12  mmmyy         13  HH:MM:SS
       14  HH:MM:SS PM           15  HH:MM         16  HH:MM PM
       19  dd/mm                 20  dd/mm/yy      21  mmm.dd.yyyy HH:MM:SS
       22  mmm.dd.yyyy           23  mm/dd/yyyy    24  dd/mm/yyyy
       25  yy/mm/dd              26  yyyy/mm/dd    28  mmmyyyy
       29  dd-mmm-yyyy HH:MM

    Forms 4, 9, 17, 18 and 27 raise NotImplementedError.  Known
    limitation: serial numbers 0..693961 are not handled.
    """
    if dateForm == -1:
        # Infer the form from the value itself.
        if __isFloat(dateNumber):
            if math.floor(dateNumber) == 0:
                # Pure fraction: time of day only.
                return datetime.time(*__timeTuple(dateNumber)).strftime(__dateFormat[16])
            # Date plus time.
            return __dateTime(dateNumber).strftime(__dateFormat[0])
        if __isInteger(dateNumber):
            # Whole number: date only (-366 shifts the MATLAB epoch back
            # to a Python ordinal).
            return __fromordinal(dateNumber - 366).strftime(__dateFormat[1])
    dateTimeObject = __dateTime(dateNumber)
    fmt = __dateFormat[dateForm]
    if fmt == '':
        raise NotImplementedError('dateForm ' + repr(dateForm))
    return dateTimeObject.strftime(fmt)
def datevec(date):
    """Decompose a date into (year, month, day, hour, minute, second).

    date may be a serial date number, a date string, or a list/tuple of
    either -- sequences return a tuple of component tuples.  All six
    components are integers.
    """
    if type(date) in (list, tuple):
        # Vectorize element-wise.
        return tuple([datevec(element) for element in date])
    if type(date) == str:
        return __dateParse(date).timetuple()[0:6]
    if type(date) == int or type(date) == float:
        return __dateTime(date).timetuple()[0:6]
    raise Exception("Argument for datevec must be a datenum/datestr or a list/tuple of datenum/datestr")
def year(date):
    """Year component of a serial date number or date string."""
    return datevec(date)[0]


def month(date):
    """Month component of a serial date number or date string."""
    return datevec(date)[1]


def day(date):
    """Day-of-month component of a serial date number or date string."""
    return datevec(date)[2]


def hour(date):
    """Hour of the day for a serial date number or date string."""
    return datevec(date)[3]


def minute(date):
    """Minute of the hour for a serial date number or date string."""
    return datevec(date)[4]


def second(date):
    """Second of the minute for a serial date number or date string."""
    return datevec(date)[5]
def eomdate(year, month):
    """Serial date number of the last date of the given month.

    year  -- four-digit integer
    month -- integer 1 through 12
    Use datestr() to render the result as a formatted date string.
    """
    last_day = eomday(year, month)
    return datenum(year, month, last_day)
def eomday(year, month):
    """Last day of the month for the given year(s) and month(s).

    year  -- four-digit integer, or a list/tuple of them
    month -- integer 1-12, or a list/tuple of them
    When both are sequences they must have equal length; mixed
    scalar/sequence arguments broadcast the scalar.  Sequence input
    yields a tuple of results.
    """
    def last_day(y, m):
        # and-or selection between the leap-year and common-year tables.
        return (__isLeap(y) and __leapDays[m]) or __days[m]
    year_is_seq = type(year) in (list, tuple)
    month_is_seq = type(month) in (list, tuple)
    if year_is_seq and month_is_seq and len(year) == len(month):
        return tuple([last_day(y, m) for y, m in zip(year, month)])
    if year_is_seq and type(month) == int:
        return tuple([last_day(y, month) for y in year])
    if type(year) == int and month_is_seq:
        return tuple([last_day(year, m) for m in month])
    if type(year) == int and type(month) == int:
        return last_day(year, month)
    raise Exception("Argument Exception: Invalid type(s) or combination of type(s)")
def weekday(date):
    """Day of the week for a serial date number or date string.

    Returns (dayNum, dayString) where 1='Sun', 2='Mon', ... 7='Sat'.
    """
    components = datevec(date)
    return __weekday[datetime.datetime(*components).weekday()]
def daysact(startDate, endDate = None):
    """Actual number of days between dates.

    daysact(startDate) returns startDate's serial date number (days since
    the MATLAB base date, 1-Jan-0000 = day 1).
    daysact(startDate, endDate) returns endDate - startDate in days,
    negative when endDate is earlier.

    Either argument may be a list/tuple of serial numbers or date
    strings; the result is then a tuple.  When both are sequences they
    must have equal length.

    Fixes: `== None` replaced with `is None`; removed an unreachable
    inner branch (the second startDate sequence check could only run
    after startDate was already known not to be a sequence).
    """
    if endDate is None:
        if type(startDate) in (list, tuple):
            return tuple([daysact(_startDate) for _startDate in startDate])
        return datenum(startDate)
    if type(startDate) in (list, tuple):
        if type(endDate) in (list, tuple):
            # Pairwise differences require equal lengths.
            assert len(startDate) == len(endDate), "len(startDate) != len(endDate)"
            return tuple([daysact(_s, _e) for _s, _e in zip(startDate, endDate)])
        return tuple([daysact(_startDate, endDate) for _startDate in startDate])
    if type(endDate) in (list, tuple):
        return tuple([daysact(startDate, _endDate) for _endDate in endDate])
    return datenum(endDate) - datenum(startDate)
def lweekdate(weekday, year, month, nextDay=0):
    """Serial date number of the LAST occurrence of a weekday in a month.

    weekday -- 1='Sunday' .. 7='Saturday'
    year    -- four-digit integer
    month   -- 1 through 12
    nextDay -- accepted but NOT implemented (0 = ignore)

    Relies on calendar.setfirstweekday(6) at module top, which makes
    column weekday-1 of monthcalendar() the requested day.
    """
    assert weekday in range(1,8), "weekday must be in range(1,8)"
    assert month in range(1,13), "month must be in range(1,13)"
    assert year in range(0, 10000), "year must be in range(0,10000)"
    assert nextDay in range(0,8), "weekday must be in range(0,8)"
    weeks = calendar.monthcalendar(year, month)
    # A 0 cell means the day falls outside the month; fall back to the
    # second-to-last week.
    day = weeks[-1][weekday - 1] or weeks[-2][weekday - 1]
    return datenum(year, month, day)
# -----------------------------------------------------------------------------
# private functions
# -----------------------------------------------------------------------------
def __string2num(dateString):
    """Serial date number for a parseable date string."""
    return __datenum(__dateParse(dateString))
def __dateints2num(year, month, day, hour = 0, minute = 0, second = 0):
    """Serial date number from integer date/time components.

    Bounds checks mirror datetime's; the day<=31 bound is loose and the
    exact month length is validated by datetime itself.
    """
    assert year >= 0
    assert month >= 1 and month <= 12
    assert day >= 1 and day <= 31
    assert hour >= 0 and hour <= 23
    assert minute >= 0 and minute <= 59
    assert second >= 0 and second <= 59
    moment = datetime.datetime(year, month, day, hour, minute, second)
    return __datenum(moment)
def __datenum(dateTime):
    """MATLAB-style serial date number (day 1 = 1-Jan-0000) for a datetime,
    rounded to 4 decimal places."""
    day_count = 366 + datetime.date(dateTime.year, dateTime.month, dateTime.day).toordinal()
    second_of_day = (dateTime.hour * 3600) + (dateTime.minute * 60) + dateTime.second
    return round(day_count + second_of_day / 86400.0, 4)
def __isLeap(year):
    """True for Gregorian leap years: divisible by 4, excluding centuries
    not divisible by 400.

    Fix: the if/return True/else/return False ladder is replaced with
    calendar.isleap (calendar is already imported at module top), which
    implements exactly the original rule.
    """
    return calendar.isleap(year)
def __ndays(year, months, day):
    """Clamp *day* to the length of the given month in the given year."""
    if __isLeap(year):
        month_length = __leapDays[months]
    else:
        month_length = __days[months]
    return min(day, month_length)
def __dateParse(dateString):
    """Parse a date/time string into a datetime.datetime.

    Tries each supported strptime format in the same order as the
    original try/except cascade; formats without a year ('5/19',
    '19-may') assume the current year.  Raises ValueError when nothing
    matches.

    Fixes two defects: the '%m/%d' branch called .insert() on a datetime
    built from a 2-tuple (it always failed inside its bare except, so
    '5/19' never parsed), and the '%d-%b' branch parsed the literal
    string '19-may' instead of its argument.
    """
    lowered = dateString.lower()
    # Full formats, in the original priority order.  struct_time[0:6] is
    # (year, month, day, hour, minute, second); date-only formats parse
    # with a 00:00:00 time, which matches the original behavior.
    full_formats = (
        "%d-%b-%Y %H:%M:%S", "%d-%b-%Y, %H:%M:%S",
        "%d-%b-%Y %I:%M:%S %p", "%d-%b-%Y, %I:%M:%S %p",
        "%m-%d-%Y %H:%M:%S", "%m-%d-%Y, %H:%M:%S",
        "%m-%d-%Y %I:%M:%S %p", "%m-%d-%Y, %I:%M:%S %p",
        "%m/%d/%y/%I:%M:%S %p", "%m/%d/%y %H:%M:%S",
        "%m-%d-%y %I:%M:%S %p", "%m-%d-%y %H:%M:%S",
        "%m-%d-%y, %I:%M:%S %p", "%m-%d-%y, %H:%M:%S",
        "%d-%b-%Y %I:%M %p", "%d-%b-%Y, %I:%M %p",
        "%d-%b-%Y %H:%M", "%d-%b-%Y, %H:%M",
        "%m-%d-%Y %I:%M %p", "%m-%d-%Y, %I:%M %p",
        "%m-%d-%Y %H:%M", "%m-%d-%Y, %H:%M",
        "%m/%d/%y/%I:%M %p", "%m-%d-%y %I:%M %p",
        "%m/%d/%y/%H:%M", "%m-%d-%y %H:%M",
        "%m/%d/%y", "%m-%d-%y", "%m/%d/%Y", "%m-%d-%Y",
        "%d-%b-%y", "%b %d, %Y", "%d-%b-%Y",
    )
    for fmt in full_formats:
        try:
            return datetime.datetime(*time.strptime(lowered, fmt)[0:6])
        except ValueError:
            pass
    # Year-less formats: assume the current year.
    for fmt in ("%m/%d", "%d-%b"):
        try:
            parsed_month, parsed_day = time.strptime(lowered, fmt)[1:3]
            return datetime.datetime(datetime.date.today().year, parsed_month, parsed_day)
        except ValueError:
            pass
    raise ValueError ('Unable to parse dateString. Format Error ' + dateString)
# True when n has / does not have a fractional part, respectively.
__isFloat = lambda n: round(n) - n != 0
__isInteger = lambda n: round(n) - n == 0
# Split n into (integer part, fractional part).
__numberSplit = lambda n: (math.floor(n), n - math.floor(n))
# (hour, minute, second) ints from the fractional (time-of-day) part of a
# serial date number.  NOTE(review): the seconds are round()ed, which
# looks like it could yield 60 at a boundary -- confirm callers tolerate
# that.
__timeTuple = lambda n: \
    (int(__numberSplit(__numberSplit(n)[1] * 24)[0]), \
     int(__numberSplit(__numberSplit(__numberSplit(n)[1] * 24)[1] * 60)[0]), \
     int(round(__numberSplit(__numberSplit(__numberSplit(n)[1] * 24)[1] * 60)[1] * 60)))
# (year, month, day) from the integer (date) part; -366 converts the
# MATLAB epoch (day 1 = 1-Jan-0000) to a Python ordinal.
__dateTuple = lambda n: __fromordinal(int(math.floor(n-366))).timetuple()[0:3]
# Full datetime for a (possibly fractional) serial date number.
__dateTime = lambda n: datetime.datetime(*(__dateTuple(n) + __timeTuple(n)))
| Python |
#!/usr/bin/env python
import finpy
from finpy.financial import *
import datetools
cf1 = [536]
cf = cf1 * 145
cf.insert(0, -50600)
print '145 payments plan IRR is', finpy.irr(cf) * 100
cf1 = [1014]
cf = cf1 * 60
cf.insert(0, -50600)
print '60 payments plan IRR is', finpy.irr(cf) * 100
cf1 = [1842]
cf = cf1 * 245
cf.insert(0, -268000)
print '245 payments plan IRR is', finpy.irr(cf) * 100
bond.yieldcurve()
| Python |
#!/usr/bin/env python
import finpy
from finpy.financial import *
import datetools
# NOTE(review): this script is byte-identical to the preceding cash-flow
# comparison script; consider consolidating.
# 145 monthly payments of 536 against a -50600 outlay.
cf1 = [536]
cf = cf1 * 145
cf.insert(0, -50600)
print '145 payments plan IRR is', finpy.irr(cf) * 100
# 60 payments of 1014, same outlay.
cf1 = [1014]
cf = cf1 * 60
cf.insert(0, -50600)
print '60 payments plan IRR is', finpy.irr(cf) * 100
# 245 payments of 1842 against a -268000 outlay.
cf1 = [1842]
cf = cf1 * 245
cf.insert(0, -268000)
print '245 payments plan IRR is', finpy.irr(cf) * 100
# bond is provided by `from finpy.financial import *` above.
bond.yieldcurve()
| Python |
#!/usr/bin/env python
# file pyvest.py
import datetime
import os.path
import data
from pysqlite2 import dbapi2 as sqlite
import pylab
__version__ = "$Revision: 1.10 $"
__author__ = "Ramesh Balasubramanian <ramesh@finpy.org>"
def compoundInterest(presentValue,
                     periodicRate,
                     periodsCount):
    """
    -----------------------------------------------------------------------
    Function: compoundInterest(presentValue, periodicRate, periodsCount)
    Compounding is based on the principle of earning interest-on-interest.
    Components of compound interest calculation:
     - Principal (a.k.a Present Value) - original amount being invested.
     - Rate of interest - interest earned per period
     - Time Periods: number of periods over which interest is earned.
    Calculation:
     - Future Value = PresentValue * ((1 + Interest) ** TimePeriods)
    -----------------------------------------------------------------------
    Parameters (constraints match the asserts below):
     - presentValue, > 0
     - periodicRate, >= 0, expressed as a percentage value
     - periodsCount, > 0
    -----------------------------------------------------------------------
    Functionality:
     - Computes future value for a number of time periods.
     - Plots a bar graph with time periods on x-axis and value on y-axis
       (blocks until the pylab window is closed).
     - Returns the list of future values for periods 0..periodsCount.
    -----------------------------------------------------------------------
    Sample Usage:
    -----------------------------------------------------------------------
    import pyvest
    ci = pyvest.compoundInterest(presentValue = 30000,
                                 periodicRate = 6.5,
                                 periodsCount = 20)
    print ci
    -----------------------------------------------------------------------
    """
    assert periodsCount > 0, "Number of periods must be > 0"
    assert presentValue > 0, "PV must be > 0"
    assert periodicRate >= 0, "Interest Rate must be >= 0"
    # Value at the end of each period 0..periodsCount, rounded to whole units.
    futureValues = \
        [round(presentValue * (1.0 + (periodicRate / 100.0)) ** period) \
        for period in range(periodsCount + 1)]
    pylab.xlabel('Time Periods')
    pylab.ylabel('Future Values')
    pylab.title('Compound Interest ' + '%.2f' % (periodicRate) + '%')
    assert futureValues != None
    assert len(futureValues[1:]) == len(range(periodsCount))
    # Bars show periods 1..periodsCount; the initial principal futureValues[0]
    # is only used as the y-axis lower bound below.
    pylab.bar(range(periodsCount), futureValues[1:], width = 1)
    limits = \
        [0, periodsCount+0.5, futureValues[0], (1.5 * futureValues[-1]) - \
        (0.5 * futureValues[-2])]
    pylab.axis(limits)
    pylab.yticks(futureValues, ['%d' % fv for fv in futureValues])
    pylab.show()
    return futureValues
def bondMeasures(price, # clean price
                 maturityDate, # datetime.date datatype
                 settlementDate, # datetime.date datatype
                 coupon = 0, # percentage
                 principal = 1000, # principal paid, if held to maturity
                 couponsPerYear = 2): # number of coupons per year
    """
    -----------------------------------------------------------------------
    Function bondMeasures(price, maturityDate, settlementDate,
                          coupon = 0, principal = 1000, couponsPerYear = 2)
    STATUS: stub -- not yet implemented; currently returns None.
    Intended to return the following as a tuple:
        1. accrued interest
        2. current yield
        3. ytm
        4. amount and timing of cash flows
        5. reinvestment income to be earned to realize ytm
    -----------------------------------------------------------------------
    """
    # TODO: from maturityDate compute the last coupon date
    # TODO: compute accrued interest from last coupon date and settlement date
if __name__ == '__main__':
    # Library module: nothing to run directly.
    pass
| Python |
#!/opt/ActivePython/bin/python
# daily.py
#--------------------------------------------------------------------------------------------
# Run this script from the same directory where treasury.db is located
#--------------------------------------------------------------------------------------------
__version__ = "$Revision: 1.8 $"
__author__ = 'Ramesh Balasubrmanian <ramesh@finpy.org>'
__doc__ = """
Runs on weekdays at 6:00 pm US EST to check if new data is available in treasury website
Any new data is loaded into treasury.db
Usage: daily.py [/path/to/treasury.db]
if path is not supplied treasury.db is assumed to be in the current directory.
"""
import urllib
from xml.dom import minidom
import datetime
import time
import sys
from pysqlite2 import dbapi2 as sqlite
# XML tag names for each maturity column of the Treasury feed, in the same
# order as the columns written to the yield_curve table.
tagNames = ('BC_1MONTH', \
            'BC_3MONTH', \
            'BC_6MONTH', \
            'BC_1YEAR', \
            'BC_2YEAR', \
            'BC_3YEAR', \
            'BC_5YEAR', \
            'BC_7YEAR', \
            'BC_10YEAR', \
            'BC_20YEAR', \
            'BC_30YEAR')
# Daily yield-curve XML feed (ustreas.gov location as of this revision).
url = 'http://www.ustreas.gov/offices/domestic-finance/debt-management/interest-rate/yield.xml'
def _setYieldData(ydata, yieldData, key):
    """Fill the row yieldData[key] from the G_BC_CAT child nodes in ydata.

    Recognized tags (see tagNames) are formatted to two decimals; values
    that fail float() conversion are stored as ''.
    """
    columns = list(tagNames)
    for category in ydata:
        for node in category.childNodes:
            if node.firstChild is None or node.nodeName not in tagNames:
                continue
            slot = columns.index(node.nodeName)
            try:
                yieldData[key][slot] = '%.2f' % float(node.firstChild.nodeValue)
            except ValueError:
                yieldData[key][slot] = ''
def _fetchCurrent(url, yieldData):
    """Download the Treasury yield XML feed and merge it into yieldData.

    yieldData maps record-date ordinal (as a string) to a list of 11 yield
    strings, one per tagNames column; columns missing from the feed stay ''.
    """
    xmldata = urllib.urlopen(url).read()
    doc = minidom.parseString(xmldata)
    dates = doc.childNodes[0].getElementsByTagName('G_NEW_DATE')
    for newDate in dates:
        key = newDate.getElementsByTagName('BID_CURVE_DATE')[0].firstChild.nodeValue
        # Feed dates look like '19-MAY-06'; lower() so strptime's %b matches.
        keyDate = datetime.date(*time.strptime(key.lower(), "%d-%b-%y")[0:3])
        keyDateOrdinal = repr(keyDate.toordinal())
        # Start with an all-empty row; _setYieldData fills in known columns.
        yieldData[keyDateOrdinal] = ['', '', '', '', '', '', '', '', '', '', '']
        ydata = newDate.getElementsByTagName('G_BC_CAT')
        _setYieldData(ydata, yieldData, keyDateOrdinal)
def checkData(dbf = 'treasury.db'):
    """Return the most recent record_date ordinal stored in the database.

    dbf -- path to the treasury.db SQLite file.
    NOTE(review): the connection is never closed; tolerated because the
    script exits shortly after.
    """
    connection = sqlite.connect(dbf)
    cursor = connection.cursor()
    sql = """select max(record_date)
    from yield_curve"""
    cursor.execute(sql)
    row = cursor.fetchone()
    if row == None:
        # NOTE(review): an aggregate query normally yields one row (possibly
        # (None,)) rather than no rows, so this branch may be unreachable --
        # and `return row[0]` below would raise TypeError if it were taken.
        print 'Yield Curve data not available in db. Unexpected error'
    else:
        print 'Most recent record_date in database is', datetime.date.fromordinal(row[0])
    return row[0]
def getData(maxRecordDate = 0):
    """
    fetch current month yield curve data from treasury web site.

    Returns a dict mapping record-date ordinal (string) -> list of 11 yield
    strings, dropping all-empty rows and rows dated at or before
    maxRecordDate.
    """
    yieldData = {}
    _fetchCurrent(url, yieldData)
    empty = ['', '', '', '', '', '', '', '', '', '', '']
    # Iterate over a snapshot: popping while iterating the live items() view
    # raises RuntimeError on Python 3 and is fragile style even on Python 2.
    for k, v in list(yieldData.items()):
        if v == empty or int(k) <= int(maxRecordDate):
            yieldData.pop(k)
    return yieldData
def writeToDb(yieldData, dbf = 'treasury.db'):
    """Insert each yieldData row into the yield_curve table and commit.

    yieldData -- dict of record-date ordinal (string) -> list of 11 yield
                 strings, as produced by getData().
    dbf       -- path to the treasury.db SQLite file.
    """
    connection = sqlite.connect(dbf)
    cursor = connection.cursor()
    # NOTE(review): SQL assembled by string concatenation.  Values come from
    # the parsed Treasury feed rather than user input, but parameterized
    # queries would still be safer.
    sqls = ["insert into yield_curve values(" + k + ',' + ','.join([rate for rate in v ]) + ")" \
            for k, v in yieldData.items()]
    count = [cursor.execute(sql) for sql in sqls]
    print len(count),' records added to treasury.db'
    connection.commit()
    cursor.close()
    connection.close()
if __name__ == '__main__':
    print __doc__
    if len(sys.argv) == 2:
        # Explicit path to treasury.db supplied on the command line.
        writeToDb(getData(checkData(sys.argv[1])), sys.argv[1])
    else:
        # Default: treasury.db in the current working directory.
        writeToDb(getData(checkData()))
| Python |
#!/opt/ActivePython/bin/python
# daily.py
#--------------------------------------------------------------------------------------------
# Run this script from the same directory where treasury.db is located
#--------------------------------------------------------------------------------------------
__version__ = "$Revision: 1.8 $"
__author__ = 'Ramesh Balasubrmanian <ramesh@finpy.org>'
__doc__ = """
Runs on weekdays at 6:00 pm US EST to check if new data is available in treasury website
Any new data is loaded into treasury.db
Usage: daily.py [/path/to/treasury.db]
if path is not supplied treasury.db is assumed to be in the current directory.
"""
import urllib
from xml.dom import minidom
import datetime
import time
import sys
from pysqlite2 import dbapi2 as sqlite
# XML tag names for each maturity column of the Treasury feed, in the same
# order as the columns written to the yield_curve table.
tagNames = ('BC_1MONTH', \
            'BC_3MONTH', \
            'BC_6MONTH', \
            'BC_1YEAR', \
            'BC_2YEAR', \
            'BC_3YEAR', \
            'BC_5YEAR', \
            'BC_7YEAR', \
            'BC_10YEAR', \
            'BC_20YEAR', \
            'BC_30YEAR')
# Daily yield-curve XML feed (ustreas.gov location as of this revision).
url = 'http://www.ustreas.gov/offices/domestic-finance/debt-management/interest-rate/yield.xml'
def _setYieldData(ydata, yieldData, key):
    """Fill the row yieldData[key] from the G_BC_CAT child nodes in ydata.

    Recognized tags (see tagNames) are formatted to two decimals; values
    that fail float() conversion are stored as ''.
    """
    columns = list(tagNames)
    for category in ydata:
        for node in category.childNodes:
            if node.firstChild is None or node.nodeName not in tagNames:
                continue
            slot = columns.index(node.nodeName)
            try:
                yieldData[key][slot] = '%.2f' % float(node.firstChild.nodeValue)
            except ValueError:
                yieldData[key][slot] = ''
def _fetchCurrent(url, yieldData):
    """Download the Treasury yield XML feed and merge it into yieldData.

    yieldData maps record-date ordinal (as a string) to a list of 11 yield
    strings, one per tagNames column; columns missing from the feed stay ''.
    """
    xmldata = urllib.urlopen(url).read()
    doc = minidom.parseString(xmldata)
    dates = doc.childNodes[0].getElementsByTagName('G_NEW_DATE')
    for newDate in dates:
        key = newDate.getElementsByTagName('BID_CURVE_DATE')[0].firstChild.nodeValue
        # Feed dates look like '19-MAY-06'; lower() so strptime's %b matches.
        keyDate = datetime.date(*time.strptime(key.lower(), "%d-%b-%y")[0:3])
        keyDateOrdinal = repr(keyDate.toordinal())
        # Start with an all-empty row; _setYieldData fills in known columns.
        yieldData[keyDateOrdinal] = ['', '', '', '', '', '', '', '', '', '', '']
        ydata = newDate.getElementsByTagName('G_BC_CAT')
        _setYieldData(ydata, yieldData, keyDateOrdinal)
def checkData(dbf = 'treasury.db'):
    """Return the most recent record_date ordinal stored in the database.

    dbf -- path to the treasury.db SQLite file.
    NOTE(review): the connection is never closed; tolerated because the
    script exits shortly after.
    """
    connection = sqlite.connect(dbf)
    cursor = connection.cursor()
    sql = """select max(record_date)
    from yield_curve"""
    cursor.execute(sql)
    row = cursor.fetchone()
    if row == None:
        # NOTE(review): an aggregate query normally yields one row (possibly
        # (None,)) rather than no rows, so this branch may be unreachable --
        # and `return row[0]` below would raise TypeError if it were taken.
        print 'Yield Curve data not available in db. Unexpected error'
    else:
        print 'Most recent record_date in database is', datetime.date.fromordinal(row[0])
    return row[0]
def getData(maxRecordDate = 0):
    """
    fetch current month yield curve data from treasury web site.

    Returns a dict mapping record-date ordinal (string) -> list of 11 yield
    strings, dropping all-empty rows and rows dated at or before
    maxRecordDate.
    """
    yieldData = {}
    _fetchCurrent(url, yieldData)
    empty = ['', '', '', '', '', '', '', '', '', '', '']
    # Iterate over a snapshot: popping while iterating the live items() view
    # raises RuntimeError on Python 3 and is fragile style even on Python 2.
    for k, v in list(yieldData.items()):
        if v == empty or int(k) <= int(maxRecordDate):
            yieldData.pop(k)
    return yieldData
def writeToDb(yieldData, dbf = 'treasury.db'):
    """Insert each yieldData row into the yield_curve table and commit.

    yieldData -- dict of record-date ordinal (string) -> list of 11 yield
                 strings, as produced by getData().
    dbf       -- path to the treasury.db SQLite file.
    """
    connection = sqlite.connect(dbf)
    cursor = connection.cursor()
    # NOTE(review): SQL assembled by string concatenation.  Values come from
    # the parsed Treasury feed rather than user input, but parameterized
    # queries would still be safer.
    sqls = ["insert into yield_curve values(" + k + ',' + ','.join([rate for rate in v ]) + ")" \
            for k, v in yieldData.items()]
    count = [cursor.execute(sql) for sql in sqls]
    print len(count),' records added to treasury.db'
    connection.commit()
    cursor.close()
    connection.close()
if __name__ == '__main__':
    print __doc__
    if len(sys.argv) == 2:
        # Explicit path to treasury.db supplied on the command line.
        writeToDb(getData(checkData(sys.argv[1])), sys.argv[1])
    else:
        # Default: treasury.db in the current working directory.
        writeToDb(getData(checkData()))
| Python |
#!/usr/bin/env python
#
# this file exists to make this directory look like a python module, so that treasury.db file
# can be loaded into YieldCurve class.
#
__version__ = "$Revision: 1.3 $"
# Package marker only: the treasury.db file in this directory is located via
# finpy.data.__file__ (see financial/bond.py).
__author__ = 'Ramesh Balasubramanian <ramesh@finpy.org>'
| Python |
#!/usr/bin/env python
#
# this file exists to make this directory look like a python module, so that treasury.db file
# can be loaded into YieldCurve class.
#
__version__ = "$Revision: 1.3 $"
# Package marker only: the treasury.db file in this directory is located via
# finpy.data.__file__ (see financial/bond.py).
__author__ = 'Ramesh Balasubramanian <ramesh@finpy.org>'
| Python |
#!/usr/bin/env python
#
#
#
__version__ = "$Revision: 1.20 $"
__author__ = 'Ramesh Balasubramanian <ramesh@finpy.org>'
__doc__ = \
"""
_____ _ _ _ _____ _ _ ____________________________________________
| | |\\ | | | \\ / Finpy: Python Module for Financial Analysis
| | | \\ | | | \\ / Copyright (c) 2006
|-- | | \\ | |---| \\/ World Wide Web: http://www.finpy.org
| | | \\ | | / Bugs: http://finpy.org/bugs
| | | \\| | / Version: Jan 2007
___________________________________________________________________________
"""
from financial.bond import beytbill, acrubond, yielddata, yieldcurve
from financial.tvm import effrr, npv, irr
from financial.currency import thirtytwo2dec
# from pyvest import yieldCurve, compoundInterest
def test():
from finpy.financial.tests.test_bond import BondTestCase
from finpy.financial.tests.test_tvm import TvmTestCase
from finpy.financial.tests.test_currency import CurrencyTestCase
import unittest
suites = []
suites.append(unittest.makeSuite(BondTestCase,'test'))
suites.append(unittest.makeSuite(TvmTestCase,'test'))
suites.append(unittest.makeSuite(CurrencyTestCase,'test'))
alltests = unittest.TestSuite(tuple(suites))
runner = unittest.TextTestRunner()
runner.run(alltests)
if __name__ == '__main__':
print __doc__ | Python |
#!/usr/bin/env python
# file pyvest.py
import datetime
import os.path
import data
from pysqlite2 import dbapi2 as sqlite
import pylab
__version__ = "$Revision: 1.10 $"
__author__ = "Ramesh Balasubramanian <ramesh@finpy.org>"
def compoundInterest(presentValue,
periodicRate,
periodsCount):
"""
-----------------------------------------------------------------------
Function: compoundInterest(presentValue, periodicRate, periodsCount)
Compounding is based on the priciple of earning interest-on-interest.
Components of compound interest calculation:
- Principal (a.k.a Present Value) - original amount being invested.
- Rate of interest - interest earned per period
- Time Periods: number of periods over which interest is earned.
Calculation:
- Future Value = PresentValue * ((1 + Interest) ** TimePeriods)
-----------------------------------------------------------------------
Parameters:
- presentValue, > 0
- periodicRate, > 0, expressed as a percentage value
- periodsCount, >=0
-----------------------------------------------------------------------
Functionality:
- Computes future value for a number of time periods.
- Plots a graph with time periods on x-axis and value on y-axis
-----------------------------------------------------------------------
Sample Usage:
-----------------------------------------------------------------------
import pyvest
ci = pyvest.compoundInterest(presentValue = 30000,
periodicRate = 6.5,
periodsCount = 20)
print ci
-----------------------------------------------------------------------
"""
assert periodsCount > 0, "Number of periods must be > 0"
assert presentValue > 0, "PV must be > 0"
assert periodicRate >= 0, "Interest Rate must be >= 0"
futureValues = \
[round(presentValue * (1.0 + (periodicRate / 100.0)) ** period) \
for period in range(periodsCount + 1)]
pylab.xlabel('Time Periods')
pylab.ylabel('Future Values')
pylab.title('Compound Interest ' + '%.2f' % (periodicRate) + '%')
assert futureValues != None
assert len(futureValues[1:]) == len(range(periodsCount))
pylab.bar(range(periodsCount), futureValues[1:], width = 1)
limits = \
[0, periodsCount+0.5, futureValues[0], (1.5 * futureValues[-1]) - \
(0.5 * futureValues[-2])]
pylab.axis(limits)
pylab.yticks(futureValues, ['%d' % fv for fv in futureValues])
pylab.show()
return futureValues
def bondMeasures(price, # clean price
maturityDate, # datetime.date datatype
settlementDate, # datetime.date datatype
coupon = 0, # percentage
principal = 1000, # principal paid, if held to maturity
couponsPerYear = 2): # number of coupons per year
"""
-----------------------------------------------------------------------
Function bondMeasures(price, maturityDate, settlementDate,
coupon = 0, principal = 1000, couponsPerYear = 2)
Returns the following as a tuple:
1. accreued interest
2. current yield
3. ytm
4. amount and timing of cash flows
5. reinvestment income to be earned to realize ytm
-----------------------------------------------------------------------
"""
# from maturityDate compute the last coupon date
# compute accrued interest from last coupon date and settlement date
if __name__ == '__main__':
pass
| Python |
#!/usr/bin/env python
#
#
#
__version__ = "$Revision: 1.20 $"
__author__ = 'Ramesh Balasubramanian <ramesh@finpy.org>'
__doc__ = \
"""
_____ _ _ _ _____ _ _ ____________________________________________
| | |\\ | | | \\ / Finpy: Python Module for Financial Analysis
| | | \\ | | | \\ / Copyright (c) 2006
|-- | | \\ | |---| \\/ World Wide Web: http://www.finpy.org
| | | \\ | | / Bugs: http://finpy.org/bugs
| | | \\| | / Version: Jan 2007
___________________________________________________________________________
"""
from financial.bond import beytbill, acrubond, yielddata, yieldcurve
from financial.tvm import effrr, npv, irr
from financial.currency import thirtytwo2dec
# from pyvest import yieldCurve, compoundInterest
def test():
from finpy.financial.tests.test_bond import BondTestCase
from finpy.financial.tests.test_tvm import TvmTestCase
from finpy.financial.tests.test_currency import CurrencyTestCase
import unittest
suites = []
suites.append(unittest.makeSuite(BondTestCase,'test'))
suites.append(unittest.makeSuite(TvmTestCase,'test'))
suites.append(unittest.makeSuite(CurrencyTestCase,'test'))
alltests = unittest.TestSuite(tuple(suites))
runner = unittest.TextTestRunner()
runner.run(alltests)
if __name__ == '__main__':
print __doc__ | Python |
#!/usr/bin/env python
# tvm.py
__version__ = "$Revision: 1.6 $"
__author__ = 'Ramesh Balasubramanian <ramesh@finpy.org>'
import math
import sys
def effrr(rate, numPeriods = 0):
    """
    -------------------------------------------------------------------------------
    Effective rate of return.

    rate       -- annual percentage rate, as a decimal fraction.
    numPeriods -- compounding periods per year (integer, >= 0).  With the
                  default of 0 the rate is compounded continuously.

    Returns the annual effective rate, rounded to 4 decimal places.

    Examples
        In [1]: import finpy
        In [2]: finpy.effrr(0.09, 12)
        Out[2]: 0.0938
    -------------------------------------------------------------------------------
    """
    assert numPeriods >= 0, "numPeriods must be >= 0"
    if numPeriods == 0:
        # Continuous compounding: e**rate - 1.
        return round(math.e ** rate - 1, 4)
    # Discrete compounding numPeriods times per year.
    periodic = 1.0 * rate / numPeriods
    return round((1 + periodic) ** numPeriods - 1, 4)
def npv(cashFlow, rate):
    """
    -------------------------------------------------------------------------------
    Net present value of a cash-flow sequence.

    cashFlow -- amounts indexed by period, starting at period 0 (typically a
                negative initial outlay followed by inflows); must be non-empty.
    rate     -- discount rate as a decimal fraction (> 0), or a list/tuple of
                rates, in which case a list of NPVs is returned.

    Returns the NPV rounded to 2 decimal places.

    Examples
        In [1]: import finpy
        In [2]: finpy.npv([-2000, 1000, 800, 600, 200], 0.1)
        Out[2]: 157.64
    -------------------------------------------------------------------------------
    """
    if type(rate) in (list, tuple):
        # Vector of rates: evaluate the NPV at each one.
        return [npv(cashFlow, r) for r in rate]
    assert len(cashFlow) > 0, "There must be some cashFlow to compute npv"
    assert rate > 0, "discountRate must be >0 to compute npv"
    discounted = [amount / (1 + rate) ** period
                  for period, amount in enumerate(cashFlow)]
    return round(sum(discounted), 2)
def irr(cashFlow):
    """
    -------------------------------------------------------------------------------
    Usage
        finpy.irr(cashFlowsList)
    Notes
        IRR is the discount rate that makes NPV = 0.  Found by solving the
        polynomial sum(cashFlow[t] * x**t) = 0 with x = 1/(1+r); a real root
        x in (0, 1] maps to a rate r >= 0.  Returns NaN when no such root
        exists.  The caller's list is NOT modified.
    Examples
        In [1]: import finpy
        In [2]: finpy.irr([-2000, 1000, 800, 600, 200])
        Out[2]: 0.1449
    -------------------------------------------------------------------------------
    """
    if sys.version.find('.NET') != -1:
        # numpy is unavailable under IronPython.  (Parenthesized raise: the
        # old `raise E, msg` form was Python-2-only syntax.)
        raise NotImplementedError("No irr in finpy under IronPython")
    import numpy
    assert len(cashFlow) > 0, "There must be some cashFlow to compute irr"
    # poly1d wants highest-degree coefficients first; reverse a *copy* so the
    # caller's list is left untouched (the old in-place list.reverse() was a
    # surprising side effect on the argument).
    p = numpy.poly1d(list(cashFlow)[::-1])
    solution = numpy.roots(p)
    I = numpy.nan   # numpy.NaN alias was removed in numpy 2.0
    for _solution in solution:
        # A real root in (0, 1] corresponds to a non-negative rate r.
        if _solution.imag == 0 and 0 < _solution.real <= 1:
            I = round((1 / _solution.real) - 1, 4)
            break
    return I
| Python |
#!/usr/bin/env python
import math
import datetime
import datetools
import os.path
import finpy.data
from pysqlite2 import dbapi2 as sqlite
__author__ = 'Ramesh Balasubramanian <ramesh@finpy.org>'
__version__ = "$Revision: 1.14 $"
__credits__ = 'Interface defintion and comments based on MATLAB functions in financial toolbox'
"""
TODO:
1. yielddata and yieldcurve -- if data is not available for requested date, show data for closest
date for which data is available, not most recent
2. support multiple dates and things like Q1 -- and ability to average ...
3. diagram should be refreshed -- for each call ...
"""
def beytbill(settlement, maturity, discountRate):
    """
    -------------------------------------------------------------------------------
    Bond equivalent yield for a Treasury bill.

    settlement   -- settlement date, as finpy.datenum or a date string;
                    must not be after maturity.
    maturity     -- maturity date, as finpy.datenum or a date string.
    discountRate -- T-bill discount rate as a decimal fraction in [0, 1].

    Returns the bond-equivalent yield rounded to 4 decimal places.

    Examples
        In [1]: import finpy
        In [2]: rate = finpy.beytbill('11-Feb-2000', '8/7/00', 0.0577)
        In [3]: print rate
        0.0602
    -------------------------------------------------------------------------------
    """
    assert discountRate >= 0 and discountRate <= 1, "discountRate must be >= 0 and <= 1"
    ndays = datetools.datenum(maturity) - datetools.datenum(settlement)
    assert ndays >= 0, "settlementDate must be earlier than or equal to maturityDate"
    # Price per 100 face value after deducting bank-discount interest.
    price = 100 - (1.0 * ndays / 360) * 100 * discountRate
    # Annualize the holding-period growth (100/price) over 365 days, then
    # convert to a semiannually-compounded (bond equivalent) yield.
    growth = (100 / price) ** (365 * 1.0 / ndays)
    return round((math.sqrt(growth) - 1) * 2, 4)
def yielddata(recordDate = None):
    """
    Usage
        recent = yielddata()
        data = yielddata(date)
    Notes
        returns treasury yield data for a given date. date defaults to today or the most
        recent date for which yield information is available.
        Returns (dateString, dict) where the dict maps maturity (in years)
        to the yield rate (in percentage).
    """
    # BUG FIX: the default used to be `datetools.today()`, which is evaluated
    # once at import time, so a long-running process would keep querying a
    # stale date.  Resolve "today" at call time instead.
    if recordDate is None:
        recordDate = datetools.today()
    sql = """select * from yield_curve where record_date = %d""" \
        % (recordDate)
    row = __fetchdata(sql)
    if row is None:
        # No row for the requested date: fall back to the most recent one.
        sql = """select *
        from yield_curve
        where record_date = (select max(record_date) from yield_curve)"""
        row = __fetchdata(sql)
    # Maturities (in years) corresponding to yield_curve columns 1..11.
    xaxis = [1.0/12, 3.0/12, 6.0/12, 1, 2, 3, 5, 7, 10, 20, 30]
    yaxis = list(row[1:])
    # Drop maturities with no quote ('' placeholder).
    periodRate = filter(lambda pair: pair[1] != '', zip(xaxis,yaxis))
    x = [pair[0] for pair in periodRate]
    y = [pair[1] for pair in periodRate]
    # +366 presumably converts the stored ordinal to the MATLAB-style serial
    # date convention used by datetools.datestr -- TODO confirm.
    return datetools.datestr(row[0]+366), dict(iter(zip(x,y)))
def yieldcurve(recordDate = None, file = None):
    """
    Usage
        yieldcurve()                  -- plot the most recent yield curve
        yieldcurve(date)              -- plot the curve for a given date
        yieldcurve(date, 'out.png')   -- additionally save the plot as PNG
    Notes
        Plots maturity (years) against yield (percent) with pylab, using the
        data returned by yielddata().
    """
    import pylab
    # BUG FIX: the default used to be `datetools.today()`, evaluated once at
    # import time; resolve "today" at call time instead (see yielddata).
    if recordDate is None:
        recordDate = datetools.today()
    date, data = yielddata(recordDate)
    x = data.keys()
    x.sort()
    y = [data[i] for i in x]
    # Leave headroom: 2 extra years on the x-axis, 50% above the max yield.
    limits = [0, max(x)+2, 0, max(y)*1.5]
    pylab.xlabel('Maturity (years)')
    pylab.ylabel('Yield (percentage)')
    pylab.title('Yield Curve ' + date)
    assert y != None
    assert len(x) == len(y)
    pylab.plot(x,y)
    pylab.axis(limits)
    pylab.show()
    if file != None:
        # NOTE(review): saving after show() may write an empty figure once the
        # interactive window has been closed -- confirm the ordering.
        pylab.savefig(file, format='png', dpi=60)
def acrubond(issueDate, settlementDate, firstCouponDate, faceValue, couponRate, period = 2, basis = 0):
    """
    -------------------------------------------------------------------------------
    Description
        Accrued interest of security with periodic interest payments
    Parameters
        issueDate Enter as serial date number.
        settlementDate Enter as serial date number.
        firstCouponDate Enter as serial date number.
        faceValue Redemption (par, face) value.
        couponRate Enter as decimal fraction.
        (period) Coupons per year of the bond.
            Allowed values are 1, 2 (default), 3, 4, 6, and 12.
        (basis) Day-count basis of the instrument. A vector of integers.
            0 = actual/actual (default)
            1 = 30/360 (SIA)
            2 = actual/360
            3 = actual/365
            4 = 30/360 (PSA)
            5 = 30/360 (ISDA)
            6 = 30/360 (European)
            7 = actual/365 (Japanese)
    Not Implemented
        basis
    -------------------------------------------------------------------------------
    """
    assert period in [1, 2, 3, 4, 6, 12]
    if settlementDate > firstCouponDate:
        # bug -- should really find the nearest coupon payments before and after the
        # settlement date and go off of that.
        # best to implement the cashflows and use that method internally for implementing this.
        # NOTE(review): `addmonths` is not defined or imported in this module,
        # so this branch raises NameError at runtime; presumably
        # datetools.addmonths was intended -- confirm and fix.
        nextCouponDate = addmonths(firstCouponDate, 12/period)
        lastCouponDate = firstCouponDate
    else:
        nextCouponDate = firstCouponDate
        lastCouponDate = issueDate
    # Linear accrual: fraction of the current coupon period elapsed, times the
    # periodic coupon payment (faceValue * couponRate / period).
    return \
    faceValue*couponRate*\
    (1.0*(settlementDate-lastCouponDate)/(nextCouponDate-lastCouponDate))/period
def __fetchdata(sql):
    """Run `sql` against the bundled treasury.db and return the first row.

    Returns None when the query produces no rows.
    """
    dataDirPath = os.path.split(finpy.data.__file__)[0]
    dataFilePath = os.path.join(dataDirPath, 'treasury.db')
    connection = sqlite.connect(dataFilePath)
    try:
        cursor = connection.cursor()
        try:
            cursor.execute(sql)
            row = cursor.fetchone()
        finally:
            cursor.close()
    finally:
        # Always release the connection -- the original leaked it whenever
        # execute() raised.
        connection.close()
    return row
if __name__ == '__main__':
    pass
| Python |
#!/usr/bin/env python
# finpy.financial package metadata.
__version__ = "$Revision: 1.5 $"
__author__ = 'Ramesh Balasubramanian <ramesh@finpy.org>'
| Python |
#!/usr/bin/env python
# tvm.py
__version__ = "$Revision: 1.6 $"
__author__ = 'Ramesh Balasubramanian <ramesh@finpy.org>'
import math
import sys
def effrr(rate, numPeriods = 0):
    """
    -------------------------------------------------------------------------------
    Effective rate of return.

    rate       -- annual percentage rate, as a decimal fraction.
    numPeriods -- compounding periods per year (integer, >= 0).  With the
                  default of 0 the rate is compounded continuously.

    Returns the annual effective rate, rounded to 4 decimal places.

    Examples
        In [1]: import finpy
        In [2]: finpy.effrr(0.09, 12)
        Out[2]: 0.0938
    -------------------------------------------------------------------------------
    """
    assert numPeriods >= 0, "numPeriods must be >= 0"
    if numPeriods == 0:
        # Continuous compounding: e**rate - 1.
        return round(math.e ** rate - 1, 4)
    # Discrete compounding numPeriods times per year.
    periodic = 1.0 * rate / numPeriods
    return round((1 + periodic) ** numPeriods - 1, 4)
def npv(cashFlow, rate):
    """
    -------------------------------------------------------------------------------
    Net present value of a cash-flow sequence.

    cashFlow -- amounts indexed by period, starting at period 0 (typically a
                negative initial outlay followed by inflows); must be non-empty.
    rate     -- discount rate as a decimal fraction (> 0), or a list/tuple of
                rates, in which case a list of NPVs is returned.

    Returns the NPV rounded to 2 decimal places.

    Examples
        In [1]: import finpy
        In [2]: finpy.npv([-2000, 1000, 800, 600, 200], 0.1)
        Out[2]: 157.64
    -------------------------------------------------------------------------------
    """
    if type(rate) in (list, tuple):
        # Vector of rates: evaluate the NPV at each one.
        return [npv(cashFlow, r) for r in rate]
    assert len(cashFlow) > 0, "There must be some cashFlow to compute npv"
    assert rate > 0, "discountRate must be >0 to compute npv"
    discounted = [amount / (1 + rate) ** period
                  for period, amount in enumerate(cashFlow)]
    return round(sum(discounted), 2)
def irr(cashFlow):
    """
    -------------------------------------------------------------------------------
    Usage
        finpy.irr(cashFlowsList)
    Notes
        IRR is the discount rate that makes NPV = 0.  Found by solving the
        polynomial sum(cashFlow[t] * x**t) = 0 with x = 1/(1+r); a real root
        x in (0, 1] maps to a rate r >= 0.  Returns NaN when no such root
        exists.  The caller's list is NOT modified.
    Examples
        In [1]: import finpy
        In [2]: finpy.irr([-2000, 1000, 800, 600, 200])
        Out[2]: 0.1449
    -------------------------------------------------------------------------------
    """
    if sys.version.find('.NET') != -1:
        # numpy is unavailable under IronPython.  (Parenthesized raise: the
        # old `raise E, msg` form was Python-2-only syntax.)
        raise NotImplementedError("No irr in finpy under IronPython")
    import numpy
    assert len(cashFlow) > 0, "There must be some cashFlow to compute irr"
    # poly1d wants highest-degree coefficients first; reverse a *copy* so the
    # caller's list is left untouched (the old in-place list.reverse() was a
    # surprising side effect on the argument).
    p = numpy.poly1d(list(cashFlow)[::-1])
    solution = numpy.roots(p)
    I = numpy.nan   # numpy.NaN alias was removed in numpy 2.0
    for _solution in solution:
        # A real root in (0, 1] corresponds to a non-negative rate r.
        if _solution.imag == 0 and 0 < _solution.real <= 1:
            I = round((1 / _solution.real) - 1, 4)
            break
    return I
| Python |
#!/usr/bin/env python
# currency.py
import math
__version__ = "$Revision: 1.2 $"
__author__ = 'Ramesh Balasubramanian <ramesh@finpy.org>'
def thirtytwo2dec(inNumber, inFraction):
    """
    -------------------------------------------------------------------------------
    Usage
        finpy.thirtytwo2dec(inNumber, inFraction)
    Notes
        Converts a price quoted in 32nds (e.g. a US Treasury quote of 102-31,
        meaning 102 and 31/32) to its decimal equivalent, rounded to 4
        decimal places.  inNumber must be a whole number and inFraction an
        integer with 0 <= inFraction < 32.
    Examples
        In [1]: import finpy
        In [2]: finpy.thirtytwo2dec(102, 31)
        Out[2]: 102.9688
    -------------------------------------------------------------------------------
    """
    assert inFraction in range(0,32), "0 <= inFraction < 32 must hold"
    assert inNumber - math.floor(inNumber) == 0, "inNumber must be an integer"
    # 1/32 = 0.03125; scale the fractional part and round to 4 places.
    return round(inNumber + inFraction / 32.0, 4)
#!/usr/bin/env python
# test_bond.py
__version__ = "$Revision: 1.3 $"
__author__ = 'Ramesh Balasubramanian <ramesh@finpy.org>'
import finpy
import unittest
class BondTestCase(unittest.TestCase):
    """Regression checks for finpy.beytbill."""
    def test_beytbill(self):
        # Known value from the beytbill docstring example.
        self.assertEqual(finpy.beytbill('11-Feb-2000', '8/7/00', 0.0577), 0.0602)
        # The date parser must accept both '11-jul-2000' and '7/11/2000' forms.
        self.assertEqual(
            finpy.beytbill('11-feb-2000', '11-jul-2000', 0.0514),
            finpy.beytbill('11-feb-2000', '7/11/2000', 0.0514))
if __name__ == '__main__':
    unittest.main()
#!/usr/bin/env python
# test_currency.py
__version__ = "$Revision: 1.2 $"
__author__ = 'Ramesh Balasubramanian <ramesh@finpy.org>'
import finpy
import unittest
import sys
class CurrencyTestCase(unittest.TestCase):
    """Regression checks for finpy.thirtytwo2dec."""
    def test_thirtytwo2dec(self):
        quotes = zip([101, 102], [25, 31])
        outNumbers = [finpy.thirtytwo2dec(n, f) for n, f in quotes]
        if sys.version.find('.NET') != -1:
            # IronPython rounds 101.78125 half-to-even, giving 101.7812.
            self.assertEqual(outNumbers, [101.7812, 102.9688])
        else:
            self.assertEqual(outNumbers, [101.7813, 102.9688])
if __name__ == '__main__':
    unittest.main()
#!/usr/bin/env python
#
# this file exists to make this directory look like a python module
#
__version__ = "$Revision: 1.4 $"
__author__ = 'Ramesh Balasubramanian <ramesh@finpy.org>'
# Test modules exported by `from finpy.financial.tests import *`.
__all__ = ['test_bond', 'test_tvm', 'test_currency']
#!/usr/bin/env python
# test_tvm.py
__version__ = "$Revision: 1.4 $"
__author__ = 'Ramesh Balasubramanian <ramesh@finpy.org>'
import finpy
import unittest
class TvmTestCase(unittest.TestCase):
    """Regression checks for finpy's time-value-of-money functions."""
    def test_effrr(self):
        self.assertEqual(finpy.effrr(0.09, 12), 0.0938)
        self.assertEqual(finpy.effrr(0.09), 0.0942)  # continuous compounding
    def test_irr(self):
        self.assertEqual(finpy.irr([-100000, 10000, 20000, 30000, 40000, 50000]), 0.1201)
    def test_npv(self):
        self.assertEqual(finpy.npv([-2000, 1000, 800, 600, 200], 0.1), 157.64)
        self.assertEqual(finpy.npv([-2000, 200, 600, 800, 1200], 0.1), 98.35)
        # A tuple of rates yields a list of NPVs.
        self.assertEqual(
            finpy.npv((-2000, 1000, 800, 600, 200), (0.08, 0.1, 0.12, 0.14)),
            [235.1, 157.64, 84.78, 16.17])
if __name__ == '__main__':
    unittest.main()
#!/usr/bin/env python
#
# this file exists to make this directory look like a python module
#
__version__ = "$Revision: 1.4 $"
__author__ = 'Ramesh Balasubramanian <ramesh@finpy.org>'
# Test modules exported by `from finpy.financial.tests import *`.
__all__ = ['test_bond', 'test_tvm', 'test_currency']
#!/usr/bin/env python
import math
import datetime
import datetools
import os.path
import finpy.data
from pysqlite2 import dbapi2 as sqlite
__author__ = 'Ramesh Balasubramanian <ramesh@finpy.org>'
__version__ = "$Revision: 1.14 $"
__credits__ = 'Interface defintion and comments based on MATLAB functions in financial toolbox'
"""
TODO:
1. yielddata and yieldcurve -- if data is not available for requested date, show data for closest
date for which data is available, not most recent
2. support multiple dates and things like Q1 -- and ability to average ...
3. diagram should be refreshed -- for each call ...
"""
def beytbill(settlement, maturity, discountRate):
    """
    -------------------------------------------------------------------------------
    Bond equivalent yield for a Treasury bill.

    settlement   -- settlement date, as finpy.datenum or a date string;
                    must not be after maturity.
    maturity     -- maturity date, as finpy.datenum or a date string.
    discountRate -- T-bill discount rate as a decimal fraction in [0, 1].

    Returns the bond-equivalent yield rounded to 4 decimal places.

    Examples
        In [1]: import finpy
        In [2]: rate = finpy.beytbill('11-Feb-2000', '8/7/00', 0.0577)
        In [3]: print rate
        0.0602
    -------------------------------------------------------------------------------
    """
    assert discountRate >= 0 and discountRate <= 1, "discountRate must be >= 0 and <= 1"
    ndays = datetools.datenum(maturity) - datetools.datenum(settlement)
    assert ndays >= 0, "settlementDate must be earlier than or equal to maturityDate"
    # Price per 100 face value after deducting bank-discount interest.
    price = 100 - (1.0 * ndays / 360) * 100 * discountRate
    # Annualize the holding-period growth (100/price) over 365 days, then
    # convert to a semiannually-compounded (bond equivalent) yield.
    growth = (100 / price) ** (365 * 1.0 / ndays)
    return round((math.sqrt(growth) - 1) * 2, 4)
def yielddata(recordDate = None):
    """
    Usage
        recent = yielddata()
        data = yielddata(date)
    Notes
        returns treasury yield data for a given date. date defaults to today or the most
        recent date for which yield information is available.
        Returns (dateString, dict) where the dict maps maturity (in years)
        to the yield rate (in percentage).
    """
    # BUG FIX: the default used to be `datetools.today()`, which is evaluated
    # once at import time, so a long-running process would keep querying a
    # stale date.  Resolve "today" at call time instead.
    if recordDate is None:
        recordDate = datetools.today()
    sql = """select * from yield_curve where record_date = %d""" \
        % (recordDate)
    row = __fetchdata(sql)
    if row is None:
        # No row for the requested date: fall back to the most recent one.
        sql = """select *
        from yield_curve
        where record_date = (select max(record_date) from yield_curve)"""
        row = __fetchdata(sql)
    # Maturities (in years) corresponding to yield_curve columns 1..11.
    xaxis = [1.0/12, 3.0/12, 6.0/12, 1, 2, 3, 5, 7, 10, 20, 30]
    yaxis = list(row[1:])
    # Drop maturities with no quote ('' placeholder).
    periodRate = filter(lambda pair: pair[1] != '', zip(xaxis,yaxis))
    x = [pair[0] for pair in periodRate]
    y = [pair[1] for pair in periodRate]
    # +366 presumably converts the stored ordinal to the MATLAB-style serial
    # date convention used by datetools.datestr -- TODO confirm.
    return datetools.datestr(row[0]+366), dict(iter(zip(x,y)))
def yieldcurve(recordDate = None, file = None):
    """
    Usage
        yieldcurve()                  -- plot the most recent yield curve
        yieldcurve(date)              -- plot the curve for a given date
        yieldcurve(date, 'out.png')   -- additionally save the plot as PNG
    Notes
        Plots maturity (years) against yield (percent) with pylab, using the
        data returned by yielddata().
    """
    import pylab
    # BUG FIX: the default used to be `datetools.today()`, evaluated once at
    # import time; resolve "today" at call time instead (see yielddata).
    if recordDate is None:
        recordDate = datetools.today()
    date, data = yielddata(recordDate)
    x = data.keys()
    x.sort()
    y = [data[i] for i in x]
    # Leave headroom: 2 extra years on the x-axis, 50% above the max yield.
    limits = [0, max(x)+2, 0, max(y)*1.5]
    pylab.xlabel('Maturity (years)')
    pylab.ylabel('Yield (percentage)')
    pylab.title('Yield Curve ' + date)
    assert y != None
    assert len(x) == len(y)
    pylab.plot(x,y)
    pylab.axis(limits)
    pylab.show()
    if file != None:
        # NOTE(review): saving after show() may write an empty figure once the
        # interactive window has been closed -- confirm the ordering.
        pylab.savefig(file, format='png', dpi=60)
def acrubond(issueDate, settlementDate, firstCouponDate, faceValue, couponRate, period = 2, basis = 0):
    """
    -------------------------------------------------------------------------------
    Description
        Accrued interest of a security with periodic interest payments.
    Parameters
        issueDate        Serial date number of issue.
        settlementDate   Serial date number of settlement.
        firstCouponDate  Serial date number of the first coupon.
        faceValue        Redemption (par, face) value.
        couponRate       Annual coupon rate as a decimal fraction.
        (period)         Coupons per year: 1, 2 (default), 3, 4, 6 or 12.
        (basis)          Day-count basis (accepted but not implemented).
    Not Implemented
        basis
    -------------------------------------------------------------------------------
    """
    assert period in [1, 2, 3, 4, 6, 12]
    if settlementDate > firstCouponDate:
        # Known limitation (inherited): should locate the coupon dates
        # bracketing the settlement date; instead only the period right
        # after the first coupon is considered.
        lastCouponDate = firstCouponDate
        nextCouponDate = addmonths(firstCouponDate, 12/period)
    else:
        lastCouponDate = issueDate
        nextCouponDate = firstCouponDate
    elapsed = settlementDate - lastCouponDate
    fullPeriod = nextCouponDate - lastCouponDate
    accruedFraction = 1.0 * elapsed / fullPeriod
    return faceValue * couponRate * accruedFraction / period
def __fetchdata(sql):
    "Run *sql* against the bundled treasury.db and return the first result row."
    dataDir = os.path.split(finpy.data.__file__)[0]
    connection = sqlite.connect(os.path.join(dataDir, 'treasury.db'))
    cursor = connection.cursor()
    cursor.execute(sql)
    result = cursor.fetchone()
    cursor.close()
    connection.close()
    return result
if __name__ == '__main__':
    # No self-test or demo defined for this module yet.
    pass
| Python |
#!/usr/bin/env python
# currency.py
import math
__version__ = "$Revision: 1.2 $"
__author__ = 'Ramesh Balasubramanian <ramesh@finpy.org>'
def thirtytwo2dec(inNumber, inFraction):
    """
    -------------------------------------------------------------------------------
    Usage
        price = thirtytwo2dec(handle, thirtyseconds)
    Notes
        Converts a price quoted as a whole-number handle plus 32nds into a
        decimal price, rounded to 4 places.
    Examples
        thirtytwo2dec(101, 16) -> 101.5
    -------------------------------------------------------------------------------
    """
    assert inFraction in range(0,32), "0 <= inFraction < 32 must hold"
    assert inNumber - math.floor(inNumber) == 0, "inNumber must be an integer"
    thirtyseconds = inFraction * 1.0 / 32
    return round(inNumber + thirtyseconds, 4)
#!/usr/bin/env python
__version__ = "$Revision: 1.5 $"
__author__ = 'Ramesh Balasubramanian <ramesh@finpy.org>'
| Python |
#!/usr/bin/env python
"""
FCKeditor - The text editor for Internet - http://www.fckeditor.net
Copyright (C) 2003-2008 Frederico Caldeira Knabben
== BEGIN LICENSE ==
Licensed under the terms of any of the following licenses at your
choice:
- GNU General Public License Version 2 or later (the "GPL")
http://www.gnu.org/licenses/gpl.html
- GNU Lesser General Public License Version 2.1 or later (the "LGPL")
http://www.gnu.org/licenses/lgpl.html
- Mozilla Public License Version 1.1 or later (the "MPL")
http://www.mozilla.org/MPL/MPL-1.1.html
== END LICENSE ==
This is the "File Uploader" for Python
"""
import os
from fckutil import *
from fckcommands import * # default command's implementation
from fckconnector import FCKeditorConnectorBase # import base connector
import config as Config
class FCKeditorQuickUpload( FCKeditorConnectorBase,
                            UploadFileCommandMixin,
                            BaseHttpMixin, BaseHtmlMixin):
    "Connector handling only the single-file 'QuickUpload' command."
    def doResponse(self):
        "Main function. Process the request, set headers and return a string as response."
        # Check if this connector is disabled
        if not(Config.Enabled):
            return self.sendUploadResults(1, "This file uploader is disabled. Please check the \"editor/filemanager/connectors/py/config.py\"")
        command = 'QuickUpload'
        # The file type (from the QueryString, by default 'File').
        resourceType = self.request.get('Type','File')
        currentFolder = getCurrentFolder(self.request.get("CurrentFolder",""))
        # Check for invalid paths
        if currentFolder is None:
            return self.sendUploadResults(102, '', '', "")
        # Check if it is an allowed command
        if ( not command in Config.ConfigAllowedCommands ):
            return self.sendUploadResults( 1, '', '', 'The %s command isn\'t allowed' % command )
        if ( not resourceType in Config.ConfigAllowedTypes ):
            return self.sendUploadResults( 1, '', '', 'Invalid type specified' )
        # Setup paths
        self.userFilesFolder = Config.QuickUploadAbsolutePath[resourceType]
        self.webUserFilesFolder = Config.QuickUploadPath[resourceType]
        if not self.userFilesFolder: # no absolute path given (dangerous...)
            self.userFilesFolder = mapServerPath(self.environ,
                                                 self.webUserFilesFolder)
        # Ensure that the directory exists.
        if not os.path.exists(self.userFilesFolder):
            try:
                # FIX: the method name was doubled
                # ("createServerFoldercreateServerFolder"), so this always
                # raised AttributeError and the folder was never created.
                self.createServerFolder( self.userFilesFolder )
            except:
                return self.sendError(1, "This connector couldn\'t access to local user\'s files directories. Please check the UserFilesAbsolutePath in \"editor/filemanager/connectors/py/config.py\" and try again. ")
        # File upload doesn't have to return XML, so intercept here
        return self.uploadFile(resourceType, currentFolder)
# Running from command line (plain old CGI)
if __name__ == '__main__':
    try:
        # Create a Connector Instance
        conn = FCKeditorQuickUpload()
        data = conn.doResponse()
        # CGI response format: header lines, a blank separator, then the body.
        for header in conn.headers:
            if not header is None:
                print '%s: %s' % header
        print
        print data
    except:
        # On any failure, fall back to a plain-text traceback so the browser
        # shows something useful instead of a bare server error.
        print "Content-Type: text/plain"
        print
        import cgi
        cgi.print_exception()
| Python |
#!/usr/bin/env python
"""
FCKeditor - The text editor for Internet - http://www.fckeditor.net
Copyright (C) 2003-2008 Frederico Caldeira Knabben
== BEGIN LICENSE ==
Licensed under the terms of any of the following licenses at your
choice:
- GNU General Public License Version 2 or later (the "GPL")
http://www.gnu.org/licenses/gpl.html
- GNU Lesser General Public License Version 2.1 or later (the "LGPL")
http://www.gnu.org/licenses/lgpl.html
- Mozilla Public License Version 1.1 or later (the "MPL")
http://www.mozilla.org/MPL/MPL-1.1.html
== END LICENSE ==
Connector for Python (CGI and WSGI).
"""
from time import gmtime, strftime
import string
def escape(text, replace=string.replace):
    """
    Converts the special characters '&', '<', '>' and '"' to their HTML
    entities (&amp;, &lt;, &gt;, &quot;) as required by RFC 1866, so the
    result is safe inside XML/HTML attribute values.
    """
    # FIX: in this copy the replacement strings had been entity-decoded
    # (e.g. replacing '&' with '&'), making every call a no-op -- nothing
    # was escaped, breaking the XML output and allowing markup injection.
    text = replace(text, '&', '&amp;') # must be done 1st
    text = replace(text, '<', '&lt;')
    text = replace(text, '>', '&gt;')
    text = replace(text, '"', '&quot;')
    return text
def convertToXmlAttribute(value):
    "Escape *value* for embedding in an XML attribute; None maps to the empty string."
    return escape("" if value is None else value)
class BaseHttpMixin(object):
    "Mixin supplying the shared HTTP response headers."
    def setHttpHeaders(self, content_type='text/xml'):
        "Queue anti-caching headers plus the Content-Type for the response."
        # A date far in the past plus the usual cache-busting trio
        # (HTTP/1.1 Cache-Control twice, HTTP/1.0 Pragma) keeps browsers
        # from caching connector responses.
        header_list = [
            ('Expires', 'Mon, 26 Jul 1997 05:00:00 GMT'),
            # "always modified": stamp with the current GMT time.
            ('Last-Modified', strftime("%a, %d %b %Y %H:%M:%S GMT", gmtime())),
            ('Cache-Control', 'no-store, no-cache, must-revalidate'),
            ('Cache-Control', 'post-check=0, pre-check=0'),
            ('Pragma', 'no-cache'),
            ('Content-Type', content_type + '; charset=utf-8'),
        ]
        for name, value in header_list:
            self.setHeader(name, value)
        return
class BaseXmlMixin(object):
    "Mixin rendering connector responses as XML documents."
    def createXmlHeader(self, command, resourceType, currentFolder, url):
        "Return the XML prolog, the <Connector> opening tag and the <CurrentFolder> node."
        self.setHttpHeaders()
        parts = [
            """<?xml version="1.0" encoding="utf-8" ?>""",
            """<Connector command="%s" resourceType="%s">""" % (
                command,
                resourceType
            ),
            """<CurrentFolder path="%s" url="%s" />""" % (
                convertToXmlAttribute(currentFolder),
                convertToXmlAttribute(url),
            ),
        ]
        return "".join(parts)
    def createXmlFooter(self):
        "Return the root element's closing tag."
        return """</Connector>"""
    def sendError(self, number, text):
        "Return a complete XML document carrying a single <Error> node."
        self.setHttpHeaders()
        body = self.sendErrorNode(number, text)
        return ("""<?xml version="1.0" encoding="utf-8" ?>""" +
                """<Connector>""" + body + """</Connector>""")
    def sendErrorNode(self, number, text):
        "Render one <Error> node with the given code and escaped message."
        return """<Error number="%s" text="%s" />""" % (number, convertToXmlAttribute(text))
class BaseHtmlMixin(object):
    "Mixin reporting upload results back to the editor through a small HTML/JS page."
    def sendUploadResults( self, errorNo = 0, fileUrl = '', fileName = '', customMsg = '' ):
        """Send the outcome of an upload to the opener window.

        The leading inline script is a minified version of the
        document.domain automatic fix (#1919); its original lives at
        _dev/domain_fix_template.js.
        """
        self.setHttpHeaders("text/html")
        quote = lambda s: s.replace('"', '\\"')
        return """<script type="text/javascript">
(function(){var d=document.domain;while (true){try{var A=window.parent.document.domain;break;}catch(e) {};d=d.replace(/.*?(?:\.|$)/,'');if (d.length==0) break;try{document.domain=d;}catch (e){break;}}})();
window.parent.OnUploadCompleted(%(errorNumber)s,"%(fileUrl)s","%(fileName)s","%(customMsg)s");
</script>""" % {
            'errorNumber': errorNo,
            'fileUrl': quote(fileUrl),
            'fileName': quote(fileName),
            'customMsg': quote(customMsg),
        }
| Python |
#!/usr/bin/env python
"""
FCKeditor - The text editor for Internet - http://www.fckeditor.net
Copyright (C) 2003-2008 Frederico Caldeira Knabben
== BEGIN LICENSE ==
Licensed under the terms of any of the following licenses at your
choice:
- GNU General Public License Version 2 or later (the "GPL")
http://www.gnu.org/licenses/gpl.html
- GNU Lesser General Public License Version 2.1 or later (the "LGPL")
http://www.gnu.org/licenses/lgpl.html
- Mozilla Public License Version 1.1 or later (the "MPL")
http://www.mozilla.org/MPL/MPL-1.1.html
== END LICENSE ==
Utility functions for the File Manager Connector for Python
"""
import string, re
import os
import config as Config
# Generic manipulation functions
def removeExtension(fileName):
    "Return *fileName* with everything from the last '.' onward removed."
    return fileName[:fileName.rindex(".")]
def getExtension(fileName):
    "Return the text after the last '.' in *fileName* (the extension, without the dot)."
    return fileName[fileName.rindex(".") + 1:]
def removeFromStart(string, char):
    # Strip any leading run of *char* from *string*.  NOTE: str.lstrip
    # treats its argument as a set of characters, not a prefix; callers
    # only pass '/', for which the behaviour is the intended one.
    return string.lstrip(char)
def removeFromEnd(string, char):
    # Strip any trailing run of *char* from *string* (rstrip semantics --
    # a character set, not a suffix; callers only pass '/').
    return string.rstrip(char)
# Path functions
def combinePaths( basePath, folder ):
    "Join *basePath* and *folder* with exactly one '/' between them."
    return "/".join([removeFromEnd(basePath, '/'), removeFromStart(folder, '/')])
def getFileName(filename):
    "Return the last path component of *filename*, honouring both '/' and '\\' separators."
    for sep in ("/", "\\"):
        parts = filename.split(sep)
        if len(parts) > 1:
            filename = parts[-1]
    return filename
def sanitizeFolderName( newFolderName ):
    "Do a cleanup of the folder name to avoid possible problems"
    # Remove . \ / | : ? * " < > and control characters
    # NOTE(review): the final alternative [^\u0000-\u001f\u007f-\u009f] is a
    # *negated* class; as a unicode pattern it would match every ordinary
    # character and replace it with '_'.  Under Python 2 byte strings '\u'
    # is not an escape, so the class is a literal character jumble instead.
    # Either way this looks unintended (compare with the comment above) --
    # confirm against the upstream FCKeditor sources before relying on it.
    return re.sub( '(?u)\\.|\\\\|\\/|\\||\\:|\\?|\\*|"|<|>|[^\u0000-\u001f\u007f-\u009f]', '_', newFolderName )
def sanitizeFileName( newFileName ):
    "Do a cleanup of the file name to avoid possible problems"
    # Replace dots in the name with underscores (only one dot can be there... security issue).
    # NOTE(review): both re.sub patterns below retain PHP-style '/' regex
    # delimiters, so Python treats the slashes as literal characters and the
    # substitutions rarely (if ever) match as intended.  The trailing
    # negated class has the same problem described in sanitizeFolderName.
    # Confirm against the upstream FCKeditor sources before relying on this
    # for security.
    if ( Config.ForceSingleExtension ): # remove dots
        newFileName = re.sub ( '/\\.(?![^.]*$)/', '_', newFileName ) ;
    newFileName = newFileName.replace('\\','/')	# convert windows to unix path
    newFileName = os.path.basename (newFileName) # strip directories
    # Remove \ / | : ? *
    return re.sub ( '(?u)/\\\\|\\/|\\||\\:|\\?|\\*|"|<|>|[^\u0000-\u001f\u007f-\u009f]/', '_', newFileName )
def getCurrentFolder(currentFolder):
    """Normalise a client-supplied folder path.

    Guarantees a leading and trailing '/', collapses duplicate slashes and
    rejects traversal attempts; returns None for invalid paths.
    """
    if not currentFolder:
        currentFolder = '/'
    # Check the current folder syntax (must begin and end with a slash).
    # FIX: replaced the Python 2-only '<>' operator with '!=' (identical
    # semantics in Python 2, and forward-compatible with Python 3).
    if (currentFolder[-1] != "/"):
        currentFolder += "/"
    if (currentFolder[0] != "/"):
        currentFolder = "/" + currentFolder
    # Ensure the folder path has no double-slashes
    while '//' in currentFolder:
        currentFolder = currentFolder.replace('//','/')
    # Check for invalid folder paths (..)
    if '..' in currentFolder or '\\' in currentFolder:
        return None
    return currentFolder
def mapServerPath( environ, url):
    " Emulate the asp Server.mapPath function. Given an url path return the physical directory that it corresponds to "
    # This isn't correct but for the moment there's no other solution
    # If this script is under a virtual directory or symlink it will detect the problem and stop
    # (getRootPath raises in that case rather than returning a wrong path).
    return combinePaths( getRootPath(environ), url )
def mapServerFolder(resourceTypePath, folderPath):
    # Join a resource-type base directory with a connector folder path.
    return combinePaths ( resourceTypePath , folderPath )
def getRootPath(environ):
    """Return the server's document-root directory.

    Prefers the DOCUMENT_ROOT CGI variable; otherwise derives the root by
    stripping this script's URL path from its filesystem path.

    WARNING: this may not be thread safe, and doesn't work w/
    VirtualServer/mod_python -- set Config.UserFilesAbsolutePath instead.
    Raises Exception when the mapping cannot be established.
    """
    if 'DOCUMENT_ROOT' in environ:
        return environ['DOCUMENT_ROOT']
    else:
        realPath = os.path.realpath( './' )
        selfPath = environ['SCRIPT_FILENAME']
        selfPath = selfPath [ : selfPath.rfind( '/' ) ]
        selfPath = selfPath.replace( '/', os.path.sep)
        position = realPath.find(selfPath)
        # This can check only that this script isn't run from a virtual dir
        # But it avoids the problems that arise if it isn't checked.
        # FIX: removed a stray debugging 'raise realPath' that made this
        # branch unconditionally fail before the check below (raising a
        # string is a TypeError on modern Python 2 anyway).
        if ( position < 0 or position != len(realPath) - len(selfPath) or realPath[ : position ]==''):
            raise Exception('Sorry, can\'t map "UserFilesPath" to a physical path. You must set the "UserFilesAbsolutePath" value in "editor/filemanager/connectors/py/config.py".')
        return realPath[ : position ]
| Python |
#!/usr/bin/env python
"""
FCKeditor - The text editor for Internet - http://www.fckeditor.net
Copyright (C) 2003-2008 Frederico Caldeira Knabben
== BEGIN LICENSE ==
Licensed under the terms of any of the following licenses at your
choice:
- GNU General Public License Version 2 or later (the "GPL")
http://www.gnu.org/licenses/gpl.html
- GNU Lesser General Public License Version 2.1 or later (the "LGPL")
http://www.gnu.org/licenses/lgpl.html
- Mozilla Public License Version 1.1 or later (the "MPL")
http://www.mozilla.org/MPL/MPL-1.1.html
== END LICENSE ==
This is the "File Uploader" for Python
"""
import os
from fckutil import *
from fckcommands import * # default command's implementation
from fckconnector import FCKeditorConnectorBase # import base connector
import config as Config
class FCKeditorQuickUpload( FCKeditorConnectorBase,
                            UploadFileCommandMixin,
                            BaseHttpMixin, BaseHtmlMixin):
    "Connector handling only the single-file 'QuickUpload' command."
    def doResponse(self):
        "Main function. Process the request, set headers and return a string as response."
        # Check if this connector is disabled
        if not(Config.Enabled):
            return self.sendUploadResults(1, "This file uploader is disabled. Please check the \"editor/filemanager/connectors/py/config.py\"")
        command = 'QuickUpload'
        # The file type (from the QueryString, by default 'File').
        resourceType = self.request.get('Type','File')
        currentFolder = getCurrentFolder(self.request.get("CurrentFolder",""))
        # Check for invalid paths
        if currentFolder is None:
            return self.sendUploadResults(102, '', '', "")
        # Check if it is an allowed command
        if ( not command in Config.ConfigAllowedCommands ):
            return self.sendUploadResults( 1, '', '', 'The %s command isn\'t allowed' % command )
        if ( not resourceType in Config.ConfigAllowedTypes ):
            return self.sendUploadResults( 1, '', '', 'Invalid type specified' )
        # Setup paths
        self.userFilesFolder = Config.QuickUploadAbsolutePath[resourceType]
        self.webUserFilesFolder = Config.QuickUploadPath[resourceType]
        if not self.userFilesFolder: # no absolute path given (dangerous...)
            self.userFilesFolder = mapServerPath(self.environ,
                                                 self.webUserFilesFolder)
        # Ensure that the directory exists.
        if not os.path.exists(self.userFilesFolder):
            try:
                # FIX: the method name was doubled
                # ("createServerFoldercreateServerFolder"), so this always
                # raised AttributeError and the folder was never created.
                self.createServerFolder( self.userFilesFolder )
            except:
                return self.sendError(1, "This connector couldn\'t access to local user\'s files directories. Please check the UserFilesAbsolutePath in \"editor/filemanager/connectors/py/config.py\" and try again. ")
        # File upload doesn't have to return XML, so intercept here
        return self.uploadFile(resourceType, currentFolder)
# Running from command line (plain old CGI)
if __name__ == '__main__':
    try:
        # Create a Connector Instance
        conn = FCKeditorQuickUpload()
        data = conn.doResponse()
        # CGI response format: header lines, a blank separator, then the body.
        for header in conn.headers:
            if not header is None:
                print '%s: %s' % header
        print
        print data
    except:
        # On any failure, fall back to a plain-text traceback so the browser
        # shows something useful instead of a bare server error.
        print "Content-Type: text/plain"
        print
        import cgi
        cgi.print_exception()
| Python |
#!/usr/bin/env python
"""
FCKeditor - The text editor for Internet - http://www.fckeditor.net
Copyright (C) 2003-2008 Frederico Caldeira Knabben
== BEGIN LICENSE ==
Licensed under the terms of any of the following licenses at your
choice:
- GNU General Public License Version 2 or later (the "GPL")
http://www.gnu.org/licenses/gpl.html
- GNU Lesser General Public License Version 2.1 or later (the "LGPL")
http://www.gnu.org/licenses/lgpl.html
- Mozilla Public License Version 1.1 or later (the "MPL")
http://www.mozilla.org/MPL/MPL-1.1.html
== END LICENSE ==
Base Connector for Python (CGI and WSGI).
See config.py for configuration settings
"""
import cgi, os
from fckutil import *
from fckcommands import * # default command's implementation
from fckoutput import * # base http, xml and html output mixins
import config as Config
class FCKeditorConnectorBase( object ):
    "The base connector class. Subclass it to extend functionality (see Zope example)"
    def __init__(self, environ=None):
        "Parse the request, reset collected headers and remember the environment."
        self.request = FCKeditorRequest(environ)  # Parse request
        self.headers = []                         # Clean headers
        # Fall back to the process environment when not running under WSGI.
        self.environ = environ if environ else os.environ
    # local functions
    def setHeader(self, key, value):
        "Queue one (name, value) response header for later emission."
        self.headers.append((key, value))
        return
class FCKeditorRequest(object):
    "A wrapper around the request object"
    def __init__(self, environ):
        """Parse form/query data for either a WSGI or a plain-CGI invocation.

        For POST requests the GET query string is parsed separately, because
        cgi.FieldStorage only parses the POST body in that case.
        """
        # FIX: always give get_request a value.  Previously it was only
        # assigned inside the REQUEST_METHOD/QUERY_STRING branch, so
        # has_key()/get() crashed with AttributeError whenever either
        # environment variable was missing.
        self.get_request = {}
        if environ: # WSGI
            self.request = cgi.FieldStorage(fp=environ['wsgi.input'],
                                            environ=environ,
                                            keep_blank_values=1)
            self.environ = environ
        else: # plain old cgi
            self.environ = os.environ
            self.request = cgi.FieldStorage()
        if 'REQUEST_METHOD' in self.environ and 'QUERY_STRING' in self.environ:
            if self.environ['REQUEST_METHOD'].upper()=='POST':
                # we are in a POST, but GET query_string exists
                # cgi parses by default POST data, so parse GET QUERY_STRING too
                self.get_request = cgi.FieldStorage(fp=None,
                                                    environ={
                                                    'REQUEST_METHOD':'GET',
                                                    'QUERY_STRING':self.environ['QUERY_STRING'],
                                                    },
                                                    )
    def has_key(self, key):
        "True if *key* is present in either the POST data or the GET query."
        return self.request.has_key(key) or self.get_request.has_key(key)
    def get(self, key, default=None):
        """Return the value for *key*, or *default* when absent.

        File-upload fields are returned as-is so the caller can reach
        .filename/.file; plain fields are collapsed to their .value.
        """
        if key in self.request.keys():
            field = self.request[key]
        elif key in self.get_request.keys():
            field = self.get_request[key]
        else:
            return default
        if hasattr(field, "filename") and field.filename: #file upload, do not convert return value
            return field
        else:
            return field.value
| Python |
#!/usr/bin/env python
"""
FCKeditor - The text editor for Internet - http://www.fckeditor.net
Copyright (C) 2003-2008 Frederico Caldeira Knabben
== BEGIN LICENSE ==
Licensed under the terms of any of the following licenses at your
choice:
- GNU General Public License Version 2 or later (the "GPL")
http://www.gnu.org/licenses/gpl.html
- GNU Lesser General Public License Version 2.1 or later (the "LGPL")
http://www.gnu.org/licenses/lgpl.html
- Mozilla Public License Version 1.1 or later (the "MPL")
http://www.mozilla.org/MPL/MPL-1.1.html
== END LICENSE ==
Connector/QuickUpload for Python (WSGI wrapper).
See config.py for configuration settings
"""
from connector import FCKeditorConnector
from upload import FCKeditorQuickUpload
import cgitb
from cStringIO import StringIO
# Running from WSGI capable server (recomended)
def App(environ, start_response):
    "WSGI entry point. Run the connector"
    # Dispatch on the trailing script name: this single WSGI app serves both
    # the browser connector and the quick-upload endpoint.
    if environ['SCRIPT_NAME'].endswith("connector.py"):
        conn = FCKeditorConnector(environ)
    elif environ['SCRIPT_NAME'].endswith("upload.py"):
        conn = FCKeditorQuickUpload(environ)
    else:
        start_response ("200 Ok", [('Content-Type','text/html')])
        yield "Unknown page requested: "
        yield environ['SCRIPT_NAME']
        return
    try:
        # run the connector
        data = conn.doResponse()
        # Start WSGI response:
        start_response ("200 Ok", conn.headers)
        # Send response text
        yield data
    except:
        # Render the traceback as an HTML page (cgitb) instead of letting the
        # server emit a bare 500 with no diagnostics.
        start_response("500 Internal Server Error",[("Content-type","text/html")])
        file = StringIO()
        cgitb.Hook(file = file).handle()
        yield file.getvalue()
| Python |
#!/usr/bin/env python
"""
FCKeditor - The text editor for Internet - http://www.fckeditor.net
Copyright (C) 2003-2008 Frederico Caldeira Knabben
== BEGIN LICENSE ==
Licensed under the terms of any of the following licenses at your
choice:
- GNU General Public License Version 2 or later (the "GPL")
http://www.gnu.org/licenses/gpl.html
- GNU Lesser General Public License Version 2.1 or later (the "LGPL")
http://www.gnu.org/licenses/lgpl.html
- Mozilla Public License Version 1.1 or later (the "MPL")
http://www.mozilla.org/MPL/MPL-1.1.html
== END LICENSE ==
Utility functions for the File Manager Connector for Python
"""
import string, re
import os
import config as Config
# Generic manipulation functions
def removeExtension(fileName):
    "Return *fileName* with everything from the last '.' onward removed."
    return fileName[:fileName.rindex(".")]
def getExtension(fileName):
    "Return the text after the last '.' in *fileName* (the extension, without the dot)."
    return fileName[fileName.rindex(".") + 1:]
def removeFromStart(string, char):
    # Strip any leading run of *char* from *string*.  NOTE: str.lstrip
    # treats its argument as a set of characters, not a prefix; callers
    # only pass '/', for which the behaviour is the intended one.
    return string.lstrip(char)
def removeFromEnd(string, char):
    # Strip any trailing run of *char* from *string* (rstrip semantics --
    # a character set, not a suffix; callers only pass '/').
    return string.rstrip(char)
# Path functions
def combinePaths( basePath, folder ):
    "Join *basePath* and *folder* with exactly one '/' between them."
    return "/".join([removeFromEnd(basePath, '/'), removeFromStart(folder, '/')])
def getFileName(filename):
    "Return the last path component of *filename*, honouring both '/' and '\\' separators."
    for sep in ("/", "\\"):
        parts = filename.split(sep)
        if len(parts) > 1:
            filename = parts[-1]
    return filename
def sanitizeFolderName( newFolderName ):
    "Do a cleanup of the folder name to avoid possible problems"
    # Remove . \ / | : ? * " < > and control characters
    # NOTE(review): the final alternative [^\u0000-\u001f\u007f-\u009f] is a
    # *negated* class; as a unicode pattern it would match every ordinary
    # character and replace it with '_'.  Under Python 2 byte strings '\u'
    # is not an escape, so the class is a literal character jumble instead.
    # Either way this looks unintended (compare with the comment above) --
    # confirm against the upstream FCKeditor sources before relying on it.
    return re.sub( '(?u)\\.|\\\\|\\/|\\||\\:|\\?|\\*|"|<|>|[^\u0000-\u001f\u007f-\u009f]', '_', newFolderName )
def sanitizeFileName( newFileName ):
    "Do a cleanup of the file name to avoid possible problems"
    # Replace dots in the name with underscores (only one dot can be there... security issue).
    # NOTE(review): both re.sub patterns below retain PHP-style '/' regex
    # delimiters, so Python treats the slashes as literal characters and the
    # substitutions rarely (if ever) match as intended.  The trailing
    # negated class has the same problem described in sanitizeFolderName.
    # Confirm against the upstream FCKeditor sources before relying on this
    # for security.
    if ( Config.ForceSingleExtension ): # remove dots
        newFileName = re.sub ( '/\\.(?![^.]*$)/', '_', newFileName ) ;
    newFileName = newFileName.replace('\\','/')	# convert windows to unix path
    newFileName = os.path.basename (newFileName) # strip directories
    # Remove \ / | : ? *
    return re.sub ( '(?u)/\\\\|\\/|\\||\\:|\\?|\\*|"|<|>|[^\u0000-\u001f\u007f-\u009f]/', '_', newFileName )
def getCurrentFolder(currentFolder):
    """Normalise a client-supplied folder path.

    Guarantees a leading and trailing '/', collapses duplicate slashes and
    rejects traversal attempts; returns None for invalid paths.
    """
    if not currentFolder:
        currentFolder = '/'
    # Check the current folder syntax (must begin and end with a slash).
    # FIX: replaced the Python 2-only '<>' operator with '!=' (identical
    # semantics in Python 2, and forward-compatible with Python 3).
    if (currentFolder[-1] != "/"):
        currentFolder += "/"
    if (currentFolder[0] != "/"):
        currentFolder = "/" + currentFolder
    # Ensure the folder path has no double-slashes
    while '//' in currentFolder:
        currentFolder = currentFolder.replace('//','/')
    # Check for invalid folder paths (..)
    if '..' in currentFolder or '\\' in currentFolder:
        return None
    return currentFolder
def mapServerPath( environ, url):
    " Emulate the asp Server.mapPath function. Given an url path return the physical directory that it corresponds to "
    # This isn't correct but for the moment there's no other solution
    # If this script is under a virtual directory or symlink it will detect the problem and stop
    # (getRootPath raises in that case rather than returning a wrong path).
    return combinePaths( getRootPath(environ), url )
def mapServerFolder(resourceTypePath, folderPath):
    # Join a resource-type base directory with a connector folder path.
    return combinePaths ( resourceTypePath , folderPath )
def getRootPath(environ):
    """Return the server's document-root directory.

    Prefers the DOCUMENT_ROOT CGI variable; otherwise derives the root by
    stripping this script's URL path from its filesystem path.

    WARNING: this may not be thread safe, and doesn't work w/
    VirtualServer/mod_python -- set Config.UserFilesAbsolutePath instead.
    Raises Exception when the mapping cannot be established.
    """
    if 'DOCUMENT_ROOT' in environ:
        return environ['DOCUMENT_ROOT']
    else:
        realPath = os.path.realpath( './' )
        selfPath = environ['SCRIPT_FILENAME']
        selfPath = selfPath [ : selfPath.rfind( '/' ) ]
        selfPath = selfPath.replace( '/', os.path.sep)
        position = realPath.find(selfPath)
        # This can check only that this script isn't run from a virtual dir
        # But it avoids the problems that arise if it isn't checked.
        # FIX: removed a stray debugging 'raise realPath' that made this
        # branch unconditionally fail before the check below (raising a
        # string is a TypeError on modern Python 2 anyway).
        if ( position < 0 or position != len(realPath) - len(selfPath) or realPath[ : position ]==''):
            raise Exception('Sorry, can\'t map "UserFilesPath" to a physical path. You must set the "UserFilesAbsolutePath" value in "editor/filemanager/connectors/py/config.py".')
        return realPath[ : position ]
| Python |
#!/usr/bin/env python
"""
FCKeditor - The text editor for Internet - http://www.fckeditor.net
Copyright (C) 2003-2008 Frederico Caldeira Knabben
== BEGIN LICENSE ==
Licensed under the terms of any of the following licenses at your
choice:
- GNU General Public License Version 2 or later (the "GPL")
http://www.gnu.org/licenses/gpl.html
- GNU Lesser General Public License Version 2.1 or later (the "LGPL")
http://www.gnu.org/licenses/lgpl.html
- Mozilla Public License Version 1.1 or later (the "MPL")
http://www.mozilla.org/MPL/MPL-1.1.html
== END LICENSE ==
Connector for Python (CGI and WSGI).
See config.py for configuration settings
"""
import os
from fckutil import *
from fckcommands import * # default command's implementation
from fckoutput import * # base http, xml and html output mixins
from fckconnector import FCKeditorConnectorBase # import base connector
import config as Config
class FCKeditorConnector( FCKeditorConnectorBase,
                          GetFoldersCommandMixin,
                          GetFoldersAndFilesCommandMixin,
                          CreateFolderCommandMixin,
                          UploadFileCommandMixin,
                          BaseHttpMixin, BaseXmlMixin, BaseHtmlMixin ):
    "The Standard connector class."
    def doResponse(self):
        "Main function. Process the request, set headers and return a string as response."
        s = ""
        # Check if this connector is disabled
        if not(Config.Enabled):
            return self.sendError(1, "This connector is disabled. Please check the connector configurations in \"editor/filemanager/connectors/py/config.py\" and try again.")
        # Make sure we have valid inputs
        for key in ("Command","Type","CurrentFolder"):
            if not self.request.has_key (key):
                return
        # Get command, resource type and current folder
        command = self.request.get("Command")
        resourceType = self.request.get("Type")
        currentFolder = getCurrentFolder(self.request.get("CurrentFolder"))
        # Check for invalid paths
        if currentFolder is None:
            return self.sendError(102, "")
        # Check if it is an allowed command
        if ( not command in Config.ConfigAllowedCommands ):
            return self.sendError( 1, 'The %s command isn\'t allowed' % command )
        if ( not resourceType in Config.ConfigAllowedTypes ):
            return self.sendError( 1, 'Invalid type specified' )
        # Setup paths
        if command == "QuickUpload":
            self.userFilesFolder = Config.QuickUploadAbsolutePath[resourceType]
            self.webUserFilesFolder = Config.QuickUploadPath[resourceType]
        else:
            self.userFilesFolder = Config.FileTypesAbsolutePath[resourceType]
            self.webUserFilesFolder = Config.FileTypesPath[resourceType]
        if not self.userFilesFolder: # no absolute path given (dangerous...)
            self.userFilesFolder = mapServerPath(self.environ,
                                                 self.webUserFilesFolder)
        # Ensure that the directory exists.
        if not os.path.exists(self.userFilesFolder):
            try:
                # FIX: the method name was doubled
                # ("createServerFoldercreateServerFolder"), so this always
                # raised AttributeError and the folder was never created.
                self.createServerFolder( self.userFilesFolder )
            except:
                return self.sendError(1, "This connector couldn\'t access to local user\'s files directories. Please check the UserFilesAbsolutePath in \"editor/filemanager/connectors/py/config.py\" and try again. ")
        # File upload doesn't have to return XML, so intercept here
        if (command == "FileUpload"):
            return self.uploadFile(resourceType, currentFolder)
        # Create Url
        url = combinePaths( self.webUserFilesFolder, currentFolder )
        # Begin XML
        s += self.createXmlHeader(command, resourceType, currentFolder, url)
        # Execute the command
        selector = {"GetFolders": self.getFolders,
                    "GetFoldersAndFiles": self.getFoldersAndFiles,
                    "CreateFolder": self.createFolder,
                    }
        s += selector[command](resourceType, currentFolder)
        s += self.createXmlFooter()
        return s
# Running from command line (plain old CGI)
if __name__ == '__main__':
    try:
        # Create a Connector Instance
        conn = FCKeditorConnector()
        data = conn.doResponse()
        # CGI response format: header lines, a blank separator, then the body.
        for header in conn.headers:
            print '%s: %s' % header
        print
        print data
    except:
        # On any failure, fall back to a plain-text traceback so the browser
        # shows something useful instead of a bare server error.
        print "Content-Type: text/plain"
        print
        import cgi
        cgi.print_exception()
| Python |
#!/usr/bin/env python
"""
FCKeditor - The text editor for Internet - http://www.fckeditor.net
Copyright (C) 2003-2008 Frederico Caldeira Knabben
== BEGIN LICENSE ==
Licensed under the terms of any of the following licenses at your
choice:
- GNU General Public License Version 2 or later (the "GPL")
http://www.gnu.org/licenses/gpl.html
- GNU Lesser General Public License Version 2.1 or later (the "LGPL")
http://www.gnu.org/licenses/lgpl.html
- Mozilla Public License Version 1.1 or later (the "MPL")
http://www.mozilla.org/MPL/MPL-1.1.html
== END LICENSE ==
Connector for Python and Zope.
This code was not tested at all.
It just was ported from pre 2.5 release, so for further reference see
\editor\filemanager\browser\default\connectors\py\connector.py in previous
releases.
"""
from fckutil import *
from connector import *
import config as Config
class FCKeditorConnectorZope(FCKeditorConnector):
    """
    Zope version of FCKeditorConnector.

    NOTE: this port was marked "not tested at all" upstream; several obvious
    breakages (uninitialised lazy caches, fckutil module functions called as
    methods, a retry call to a nonexistent method) are fixed below.
    """
    # Allow access (Zope)
    __allow_access_to_unprotected_subobjects__ = 1
    def __init__(self, context=None):
        "Constructor: remember the Zope context and wrap its REQUEST."
        FCKeditorConnector.__init__(self, environ=None) # call superclass constructor
        # FIX: these lazy-lookup caches were never initialised, so the
        # getZope*Context accessors raised AttributeError on first use.
        self.zopeRootContext = None
        self.zopeUploadContext = None
        # Instance Attributes
        self.context = context
        self.request = FCKeditorRequest(context)
    def getZopeRootContext(self):
        "Return (and cache) the Zope physical root."
        if self.zopeRootContext is None:
            self.zopeRootContext = self.context.getPhysicalRoot()
        return self.zopeRootContext
    def getZopeUploadContext(self):
        "Return (and cache) the Zope folder designated by userFilesFolder."
        if self.zopeUploadContext is None:
            folderNames = self.userFilesFolder.split("/")
            c = self.getZopeRootContext()
            for folderName in folderNames:
                if (folderName != ""):
                    c = c[folderName]
            self.zopeUploadContext = c
        return self.zopeUploadContext
    def setHeader(self, key, value):
        "Set the header directly on the Zope response object."
        self.context.REQUEST.RESPONSE.setHeader(key, value)
    def getFolders(self, resourceType, currentFolder):
        "Render the <Folders> node listing Zope sub-folders."
        # Open the folders node
        s = ""
        s += """<Folders>"""
        zopeFolder = self.findZopeFolder(resourceType, currentFolder)
        for (name, o) in zopeFolder.objectItems(["Folder"]):
            s += """<Folder name="%s" />""" % (
                convertToXmlAttribute(name)
            )
        # Close the folders node
        s += """</Folders>"""
        return s
    def getZopeFoldersAndFiles(self, resourceType, currentFolder):
        "Render both the folder and the file listings."
        folders = self.getZopeFolders(resourceType, currentFolder)
        files = self.getZopeFiles(resourceType, currentFolder)
        s = folders + files
        return s
    def getZopeFiles(self, resourceType, currentFolder):
        "Render the <Files> node listing Zope File/Image objects."
        # Open the files node
        s = ""
        s += """<Files>"""
        zopeFolder = self.findZopeFolder(resourceType, currentFolder)
        for (name, o) in zopeFolder.objectItems(["File","Image"]):
            s += """<File name="%s" size="%s" />""" % (
                convertToXmlAttribute(name),
                ((o.get_size() / 1024) + 1)  # size in KB, rounded up
            )
        # Close the files node
        s += """</Files>"""
        return s
    def findZopeFolder(self, resourceType, folderName):
        "Return the Zope folder for resourceType/folderName, creating the type folder on demand."
        zopeFolder = self.getZopeUploadContext()
        # FIX: removeFromStart/removeFromEnd are fckutil module-level
        # helpers, not methods -- the original 'self.' lookups raised
        # AttributeError.
        folderName = removeFromStart(folderName, "/")
        folderName = removeFromEnd(folderName, "/")
        if (resourceType != ""):
            try:
                zopeFolder = zopeFolder[resourceType]
            except:
                zopeFolder.manage_addProduct["OFSP"].manage_addFolder(id=resourceType, title=resourceType)
                zopeFolder = zopeFolder[resourceType]
        if (folderName != ""):
            folderNames = folderName.split("/")
            for folderName in folderNames:
                zopeFolder = zopeFolder[folderName]
        return zopeFolder
    def createFolder(self, resourceType, currentFolder):
        "Create the folder named by the request's NewFolderName inside currentFolder."
        # Find out where we are
        zopeFolder = self.findZopeFolder(resourceType, currentFolder)
        errorNo = 0
        errorMsg = ""
        if self.request.has_key("NewFolderName"):
            newFolder = self.request.get("NewFolderName", None)
            zopeFolder.manage_addProduct["OFSP"].manage_addFolder(id=newFolder, title=newFolder)
        else:
            errorNo = 102
        return self.sendErrorNode ( errorNo, errorMsg )
    def uploadFile(self, resourceType, currentFolder, count=None):
        """Store the uploaded NewFile in the target Zope folder.

        On an id collision the upload is retried with a numeric suffix.
        """
        zopeFolder = self.findZopeFolder(resourceType, currentFolder)
        file = self.request.get("NewFile", None)
        # FIX: getFileName/removeExtension/getExtension are fckutil module
        # functions, not methods of this class.
        fileName = getFileName(file.filename)
        fileNameOnly = removeExtension(fileName)
        fileExtension = getExtension(fileName).lower()
        if (count):
            nid = "%s.%s.%s" % (fileNameOnly, count, fileExtension)
        else:
            nid = fileName
        title = nid
        try:
            zopeFolder.manage_addProduct['OFSP'].manage_addFile(
                id=nid,
                title=title,
                file=file.read()
            )
        except:
            if (count):
                count += 1
            else:
                count = 1
            # FIX: was self.zopeFileUpload(...), a method that does not exist.
            return self.uploadFile(resourceType, currentFolder, count)
        return self.sendUploadResults( 0 )
class FCKeditorRequest(object):
	"""Thin adapter exposing has_key()/get() over a Zope REQUEST object."""
	def __init__(self, context=None):
		self.request = context.REQUEST
	def has_key(self, key):
		return self.request.has_key(key)
	def get(self, key, default=None):
		return self.request.get(key, default)
"""
Running from zope, you will need to modify this connector.
If you have uploaded the FCKeditor into Zope (like me), you need to
move this connector out of Zope, and replace the "connector" with an
alias as below. The key to it is to pass the Zope context in, as
we then have a link to the Zope context.
## Script (Python) "connector.py"
##bind container=container
##bind context=context
##bind namespace=
##bind script=script
##bind subpath=traverse_subpath
##parameters=*args, **kws
##title=ALIAS
##
import Products.zope as connector
return connector.FCKeditorConnectorZope(context=context).doResponse()
"""
| Python |
#!/usr/bin/env python
"""
FCKeditor - The text editor for Internet - http://www.fckeditor.net
Copyright (C) 2003-2008 Frederico Caldeira Knabben
== BEGIN LICENSE ==
Licensed under the terms of any of the following licenses at your
choice:
- GNU General Public License Version 2 or later (the "GPL")
http://www.gnu.org/licenses/gpl.html
- GNU Lesser General Public License Version 2.1 or later (the "LGPL")
http://www.gnu.org/licenses/lgpl.html
- Mozilla Public License Version 1.1 or later (the "MPL")
http://www.mozilla.org/MPL/MPL-1.1.html
== END LICENSE ==
Connector for Python (CGI and WSGI).
"""
import os
try: # Windows needs stdio set for binary mode for file upload to work.
import msvcrt
msvcrt.setmode (0, os.O_BINARY) # stdin = 0
msvcrt.setmode (1, os.O_BINARY) # stdout = 1
except ImportError:
pass
from fckutil import *
from fckoutput import *
import config as Config
class GetFoldersCommandMixin (object):
	def getFolders(self, resourceType, currentFolder):
		"""Answer the GetFolders command: list subdirectories as a <Folders> fragment."""
		# Map the virtual path to our local server
		serverPath = mapServerFolder(self.userFilesFolder, currentFolder)
		names = [
			convertToXmlAttribute(entry)
			for entry in os.listdir(serverPath)
			if os.path.isdir(mapServerFolder(serverPath, entry))
		]
		folderNodes = "".join("""<Folder name="%s" />""" % n for n in names)
		return """<Folders>""" + folderNodes + """</Folders>"""
class GetFoldersAndFilesCommandMixin (object):
	def getFoldersAndFiles(self, resourceType, currentFolder):
		"""Answer the GetFoldersAndFiles command.

		Returns a <Folders> fragment followed by a <Files> fragment for the
		given virtual folder.

		Fix: each file used to be stat'ed twice (os.path.getsize was called
		again inside the format expression while the cached `size` was left
		unused); the cached value is now used.
		"""
		# Map the virtual path to our local server
		serverPath = mapServerFolder(self.userFilesFolder,currentFolder)
		# Open the folders / files node
		folders = """<Folders>"""
		files = """<Files>"""
		for someObject in os.listdir(serverPath):
			someObjectPath = mapServerFolder(serverPath, someObject)
			if os.path.isdir(someObjectPath):
				folders += """<Folder name="%s" />""" % (
							convertToXmlAttribute(someObject)
							)
			elif os.path.isfile(someObjectPath):
				size = os.path.getsize(someObjectPath)
				files += """<File name="%s" size="%s" />""" % (
							convertToXmlAttribute(someObject),
							size
							)
		# Close the folders / files node
		folders += """</Folders>"""
		files += """</Files>"""
		return folders + files
class CreateFolderCommandMixin (object):
def createFolder(self, resourceType, currentFolder):
"""
Purpose: command to create a new folder
"""
errorNo = 0; errorMsg ='';
if self.request.has_key("NewFolderName"):
newFolder = self.request.get("NewFolderName", None)
newFolder = sanitizeFolderName (newFolder)
try:
newFolderPath = mapServerFolder(self.userFilesFolder, combinePaths(currentFolder, newFolder))
self.createServerFolder(newFolderPath)
except Exception, e:
errorMsg = str(e).decode('iso-8859-1').encode('utf-8') # warning with encodigns!!!
if hasattr(e,'errno'):
if e.errno==17: #file already exists
errorNo=0
elif e.errno==13: # permission denied
errorNo = 103
elif e.errno==36 or e.errno==2 or e.errno==22: # filename too long / no such file / invalid name
errorNo = 102
else:
errorNo = 110
else:
errorNo = 102
return self.sendErrorNode ( errorNo, errorMsg )
def createServerFolder(self, folderPath):
"Purpose: physically creates a folder on the server"
# No need to check if the parent exists, just create all hierachy
try:
permissions = Config.ChmodOnFolderCreate
if not permissions:
os.makedirs(folderPath)
except AttributeError: #ChmodOnFolderCreate undefined
permissions = 0755
if permissions:
oldumask = os.umask(0)
os.makedirs(folderPath,mode=0755)
os.umask( oldumask )
class UploadFileCommandMixin (object):
def uploadFile(self, resourceType, currentFolder):
"""
Purpose: command to upload files to server (same as FileUpload)
"""
errorNo = 0
if self.request.has_key("NewFile"):
# newFile has all the contents we need
newFile = self.request.get("NewFile", "")
# Get the file name
newFileName = newFile.filename
newFileName = sanitizeFileName( newFileName )
newFileNameOnly = removeExtension(newFileName)
newFileExtension = getExtension(newFileName).lower()
allowedExtensions = Config.AllowedExtensions[resourceType]
deniedExtensions = Config.DeniedExtensions[resourceType]
if (allowedExtensions):
# Check for allowed
isAllowed = False
if (newFileExtension in allowedExtensions):
isAllowed = True
elif (deniedExtensions):
# Check for denied
isAllowed = True
if (newFileExtension in deniedExtensions):
isAllowed = False
else:
# No extension limitations
isAllowed = True
if (isAllowed):
# Upload to operating system
# Map the virtual path to the local server path
currentFolderPath = mapServerFolder(self.userFilesFolder, currentFolder)
i = 0
while (True):
newFilePath = os.path.join (currentFolderPath,newFileName)
if os.path.exists(newFilePath):
i += 1
newFileName = "%s(%04d).%s" % (
newFileNameOnly, i, newFileExtension
)
errorNo= 201 # file renamed
else:
# Read file contents and write to the desired path (similar to php's move_uploaded_file)
fout = file(newFilePath, 'wb')
while (True):
chunk = newFile.file.read(100000)
if not chunk: break
fout.write (chunk)
fout.close()
if os.path.exists ( newFilePath ):
doChmod = False
try:
doChmod = Config.ChmodOnUpload
permissions = Config.ChmodOnUpload
except AttributeError: #ChmodOnUpload undefined
doChmod = True
permissions = 0755
if ( doChmod ):
oldumask = os.umask(0)
os.chmod( newFilePath, permissions )
os.umask( oldumask )
newFileUrl = self.webUserFilesFolder + currentFolder + newFileName
return self.sendUploadResults( errorNo , newFileUrl, newFileName )
else:
return self.sendUploadResults( errorNo = 203, customMsg = "Extension not allowed" )
else:
return self.sendUploadResults( errorNo = 202, customMsg = "No File" )
| Python |
#!/usr/bin/env python
"""
FCKeditor - The text editor for Internet - http://www.fckeditor.net
Copyright (C) 2003-2008 Frederico Caldeira Knabben
== BEGIN LICENSE ==
Licensed under the terms of any of the following licenses at your
choice:
- GNU General Public License Version 2 or later (the "GPL")
http://www.gnu.org/licenses/gpl.html
- GNU Lesser General Public License Version 2.1 or later (the "LGPL")
http://www.gnu.org/licenses/lgpl.html
- Mozilla Public License Version 1.1 or later (the "MPL")
http://www.mozilla.org/MPL/MPL-1.1.html
== END LICENSE ==
Connector for Python (CGI and WSGI).
"""
from time import gmtime, strftime
import string
def escape(text, replace=None):
	"""Escape the XML/HTML special characters '&', '<', '>' and '"'.

	Bug fix: the replacement strings had degraded to the literal characters
	themselves (the '&amp;', '&lt;', '&gt;' and '&quot;' entity text was
	lost), so escape() returned its input unchanged. The entity references
	required by RFC 1866 are restored here.

	The optional *replace* argument keeps the historical hook for supplying
	a custom replace(text, old, new) callable; by default the string's own
	.replace method is used (works for both str and unicode).
	"""
	if replace is None:
		replace = lambda s, old, new: s.replace(old, new)
	text = replace(text, '&', '&amp;') # must be done 1st
	text = replace(text, '<', '&lt;')
	text = replace(text, '>', '&gt;')
	text = replace(text, '"', '&quot;')
	return text
def convertToXmlAttribute(value):
	"""Return *value* escaped for safe use inside an XML attribute; None maps to ''."""
	safe = value
	if safe is None:
		safe = ""
	return escape(safe)
class BaseHttpMixin(object):
	def setHttpHeaders(self, content_type='text/xml'):
		"""Emit the standard anti-caching headers plus the response Content-Type."""
		# Headers that defeat browser/proxy caching of the connector response.
		anti_cache = (
			('Expires', 'Mon, 26 Jul 1997 05:00:00 GMT'),  # date in the past
			('Last-Modified', strftime("%a, %d %b %Y %H:%M:%S GMT", gmtime())),  # always modified
			('Cache-Control', 'no-store, no-cache, must-revalidate'),  # HTTP/1.1
			('Cache-Control', 'post-check=0, pre-check=0'),
			('Pragma', 'no-cache'),  # HTTP/1.0
		)
		for key, value in anti_cache:
			self.setHeader(key, value)
		# Set the response format.
		self.setHeader('Content-Type', content_type + '; charset=utf-8')
		return
class BaseXmlMixin(object):
	def createXmlHeader(self, command, resourceType, currentFolder, url):
		"""Return the XML prolog, the opening <Connector> node and the <CurrentFolder> node."""
		self.setHttpHeaders()
		# XML document header followed by the main connector node
		header = """<?xml version="1.0" encoding="utf-8" ?>"""
		header += """<Connector command="%s" resourceType="%s">""" % (
			command,
			resourceType
			)
		# Add the current folder node
		header += """<CurrentFolder path="%s" url="%s" />""" % (
			convertToXmlAttribute(currentFolder),
			convertToXmlAttribute(url),
			)
		return header
	def createXmlFooter(self):
		"""Return the closing </Connector> tag."""
		return """</Connector>"""
	def sendError(self, number, text):
		"""Return a complete XML error document for the given code and message."""
		self.setHttpHeaders()
		body = self.sendErrorNode (number, text)
		return ("""<?xml version="1.0" encoding="utf-8" ?>""" +
				"""<Connector>""" +
				body +
				"""</Connector>""" )
	def sendErrorNode(self, number, text):
		"""Return a single self-closing <Error> node."""
		return """<Error number="%s" text="%s" />""" % (number, convertToXmlAttribute(text))
class BaseHtmlMixin(object):
	def sendUploadResults( self, errorNo = 0, fileUrl = '', fileName = '', customMsg = '' ):
		"""Send the HTML page that reports the outcome of an upload.

		The page runs the minified document.domain automatic-fix script
		(#1919; original source at _dev/domain_fix_template.js) and then
		notifies the opener via window.parent.OnUploadCompleted().

		Fix: this explanatory text used to live in bare string expressions
		placed *after* the first statement, where it is dead code rather
		than a docstring; it has been folded into this real docstring.
		"""
		self.setHttpHeaders("text/html")
		return """<script type="text/javascript">
(function(){var d=document.domain;while (true){try{var A=window.parent.document.domain;break;}catch(e) {};d=d.replace(/.*?(?:\.|$)/,'');if (d.length==0) break;try{document.domain=d;}catch (e){break;}}})();
window.parent.OnUploadCompleted(%(errorNumber)s,"%(fileUrl)s","%(fileName)s","%(customMsg)s");
</script>""" % {
			'errorNumber': errorNo,
			'fileUrl': fileUrl.replace ('"', '\\"'),
			'fileName': fileName.replace ( '"', '\\"' ) ,
			'customMsg': customMsg.replace ( '"', '\\"' ),
		}
| Python |
#!/usr/bin/env python
"""
* FCKeditor - The text editor for Internet - http://www.fckeditor.net
* Copyright (C) 2003-2008 Frederico Caldeira Knabben
*
* == BEGIN LICENSE ==
*
* Licensed under the terms of any of the following licenses at your
* choice:
*
* - GNU General Public License Version 2 or later (the "GPL")
* http://www.gnu.org/licenses/gpl.html
*
* - GNU Lesser General Public License Version 2.1 or later (the "LGPL")
* http://www.gnu.org/licenses/lgpl.html
*
* - Mozilla Public License Version 1.1 or later (the "MPL")
* http://www.mozilla.org/MPL/MPL-1.1.html
*
* == END LICENSE ==
*
* Configuration file for the File Manager Connector for Python
"""
# INSTALLATION NOTE: You must set up your server environment accordingly to run
# python scripts. This connector requires Python 2.4 or greater.
#
# Supported operation modes:
# * WSGI (recommended): You'll need apache + mod_python + modpython_gateway
# or any web server capable of the WSGI python standard
# * Plain Old CGI: Any server capable of running standard python scripts
# (although mod_python is recommended for performance)
# This was the previous connector version operation mode
#
# If you're using Apache web server, replace the htaccess.txt to to .htaccess,
# and set the proper options and paths.
# For WSGI and mod_python, you may need to download modpython_gateway from:
# http://projects.amor.org/misc/svn/modpython_gateway.py and copy it in this
# directory.
# SECURITY: You must explicitly enable this "connector". (Set it to "True").
# WARNING: don't just set "ConfigIsEnabled = True", you must be sure that only
# authenticated users can access this file or use some kind of session checking.
# Master switch: the connector refuses every request while this is False.
Enabled = False
# Path to user files relative to the document root.
UserFilesPath = '/userfiles/'
# Fill the following value if you prefer to specify the absolute path for the
# user files directory. Useful if you are using a virtual directory, symbolic
# link or alias. Examples: 'C:\\MySite\\userfiles\\' or '/root/mysite/userfiles/'.
# Attention: The above 'UserFilesPath' must point to the same directory.
# WARNING: GetRootPath may not work in virtual or mod_python configurations, and
# may not be thread safe. Use this configuration parameter instead.
UserFilesAbsolutePath = ''
# Due to security issues with Apache modules, it is recommended to leave the
# following setting enabled.
ForceSingleExtension = True
# What the user can do with this connector
ConfigAllowedCommands = [ 'QuickUpload', 'FileUpload', 'GetFolders', 'GetFoldersAndFiles', 'CreateFolder' ]
# Allowed Resource Types
ConfigAllowedTypes = ['File', 'Image', 'Flash', 'Media']
# After a file is uploaded, sometimes it is required to change its permissions
# so that it is possible to access it at a later time.
# If possible, it is recommended to set more restrictive permissions, like 0755.
# Set to 0 to disable this feature.
# Note: not needed on Windows-based servers.
# (Python 2 octal literal: 0755 == rwxr-xr-x.)
ChmodOnUpload = 0755
# See comments above.
# Used when creating folders that do not exist.
ChmodOnFolderCreate = 0755
# Do not touch these 3 lines, see "Configuration settings for each Resource Type"
# below: the per-type sections fill these dictionaries in.
AllowedExtensions = {}; DeniedExtensions = {};
FileTypesPath = {}; FileTypesAbsolutePath = {};
QuickUploadPath = {}; QuickUploadAbsolutePath = {};
# Configuration settings for each Resource Type
#
# - AllowedExtensions: the possible extensions that can be allowed.
# If it is empty then any file type can be uploaded.
# - DeniedExtensions: The extensions that won't be allowed.
# If it is empty then no restrictions are done here.
#
# For a file to be uploaded it has to fulfill both the AllowedExtensions
# and DeniedExtensions (that's it: not being denied) conditions.
#
# - FileTypesPath: the virtual folder relative to the document root where
# these resources will be located.
# Attention: It must start and end with a slash: '/'
#
# - FileTypesAbsolutePath: the physical path to the above folder. It must be
# an absolute path.
# If it's an empty string then it will be autocalculated.
# Useful if you are using a virtual directory, symbolic link or alias.
# Examples: 'C:\\MySite\\userfiles\\' or '/root/mysite/userfiles/'.
# Attention: The above 'FileTypesPath' must point to the same directory.
# Attention: It must end with a slash: '/'
#
#
# - QuickUploadPath: the virtual folder relative to the document root where
# these resources will be uploaded using the Upload tab in the resources
# dialogs.
# Attention: It must start and end with a slash: '/'
#
# - QuickUploadAbsolutePath: the physical path to the above folder. It must be
# an absolute path.
# If it's an empty string then it will be autocalculated.
# Useful if you are using a virtual directory, symbolic link or alias.
# Examples: 'C:\\MySite\\userfiles\\' or '/root/mysite/userfiles/'.
# Attention: The above 'QuickUploadPath' must point to the same directory.
# Attention: It must end with a slash: '/'
# -- 'File' resource type: general-purpose documents and archives ------------
AllowedExtensions['File'] = ['7z','aiff','asf','avi','bmp','csv','doc','fla','flv','gif','gz','gzip','jpeg','jpg','mid','mov','mp3','mp4','mpc','mpeg','mpg','ods','odt','pdf','png','ppt','pxd','qt','ram','rar','rm','rmi','rmvb','rtf','sdc','sitd','swf','sxc','sxw','tar','tgz','tif','tiff','txt','vsd','wav','wma','wmv','xls','xml','zip']
DeniedExtensions['File'] = []
FileTypesPath['File'] = UserFilesPath + 'file/'
FileTypesAbsolutePath['File'] = (not UserFilesAbsolutePath == '') and (UserFilesAbsolutePath + 'file/') or ''
QuickUploadPath['File'] = FileTypesPath['File']
QuickUploadAbsolutePath['File'] = FileTypesAbsolutePath['File']
# -- 'Image' resource type: bitmap images ------------------------------------
AllowedExtensions['Image'] = ['bmp','gif','jpeg','jpg','png']
DeniedExtensions['Image'] = []
FileTypesPath['Image'] = UserFilesPath + 'image/'
FileTypesAbsolutePath['Image'] = (not UserFilesAbsolutePath == '') and UserFilesAbsolutePath + 'image/' or ''
QuickUploadPath['Image'] = FileTypesPath['Image']
QuickUploadAbsolutePath['Image']= FileTypesAbsolutePath['Image']
# -- 'Flash' resource type: Flash movies -------------------------------------
AllowedExtensions['Flash'] = ['swf','flv']
DeniedExtensions['Flash'] = []
FileTypesPath['Flash'] = UserFilesPath + 'flash/'
FileTypesAbsolutePath['Flash'] = ( not UserFilesAbsolutePath == '') and UserFilesAbsolutePath + 'flash/' or ''
QuickUploadPath['Flash'] = FileTypesPath['Flash']
QuickUploadAbsolutePath['Flash']= FileTypesAbsolutePath['Flash']
# -- 'Media' resource type: audio/video files --------------------------------
AllowedExtensions['Media'] = ['aiff','asf','avi','bmp','fla', 'flv','gif','jpeg','jpg','mid','mov','mp3','mp4','mpc','mpeg','mpg','png','qt','ram','rm','rmi','rmvb','swf','tif','tiff','wav','wma','wmv']
DeniedExtensions['Media'] = []
FileTypesPath['Media'] = UserFilesPath + 'media/'
FileTypesAbsolutePath['Media'] = ( not UserFilesAbsolutePath == '') and UserFilesAbsolutePath + 'media/' or ''
QuickUploadPath['Media'] = FileTypesPath['Media']
QuickUploadAbsolutePath['Media']= FileTypesAbsolutePath['Media']
| Python |
#!/usr/bin/env python
"""
FCKeditor - The text editor for Internet - http://www.fckeditor.net
Copyright (C) 2003-2008 Frederico Caldeira Knabben
== BEGIN LICENSE ==
Licensed under the terms of any of the following licenses at your
choice:
- GNU General Public License Version 2 or later (the "GPL")
http://www.gnu.org/licenses/gpl.html
- GNU Lesser General Public License Version 2.1 or later (the "LGPL")
http://www.gnu.org/licenses/lgpl.html
- Mozilla Public License Version 1.1 or later (the "MPL")
http://www.mozilla.org/MPL/MPL-1.1.html
== END LICENSE ==
Connector for Python and Zope.
This code was not tested at all.
It just was ported from pre 2.5 release, so for further reference see
\editor\filemanager\browser\default\connectors\py\connector.py in previous
releases.
"""
from fckutil import *
from connector import *
import config as Config
class FCKeditorConnectorZope(FCKeditorConnector):
	"""
	Zope version of FCKeditorConnector.
	"""
	# Allow access (Zope)
	__allow_access_to_unprotected_subobjects__ = 1
	def __init__(self, context=None):
		"""
		Constructor: keep the Zope context and wrap its REQUEST object.
		"""
		FCKeditorConnector.__init__(self, environ=None) # call superclass constructor
		# Instance Attributes
		self.context = context
		self.request = FCKeditorRequest(context)
	def getZopeRootContext(self):
		# Lazily resolve and cache the Zope physical root.
		if self.zopeRootContext is None:
			self.zopeRootContext = self.context.getPhysicalRoot()
		return self.zopeRootContext
	def getZopeUploadContext(self):
		# Lazily walk from the root down to the configured user-files folder.
		if self.zopeUploadContext is None:
			folderNames = self.userFilesFolder.split("/")
			c = self.getZopeRootContext()
			for folderName in folderNames:
				if folderName != "":
					c = c[folderName]
			self.zopeUploadContext = c
		return self.zopeUploadContext
	def setHeader(self, key, value):
		# Headers go straight onto the Zope response.
		self.context.REQUEST.RESPONSE.setHeader(key, value)
	def getFolders(self, resourceType, currentFolder):
		"""Return a <Folders> XML fragment for the Zope subfolders."""
		s = ""
		s += """<Folders>"""
		zopeFolder = self.findZopeFolder(resourceType, currentFolder)
		for (name, o) in zopeFolder.objectItems(["Folder"]):
			s += """<Folder name="%s" />""" % (
					convertToXmlAttribute(name)
					)
		s += """</Folders>"""
		return s
	def getZopeFoldersAndFiles(self, resourceType, currentFolder):
		"""Return combined <Folders> and <Files> fragments.

		Bug fix: used to call the nonexistent self.getZopeFolders() and
		always raised AttributeError; the folder lister is getFolders().
		"""
		folders = self.getFolders(resourceType, currentFolder)
		files = self.getZopeFiles(resourceType, currentFolder)
		return folders + files
	def getZopeFiles(self, resourceType, currentFolder):
		"""Return a <Files> XML fragment (size reported in KB, rounded up)."""
		s = ""
		s += """<Files>"""
		zopeFolder = self.findZopeFolder(resourceType, currentFolder)
		for (name, o) in zopeFolder.objectItems(["File","Image"]):
			s += """<File name="%s" size="%s" />""" % (
					convertToXmlAttribute(name),
					((o.get_size() / 1024) + 1)
					)
		s += """</Files>"""
		return s
	def findZopeFolder(self, resourceType, folderName):
		"""Return the Zope folder for resourceType/folderName, creating the
		resource-type folder on demand."""
		zopeFolder = self.getZopeUploadContext()
		folderName = self.removeFromStart(folderName, "/")
		folderName = self.removeFromEnd(folderName, "/")
		if resourceType != "":
			try:
				zopeFolder = zopeFolder[resourceType]
			except:
				zopeFolder.manage_addProduct["OFSP"].manage_addFolder(id=resourceType, title=resourceType)
				zopeFolder = zopeFolder[resourceType]
		if folderName != "":
			folderNames = folderName.split("/")
			for folderName in folderNames:
				zopeFolder = zopeFolder[folderName]
		return zopeFolder
	def createFolder(self, resourceType, currentFolder):
		"""Create a folder named by the NewFolderName request field."""
		zopeFolder = self.findZopeFolder(resourceType, currentFolder)
		errorNo = 0
		errorMsg = ""
		if self.request.has_key("NewFolderName"):
			newFolder = self.request.get("NewFolderName", None)
			zopeFolder.manage_addProduct["OFSP"].manage_addFolder(id=newFolder, title=newFolder)
		else:
			errorNo = 102
		return self.sendErrorNode ( errorNo, errorMsg )
	def uploadFile(self, resourceType, currentFolder, count=None):
		"""Add the uploaded file; on id collision retry with a numbered id.

		Bug fix: the retry used to call the nonexistent
		self.zopeFileUpload(); it now recurses into uploadFile().
		"""
		zopeFolder = self.findZopeFolder(resourceType, currentFolder)
		file = self.request.get("NewFile", None)
		fileName = self.getFileName(file.filename)
		fileNameOnly = self.removeExtension(fileName)
		fileExtension = self.getExtension(fileName).lower()
		if count:
			nid = "%s.%s.%s" % (fileNameOnly, count, fileExtension)
		else:
			nid = fileName
		title = nid
		try:
			zopeFolder.manage_addProduct['OFSP'].manage_addFile(
				id=nid,
				title=title,
				file=file.read()
				)
		except:
			if count:
				count += 1
			else:
				count = 1
			return self.uploadFile(resourceType, currentFolder, count)
		return self.sendUploadResults( 0 )
class FCKeditorRequest(object):
	"""Thin adapter exposing has_key()/get() over a Zope REQUEST object."""
	def __init__(self, context=None):
		self.request = context.REQUEST
	def has_key(self, key):
		return self.request.has_key(key)
	def get(self, key, default=None):
		return self.request.get(key, default)
"""
Running from zope, you will need to modify this connector.
If you have uploaded the FCKeditor into Zope (like me), you need to
move this connector out of Zope, and replace the "connector" with an
alias as below. The key to it is to pass the Zope context in, as
we then have a link to the Zope context.
## Script (Python) "connector.py"
##bind container=container
##bind context=context
##bind namespace=
##bind script=script
##bind subpath=traverse_subpath
##parameters=*args, **kws
##title=ALIAS
##
import Products.zope as connector
return connector.FCKeditorConnectorZope(context=context).doResponse()
"""
| Python |
#!/usr/bin/env python
"""
FCKeditor - The text editor for Internet - http://www.fckeditor.net
Copyright (C) 2003-2008 Frederico Caldeira Knabben
== BEGIN LICENSE ==
Licensed under the terms of any of the following licenses at your
choice:
- GNU General Public License Version 2 or later (the "GPL")
http://www.gnu.org/licenses/gpl.html
- GNU Lesser General Public License Version 2.1 or later (the "LGPL")
http://www.gnu.org/licenses/lgpl.html
- Mozilla Public License Version 1.1 or later (the "MPL")
http://www.mozilla.org/MPL/MPL-1.1.html
== END LICENSE ==
Base Connector for Python (CGI and WSGI).
See config.py for configuration settings
"""
import cgi, os
from fckutil import *
from fckcommands import * # default command's implementation
from fckoutput import * # base http, xml and html output mixins
import config as Config
class FCKeditorConnectorBase( object ):
	"""Base connector class. Subclass it to extend functionality (see the Zope example)."""
	def __init__(self, environ=None):
		"""Parse the incoming request and initialize per-request state."""
		self.request = FCKeditorRequest(environ) # Parse request
		self.headers = [] # Clean Headers
		if environ:
			self.environ = environ
		else:
			self.environ = os.environ
	# local functions
	def setHeader(self, key, value):
		"""Queue an HTTP header (key, value) pair for the response."""
		header = (key, value)
		self.headers.append(header)
		return
class FCKeditorRequest(object):
	"""A wrapper around the request fields of a CGI or WSGI request.

	Exposes has_key()/get() over both the primary FieldStorage and, for
	POST requests, a second FieldStorage built from QUERY_STRING (cgi
	parses only the POST body by default, so GET parameters would
	otherwise be lost).

	Fix: self.get_request was assigned only inside the
	REQUEST_METHOD/QUERY_STRING branch; when either variable was missing
	from the environment, has_key()/get() raised AttributeError. It is
	now always initialized to an empty mapping first.
	"""
	def __init__(self, environ):
		if environ: # WSGI
			self.request = cgi.FieldStorage(fp=environ['wsgi.input'],
							environ=environ,
							keep_blank_values=1)
			self.environ = environ
		else: # plain old cgi
			self.environ = os.environ
			self.request = cgi.FieldStorage()
		self.get_request = {} # GET params parsed out of QUERY_STRING on POST
		if 'REQUEST_METHOD' in self.environ and 'QUERY_STRING' in self.environ:
			if self.environ['REQUEST_METHOD'].upper()=='POST':
				# we are in a POST, but a GET query string exists;
				# cgi parses POST data by default, so parse GET QUERY_STRING too
				self.get_request = cgi.FieldStorage(fp=None,
							environ={
							'REQUEST_METHOD':'GET',
							'QUERY_STRING':self.environ['QUERY_STRING'],
							},
							)
	def has_key(self, key):
		return self.request.has_key(key) or self.get_request.has_key(key)
	def get(self, key, default=None):
		if key in self.request.keys():
			field = self.request[key]
		elif key in self.get_request.keys():
			field = self.get_request[key]
		else:
			return default
		if hasattr(field,"filename") and field.filename: # file upload: return the field itself
			return field
		else:
			return field.value
| Python |
#!/usr/bin/env python
"""
FCKeditor - The text editor for Internet - http://www.fckeditor.net
Copyright (C) 2003-2008 Frederico Caldeira Knabben
== BEGIN LICENSE ==
Licensed under the terms of any of the following licenses at your
choice:
- GNU General Public License Version 2 or later (the "GPL")
http://www.gnu.org/licenses/gpl.html
- GNU Lesser General Public License Version 2.1 or later (the "LGPL")
http://www.gnu.org/licenses/lgpl.html
- Mozilla Public License Version 1.1 or later (the "MPL")
http://www.mozilla.org/MPL/MPL-1.1.html
== END LICENSE ==
Connector for Python (CGI and WSGI).
"""
import os
try: # Windows needs stdio set for binary mode for file upload to work.
import msvcrt
msvcrt.setmode (0, os.O_BINARY) # stdin = 0
msvcrt.setmode (1, os.O_BINARY) # stdout = 1
except ImportError:
pass
from fckutil import *
from fckoutput import *
import config as Config
class GetFoldersCommandMixin (object):
	def getFolders(self, resourceType, currentFolder):
		"""Answer the GetFolders command: list subdirectories as a <Folders> fragment."""
		# Map the virtual path to our local server
		serverPath = mapServerFolder(self.userFilesFolder, currentFolder)
		names = [
			convertToXmlAttribute(entry)
			for entry in os.listdir(serverPath)
			if os.path.isdir(mapServerFolder(serverPath, entry))
		]
		folderNodes = "".join("""<Folder name="%s" />""" % n for n in names)
		return """<Folders>""" + folderNodes + """</Folders>"""
class GetFoldersAndFilesCommandMixin (object):
	def getFoldersAndFiles(self, resourceType, currentFolder):
		"""Answer the GetFoldersAndFiles command.

		Returns a <Folders> fragment followed by a <Files> fragment for the
		given virtual folder.

		Fix: each file used to be stat'ed twice (os.path.getsize was called
		again inside the format expression while the cached `size` was left
		unused); the cached value is now used.
		"""
		# Map the virtual path to our local server
		serverPath = mapServerFolder(self.userFilesFolder,currentFolder)
		# Open the folders / files node
		folders = """<Folders>"""
		files = """<Files>"""
		for someObject in os.listdir(serverPath):
			someObjectPath = mapServerFolder(serverPath, someObject)
			if os.path.isdir(someObjectPath):
				folders += """<Folder name="%s" />""" % (
							convertToXmlAttribute(someObject)
							)
			elif os.path.isfile(someObjectPath):
				size = os.path.getsize(someObjectPath)
				files += """<File name="%s" size="%s" />""" % (
							convertToXmlAttribute(someObject),
							size
							)
		# Close the folders / files node
		folders += """</Folders>"""
		files += """</Files>"""
		return folders + files
class CreateFolderCommandMixin (object):
def createFolder(self, resourceType, currentFolder):
"""
Purpose: command to create a new folder
"""
errorNo = 0; errorMsg ='';
if self.request.has_key("NewFolderName"):
newFolder = self.request.get("NewFolderName", None)
newFolder = sanitizeFolderName (newFolder)
try:
newFolderPath = mapServerFolder(self.userFilesFolder, combinePaths(currentFolder, newFolder))
self.createServerFolder(newFolderPath)
except Exception, e:
errorMsg = str(e).decode('iso-8859-1').encode('utf-8') # warning with encodigns!!!
if hasattr(e,'errno'):
if e.errno==17: #file already exists
errorNo=0
elif e.errno==13: # permission denied
errorNo = 103
elif e.errno==36 or e.errno==2 or e.errno==22: # filename too long / no such file / invalid name
errorNo = 102
else:
errorNo = 110
else:
errorNo = 102
return self.sendErrorNode ( errorNo, errorMsg )
def createServerFolder(self, folderPath):
"Purpose: physically creates a folder on the server"
# No need to check if the parent exists, just create all hierachy
try:
permissions = Config.ChmodOnFolderCreate
if not permissions:
os.makedirs(folderPath)
except AttributeError: #ChmodOnFolderCreate undefined
permissions = 0755
if permissions:
oldumask = os.umask(0)
os.makedirs(folderPath,mode=0755)
os.umask( oldumask )
class UploadFileCommandMixin (object):
def uploadFile(self, resourceType, currentFolder):
"""
Purpose: command to upload files to server (same as FileUpload)
"""
errorNo = 0
if self.request.has_key("NewFile"):
# newFile has all the contents we need
newFile = self.request.get("NewFile", "")
# Get the file name
newFileName = newFile.filename
newFileName = sanitizeFileName( newFileName )
newFileNameOnly = removeExtension(newFileName)
newFileExtension = getExtension(newFileName).lower()
allowedExtensions = Config.AllowedExtensions[resourceType]
deniedExtensions = Config.DeniedExtensions[resourceType]
if (allowedExtensions):
# Check for allowed
isAllowed = False
if (newFileExtension in allowedExtensions):
isAllowed = True
elif (deniedExtensions):
# Check for denied
isAllowed = True
if (newFileExtension in deniedExtensions):
isAllowed = False
else:
# No extension limitations
isAllowed = True
if (isAllowed):
# Upload to operating system
# Map the virtual path to the local server path
currentFolderPath = mapServerFolder(self.userFilesFolder, currentFolder)
i = 0
while (True):
newFilePath = os.path.join (currentFolderPath,newFileName)
if os.path.exists(newFilePath):
i += 1
newFileName = "%s(%04d).%s" % (
newFileNameOnly, i, newFileExtension
)
errorNo= 201 # file renamed
else:
# Read file contents and write to the desired path (similar to php's move_uploaded_file)
fout = file(newFilePath, 'wb')
while (True):
chunk = newFile.file.read(100000)
if not chunk: break
fout.write (chunk)
fout.close()
if os.path.exists ( newFilePath ):
doChmod = False
try:
doChmod = Config.ChmodOnUpload
permissions = Config.ChmodOnUpload
except AttributeError: #ChmodOnUpload undefined
doChmod = True
permissions = 0755
if ( doChmod ):
oldumask = os.umask(0)
os.chmod( newFilePath, permissions )
os.umask( oldumask )
newFileUrl = self.webUserFilesFolder + currentFolder + newFileName
return self.sendUploadResults( errorNo , newFileUrl, newFileName )
else:
return self.sendUploadResults( errorNo = 203, customMsg = "Extension not allowed" )
else:
return self.sendUploadResults( errorNo = 202, customMsg = "No File" )
| Python |
#!/usr/bin/env python
"""
FCKeditor - The text editor for Internet - http://www.fckeditor.net
Copyright (C) 2003-2008 Frederico Caldeira Knabben
== BEGIN LICENSE ==
Licensed under the terms of any of the following licenses at your
choice:
- GNU General Public License Version 2 or later (the "GPL")
http://www.gnu.org/licenses/gpl.html
- GNU Lesser General Public License Version 2.1 or later (the "LGPL")
http://www.gnu.org/licenses/lgpl.html
- Mozilla Public License Version 1.1 or later (the "MPL")
http://www.mozilla.org/MPL/MPL-1.1.html
== END LICENSE ==
Connector/QuickUpload for Python (WSGI wrapper).
See config.py for configuration settings
"""
from connector import FCKeditorConnector
from upload import FCKeditorQuickUpload
import cgitb
from cStringIO import StringIO
# Running from WSGI capable server (recomended)
def App(environ, start_response):
    "WSGI entry point. Run the connector"
    # Dispatch on the script name: the same WSGI app serves both the
    # browse/command connector and the quick-upload endpoint.
    script_name = environ['SCRIPT_NAME']
    if script_name.endswith("connector.py"):
        handler = FCKeditorConnector(environ)
    elif script_name.endswith("upload.py"):
        handler = FCKeditorQuickUpload(environ)
    else:
        start_response("200 Ok", [('Content-Type', 'text/html')])
        yield "Unknown page requested: "
        yield script_name
        return
    try:
        # Run the connector, then emit its headers and body.
        response_body = handler.doResponse()
        start_response("200 Ok", handler.headers)
        yield response_body
    except:
        # Any failure is reported as an HTML traceback page via cgitb.
        start_response("500 Internal Server Error", [("Content-type", "text/html")])
        report = StringIO()
        cgitb.Hook(file=report).handle()
        yield report.getvalue()
| Python |
#!/usr/bin/env python
"""
FCKeditor - The text editor for Internet - http://www.fckeditor.net
Copyright (C) 2003-2008 Frederico Caldeira Knabben
== BEGIN LICENSE ==
Licensed under the terms of any of the following licenses at your
choice:
- GNU General Public License Version 2 or later (the "GPL")
http://www.gnu.org/licenses/gpl.html
- GNU Lesser General Public License Version 2.1 or later (the "LGPL")
http://www.gnu.org/licenses/lgpl.html
- Mozilla Public License Version 1.1 or later (the "MPL")
http://www.mozilla.org/MPL/MPL-1.1.html
== END LICENSE ==
Connector for Python (CGI and WSGI).
See config.py for configuration settings
"""
import os
from fckutil import *
from fckcommands import * # default command's implementation
from fckoutput import * # base http, xml and html output mixins
from fckconnector import FCKeditorConnectorBase # import base connector
import config as Config
class FCKeditorConnector( FCKeditorConnectorBase,
                          GetFoldersCommandMixin,
                          GetFoldersAndFilesCommandMixin,
                          CreateFolderCommandMixin,
                          UploadFileCommandMixin,
                          BaseHttpMixin, BaseXmlMixin, BaseHtmlMixin ):
    "The Standard connector class."

    def doResponse(self):
        """Main function. Process the request, set headers and return a
        string as response.

        Returns the XML (or upload-result HTML) body as a string, or the
        result of sendError() on failure. Returns None when a mandatory
        request parameter is missing.
        """
        s = ""
        # Check if this connector is disabled
        if not(Config.Enabled):
            return self.sendError(1, "This connector is disabled. Please check the connector configurations in \"editor/filemanager/connectors/py/config.py\" and try again.")
        # Make sure we have valid inputs
        for key in ("Command", "Type", "CurrentFolder"):
            if key not in self.request:
                return
        # Get command, resource type and current folder
        command = self.request.get("Command")
        resourceType = self.request.get("Type")
        currentFolder = getCurrentFolder(self.request.get("CurrentFolder"))
        # Check for invalid paths
        if currentFolder is None:
            return self.sendError(102, "")
        # Check if it is an allowed command
        if ( not command in Config.ConfigAllowedCommands ):
            return self.sendError( 1, 'The %s command isn\'t allowed' % command )
        if ( not resourceType in Config.ConfigAllowedTypes ):
            return self.sendError( 1, 'Invalid type specified' )
        # Setup paths: QuickUpload has its own target folders.
        if command == "QuickUpload":
            self.userFilesFolder = Config.QuickUploadAbsolutePath[resourceType]
            self.webUserFilesFolder = Config.QuickUploadPath[resourceType]
        else:
            self.userFilesFolder = Config.FileTypesAbsolutePath[resourceType]
            self.webUserFilesFolder = Config.FileTypesPath[resourceType]
        if not self.userFilesFolder: # no absolute path given (dangerous...)
            self.userFilesFolder = mapServerPath(self.environ,
                                                 self.webUserFilesFolder)
        # Ensure that the directory exists.
        if not os.path.exists(self.userFilesFolder):
            try:
                # BUG FIX: the method name was duplicated
                # ("createServerFoldercreateServerFolder"), so the folder
                # was never created and the AttributeError was silently
                # swallowed by the bare except below.
                self.createServerFolder( self.userFilesFolder )
            except:
                return self.sendError(1, "This connector couldn\'t access to local user\'s files directories. Please check the UserFilesAbsolutePath in \"editor/filemanager/connectors/py/config.py\" and try again. ")
        # File upload doesn't have to return XML, so intercept here
        if (command == "FileUpload"):
            return self.uploadFile(resourceType, currentFolder)
        # Create Url
        url = combinePaths( self.webUserFilesFolder, currentFolder )
        # Begin XML
        s += self.createXmlHeader(command, resourceType, currentFolder, url)
        # Execute the command via a dispatch table.
        selector = {"GetFolders": self.getFolders,
                    "GetFoldersAndFiles": self.getFoldersAndFiles,
                    "CreateFolder": self.createFolder,
                    }
        s += selector[command](resourceType, currentFolder)
        s += self.createXmlFooter()
        return s
# Running from command line (plain old CGI)
if __name__ == '__main__':
    try:
        # Create a Connector Instance
        # NOTE(review): no environ is passed here (the WSGI path passes one);
        # presumably FCKeditorConnectorBase falls back to os.environ — confirm.
        conn = FCKeditorConnector()
        data = conn.doResponse()
        # Emit the HTTP headers, a blank separator line, then the body,
        # as the CGI protocol requires.
        for header in conn.headers:
            print '%s: %s' % header
        print
        print data
    except:
        # On any failure, send a plain-text traceback to the client.
        print "Content-Type: text/plain"
        print
        import cgi
        cgi.print_exception()
| Python |
#!/usr/bin/env python
"""
* FCKeditor - The text editor for Internet - http://www.fckeditor.net
* Copyright (C) 2003-2008 Frederico Caldeira Knabben
*
* == BEGIN LICENSE ==
*
* Licensed under the terms of any of the following licenses at your
* choice:
*
* - GNU General Public License Version 2 or later (the "GPL")
* http://www.gnu.org/licenses/gpl.html
*
* - GNU Lesser General Public License Version 2.1 or later (the "LGPL")
* http://www.gnu.org/licenses/lgpl.html
*
* - Mozilla Public License Version 1.1 or later (the "MPL")
* http://www.mozilla.org/MPL/MPL-1.1.html
*
* == END LICENSE ==
*
* Configuration file for the File Manager Connector for Python
"""
# INSTALLATION NOTE: You must set up your server environment accordingly to run
# python scripts. This connector requires Python 2.4 or greater.
#
# Supported operation modes:
# * WSGI (recommended): You'll need apache + mod_python + modpython_gateway
# or any web server capable of the WSGI python standard
# * Plain Old CGI: Any server capable of running standard python scripts
# (although mod_python is recommended for performance)
# This was the previous connector version operation mode
#
# If you're using Apache web server, replace the htaccess.txt to to .htaccess,
# and set the proper options and paths.
# For WSGI and mod_python, you may need to download modpython_gateway from:
# http://projects.amor.org/misc/svn/modpython_gateway.py and copy it in this
# directory.
# SECURITY: You must explicitly enable this "connector". (Set it to "True").
# WARNING: don't just set "ConfigIsEnabled = True", you must be sure that only
# authenticated users can access this file or use some kind of session checking.
Enabled = False
# Path to user files relative to the document root.
UserFilesPath = '/userfiles/'
# Fill the following value it you prefer to specify the absolute path for the
# user files directory. Useful if you are using a virtual directory, symbolic
# link or alias. Examples: 'C:\\MySite\\userfiles\\' or '/root/mysite/userfiles/'.
# Attention: The above 'UserFilesPath' must point to the same directory.
# WARNING: GetRootPath may not work in virtual or mod_python configurations, and
# may not be thread safe. Use this configuration parameter instead.
UserFilesAbsolutePath = ''
# Due to security issues with Apache modules, it is recommended to leave the
# following setting enabled.
ForceSingleExtension = True
# What the user can do with this connector
ConfigAllowedCommands = [ 'QuickUpload', 'FileUpload', 'GetFolders', 'GetFoldersAndFiles', 'CreateFolder' ]
# Allowed Resource Types
ConfigAllowedTypes = ['File', 'Image', 'Flash', 'Media']
# After file is uploaded, sometimes it is required to change its permissions
# so that it was possible to access it at the later time.
# If possible, it is recommended to set more restrictive permissions, like 0755.
# Set to 0 to disable this feature.
# Note: not needed on Windows-based servers.
ChmodOnUpload = 0755
# See comments above.
# Used when creating folders that does not exist.
ChmodOnFolderCreate = 0755
# Do not touch this 3 lines, see "Configuration settings for each Resource Type"
AllowedExtensions = {}; DeniedExtensions = {};
FileTypesPath = {}; FileTypesAbsolutePath = {};
QuickUploadPath = {}; QuickUploadAbsolutePath = {};
# Configuration settings for each Resource Type
#
# - AllowedExtensions: the possible extensions that can be allowed.
# If it is empty then any file type can be uploaded.
# - DeniedExtensions: The extensions that won't be allowed.
# If it is empty then no restrictions are done here.
#
# For a file to be uploaded it has to fulfill both the AllowedExtensions
# and DeniedExtensions (that's it: not being denied) conditions.
#
# - FileTypesPath: the virtual folder relative to the document root where
# these resources will be located.
# Attention: It must start and end with a slash: '/'
#
# - FileTypesAbsolutePath: the physical path to the above folder. It must be
# an absolute path.
# If it's an empty string then it will be autocalculated.
# Useful if you are using a virtual directory, symbolic link or alias.
# Examples: 'C:\\MySite\\userfiles\\' or '/root/mysite/userfiles/'.
# Attention: The above 'FileTypesPath' must point to the same directory.
# Attention: It must end with a slash: '/'
#
#
# - QuickUploadPath: the virtual folder relative to the document root where
# these resources will be uploaded using the Upload tab in the resources
# dialogs.
# Attention: It must start and end with a slash: '/'
#
# - QuickUploadAbsolutePath: the physical path to the above folder. It must be
# an absolute path.
# If it's an empty string then it will be autocalculated.
# Useful if you are using a virtual directory, symbolic link or alias.
# Examples: 'C:\\MySite\\userfiles\\' or '/root/mysite/userfiles/'.
# Attention: The above 'QuickUploadPath' must point to the same directory.
# Attention: It must end with a slash: '/'
# Per-resource-type configuration. Each section fills in the allowed/denied
# extension lists and the virtual/physical paths for one resource type.
# The absolute path is derived from UserFilesAbsolutePath when that is set,
# and left empty (autocalculated later) otherwise.

AllowedExtensions['File'] = ['7z','aiff','asf','avi','bmp','csv','doc','fla','flv','gif','gz','gzip','jpeg','jpg','mid','mov','mp3','mp4','mpc','mpeg','mpg','ods','odt','pdf','png','ppt','pxd','qt','ram','rar','rm','rmi','rmvb','rtf','sdc','sitd','swf','sxc','sxw','tar','tgz','tif','tiff','txt','vsd','wav','wma','wmv','xls','xml','zip']
DeniedExtensions['File'] = []
FileTypesPath['File'] = UserFilesPath + 'file/'
if UserFilesAbsolutePath:
    FileTypesAbsolutePath['File'] = UserFilesAbsolutePath + 'file/'
else:
    FileTypesAbsolutePath['File'] = ''
QuickUploadPath['File'] = FileTypesPath['File']
QuickUploadAbsolutePath['File'] = FileTypesAbsolutePath['File']

AllowedExtensions['Image'] = ['bmp','gif','jpeg','jpg','png']
DeniedExtensions['Image'] = []
FileTypesPath['Image'] = UserFilesPath + 'image/'
if UserFilesAbsolutePath:
    FileTypesAbsolutePath['Image'] = UserFilesAbsolutePath + 'image/'
else:
    FileTypesAbsolutePath['Image'] = ''
QuickUploadPath['Image'] = FileTypesPath['Image']
QuickUploadAbsolutePath['Image'] = FileTypesAbsolutePath['Image']

AllowedExtensions['Flash'] = ['swf','flv']
DeniedExtensions['Flash'] = []
FileTypesPath['Flash'] = UserFilesPath + 'flash/'
if UserFilesAbsolutePath:
    FileTypesAbsolutePath['Flash'] = UserFilesAbsolutePath + 'flash/'
else:
    FileTypesAbsolutePath['Flash'] = ''
QuickUploadPath['Flash'] = FileTypesPath['Flash']
QuickUploadAbsolutePath['Flash'] = FileTypesAbsolutePath['Flash']

AllowedExtensions['Media'] = ['aiff','asf','avi','bmp','fla','flv','gif','jpeg','jpg','mid','mov','mp3','mp4','mpc','mpeg','mpg','png','qt','ram','rm','rmi','rmvb','swf','tif','tiff','wav','wma','wmv']
DeniedExtensions['Media'] = []
FileTypesPath['Media'] = UserFilesPath + 'media/'
if UserFilesAbsolutePath:
    FileTypesAbsolutePath['Media'] = UserFilesAbsolutePath + 'media/'
else:
    FileTypesAbsolutePath['Media'] = ''
QuickUploadPath['Media'] = FileTypesPath['Media']
QuickUploadAbsolutePath['Media'] = FileTypesAbsolutePath['Media']
| Python |
#!/usr/bin/env python
"""
FCKeditor - The text editor for Internet - http://www.fckeditor.net
Copyright (C) 2003-2008 Frederico Caldeira Knabben
== BEGIN LICENSE ==
Licensed under the terms of any of the following licenses at your
choice:
- GNU General Public License Version 2 or later (the "GPL")
http://www.gnu.org/licenses/gpl.html
- GNU Lesser General Public License Version 2.1 or later (the "LGPL")
http://www.gnu.org/licenses/lgpl.html
- Mozilla Public License Version 1.1 or later (the "MPL")
http://www.mozilla.org/MPL/MPL-1.1.html
== END LICENSE ==
This page lists the data posted by a form.
"""
import cgi
import os
# Tell the browser to render html
print "Content-Type: text/html"
print ""
try:
    # Create a cgi object
    form = cgi.FieldStorage()
except Exception, e:
    print e
# Document header
print """<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
<html>
<head>
<title>FCKeditor - Samples - Posted Data</title>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
<meta name="robots" content="noindex, nofollow">
<link href="../sample.css" rel="stylesheet" type="text/css" />
</head>
<body>
"""
# This is the real work
print """
<h1>FCKeditor - Samples - Posted Data</h1>
This page lists all data posted by the form.
<hr>
<table border="1" cellspacing="0" id="outputSample">
<colgroup><col width="80"><col></colgroup>
<thead>
<tr>
<th>Field Name</th>
<th>Value</th>
</tr>
</thead>
"""
# Render one table row per posted form field.
# NOTE(review): key and value are interpolated into the HTML unescaped,
# so posted content is echoed verbatim (XSS in any non-demo deployment).
for key in form.keys():
    try:
        value = form[key].value
        print """
<tr>
<th>%s</th>
<td><pre>%s</pre></td>
</tr>
""" % (key, value)
    except Exception, e:
        print e
print "</table>"
# For testing your environments
print "<hr>"
for key in os.environ.keys():
    print "%s: %s<br>" % (key, os.environ.get(key, ""))
print "<hr>"
# Document footer
print """
</body>
</html>
"""
| Python |
#!/usr/bin/env python
"""
FCKeditor - The text editor for Internet - http://www.fckeditor.net
Copyright (C) 2003-2008 Frederico Caldeira Knabben
== BEGIN LICENSE ==
Licensed under the terms of any of the following licenses at your
choice:
- GNU General Public License Version 2 or later (the "GPL")
http://www.gnu.org/licenses/gpl.html
- GNU Lesser General Public License Version 2.1 or later (the "LGPL")
http://www.gnu.org/licenses/lgpl.html
- Mozilla Public License Version 1.1 or later (the "MPL")
http://www.mozilla.org/MPL/MPL-1.1.html
== END LICENSE ==
Sample page.
"""
import cgi
import os
# Ensure that the fckeditor.py is included in your classpath
import fckeditor
# Tell the browser to render html
print "Content-Type: text/html"
print ""
# Document header
print """<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
<html>
<head>
<title>FCKeditor - Sample</title>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
<meta name="robots" content="noindex, nofollow">
<link href="../sample.css" rel="stylesheet" type="text/css" />
</head>
<body>
<h1>FCKeditor - Python - Sample 1</h1>
This sample displays a normal HTML form with an FCKeditor with full features
enabled.
<hr>
<form action="sampleposteddata.py" method="post" target="_blank">
"""
# This is the real work
try:
    # Derive the editor base path from the script URL: everything before
    # the "_samples" path component.
    sBasePath = os.environ.get("SCRIPT_NAME")
    sBasePath = sBasePath[0:sBasePath.find("_samples")]
    # Create the editor bound to form field "FCKeditor1" and emit its HTML.
    oFCKeditor = fckeditor.FCKeditor('FCKeditor1')
    oFCKeditor.BasePath = sBasePath
    oFCKeditor.Value = """<p>This is some <strong>sample text</strong>. You are using <a href="http://www.fckeditor.net/">FCKeditor</a>.</p>"""
    print oFCKeditor.Create()
except Exception, e:
    print e
print """
<br>
<input type="submit" value="Submit">
</form>
"""
# For testing your environments
print "<hr>"
for key in os.environ.keys():
    print "%s: %s<br>" % (key, os.environ.get(key, ""))
print "<hr>"
# Document footer
print """
</body>
</html>
"""
| Python |
#!/usr/bin/env python
"""
FCKeditor - The text editor for Internet - http://www.fckeditor.net
Copyright (C) 2003-2008 Frederico Caldeira Knabben
== BEGIN LICENSE ==
Licensed under the terms of any of the following licenses at your
choice:
- GNU General Public License Version 2 or later (the "GPL")
http://www.gnu.org/licenses/gpl.html
- GNU Lesser General Public License Version 2.1 or later (the "LGPL")
http://www.gnu.org/licenses/lgpl.html
- Mozilla Public License Version 1.1 or later (the "MPL")
http://www.mozilla.org/MPL/MPL-1.1.html
== END LICENSE ==
This page lists the data posted by a form.
"""
import cgi
import os
# Tell the browser to render html
print "Content-Type: text/html"
print ""
try:
    # Create a cgi object
    form = cgi.FieldStorage()
except Exception, e:
    print e
# Document header
print """<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
<html>
<head>
<title>FCKeditor - Samples - Posted Data</title>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
<meta name="robots" content="noindex, nofollow">
<link href="../sample.css" rel="stylesheet" type="text/css" />
</head>
<body>
"""
# This is the real work
print """
<h1>FCKeditor - Samples - Posted Data</h1>
This page lists all data posted by the form.
<hr>
<table border="1" cellspacing="0" id="outputSample">
<colgroup><col width="80"><col></colgroup>
<thead>
<tr>
<th>Field Name</th>
<th>Value</th>
</tr>
</thead>
"""
# Render one table row per posted form field.
# NOTE(review): key and value are interpolated into the HTML unescaped,
# so posted content is echoed verbatim (XSS in any non-demo deployment).
for key in form.keys():
    try:
        value = form[key].value
        print """
<tr>
<th>%s</th>
<td><pre>%s</pre></td>
</tr>
""" % (key, value)
    except Exception, e:
        print e
print "</table>"
# For testing your environments
print "<hr>"
for key in os.environ.keys():
    print "%s: %s<br>" % (key, os.environ.get(key, ""))
print "<hr>"
# Document footer
print """
</body>
</html>
"""
| Python |
#!/usr/bin/env python
"""
FCKeditor - The text editor for Internet - http://www.fckeditor.net
Copyright (C) 2003-2008 Frederico Caldeira Knabben
== BEGIN LICENSE ==
Licensed under the terms of any of the following licenses at your
choice:
- GNU General Public License Version 2 or later (the "GPL")
http://www.gnu.org/licenses/gpl.html
- GNU Lesser General Public License Version 2.1 or later (the "LGPL")
http://www.gnu.org/licenses/lgpl.html
- Mozilla Public License Version 1.1 or later (the "MPL")
http://www.mozilla.org/MPL/MPL-1.1.html
== END LICENSE ==
Sample page.
"""
import cgi
import os
# Ensure that the fckeditor.py is included in your classpath
import fckeditor
# Tell the browser to render html
print "Content-Type: text/html"
print ""
# Document header
print """<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
<html>
<head>
<title>FCKeditor - Sample</title>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
<meta name="robots" content="noindex, nofollow">
<link href="../sample.css" rel="stylesheet" type="text/css" />
</head>
<body>
<h1>FCKeditor - Python - Sample 1</h1>
This sample displays a normal HTML form with an FCKeditor with full features
enabled.
<hr>
<form action="sampleposteddata.py" method="post" target="_blank">
"""
# This is the real work
try:
    # Derive the editor base path from the script URL: everything before
    # the "_samples" path component.
    sBasePath = os.environ.get("SCRIPT_NAME")
    sBasePath = sBasePath[0:sBasePath.find("_samples")]
    # Create the editor bound to form field "FCKeditor1" and emit its HTML.
    oFCKeditor = fckeditor.FCKeditor('FCKeditor1')
    oFCKeditor.BasePath = sBasePath
    oFCKeditor.Value = """<p>This is some <strong>sample text</strong>. You are using <a href="http://www.fckeditor.net/">FCKeditor</a>.</p>"""
    print oFCKeditor.Create()
except Exception, e:
    print e
print """
<br>
<input type="submit" value="Submit">
</form>
"""
# For testing your environments
print "<hr>"
for key in os.environ.keys():
    print "%s: %s<br>" % (key, os.environ.get(key, ""))
print "<hr>"
# Document footer
print """
</body>
</html>
"""
| Python |
"""
FCKeditor - The text editor for Internet - http://www.fckeditor.net
Copyright (C) 2003-2008 Frederico Caldeira Knabben
== BEGIN LICENSE ==
Licensed under the terms of any of the following licenses at your
choice:
- GNU General Public License Version 2 or later (the "GPL")
http://www.gnu.org/licenses/gpl.html
- GNU Lesser General Public License Version 2.1 or later (the "LGPL")
http://www.gnu.org/licenses/lgpl.html
- Mozilla Public License Version 1.1 or later (the "MPL")
http://www.mozilla.org/MPL/MPL-1.1.html
== END LICENSE ==
This is the integration file for Python.
"""
import cgi
import os
import re
import string
def escape(text, replace=lambda s, old, new: s.replace(old, new)):
    """Convert the HTML special characters '&', '<', '>', '"' and "'"
    in *text* into their HTML entity references, so the result can be
    embedded safely inside an HTML attribute value.

    The optional *replace* callable keeps the historical signature
    (it used to default to the deprecated ``string.replace``, which was
    removed in Python 3); it must accept (text, old, new).
    """
    # BUG FIX: the replacement strings had been corrupted to the bare
    # characters themselves (no-op replaces, and an unbalanced quote on the
    # apostrophe line). Restored the proper entity references.
    text = replace(text, '&', '&amp;')   # must be done first
    text = replace(text, '<', '&lt;')
    text = replace(text, '>', '&gt;')
    text = replace(text, '"', '&quot;')
    text = replace(text, "'", '&#39;')
    return text
# The FCKeditor class
class FCKeditor(object):
    """Server-side helper that renders an FCKeditor instance as HTML.

    For compatible browsers it emits the hidden fields plus the editor
    iframe; for everything else it falls back to a plain <textarea>.
    """

    def __init__(self, instanceName):
        self.InstanceName = instanceName   # form field / DOM id of the editor
        self.BasePath = '/fckeditor/'      # URL where fckeditor is installed
        self.Width = '100%'                # CSS width ('NNN' means pixels)
        self.Height = '200'                # CSS height ('NNN' means pixels)
        self.ToolbarSet = 'Default'        # toolbar set name, or None
        self.Value = ''                    # initial editor content (HTML)
        self.Config = {}                   # extra config, serialized to the URL

    def Create(self):
        """Return the HTML needed to embed this editor instance."""
        return self.CreateHtml()

    def CreateHtml(self):
        """Build either the rich-editor markup or the <textarea> fallback."""
        HtmlValue = escape(self.Value)
        Html = ""
        if (self.IsCompatible()):
            File = "fckeditor.html"
            Link = "%seditor/%s?InstanceName=%s" % (
                self.BasePath,
                File,
                self.InstanceName
            )
            if (self.ToolbarSet is not None):
                Link += "&Toolbar=%s" % self.ToolbarSet
            # Render the linked hidden field (carries the content on submit)
            Html += "<input type=\"hidden\" id=\"%s\" name=\"%s\" value=\"%s\" style=\"display:none\" />" % (
                self.InstanceName,
                self.InstanceName,
                HtmlValue
            )
            # Render the configurations hidden field
            Html += "<input type=\"hidden\" id=\"%s___Config\" value=\"%s\" style=\"display:none\" />" % (
                self.InstanceName,
                self.GetConfigFieldString()
            )
            # Render the editor iframe.
            # BUG FIX: the frame id was "%s\__Frame" (stray backslash, two
            # underscores); the editor script looks for "<name>___Frame".
            Html += "<iframe id=\"%s___Frame\" src=\"%s\" width=\"%s\" height=\"%s\" frameborder=\"0\" scrolling=\"no\"></iframe>" % (
                self.InstanceName,
                Link,
                self.Width,
                self.Height
            )
        else:
            # BUG FIX: the code searched for "%%", which can never occur in a
            # plain string, so '100%' was wrongly treated as a pixel value and
            # rendered as "100%px". Search for a literal '%' instead.
            if (self.Width.find("%") < 0):
                WidthCSS = "%spx" % self.Width
            else:
                WidthCSS = self.Width
            if (self.Height.find("%") < 0):
                HeightCSS = "%spx" % self.Height
            else:
                HeightCSS = self.Height
            Html += "<textarea name=\"%s\" rows=\"4\" cols=\"40\" style=\"width: %s; height: %s;\" wrap=\"virtual\">%s</textarea>" % (
                self.InstanceName,
                WidthCSS,
                HeightCSS,
                HtmlValue
            )
        return Html

    def IsCompatible(self):
        """Return True if the requesting browser can host the rich editor,
        based on the HTTP_USER_AGENT environment variable."""
        sAgent = os.environ.get("HTTP_USER_AGENT", "")
        if (sAgent.find("MSIE") >= 0) and (sAgent.find("mac") < 0) and (sAgent.find("Opera") < 0):
            i = sAgent.find("MSIE")
            iVersion = float(sAgent[i+5:i+5+3])
            return iVersion >= 5.5
        elif (sAgent.find("Gecko/") >= 0):
            i = sAgent.find("Gecko/")
            iVersion = int(sAgent[i+6:i+6+8])
            return iVersion >= 20030210
        elif (sAgent.find("Opera/") >= 0):
            i = sAgent.find("Opera/")
            iVersion = float(sAgent[i+6:i+6+4])
            return iVersion >= 9.5
        elif (sAgent.find("AppleWebKit/") >= 0):
            p = re.compile(r'AppleWebKit\/(\d+)', re.IGNORECASE)
            m = p.search(sAgent)
            # BUG FIX: the build number was compared as a string against the
            # int 522 (always True in Python 2), and a non-matching agent
            # crashed with AttributeError on m.group. Compare numerically and
            # treat "no match" as incompatible.
            return m is not None and int(m.group(1)) >= 522
        else:
            return False

    def GetConfigFieldString(self):
        """Serialize self.Config into an HTML-escaped query-string for the
        hidden ___Config field; 'true'/'false' values pass through unescaped."""
        sParams = ""
        bFirst = True
        for sKey in self.Config.keys():
            sValue = self.Config[sKey]
            if (not bFirst):
                sParams += "&"
            else:
                bFirst = False
            if (sValue):
                k = escape(sKey)
                v = escape(sValue)
                if (sValue == "true"):
                    sParams += "%s=true" % k
                elif (sValue == "false"):
                    sParams += "%s=false" % k
                else:
                    sParams += "%s=%s" % (k, v)
        return sParams
| Python |
#!/usr/bin/env python
#
# Copyright 2006, 2007 Google Inc. All Rights Reserved.
# Author: danderson@google.com (David Anderson)
#
# Script for uploading files to a Google Code project.
#
# This is intended to be both a useful script for people who want to
# streamline project uploads and a reference implementation for
# uploading files to Google Code projects.
#
# To upload a file to Google Code, you need to provide a path to the
# file on your local machine, a small summary of what the file is, a
# project name, and a valid account that is a member or owner of that
# project. You can optionally provide a list of labels that apply to
# the file. The file will be uploaded under the same name that it has
# in your local filesystem (that is, the "basename" or last path
# component). Run the script with '--help' to get the exact syntax
# and available options.
#
# Note that the upload script requests that you enter your
# googlecode.com password. This is NOT your Gmail account password!
# This is the password you use on googlecode.com for committing to
# Subversion and uploading files. You can find your password by going
# to http://code.google.com/hosting/settings when logged in with your
# Gmail account. If you have already committed to your project's
# Subversion repository, the script will automatically retrieve your
# credentials from there (unless disabled, see the output of '--help'
# for details).
#
# If you are looking at this script as a reference for implementing
# your own Google Code file uploader, then you should take a look at
# the upload() function, which is the meat of the uploader. You
# basically need to build a multipart/form-data POST request with the
# right fields and send it to https://PROJECT.googlecode.com/files .
# Authenticate the request using HTTP Basic authentication, as is
# shown below.
#
# Licensed under the terms of the Apache Software License 2.0:
# http://www.apache.org/licenses/LICENSE-2.0
#
# Questions, comments, feature requests and patches are most welcome.
# Please direct all of these to the Google Code users group:
# http://groups.google.com/group/google-code-hosting
"""Google Code file uploader script.
"""
__author__ = 'danderson@google.com (David Anderson)'
import httplib
import os.path
import optparse
import getpass
import base64
import sys
def upload(file, project_name, user_name, password, summary, labels=None):
    """Upload a file to a Google Code project's file server.

    Args:
      file: The local path to the file.
      project_name: The name of your project on Google Code.
      user_name: Your Google account name.
      password: The googlecode.com password for your account.
        Note that this is NOT your global Google Account password!
      summary: A small description for the file.
      labels: an optional list of label strings with which to tag the file.

    Returns: a tuple (http_status, http_reason, file_url); file_url is the
      uploaded file's URL on a 201 response and None otherwise.
    """
    # The login is the user part of user@gmail.com; strip a full address
    # down to the bare account name.
    suffix = '@gmail.com'
    if user_name.endswith(suffix):
        user_name = user_name[:user_name.index(suffix)]

    form_fields = [('summary', summary)]
    if labels is not None:
        form_fields.extend([('label', label.strip()) for label in labels])
    content_type, body = encode_upload_request(form_fields, file)

    # HTTP Basic auth against the project's upload endpoint.
    headers = {
        'Authorization': 'Basic %s' % base64.b64encode('%s:%s' % (user_name, password)),
        'User-Agent': 'Googlecode.com uploader v0.9.4',
        'Content-Type': content_type,
    }
    connection = httplib.HTTPSConnection('%s.googlecode.com' % project_name)
    connection.request('POST', '/files', body, headers)
    response = connection.getresponse()
    connection.close()

    # 201 Created carries the new file's URL in the Location header.
    if response.status == 201:
        location = response.getheader('Location', None)
    else:
        location = None
    return response.status, response.reason, location
def encode_upload_request(fields, file_path):
    """Encode the given fields and file into a multipart form body.

    fields is a sequence of (name, value) pairs. file_path is the path of
    the file to upload; it is sent under its basename.

    Returns: (content_type, body) ready for httplib.HTTP instance
    """
    BOUNDARY = '----------Googlecode_boundary_reindeer_flotilla'
    CRLF = '\r\n'
    parts = []
    # One multipart section per metadata field.
    for name, value in fields:
        parts.append('--' + BOUNDARY)
        parts.append('Content-Disposition: form-data; name="%s"' % name)
        parts.append('')
        parts.append(value)
    # Then the file payload itself.
    f = open(file_path, 'rb')
    file_content = f.read()
    f.close()
    parts.append('--' + BOUNDARY)
    parts.append('Content-Disposition: form-data; name="filename"; filename="%s"'
                 % os.path.basename(file_path))
    # The upload server determines the mime-type, no need to set it.
    parts.append('Content-Type: application/octet-stream')
    parts.append('')
    parts.append(file_content)
    # Closing boundary, followed by a final CRLF.
    parts.append('--' + BOUNDARY + '--')
    parts.append('')
    return 'multipart/form-data; boundary=%s' % BOUNDARY, CRLF.join(parts)
def upload_find_auth(file_path, project_name, summary, labels=None,
                     user_name=None, password=None):
    """Upload a file to a Google Code project's file server.

    file_path, project_name, summary, and labels are passed as-is to upload.

    Args:
        file_path: The local path to the file.
        project_name: The name of your project on Google Code.
        summary: A small description for the file.
        labels: an optional list of label strings with which to tag the file.
        user_name: Your Google account name.
        password: The googlecode.com password for the account.

    Returns: the (status, reason, url) tuple from upload().
    """
    # NOTE(review): despite the "find_auth" name, this wrapper performs no
    # credential discovery (the previously documented config_dir/tries
    # parameters do not exist); it simply forwards to upload().
    status, reason, url = upload(file_path, project_name, user_name, password,
                                 summary, labels)
    return status, reason, url
def main():
parser = optparse.OptionParser(usage='googlecode-upload.py -s SUMMARY '
'-p PROJECT [options] FILE')
parser.add_option('-s', '--summary', dest='summary',
help='Short description of the file')
parser.add_option('-p', '--project', dest='project',
help='Google Code project name')
parser.add_option('-u', '--user', dest='user',
help='Your Google Code username')
parser.add_option('-w', '--password', dest='password',
help='Your Google Code password')
parser.add_option('-l', '--labels', dest='labels',
help='An optional list of comma-separated labels to attach '
'to the file')
options, args = parser.parse_args()
if not options.summary:
parser.error('File summary is missing.')
elif not options.project:
parser.error('Project name is missing.')
elif len(args) < 1:
parser.error('File to upload not provided.')
elif len(args) > 1:
parser.error('Only one file may be specified.')
file_path = args[0]
if options.labels:
labels = options.labels.split(',')
else:
labels = None
status, reason, url = upload_find_auth(file_path, options.project,
options.summary, labels,
options.user, options.password)
if url:
print 'The file was uploaded successfully.'
print 'URL: %s' % url
return 0
else:
print 'An error occurred. Your file was not uploaded.'
print 'Google Code upload server said: %s (%s)' % (reason, status)
print 'Most likely you already have a file with this name.'
return 1
if __name__ == '__main__':
sys.exit(main())
| Python |
#!/usr/bin/env python
import codecs
import re
import jinja2
import markdown
def process_slides():
    """Read slides.md, split it into slides on '---' separators, render each
    slide's markdown to HTML, and write the whole deck through the base.html
    Jinja2 template into ../../presentation-output.html.
    """
    with codecs.open('../../presentation-output.html', 'w', encoding='utf8') as outfile:
        md = codecs.open('slides.md', encoding='utf8').read()
        md_slides = md.split('\n---\n')
        print 'Compiled %s slides.' % len(md_slides)
        slides = []
        # Process each slide separately.
        for md_slide in md_slides:
            slide = {}
            sections = md_slide.split('\n\n')
            # Extract metadata at the beginning of the slide (look for key: value)
            # pairs.
            metadata_section = sections[0]
            metadata = parse_metadata(metadata_section)
            slide.update(metadata)
            # If the first section held metadata, slide content starts at index 1.
            remainder_index = metadata and 1 or 0
            # Get the content from the rest of the slide.
            content_section = '\n\n'.join(sections[remainder_index:])
            html = markdown.markdown(content_section)
            slide['content'] = postprocess_html(html, metadata)
            slides.append(slide)
        # NOTE: the template is rendered with locals(), so the local variable
        # names above (notably 'slides') are part of the template contract --
        # do not rename them.
        template = jinja2.Template(open('base.html').read())
        outfile.write(template.render(locals()))
def parse_metadata(section):
    """Given the first part of a slide, returns metadata associated with it.

    Each 'key: value' line contributes one entry; lines without a colon
    are ignored. Keys and values are whitespace-stripped.
    """
    metadata = {}
    for line in section.split('\n'):
        # partition() splits on the first ':' only, so values may contain
        # colons (e.g. URLs).
        key, sep, value = line.partition(':')
        if sep:
            metadata[key.strip()] = value.strip()
    return metadata
def postprocess_html(html, metadata):
    """Returns processed HTML to fit into the slide template format.

    When the slide's metadata sets build_lists to 'true', every list is
    tagged with the "build" class so items appear incrementally.
    """
    if metadata.get('build_lists') == 'true':
        for tag in ('<ul>', '<ol>'):
            html = html.replace(tag, tag[:-1] + ' class="build">')
    return html
# Build the presentation when run as a script.
if __name__ == '__main__':
    process_slides()
| Python |
#!/usr/bin/python
# encoding: utf-8
'''
fbtop.fbtop -- Top like tool for Firebird
'''
import os
import sys
from ui import Ui
from argparse import ArgumentParser
from argparse import RawDescriptionHelpFormatter
__all__ = []
__version__ = 0.1
__date__ = '2013-05-19'
__updated__ = '2013-05-19'
DEBUG = 0
TESTRUN = 0
PROFILE = 0
server = None
class CLIError(Exception):
    """Generic exception for fatal errors during command-line processing."""
    pass
def main(argv=None):  # IGNORE:C0111
    '''Command line options.

    Parses -s/--server and -p/--password, then starts the curses UI.
    Returns None (interpreted as exit status 0 by sys.exit in the guard).
    '''
    if argv is None:
        argv = sys.argv
    else:
        sys.argv.extend(argv)
    program_version = "v%s" % __version__
    program_build_date = str(__updated__)
    program_version_message = '%%(prog)s %s (%s)' % (program_version, program_build_date)
    program_shortdesc = __import__('__main__').__doc__.split("\n")[1]
    program_license = '''%s
Created by John Ryder on %s.
Licensed under the MIT license
http://opensource.org/licenses/mit-license.php
Distributed on an "AS IS" basis without warranties
or conditions of any kind, either express or implied.
USAGE
''' % (program_shortdesc, str(__date__))
    # Setup argument parser
    parser = ArgumentParser(description=program_license, formatter_class=RawDescriptionHelpFormatter)
    parser.add_argument("-s", "--server", dest="server", default="localhost", help="server to monitor")
    parser.add_argument("-p", "--password", dest="password", default="masterkey", help="SYSDBA password")
    parser.add_argument('-v', '--version', action='version', version=program_version_message)
    # BUG FIX: the options used to be defined but never parsed; the UI was
    # always started with hard-coded values. The defaults preserve the old
    # behaviour when no options are given.
    args = parser.parse_args()
    ui = Ui(args.server, "SYSDBA", args.password)
    ui.run()
# Script entry point. TESTRUN runs the module's doctests first; PROFILE
# runs main() under cProfile, dumps sorted cumulative stats to
# profile_stats.txt, and exits without the normal run.
if __name__ == "__main__":
    if TESTRUN:
        import doctest
        doctest.testmod()
    if PROFILE:
        import cProfile
        import pstats
        profile_filename = 'fbtop.fbtop_profile.txt'
        cProfile.run('main()', profile_filename)
        statsfile = open("profile_stats.txt", "wb")
        p = pstats.Stats(profile_filename, stream=statsfile)
        stats = p.strip_dirs().sort_stats('cumulative')
        stats.print_stats()
        statsfile.close()
        sys.exit(0)
    sys.exit(main())
| Python |
import copy
import urwid
import stats
class Text(object):
    """String constants (str.format templates and labels) used by the UI widgets."""
    UPTIME = "Uptime: {0}, Firebird uptime: {1}"
    ATTACHMENTS = "Attachments: {0}, Attachments/min: {1}"
    FILE_HANDLES = "File handles: "
    TX_PER_MIN = "Tx/min: "
    FOOTER = "(q)uit"
class DatabaseColumn(object):
    """One column in the database table.

    humanName    -- header label shown to the user
    propertyName -- name of the Database attribute rendered in this column
    weight       -- relative width weight used by urwid.Columns
    """
    def __init__(self, humanName, propertyName, weight = 1):
        # Public attribute names intentionally mirror the parameter names;
        # DatabaseListBox and DatabaseListWalker read them directly.
        self.humanName, self.propertyName, self.weight = humanName, propertyName, weight
class DatabaseListWalker(urwid.ListWalker):
    """urwid ListWalker that lazily builds one row widget per Database.

    Row widgets are cached in _databaseRows (position -> urwid.Columns)
    and rebuilt only after invalidate() clears the cache.
    """
    def __init__(self, databases, columns):
        # 'databases' is held by reference: Server appends to the same list,
        # so newly discovered databases appear here automatically.
        self._databases = databases;
        # Cache of already-built row widgets, keyed by list position.
        self._databaseRows = dict()
        self.focus = 0
        self.columns = columns
    def get_focus(self):
        # urwid API: return (widget, position) for the focused row.
        return self._getAtPos(self.focus)
    def set_focus(self, focus):
        self.focus = focus
        self._modified()
    def get_next(self, startFrom):
        return self._getAtPos(startFrom + 1)
    def get_prev(self, startFrom):
        return self._getAtPos(startFrom - 1)
    def _getAtPos(self, position):
        # (None, None) tells urwid there is no row at this position.
        if position < 0 or position >= len(self._databases):
            return None, None
        currentDbRow = None
        if position in self._databaseRows.keys():
            currentDbRow = self._databaseRows[position]
        else:
            # Build the row: one centered Text cell per configured column,
            # weighted the same way as the header row in DatabaseListBox.
            database = self._databases[position]
            columnValues = []
            for column in self.columns:
                attVal = str(getattr(database, column.propertyName))
                columnVal = ('weight', column.weight, urwid.Text(attVal, align = 'center'))
                columnValues.append(columnVal)
            currentDbRow = urwid.Columns(columnValues)
            self._databaseRows[position] = currentDbRow
        return currentDbRow, position
    def invalidate(self):
        # Drop all cached rows so the next redraw re-reads database stats.
        self._databaseRows = dict()
        self._modified()
class DatabaseListBox(urwid.WidgetWrap):
    """Scrollable table of databases with a fixed header row."""
    # Default column set: header label, Database attribute name, optional weight.
    _DEFAULT_COLUMNS = [DatabaseColumn("DATABASE", "path", 5), \
        DatabaseColumn("ATT", "attachmentCount"), \
        DatabaseColumn("ATT_M", "attachmentsPerMin"), \
        DatabaseColumn("TX_M", "transactionsPerMinute"), \
        DatabaseColumn("OIT_G", "oitGap"), \
        DatabaseColumn("OAT_G", "oatGap"), \
        DatabaseColumn("OST_G", "ostGap")]
    def __init__(self, databases):
        # Deep copy so per-instance column tweaks never mutate the class template.
        self.selectedColumns = copy.deepcopy(DatabaseListBox._DEFAULT_COLUMNS)
        headerColumns = []
        for column in self.selectedColumns:
            headerColVal = ('weight', column.weight, urwid.Text(column.humanName, align = 'center'))
            headerColumns.append(headerColVal)
        # 'dbColHeader' palette entry (defined in Ui) colours the header row.
        self.headerRow = urwid.AttrMap(urwid.Columns(headerColumns), 'dbColHeader')
        self.listWalker = DatabaseListWalker(databases, self.selectedColumns)
        listbox = urwid.ListBox(self.listWalker)
        self.body = urwid.Frame(listbox, header = self.headerRow)
        urwid.WidgetWrap.__init__(self, self.body)
    def invalidate(self):
        # Force rows to be rebuilt on the next draw (the stats have changed).
        self.listWalker.invalidate()
class OverallStats(urwid.WidgetWrap):
    """Two-line header: host/Firebird uptimes and server-wide attachment stats."""
    def __init__(self, server):
        self.server = server
        # NOTE(review): server.localStats is only set for a local instance,
        # yet invalidate() dereferences it unconditionally -- confirm this
        # widget is only built when monitoring localhost.
        self.localStats = server.localStats
        # Pile rows: [0] uptime line, [1] attachments line; '?' until first poll.
        self.body = urwid.Pile([urwid.Text('?'), urwid.Text('?')])
        self.invalidate()
        urwid.WidgetWrap.__init__(self, self.body)
    def invalidate(self):
        # Refresh both text rows from the most recently polled values.
        (uptimeText, options) = self.body.contents[0]
        uptimeText.set_text(Text.UPTIME.format(self.localStats.uptime, self.localStats.firebirdUptime))
        (attachCountText, options) = self.body.contents[1]
        attachCountText.set_text(Text.ATTACHMENTS.format(self.server.attachmentCount, self.server.attachmentsPerMin))
class Ui(object):
    """Top-level urwid application: polls the server and redraws every 2.5s."""
    _PALETTE = [('body', urwid.WHITE, urwid.BLACK), ('dbColHeader', urwid.BLACK, urwid.DARK_GREEN)]
    def exitOnQ(self, textInput):
        # Unhandled-input hook: 'q' disconnects from the server and leaves
        # the main loop.
        if textInput in ('q', 'Q'):
            self.server.disconnect()
            raise urwid.ExitMainLoop()
    def update(self, main_loop, user_data):
        # Periodic alarm callback: re-poll, refresh the widgets, re-arm timer.
        self.server.poll()
        self.listbox.invalidate()
        self.overallStats.invalidate()
        self.loop.set_alarm_in(2.5, self.update)
    def __init__(self, serverName, user, password):
        self.server = stats.Server(serverName, user, password)
        self.overallStats = OverallStats(self.server)
        self.listbox = DatabaseListBox(self.server.databases)
        footer = urwid.Text(Text.FOOTER)
        self.dbFrame = urwid.Frame(urwid.AttrWrap(self.listbox, 'body'), footer = footer)
        # 'flow' sizing keeps the stats header at its natural height.
        body = urwid.Pile([('flow', self.overallStats), self.dbFrame])
        self.loop = urwid.MainLoop(body, palette = Ui._PALETTE, unhandled_input = self.exitOnQ)
    def run(self):
        # Prime the stats before the first draw, then enter the event loop.
        self.server.poll()
        self.loop.set_alarm_in(2.5, self.update)
        self.loop.run()
| Python |
#!/usr/bin/python
# encoding: utf-8
'''
fbtop.fbtop -- Top like tool for Firebird
'''
import os
import sys
from ui import Ui
from argparse import ArgumentParser
from argparse import RawDescriptionHelpFormatter
__all__ = []
__version__ = 0.1
__date__ = '2013-05-19'
__updated__ = '2013-05-19'
DEBUG = 0
TESTRUN = 0
PROFILE = 0
server = None
class CLIError(Exception):
    """Generic exception for fatal command-line errors; not raised in the visible code."""
    pass
def main(argv=None):  # IGNORE:C0111
    '''Command line options.

    Parses -s/--server and -p/--password, then starts the curses UI.
    Returns None (interpreted as exit status 0 by sys.exit in the guard).
    '''
    if argv is None:
        argv = sys.argv
    else:
        sys.argv.extend(argv)
    program_version = "v%s" % __version__
    program_build_date = str(__updated__)
    program_version_message = '%%(prog)s %s (%s)' % (program_version, program_build_date)
    program_shortdesc = __import__('__main__').__doc__.split("\n")[1]
    program_license = '''%s
Created by John Ryder on %s.
Licensed under the MIT license
http://opensource.org/licenses/mit-license.php
Distributed on an "AS IS" basis without warranties
or conditions of any kind, either express or implied.
USAGE
''' % (program_shortdesc, str(__date__))
    # Setup argument parser
    parser = ArgumentParser(description=program_license, formatter_class=RawDescriptionHelpFormatter)
    parser.add_argument("-s", "--server", dest="server", default="localhost", help="server to monitor")
    parser.add_argument("-p", "--password", dest="password", default="masterkey", help="SYSDBA password")
    parser.add_argument('-v', '--version', action='version', version=program_version_message)
    # BUG FIX: the options used to be defined but never parsed; the UI was
    # always started with hard-coded values. The defaults preserve the old
    # behaviour when no options are given.
    args = parser.parse_args()
    ui = Ui(args.server, "SYSDBA", args.password)
    ui.run()
# Script entry point. TESTRUN runs the module's doctests first; PROFILE
# runs main() under cProfile, dumps sorted cumulative stats to
# profile_stats.txt, and exits without the normal run.
if __name__ == "__main__":
    if TESTRUN:
        import doctest
        doctest.testmod()
    if PROFILE:
        import cProfile
        import pstats
        profile_filename = 'fbtop.fbtop_profile.txt'
        cProfile.run('main()', profile_filename)
        statsfile = open("profile_stats.txt", "wb")
        p = pstats.Stats(profile_filename, stream=statsfile)
        stats = p.strip_dirs().sort_stats('cumulative')
        stats.print_stats()
        statsfile.close()
        sys.exit(0)
    sys.exit(main())
| Python |
'''
Created on May 19, 2013
@author: john
'''
# Placeholder module: nothing to do when executed directly.
if __name__ == '__main__':
    pass
'''
Created on May 19, 2013
@author: john
'''
from netifaces import interfaces, ifaddresses, AF_INET
import psutil
from psutil._error import NoSuchProcess, AccessDenied
import socket
from datetime import datetime, timedelta
import time
import re
from fdb import services
from uptime import uptime
class ServerError(Exception):
    """Exception for server-level monitoring failures; not raised in the visible code."""
    pass
class Server(object):
    """Polls a Firebird server through the services API.

    Tracks the server-wide attachment count, the attachments-per-minute
    rate, and one Database object per database that has ever been seen
    attached. For a local server, process-level stats are gathered via
    LocalStats.
    """
    def __init__(self, host, user, password):
        '''
        Constructor
        '''
        self.host = host
        self.user = user
        self.password = password
        self.localStats = None
        # if it's a local instance, setup some tracking variables
        if LocalStats.isLocalInstance(host):
            self.isLocalhost = True
            self.localStats = LocalStats()
        else:
            self.isLocalhost = False
        # connection to the services API (opened lazily on the first poll)
        self.__connection = None
        self.attachmentCount = None
        self.__prevAttachmentCount = None
        self.attachmentsPerMin = '?'
        # names we have already wrapped in Database objects
        self.__databaseNames = set()
        self.databases = []
        self.databaseCount = None
        self.__lastPollTime = None
    def poll(self):
        """Refresh all server-wide and per-database statistics."""
        # if we're not connected to the services API, connect now
        if not self.__connection:
            self.__connection = services.connect(self.host, self.user, self.password)
        # if we're running locally, get some extra info like memory and CPU use
        if self.isLocalhost:
            self.localStats.poll()
        currentTime = time.time()
        self.attachmentCount = self.__connection.get_connection_count()
        if self.__prevAttachmentCount and self.__lastPollTime:
            duration = currentTime - self.__lastPollTime
            if duration > 0.0:
                # BUG FIX: rate = (delta / seconds) * 60. The previous formula
                # divided by (duration * 60), understating the rate by a
                # factor of 3600 (compare Database.poll's tx/min calculation).
                self.attachmentsPerMin = (self.attachmentCount - self.__prevAttachmentCount) * 60.0 / duration
        attachedDatabaseNames = self.__connection.get_attached_database_names()
        self.databaseCount = len(attachedDatabaseNames)
        # loop over the names of the attached databases
        for databaseName in attachedDatabaseNames:
            # if we've never seen this database, create a new Database to keep track of it
            if databaseName not in self.__databaseNames:
                # each database uses the same services connection as Server
                newDatabase = Database(databaseName, self.host, self.user, self.password, self.__connection)
                self.databases.append(newDatabase)
                self.__databaseNames.add(databaseName)
        # poll all of the databases that have ever been attached
        for database in self.databases:
            database.poll()
        # save some stuff for next time
        self.__prevAttachmentCount = self.attachmentCount
        self.__lastPollTime = currentTime
    def disconnect(self):
        """Disconnect every database and close the services connection."""
        for database in self.databases:
            database.disconnect()
        if self.__connection:
            self.__connection.close()
            self.__connection = None
class Database(object):
    '''
    Wraps one attached database and derives statistics from its header
    page (as reported by the services API) across successive polls:
    OIT/OAT/OST transaction gaps and transactions per minute.
    '''
    # Captures a trailing integer at the end of a header line.
    _integerRe = re.compile(r'^.*\s(\d+)\s?$')
    # Captures the text following the "Attributes" label.
    _attributesRe = re.compile(r'Attributes\s*(.*)')
    def __init__(self, path, host, user, password, servicesConnection):
        '''
        Constructor
        '''
        self.path = path
        # self.host = host
        # self.user = user
        # self.password = password
        self._servicesConnection = servicesConnection
        # tracking variables
        self._prevNextTxId = None
        self._prevNextAttachmentId = None
        self._lastPollTime = None
        # user displayable variables ('?' until the first successful poll)
        self.gap = '?'
        self.oitGap = '?'
        self.ostGap = '?'
        self.oatGap = '?'
        self.attachmentCount = '?'
        self.attachmentsPerMin = '?'
        self.dialect = '?'
        self.attributes = '?'
        self.pageSize = '?'
        self.transactionsPerMinute = '?'
    def _extractInteger(self, line):
        # Returns the trailing integer on the line, or None when absent.
        match = re.match(Database._integerRe, line)
        if match:
            return int(match.group(1))
        else:
            return None
    def _extractAttributes(self, line):
        # Stores the database attribute list (text after "Attributes") if present.
        attributesMatch = re.search(Database._attributesRe, line)
        if attributesMatch:
            self.attributes = attributesMatch.group(1)
    def poll(self):
        # Request only the database header page; that is sufficient for the
        # transaction counters and attributes parsed below.
        self._servicesConnection.get_statistics(self.path, show_only_db_log_pages = False, show_only_db_header_pages = True, show_user_data_pages = False, show_user_index_pages = False, show_system_tables_and_indexes = False, show_record_versions = False, callback = None)
        lines = self._servicesConnection.readlines()
        currentTime = time.time();
        # NOTE(review): oit/oat/ost/nextTxId are assumed to always appear in
        # the header output; a missing line would raise NameError below.
        for line in lines:
            if 'Page size' in line:
                self.pageSize = self._extractInteger(line)
            elif 'Oldest transaction' in line:
                oit = self._extractInteger(line)
            elif 'Oldest active' in line:
                oat = self._extractInteger(line)
            elif 'Oldest snapshot' in line:
                ost = self._extractInteger(line)
            elif 'Next transaction' in line:
                nextTxId = self._extractInteger(line)
            elif 'Next attachment ID' in line:
                nextAttachmentId = self._extractInteger(line)
            elif 'Database dialect' in line:
                self.dialect = self._extractInteger(line)
            elif 'Attributes' in line:
                self._extractAttributes(line)
        # Tx/min from the change in "next transaction" since the last poll.
        if self._prevNextTxId and self._lastPollTime:
            elapsed = currentTime - self._lastPollTime
            if elapsed > 0.0:
                transactionCount = nextTxId - self._prevNextTxId
                txPerMinFloat = (float(transactionCount) / elapsed) * 60.0
                self.transactionsPerMinute = round(txPerMinFloat, 1)
        # Gap between each "oldest" marker and the next transaction id.
        self.oitGap = nextTxId - oit
        self.oatGap = nextTxId - oat
        self.ostGap = nextTxId - ost
        self.gap = min(self.oitGap, self.oatGap, self.ostGap)
        self._prevNextTxId = nextTxId
        self._prevNextAttachmentId = nextAttachmentId
        self._lastPollTime = currentTime;
    def disconnect(self):
        # Nothing to release: the services connection is owned by Server.
        pass;
class LocalStatsError(Exception):
    """Raised when local Firebird process statistics cannot be gathered."""
    pass
class LocalStats(object):
    """Host- and process-level stats for a local Firebird instance:
    uptimes, memory use and (where psutil supports it) I/O rates."""
    __firebirdProcessNames = ['fb_smp_server', 'fb_inet_server']
    __removeFractionalSecondsRe = re.compile(r'\..*$')
    # get a list of the IP addresses, hostnames, and FQDN that the LocalStats is known by
    @staticmethod
    def __getLocalhostAliases():
        aliases = []
        for interface in interfaces():
            addresses = ifaddresses(interface)
            if AF_INET in addresses:
                for link in addresses[AF_INET]:
                    aliases.append(link['addr'].lower())
        aliases.append('localhost')
        aliases.append(socket.gethostname().lower())
        aliases.append(socket.getfqdn().lower())
        return aliases
    @classmethod
    def isLocalInstance(cls, hostname):
        """True if hostname refers to this machine (IP, hostname or FQDN)."""
        return hostname.lower() in cls.__getLocalhostAliases()
    def __durationAsString(self, startTimeSeconds):
        dateDiff = timedelta(seconds = startTimeSeconds)
        # remove fractions of a second. for example, '6:23:08.3823' => '6:23:08'
        return re.sub(LocalStats.__removeFractionalSecondsRe, '', str(dateDiff))
    def __init__(self):
        self.uptime = self.__durationAsString(uptime())
        self.firebirdUptime = '?'
        self.memoryPercent = '?'
        # textual description of bytes read/written, calculated based on the current value
        # minus the value since the last poll
        self.ioRead = '?'
        self.ioWrite = '?'
        self.__pid = None
        self.__fbStartTime = None
        # the value of the previous bytes read/written counters at the time of the last poll
        self.__prevBytesRead = None
        self.__prevBytesWritten = None
        # Timestamp of the previous poll; initialised here so poll() can
        # test it safely on the first run.
        self._lastPollTime = None
    def __findFirebirdPid(self):
        """Locate the single local Firebird server process.

        Raises LocalStatsError when zero or multiple candidates are found.
        """
        # find all the Firebird processes running on this box
        firebirdProcessIds = []
        pidList = psutil.get_pid_list()
        for pid in pidList:
            process = psutil.Process(pid)
            if process.name in LocalStats.__firebirdProcessNames:
                # optimistically assume that this is the process we're interested in and grab
                # its start time. we'll throw an exception later if it isn't.
                self.__fbStartTime = process.create_time
                firebirdProcessIds.append(pid)
        badPidException = None
        # if there's more than one Firebird instance or we couldn't find an instance,
        # then throw an exception
        if len(firebirdProcessIds) > 1:
            badPidException = LocalStatsError("more than one Firebird server was found")
        elif len(firebirdProcessIds) == 0:
            badPidException = LocalStatsError("no instance of Firebird was found")
        if badPidException:
            raise(badPidException)
        self.__pid = firebirdProcessIds[0]
    def poll(self):
        """Refresh uptimes, memory and I/O-rate statistics."""
        # if we haven't already figured out what the PID for the local Firebird instance is,
        # figure it out now
        if not self.__pid:
            self.__findFirebirdPid()
        currentTime = time.time()
        # update our human readable uptime duration
        self.uptime = self.__durationAsString(uptime())
        self.firebirdUptime = self.__durationAsString(currentTime - self.__fbStartTime)
        try:
            process = psutil.Process(self.__pid)
            try:
                # basic per-process memory stats are known to work on Linux and Mac OS X
                self.memoryPercent = process.get_memory_percent()
                self.rss = process.get_memory_info().rss
                # psutil doesn't support per-process IO counters on Mac OS X
                if hasattr(process, 'get_io_counters'):
                    ioInfo = process.get_io_counters()
                    # if we had a previous reading, compute the difference between this
                    # reading and the previous reading as a human readable string
                    if self.__prevBytesRead != None and self.__prevBytesWritten != None and self._lastPollTime:
                        # BUG FIX: this used to read self.__lastPollTime, a
                        # name-mangled attribute that was never assigned, and
                        # raised AttributeError on the second poll.
                        elapsed = currentTime - self._lastPollTime
                        if elapsed > 0.0:
                            self.ioRead = str((ioInfo.read_bytes - self.__prevBytesRead) / elapsed) + ' bytes/sec'
                            self.ioWrite = str((ioInfo.write_bytes - self.__prevBytesWritten) / elapsed) + ' bytes/sec'
                    # save the current reading for next time
                    self.__prevBytesRead = ioInfo.read_bytes
                    self.__prevBytesWritten = ioInfo.write_bytes
                # save the current time for the next poll
                self._lastPollTime = currentTime
            except AccessDenied:
                self.memoryPercent = '?'
                self.rss = '?'
        # the Firebird process died
        except NoSuchProcess:
            localInstanceFailed = LocalStatsError('the local Firebird instance is no longer running')
            raise(localInstanceFailed)
| Python |
import os, shutil, zipfile
import releaseconf
APPNAME = "calculator"
PATH = releaseconf.PATH # example: "d:/code/firefoxcalculator/"
DEST = releaseconf.DEST # example: "d:/cave/release/firefoxcalculatorr/"
CALCVERSION = ['1.1.32', '1.1.31', '1.1.30', '1.1.29', '1.1.28', "1.1.27", "1.1.26", "1.1.25", "1.1.24", "1.1.23", "1.1.22", "1.1.21", "1.1.20", "1.1.19", "1.1.18", "1.1.17", "1.1.16", "1.1.15", "1.1.14", '1.1.13', '1.1.12']
FIREFOXMAX = ['27.*', '26.*', '19.*', "12.*", "10.*", "8.*", "7.*", "4.0.*", "4.0b8pre", "3.7a1pre", '3.5.*', '3.2a1pre', '3.1']
DATE = ['2015.02.17', '2013.01.28', '2013.12.17', '2013.12.14', '2013.2.16', "2012.6.1", "2012.2.14", "2012.2.8", "2012.2.6", "2011.9.25", "2011.7.31", "2011.3.20", "2010.10.27", "2010.07.04", "2010.06.16", "2010.01.18", "2009.11.13", "2009.09.19", '2009.07.25', '2008.12.28', '2008.09.27']
def updatefiles(files):
for f in files:
# read
fs = open(f, "r")
all = fs.read()
fs.close()
# replace
all = all.replace(CALCVERSION[1], CALCVERSION[0])
all = all.replace(FIREFOXMAX[1], FIREFOXMAX[0])
all = all.replace(DATE[1], DATE[0])
#write
fs = open(f, "w")
fs.write(all)
fs.close()
print "updated!"
# Bump the version/date markers in the extension's metadata and chrome
# files before packaging. ('if 1:' acts as a manual on/off switch.)
if 1:
    updatefiles([PATH + "install.js",
        PATH + "install.rdf",
        PATH + "chrome/content/preferences.xul",
        PATH + "chrome/content/help.xhtml",
        PATH + "chrome/content/about.xhtml",
        ])
def release():
    """Package the extension: export the working copy with 'hg archive',
    zip chrome/ into chrome/<APPNAME>.jar, then zip everything into the
    final .xpi placed next to the source tree."""
    # get the extension code (best-effort wipe of any stale export first)
    try:
        shutil.rmtree(DEST)
    except:
        pass
    os.system("hg archive \"%s\"" % (DEST))
    # drop files that must not ship in the release
    for x in ["release.py", "redirect.html", "Screenshot.jpg"]:
        os.remove(DEST + x)
    # set the dir structure: chrome content is packed into chrome/<APPNAME>.jar
    os.rename(DEST + "chrome", DEST + "chromefull")
    os.makedirs(DEST + "chrome")
    command = "cd %s && zip %s.jar -r *" % (DEST + "chromefull", APPNAME)
    os.system(command)
    shutil.move("%schromefull/%s.jar" % (DEST, APPNAME),
        "%s/chrome" % DEST)
    shutil.rmtree(DEST + "chromefull")
    os.system("cd %s && zip %s.xpi -r *" % (DEST, APPNAME))
    shutil.move("%s%s.xpi" % (DEST, APPNAME), "%s%s.xpi" % (PATH, APPNAME))
    # Best-effort cleanup. NOTE(review): rmtree on the .xpi path targets a
    # file (already moved away above), so it always fails silently here.
    try:
        shutil.rmtree(DEST)
        shutil.rmtree("%s%s.xpi" % (DEST, APPNAME))
    except:
        pass
release()
| Python |
#!/usr/bin/env python2.7
import json
import logging
import os
import pprint as pp
import sys
import unittest
import urllib2
try:
from webtest import TestApp
except:
print """Please install webtest from http://webtest.pythonpaste.org/"""
sys.exit(1)
# Attempt to locate the App Engine SDK based on the system PATH.
# NOTE(review): there is no 'break' -- every PATH entry that contains
# dev_appserver.py gets prepended to sys.path; confirm that is intended.
for d in sorted(os.environ['PATH'].split(os.path.pathsep)):
    path = os.path.join(d, 'dev_appserver.py')
    if not os.path.isfile(path):
        continue
    print 'Found the App Engine SDK directory: %s' % d
    sys.path.insert(0, d)
# The App Engine SDK root is now expected to be in sys.path (possibly provided via PYTHONPATH)
try:
    import dev_appserver
except ImportError:
    error_msg = ('The path to the App Engine Python SDK must be in the '
        'PYTHONPATH environment variable to run unittests.')
    # The app engine SDK isn't in sys.path. If we're on Windows, we can try to
    # guess where it is.
    import platform
    if platform.system() == 'Windows':
        sys.path = sys.path + ['C:\\Program Files\\Google\\google_appengine']
        try:
            import dev_appserver # pylint: disable-msg=C6204
        except ImportError:
            print error_msg
            raise
    else:
        print error_msg
        raise
from google.appengine.ext import testbed
from google.appengine.api import backends
class HttpMatcherTest(unittest.TestCase):
    """Integration tests for the matcher backend.

    Requires a Node.js games-server listening on 127.0.0.1:12345; App
    Engine services are faked with testbed stubs, and get_backend() is
    monkey-patched so the app believes it runs on the 'matcher' backend.
    """
    def verifyNodeJsServerRunning(self):
        # Ping the external Node.js server, remember its serverid, and fail
        # fast with a helpful message when it is not reachable.
        url = 'http://127.0.0.1:12345/ping'
        try:
            result = urllib2.urlopen(url)
            r = json.loads(result.read())
            logging.debug('%s -> %s' % (url, r))
            self._serverid = r['serverid']
        except urllib2.URLError, e:
            self.fail('Node.js games-server must be running. Could not connect to %s\n%s' % (url, e))
    def get(self, *args, **kwargs):
        # Thin logging wrapper over webtest's GET.
        result = self.app.get(*args, **kwargs)
        logging.debug('self.app.get %s %s -> %s' % (args, kwargs, result.body))
        return result
    def post(self, *args, **kwargs):
        # Thin logging wrapper over webtest's POST.
        result = self.app.post(*args, **kwargs)
        logging.debug('self.app.post %s %s -> %s' % (args, kwargs, result.body ))
        return result
    def setUp(self):
        # see testbed docs https://developers.google.com/appengine/docs/python/tools/localunittesting
        self.testbed = testbed.Testbed()
        self.testbed.activate()
        self.testbed.init_memcache_stub()
        self.testbed.init_datastore_v3_stub()
        self.testbed.init_urlfetch_stub()
        # Pretend we're a matcher backend
        self.assertEquals(None, backends.get_backend())
        self.save_get_backend = backends.get_backend
        backends.get_backend = lambda: 'matcher'
        self.assertEquals('matcher', backends.get_backend())
        # create TestApp wrapper around client.main.app
        from client import main
        main._JSON_ENCODER.indent = None
        self.app = TestApp(main.app)
        #
        self.verifyNodeJsServerRunning()
        # no maximum length for diffs
        self.maxDiff = None
    def tearDown(self):
        # restore patched methods
        backends.get_backend = self.save_get_backend
        self.testbed.deactivate()
    def test_list_games_starts_emtpy(self):
        # NOTE(review): 'emtpy' is a typo for 'empty'; kept as-is because
        # test method names may be referenced by runners/reports.
        res = self.get('/list-games')
        self.assertEquals('{}', res.body)
    def test_start_game(self):
        # With no controller registered, matchmaking reports no servers.
        res = self.get('/start-game')
        r = json.loads(res.body)
        self.assertIn('no-available-servers' , r.get('result'))
    def test_start_game2(self):
        # Register the Node.js controller, then start a game and verify the
        # complete game-state payload returned by the matcher.
        self.assertTrue(self._serverid)
        expected_req = { 'controller_port': 12345, 'serverid': self._serverid, 'pairing_key': 'XXXXXXXXXXXXXXXXXXXXXX' }
        expected_res = { 'backend': 'matcher', 'controller_host': '127.0.0.1:12345', 'success': True }
        # TODO ensure that in this post, self.request.remote_addr = 127.0.0.1
        res = self.post('/register-controller', json.dumps(expected_req), extra_environ={'REMOTE_ADDR': '127.0.0.1'})
        r = res.json
        self.assertEquals(True, r.get('success'))
        self.assertEquals(expected_res, r)
        self.verifyNodeJsServerRunning()
        res = self.get('/start-game')
        r = res.json
        self.assertEquals(True, r.get('success'))
        name = r.get('name')
        self.assertTrue(len(name))
        time = r.get('time')
        self.assertTrue(time > 0)
        game_state = r.get(u'game_state')
        self.assertEquals(8, game_state.get(u'max_players'))
        self.assertEquals(1, game_state.get(u'min_players'))
        self.assertEquals({}, game_state.get(u'players'))
        self.assertEquals(name, r.get(u'name'))
        self.assertTrue(r.get(u'success'))
        self.assertEquals(time, r.get(u'time'))
        self.assertEquals(self._serverid, r.get(u'serverid'))
        self.assertEquals(u'127.0.0.1:12345', r.get(u'controller_host'))
        self.assertEquals(u'http://127.0.0.1:9090/%s' % name, r.get(u'gameURL'))
        self.assertEquals(9090, r.get(u'port'))
# Run the test suite when invoked directly.
if __name__ == '__main__':
    unittest.main()
| Python |
'''Copyright 2011 Google Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.'''
from client import db_api
from client import matcher
from google.appengine.api import app_identity
from google.appengine.api import backends
from google.appengine.api import oauth
from google.appengine.api import urlfetch
from google.appengine.api import users
from google.appengine.api.urlfetch import fetch
import cgi
import datetime
import json
import logging
import os
import pprint as pp
import random
import sys
import traceback
import urllib
import webapp2
# constants
_DEBUG = True
_JSON_ENCODER = json.JSONEncoder()
if _DEBUG:
_JSON_ENCODER.indent = 4
_JSON_ENCODER.sort_keys = True
if backends.get_backend() == 'matcher':
_match_maker = matcher.MatchMaker()
_EMAIL_SCOPE = "https://www.googleapis.com/auth/userinfo.email"
_IS_DEVELOPMENT = os.environ['SERVER_SOFTWARE'].startswith('Development/')
#######################################################################
# common functions
#######################################################################
def tojson(python_object):
    """Helper function to output and optionally pretty print JSON.

    Uses the module-level encoder, which is configured (under _DEBUG) to
    indent and sort keys; otherwise emits compact JSON.
    """
    return _JSON_ENCODER.encode(python_object)
def fromjson(msg):
    """Helper function to ingest JSON.

    Raises Exception carrying the offending payload when the input is not
    valid JSON.
    """
    try:
        return json.loads(msg)
    except ValueError:
        # json.loads signals malformed input with ValueError; catching only
        # that (instead of Exception) avoids masking unrelated errors.
        raise Exception('Unable to parse as JSON: %s' % msg)
_PAIRING_KEY = fromjson(open('shared/pairing-key.json').read())['key']
def fetchjson(url, deadline, payload=None):
    """Fetch a remote JSON payload.

    Issues a GET, or a POST with a JSON content type when 'payload' is
    given, and decodes the response body as JSON. 'deadline' is passed
    straight through to urlfetch.fetch (seconds, or None for the default).
    """
    method = "GET"
    headers = {}
    if payload:
        method = "POST"
        headers['Content-Type'] = 'application/json'
    result = fetch(url, method=method, payload=payload, headers=headers, deadline=deadline).content
    return fromjson(result)
def json_by_default_dispatcher(router, request, response):
    """WSGI router which defaults to 'application/json'."""
    # Set the content type before dispatching so individual handlers can
    # still override it (e.g. the HTML admin pages).
    response.content_type = 'application/json'
    return router.default_dispatcher(request, response)
def e(msg):
    """Convenience helper: abort by raising Exception(repr(msg))."""
    raise Exception(repr(msg))
def w(msg):
    """Log a warning, prefixed so it stands out in the App Engine log."""
    logging.warning('##### %s' % repr(msg))
#######################################################################
# frontend related stuff
#######################################################################
# frontend handler
class FrontendHandler(webapp2.RequestHandler):
    """Base class for frontend handlers: resolves the requesting user."""
    def determine_user(self):
        """Return (userID, displayName) for this request.

        An explicit userID parameter is trusted in development, and
        'bot*' IDs are accepted as-is; otherwise valid OAuth2 credentials
        with the email scope are required.
        """
        userID = self.request.get('userID')
        if userID:
            if _IS_DEVELOPMENT:
                # dev_appserver: trust whatever identity the caller claims.
                return userID, self.request.get('displayName', userID)
            if userID.startswith('bot*'):
                # we'll use the userID as the displayName
                return userID, userID
        try:
            # TODO avoid http://en.wikipedia.org/wiki/Confused_deputy_problem
            user = oauth.get_current_user(_EMAIL_SCOPE)
            # TODO instead get a suitable displayName from https://www.googleapis.com/auth/userinfo.profile
            return user.user_id(), user.nickname() # '0', 'example@example.com' in dev_appserver
        except oauth.OAuthRequestError:
            raise Exception("""OAuth2 credentials -or- a valid 'bot*...' userID must be provided""")
# frontend handler
class LoginHandler(FrontendHandler):
    """Builds the Google OAuth2 implicit-flow URL and redirects to it; on
    first run (no stored config) shows an admin-only setup form instead."""
    def post(self):
        # for the admin only auth form
        self.get()
    def get(self):
        config = db_api.getConfig(self.request.host_url)
        if not config:
            # No client_id/secret/api_key stored yet: ask an admin for them.
            self.request_init()
            return
        redirectUri = '%s/logup.html' % self.request.host_url
        authScope = 'https://www.googleapis.com/auth/userinfo.profile+https://www.googleapis.com/auth/userinfo.email+https://www.googleapis.com/auth/plus.me+https://www.googleapis.com/auth/plus.people.recommended';
        returnto = self.request.get('redirect_uri')
        # SECURITY FIX: the Google authorization endpoint must be reached
        # over TLS; this URL was previously built with plain http://.
        authUri = 'https://accounts.google.com/o/oauth2/auth'
        authUri += '?scope=' + authScope
        authUri += '&redirect_uri=' + redirectUri
        authUri += '&response_type=token'
        authUri += '&client_id=' + str(config.client_id)
        #authUri += '&state=' + returnto
        self.response.headers['Access-Control-Allow-Origin'] = '*'
        logging.debug('authUri=%s' % authUri)
        self.redirect(authUri)
    def request_init(self):
        """Admin-only bootstrap: collect and persist the Developer Console
        client_id / client_secret / api_key for this host."""
        user = users.get_current_user()
        if user:
            if users.is_current_user_admin():
                client_id = self.request.get('client_id')
                client_secret = self.request.get('client_secret')
                api_key = self.request.get('api_key')
                if client_id and client_secret and api_key:
                    db_api.setConfig(self.request.host_url, client_id, client_secret, api_key)
                    body = 'Thank you! You may now <a href="javascript:window.location.reload();">reload</a> this page.'
                else:
                    body = """Please enter the following information from the
<a href="https://developers.google.com/console" target="_blank">Developer Console<a> <b>%s</b> project:<br><br>
<form method="post">
<h3>Client ID for web applications<h3>
client_id:<input name="client_id"><br>
client_secret:<input name="client_secret"><br>
<h3>Simple API Access<h3>
api_key:<input name="api_key"><br>
<input type="submit">
</form>""" % self.request.host_url
            else:
                body = 'You (%s) are not an admin. Please <a href="%s">logout</a>.' % (user.email(), users.create_logout_url(self.request.path))
        else:
            body = 'Please <a href="%s">login</a> as an admin.' % users.create_login_url(self.request.path)
        self.response.headers['Content-Type'] = 'text/html'
        self.response.write('<html><body><h1>Datastore configuration</h1>%s</body></html>' % body)
# frontend handler
class Login(FrontendHandler):
    """Creates the user record on first login and echoes the identity as JSON."""
    def post(self):
        self.response.headers['Access-Control-Allow-Origin'] = '*'
        userID, displayName = self.determine_user()
        usr = db_api.getUser(userID)
        if not usr:
            # First login: lazily create the datastore record.
            usr = db_api.newUser(userID, displayName)
        r = {'userID': userID, 'displayName': displayName}
        self.response.write(tojson(r) + '\n')
# frontend handler
class GritsService(FrontendHandler):
    """Dispatches /grits/<fcn> requests: profile, friends, purchases and
    matchmaking for the authenticated (or bot) user."""
    # TODO per client (userID) throttling to limit abuse
    def post(self, fcn):
        logging.info('%s ...' % self.request.url)
        if not fcn:
            fcn = self.request.get('fcn')
            # BUG FIX: this branch referenced the undefined name 'fnc'
            # (three occurrences), raising NameError whenever the legacy
            # ?fcn=... request form was used.
            if fcn:
                # TODO remove once there are no more uses of ?fcn=... in our code
                logging.warning('Please use /grits/%s/?foo=... instead of /grits/?fcn=%s&foo=...' % (fcn, fcn))
        self.response.headers['Access-Control-Allow-Origin'] = '*'
        userID, displayName = self.determine_user()
        usr = db_api.getUser(userID)
        if not usr:
            # Bots are created on the fly; humans must have logged in first.
            if userID.startswith('bot*'):
                usr = db_api.newUser(userID, displayName)
            else:
                self.response.set_status(404)
                self.response.write('Grits userID not found: ' + userID)
                return
        if fcn == 'getProfile':
            r = {'userID': userID, 'credits': str(usr.credits), 'numWins': str(usr.numWins), 'virtualItems': usr.virtualItems}
            self.response.write(tojson(r))
        elif fcn == 'getFriends':
            self.getFriends(userID)
        elif fcn == 'buyItem':
            itemID = self.request.get('itemID')
            if not itemID:
                self.response.set_status(400)
                self.response.write('Grits itemID is required')
                return
            r = db_api.userAttemptToBuy(userID, itemID)
            self.response.write(tojson(r))
        elif fcn == 'findGame':
            self.findGame(userID)
        else:
            self.response.set_status(400)
            self.response.write('Bad grits request.')
    def findGame(self, userID):
        """Proxy the matchmaking request to the 'matcher' backend."""
        # forward the request to the matcher backend
        url = '%s/find-game/%s' % (backends.get_url(backend='matcher', instance=None, protocol='HTTP'), userID)
        payload = '{}'
        resp = urlfetch.fetch(url=url,
            payload=payload,
            method=urlfetch.POST,
            headers={'Content-Type': 'application/json'})
        # Mirror the backend's status, headers and body verbatim.
        self.response.set_status(resp.status_code)
        self.response.headers.update(resp.headers)
        self.response.write(resp.content)
        logging.info('%s -> %s -> %s' % (repr(payload), url, resp.content))
    def getFriends(self, userID):
        """Fetch the user's recommended players from the Google+ games API
        and relay the JSON response."""
        config = db_api.getConfig(self.request.host_url)
        assert config.api_key
        token = self.request.get('accessToken')
        reqUri = 'https://www.googleapis.com/plus/v1games/people/me/people/recommended';
        reqUri += '?key=' + config.api_key;
        reqUri += '&access_token=' + token;
        result = fetchjson(reqUri, None)
        self.response.write(tojson(result))
        #self.response.headers['Content-Type'] = 'application/json'
        #self.response.headers['Access-Control-Allow-Origin'] = '*'
        #self.redirect(reqUri)
# frontend handler
class PurchaseService(FrontendHandler):
    """Endpoint for in-app-purchase postbacks."""
    def get(self):
        # NOTE(review): 'iap' is not imported anywhere in the visible part
        # of this module -- as written this raises NameError; confirm the
        # intended module and import it.
        iap.serverPurchasePostback(self)
# frontend handler
class SharedJsonAssets(FrontendHandler):
    """Serves shared JSON asset files referenced by the URL route."""
    def get(self, filename):
        # SECURITY NOTE(review): 'filename' comes from the URL route; the
        # route's regex must constrain it, otherwise this permits path
        # traversal. Flagged rather than changed, since the route is not
        # visible here.
        # Use a context manager so the handle is closed even if write fails
        # (the original leaked the file object on error).
        with open(filename, 'r') as f:
            self.response.write(f.read())
#######################################################################
# 'matcher' backend related stuff
#######################################################################
# 'matcher' backend handler
class JsonHandler(webapp2.RequestHandler):
    """Convenience class for handling JSON requests.

    Subclasses implement handle(); post() decodes the JSON body, encodes
    the returned object, and converts any failure into a 500 whose body
    is the raw traceback.
    """
    def handle(self, params, *args):
        raise Exception('subclasses must implement this method')
    def post(self, *args):
        logging.info('%s <- %s' % (self.request.path, self.request.body))
        try:
            if not self.request.body:
                raise Exception('Empty request')
            params = fromjson(self.request.body)
            r = self.handle(params, *args)
            if not r:
                raise Exception('Unexpected empty response from subclass')
            self.response.write(tojson(r))
        except Exception:
            # BUG FIX: was a bare 'except:', which also swallowed SystemExit
            # and KeyboardInterrupt; Exception is broad enough here.
            # clients must already be prepared to deal with non-JSON responses,
            # so a raw, human readable, stack trace is fine here
            self.response.set_status(500)
            tb = traceback.format_exc()
            logging.warn(tb)
            self.response.write(tb)
# 'matcher' backend handler
class FindGame(JsonHandler):
  """'matcher' backend endpoint: find (or create) a game for a player."""
  def handle(self, params, userID):
    # Refresh game-server liveness before attempting a match.
    pingAll()
    return self.get_game(userID)
  def get_game(self, userID):
    """Return the player's game, a 'wait' notice, or start a new game."""
    # previously matched / player re-entering?
    player_game = _match_maker.lookup_player_game(userID)
    if player_game:
      return player_game
    # look for a new available game
    result = self._get_player_game(userID)
    # found a game?
    if result and 'game' in result:
      player_game = result
      usr = db_api.getUser(userID)
      addPlayers(player_game, userID, usr.displayName)
      return result
    # more players needed to start a game? Default to 0 so a missing count
    # never reaches the > comparison (None > 0 is fragile/invalid).
    if result.get('players_needed_for_next_game', 0) > 0:
      return result
    # start a new game
    game = startGame()
    if 'game_state' not in game:
      # startGame() failed (e.g. no available servers); surface its result
      logging.info('RETURNING RESULT FROM startGame(): %s' % game)
      return game
    logging.info('RETURNING RESULT: %s' % result)
    return result
  def _get_player_game(self, userID):
    # Queue the player with the match maker and report the outcome.
    return _match_maker.find_player_game(userID)
# 'matcher' backend handler
class UpdateGameState(JsonHandler):
  """'matcher' backend endpoint: merge fresh game state from a game server."""
  def handle(self, params, *args):
    state = params['game_state']
    _match_maker.update_player_names(params['serverid'], state['name'], state)
    return {'success': True,
            'backend': backends.get_backend()}
# 'matcher' backend handler
class RegisterController(JsonHandler):
  """'matcher' backend endpoint: pair a game-server controller with us."""
  def handle(self, params, *args):
    if _PAIRING_KEY != params['pairing_key']:
      return {'success': False,
              'exception': 'bad pairing key'}
    # JSON clients may send the port as a string; coerce so %d cannot raise.
    controller_port = int(params['controller_port'])
    ip = self.request.remote_addr
    controller_host = '%s:%d' % (ip, controller_port)
    params['controller_host'] = controller_host
    _match_maker.update_server_info(params)
    return {'success': True,
            'backend': backends.get_backend(),
            'controller_host': controller_host}
# 'matcher' backend handler
class ListGames(webapp2.RequestHandler):
  """'matcher' backend endpoint: ping all servers, return the live set."""
  def get(self):
    # pingAll() also prunes dead servers/games as a side effect.
    self.response.write(tojson(pingAll()))
# 'matcher' backend handler
class Debug(webapp2.RequestHandler):
  """'matcher' backend endpoint: dump the raw match-maker state as JSON."""
  def get(self):
    self.response.write(tojson(_match_maker.get_state()))
# 'matcher' backend handler
class StartGame(webapp2.RequestHandler):
  """'matcher' backend endpoint: force-start a game on the best server."""
  def get(self):
    self.response.write(tojson(startGame()))
# 'matcher' backend handler
class LogFiles(webapp2.RequestHandler):
  """'matcher' backend endpoint: an HTML index of per-server log links."""
  def get(self):
    self.response.headers['Content-Type'] = 'text/html'
    out = self.response.write
    out('<html><body><h1>Log Files</h1>')
    for serverid in _match_maker.get_game_servers():
      server_info = _match_maker.get_game_server_info(serverid)
      fields = {'id': serverid, 'svr': server_info['controller_host']}
      out('<p>%(id)s: '
          '<a href="http://%(svr)s/forever.log">error</a> '
          '<a href="http://%(svr)s/log">console</a> '
          '<a href="http://%(svr)s/ping">ping</a> '
          '<a href="http://%(svr)s/enable-dedup">enable-dedup</a> '
          '<a href="http://%(svr)s/disable-dedup">disable-dedup</a> '
          '</p>' % fields)
    out('</body></html>')
# 'matcher' backend handler
class GameOver(JsonHandler):
  """'matcher' backend endpoint: a game server reports a finished game."""
  def handle(self, params, *args):
    # Drop the game and release its players back into the matching pool.
    _match_maker.del_game(params['serverid'], params['name'])
    return {'success': True, 'backend': backends.get_backend()}
# 'matcher' backend helper function
def pingAll():
  """Ping every registered game server; prune dead servers and lost games.

  Returns the (possibly pruned) serverid -> server_struct mapping.
  """
  removeserver = []
  # TODO use async urlfetch to parallelize url fetch calls
  for serverid in _match_maker.get_game_servers():
    server_info = _match_maker.get_game_server_info(serverid)
    url = 'http://%s/ping' % server_info['controller_host']
    try:
      r = fetchjson(url, 2)
      logging.debug('pingAll(): %s -> %s' % (url, r))
    except Exception:
      logging.warn('pingAll(): EXCEPTION %s' % traceback.format_exc())
      removeserver.append(serverid)
      # Bug fix: without this continue, 'r' below was stale (or unbound on
      # the first iteration) and the serverid could be queued for removal
      # twice, crashing del_game_server on the second delete.
      continue
    if r['serverid'] != serverid:
      # the server was restarted under a new identity; forget the old one
      removeserver.append(serverid)
      continue
    # check for games which have ended without our knowledge
    remotegameinfo = r['gameinfo']
    server_struct = _match_maker.get_game_server_struct(serverid)
    games = server_struct['games']
    removegame = []
    for name, game in games.iteritems():
      if name not in remotegameinfo:
        # the game is unexpectedly gone
        logging.warn('serverid %s unexpectedly lost game %s (did we miss a /game-over callback?)' % (serverid, name))
        removegame.append(name)
    for name in removegame:
      _match_maker.del_game(serverid, name)
  for serverid in removeserver:
    _match_maker.del_game_server(serverid)
  return _match_maker.get_game_servers()
# 'matcher' backend helper function
def rateUsage(r):
  """Score a server's load as the worse of its cpu and mem percentages."""
  cpu, mem = int(r['cpu']), int(r['mem'])
  return cpu if cpu > mem else mem
# 'matcher' backend helper function
def startGame():
  """Ask the least-loaded registered game server to start a new game.

  Returns the new game dict on success, the server's failure response,
  or {'result': 'no-available-servers'} when no server is registered.
  """
  logging.debug('startGame()')
  best = None
  bestServer = None
  for serverid, server_struct in pingAll().iteritems():
    server_info = server_struct['server_info']
    # Bug fix: compare against this candidate's info. The old code called
    # rateUsage(r) on an undefined name 'r', raising NameError as soon as
    # a first candidate had been selected.
    if not best or rateUsage(best) > rateUsage(server_info):
      best = server_info
      bestServer = best['controller_host']
  if bestServer:
    url = 'http://%s/start-game?p=%s' % (bestServer, _PAIRING_KEY)
    game = fetchjson(url, 20)
    logging.debug('startGame(): %s -> %s' % (url, tojson(game)))
    if game.get('success', False):
      ip = bestServer.split(':')[0]
      game['gameURL'] = 'http://%s:%s/%s' % (ip, game['port'], game['name'])
      game['controller_host'] = bestServer
      game['serverid'] = best['serverid']
      _match_maker.update_game(game)
    return game
  else:
    return {'result': 'no-available-servers'}
def addPlayers(player_game, userID, displayName):
  """Tell the game server hosting player_game to admit this player.

  Returns a dict with 'success' plus the game and its key on success,
  or the formatted exception text on failure.
  """
  logging.info('addPlayers(player_game=%s, userID=%s, displayName=%s)' % (player_game, userID, displayName))
  game = player_game['game']
  player_game_key = player_game['player_game_key']
  url = 'http://%s/add-players?p=%s' % (game['controller_host'], _PAIRING_KEY)
  msg = tojson({
    'userID': userID,
    'displayName': displayName,
    'game_name': game['name'],
    'player_game_key': player_game_key,
  })
  try:
    result = fetchjson(url, 20, payload=msg)
    logging.info('addPlayers(): %s -> %s -> %s' % (msg, url, tojson(result)))
    return {
      'game': game,
      'player_game_key': player_game_key,
      'success': True,
    }
  except:
    exc = traceback.format_exc()
    logging.info('addPlayers(): %s -> %s -> %s' % (msg, url, exc))
    return {
      'success': False,
      'exception': exc,
    }
#######################################################################
# handler common to frontends and backends
handlers = [
]
# Frontend instances report no backend name; register the public routes.
if not backends.get_backend():
  # frontend specific handlers
  handlers.extend([
    ('/login', Login),
    ('/loginoauth', LoginHandler),
    ('/grits/(.*)', GritsService),
    ('/(shared/.*\.json)', SharedJsonAssets),
  ])
elif backends.get_backend() == 'matcher':
  # 'matcher' backend specific handlers
  handlers.extend([
    ('/find-game/(.*)', FindGame),
    ('/update-game-state', UpdateGameState),
    ('/register-controller', RegisterController),
    ('/list-games', ListGames),
    ('/debug', Debug),
    ('/start-game', StartGame),
    ('/game-over', GameOver),
    ('/log-files', LogFiles),
  ])
# All responses default to JSON via the custom dispatcher.
app = webapp2.WSGIApplication(handlers, debug=True)
app.router.set_dispatcher(json_by_default_dispatcher)
| Python |
'''Copyright 2011 Google Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.'''
from google.appengine.api import apiproxy_stub_map
from google.appengine.api import backends
from google.appengine.api import runtime
import datetime
import logging
_NAME = '{}.{} {} ({})'.format(
    backends.get_backend(), backends.get_instance(),
    backends.get_url(), datetime.datetime.now())

def my_shutdown_hook():
    """Best-effort cleanup invoked by the runtime before instance shutdown."""
    logging.warning('{} SHUTDOWN HOOK CALLED'.format(_NAME))
    apiproxy_stub_map.apiproxy.CancelApiCalls()
    # save_state()
    # May want to raise an exception

# register our shutdown hook, which is not guaranteed to be called
logging.info('{} REGISTERING SHUTDOWN HOOK'.format(_NAME))
runtime.set_shutdown_hook(my_shutdown_hook)
| Python |
#!/usr/bin/env python
import optparse
import jwt
import sys
import json
import time
# Command-line interface metadata.
__prog__ = 'jwt'
__version__ = '0.1'
""" JSON Web Token implementation
Minimum implementation based on this spec:
http://self-issued.info/docs/draft-jones-json-web-token-01.html
"""
import base64
import hashlib
import hmac
try:
import json
except ImportError:
import simplejson as json
# Public API of the embedded minimal JWT implementation.
__all__ = ['encode', 'decode', 'DecodeError']
# Raised for any malformed or unverifiable token.
class DecodeError(Exception): pass
# HMAC-SHA2 signing callables keyed by the JOSE "alg" header value.
signing_methods = {
    'HS256': lambda msg, key: hmac.new(key, msg, hashlib.sha256).digest(),
    'HS384': lambda msg, key: hmac.new(key, msg, hashlib.sha384).digest(),
    'HS512': lambda msg, key: hmac.new(key, msg, hashlib.sha512).digest(),
}
def base64url_decode(input):
    """Decode a base64url (RFC 4648 sec. 5) string, restoring stripped padding.

    Bug fix: the old expression '=' * (4 - (len(input) % 4)) appended four
    spurious '=' characters when the length was already a multiple of 4;
    -len(input) % 4 adds exactly the 0..3 characters required.
    """
    input += '=' * (-len(input) % 4)
    return base64.urlsafe_b64decode(input)
def base64url_encode(input):
    """Encode to base64url with the trailing '=' padding stripped.

    Bug fix: strip padding with a bytes pattern so byte-string input works
    on both Python 2 and Python 3 (a str pattern raises TypeError against
    the bytes returned by urlsafe_b64encode on Python 3).
    """
    return base64.urlsafe_b64encode(input).replace(b'=', b'')
def header(jwt):
    """Return the decoded JOSE header dict of a compact-serialized token."""
    encoded_header, _, _ = jwt.partition('.')
    try:
        return json.loads(base64url_decode(encoded_header))
    except (ValueError, TypeError):
        raise DecodeError("Invalid header encoding")
def encode(payload, key, algorithm='HS256'):
    """Serialize payload as a signed compact JWT using the given HMAC alg."""
    header = {"typ": "JWT", "alg": algorithm}
    segments = [base64url_encode(json.dumps(header)),
                base64url_encode(json.dumps(payload))]
    signing_input = '.'.join(segments)
    try:
        if isinstance(key, unicode):
            key = key.encode('utf-8')
        signature = signing_methods[algorithm](signing_input, key)
    except KeyError:
        raise NotImplementedError("Algorithm not supported")
    segments.append(base64url_encode(signature))
    return '.'.join(segments)
def decode(jwt, key='', verify=True):
    """Parse a compact JWT; verify its HMAC signature unless verify is False."""
    try:
        signing_input, crypto_segment = jwt.rsplit('.', 1)
        header_segment, payload_segment = signing_input.split('.', 1)
    except ValueError:
        raise DecodeError("Not enough segments")
    try:
        header = json.loads(base64url_decode(header_segment))
        payload = json.loads(base64url_decode(payload_segment))
        signature = base64url_decode(crypto_segment)
    except (ValueError, TypeError):
        raise DecodeError("Invalid segment encoding")
    if verify:
        try:
            if isinstance(key, unicode):
                key = key.encode('utf-8')
            # NOTE(review): != is not a constant-time comparison; consider
            # hmac.compare_digest if timing attacks are a concern.
            expected = signing_methods[header['alg']](signing_input, key)
            if signature != expected:
                raise DecodeError("Signature verification failed")
        except KeyError:
            raise DecodeError("Algorithm not supported")
    return payload
def fix_optionparser_whitespace(input):
    """Hacks around whitespace Nazi-ism in OptionParser.

    Spaces become a pair of terminal-concealed characters and newlines
    become an 80-column run of spaces, so OptionParser's re-wrapping
    leaves the text visually intact.
    """
    hidden_dot = '\033[8m.\033[0m'
    return input.replace(' ', hidden_dot * 2).replace('\n', ' ' * 80)
def main():
    """Encodes or decodes JSON Web Tokens based on input
    Decoding examples:
    %prog --key=secret json.web.token
    %prog --no-verify json.web.token
    Encoding requires the key option and takes space separated key/value pairs
    separated by equals (=) as input. Examples:
    %prog --key=secret iss=me exp=1302049071
    %prog --key=secret foo=bar exp=+10
    The exp key is special and can take an offset to current Unix time.
    """
    p = optparse.OptionParser(description=fix_optionparser_whitespace(main.__doc__),
                              prog=__prog__,
                              version='%s %s' % (__prog__, __version__),
                              usage='%prog [options] input')
    p.add_option('-n', '--no-verify', action='store_false', dest='verify', default=True,
                 help='ignore signature verification on decode')
    p.add_option('--key', dest='key', metavar='KEY', default=None,
                 help='set the secret key to sign with')
    p.add_option('--alg', dest='algorithm', metavar='ALG', default='HS256',
                 help='set crypto algorithm to sign with. default=HS256')
    options, arguments = p.parse_args()
    # Decode when given a positional token or piped stdin; otherwise encode.
    if len(arguments) > 0 or not sys.stdin.isatty():
        # Try to decode
        try:
            if not sys.stdin.isatty():
                token = sys.stdin.read()
            else:
                token = arguments[0]
            # header() raises DecodeError on non-JWT input, which drops us
            # through to the encoding path below.
            valid_jwt = jwt.header(token)
            if valid_jwt:
                try:
                    print json.dumps(jwt.decode(token, key=options.key, verify=options.verify))
                    sys.exit(0)
                except jwt.DecodeError, e:
                    print e
                    sys.exit(1)
        except jwt.DecodeError:
            pass
        # Try to encode
        if options.key is None:
            print "Key is required when encoding. See --help for usage."
            sys.exit(1)
        # Build payload object to encode
        payload = {}
        for arg in arguments:
            try:
                k,v = arg.split('=', 1)
                # exp +offset special case?
                if k == 'exp' and v[0] == '+' and len(v) > 1:
                    v = str(int(time.time()+int(v[1:])))
                # Cast to integer?
                if v.isdigit():
                    v = int(v)
                else:
                    # Cast to float?
                    try:
                        v = float(v)
                    except ValueError:
                        pass
                # Cast to true, false, or null?
                constants = {'true': True, 'false': False, 'null': None}
                if v in constants:
                    v = constants[v]
                payload[k] = v
            except ValueError:
                print "Invalid encoding input at %s" % arg
                sys.exit(1)
        try:
            print jwt.encode(payload, key=options.key, algorithm=options.algorithm)
            sys.exit(0)
        except Exception, e:
            print e
            sys.exit(1)
    else:
        # No token and no stdin: show usage.
        p.print_help()
# Script entry point.
if __name__ == '__main__':
    main()
| Python |
'''Copyright 2011 Google Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
#limitations under the License.'''
from google.appengine.api import backends
from google.appengine.api import users
import copy
import logging
import pprint as pp
import random
import sys
from client import db_api
def e(msg):
    """Convenience helper: abort by raising an Exception wrapping repr(msg)."""
    err = Exception(repr(msg))
    raise err
def w(msg):
    """Log msg (repr'd, prefixed with a ##### marker) at warning level."""
    logging.warning('##### ' + repr(msg))
class MatchMaker:
    """
    Multiple player match making service, allowing players
    to come together in an arena to show off their skills.

    Internal state layout:

    _game_servers =
    { serverid1: <server_struct>,
      serverid2: <server_struct>,
    }
    <server_struct> =
    { 'server_info': <server_info>,
      'games': { name1: <game>,
                 name2: <game>,
               },
    }
    <server_info> =
    { 'serverid': ...,
      'uptime': ...,
    }
    <game> =
    { 'serverid': ...,
      'name': 'mPdn',
      'gameURL': 'http://127.0.0.1:9090/mPdn',
      'port': 9090,
      'controller_host': '127.0.0.1:12345',
      'game_state': {'players': {'324324382934982374823748923': '!1'}, 'min_players': 2, 'max_players': 8},
    }
    """
    # Template struct for a server we have not seen before (deep-copied).
    _EMPTY_SERVER = { 'games' : {} }
    def __init__(self):
        # serverid -> <server_struct>; see the class docstring for the shape
        self._game_servers = {}
        # userIDs queued for a match, oldest first
        self._players_waiting = []
        # userID -> player_game dict ('game', 'player_game_key', 'userID')
        self._players_playing = {}
    def get_game_server_struct(self, serverid):
        """Return the <server_struct> for serverid, or None if unregistered."""
        assert serverid
        return self._game_servers.get(serverid, None)
    def get_game_server_info(self, serverid):
        """Return the <server_info> for a known serverid."""
        assert serverid
        server_struct = self.get_game_server_struct(serverid)
        return server_struct['server_info']
    def _set_game_server_struct(self, serverid, server_struct):
        # Low-level setter; callers are responsible for struct shape.
        self._game_servers[serverid] = server_struct
    def _set_game_server_info(self, serverid, server_info):
        """Attach server_info to serverid, creating its struct on first sight."""
        assert serverid
        assert server_info
        server_struct = self.get_game_server_struct(serverid)
        if not server_struct:
            server_struct = copy.deepcopy(MatchMaker._EMPTY_SERVER)
            self._set_game_server_struct(serverid, server_struct)
        server_struct['server_info'] = server_info
    def get_state(self):
        """Return the complete internal state (used by the /debug endpoint)."""
        return {
            'game_servers': self._game_servers,
            'players_waiting': self._players_waiting,
            'players_playing': self._players_playing,
        }
    def get_game_servers(self):
        """Return the serverid -> <server_struct> mapping."""
        return self._game_servers
    def del_game_server(self, serverid):
        """Forget serverid and evict every player playing on it."""
        del self._game_servers[serverid]
        remove = []
        for player, player_game in self._players_playing.iteritems():
            game = player_game['game']
            if game['serverid'] == serverid:
                remove.append(player)
        for r in remove:
            self._players_playing.pop(r)
    def update_player_names(self, serverid, game_name, new_game_state):
        """Merge the player-name map reported by a game server into ours."""
        server_struct = self.get_game_server_struct(serverid)
        games = server_struct['games']
        game = games[game_name]
        game_state = game['game_state']
        players = game_state['players']
        new_players = new_game_state['players']
        logging.info('Updating %s with %s' % (repr(players), repr(new_players)))
        assert isinstance(players, dict)
        assert isinstance(new_players, dict)
        players.update(new_players)
    def update_server_info(self, server_info):
        """Register or refresh a game server from its self-reported info."""
        serverid = server_info['serverid']
        self._set_game_server_info(serverid, server_info)
    def update_game(self, game):
        """Insert or replace a <game> under its owning (already known) server."""
        serverid = game['serverid']
        name = game['name']
        assert serverid in self._game_servers
        server_struct = self.get_game_server_struct(serverid)
        games = server_struct['games']
        games[name] = game
    def del_game(self, serverid, game_name):
        """Remove a finished game and release its players from 'playing'."""
        server_struct = self.get_game_server_struct(serverid)
        games = server_struct['games']
        game = games[game_name]
        game_state = game['game_state']
        players = game_state['players']
        for p in players:
            self._players_playing.pop(p)
        del games[game_name]
    def _add_player(self, userID, game):
        """Seat userID in game and record their player_game bookkeeping."""
        assert isinstance(userID, str)
        game_state = game['game_state']
        min_players = int(game_state['min_players'])
        max_players = int(game_state['max_players'])
        players = game_state['players']
        assert max_players >= min_players
        assert len(players) < max_players
        assert userID not in game_state['players']
        # 'TBD' is replaced once the game server reports the display name.
        players[userID] = 'TBD'
        self._players_playing[userID] = {
            'game': game,
            'player_game_key': str(random.randint(-sys.maxint, sys.maxint)),
            'userID': userID, # used by Android client
        }
    def make_matches(self):
        """Seat waiting players; return the shortfall for the next game."""
        if not self._players_waiting:
            return
        # TODO match based on skills instead of capacity
        players_needed_for_next_game = self.make_matches_min_players()
        if self._players_waiting:
            self.make_matches_max_players()
        return players_needed_for_next_game
    def make_matches_min_players(self):
        """Fill games up to their min_players threshold so they can start.

        Returns the smallest shortfall across games still below minimum,
        or -1 when no game is short of players.
        """
        players_needed_for_next_game = -1
        for server_struct in self._game_servers.itervalues():
            for game in server_struct['games'].itervalues():
                game_state = game['game_state']
                players_in_game = game_state['players']
                player_goal = int(game_state['min_players'])
                players_needed = player_goal - len(players_in_game)
                if not players_needed:
                    continue
                if len(self._players_waiting) >= players_needed:
                    # let's get this party started
                    while len(players_in_game) < player_goal:
                        self._add_player(self._players_waiting.pop(0), game)
                elif (players_needed_for_next_game == -1
                      or players_needed < players_needed_for_next_game):
                    players_needed_for_next_game = players_needed
        return players_needed_for_next_game
    def make_matches_max_players(self):
        """Top up games that already reached minimum toward max_players."""
        for server_struct in self._game_servers.itervalues():
            for game in server_struct['games'].itervalues():
                game_state = game['game_state']
                players_in_game = game_state['players']
                if len(players_in_game) < int(game_state['min_players']):
                    continue
                player_goal = int(game_state['max_players'])
                if len(players_in_game) == player_goal:
                    continue
                while self._players_waiting and len(players_in_game) < player_goal:
                    self._add_player(self._players_waiting.pop(0), game)
    def lookup_player_game(self, userID):
        """Return the player_game userID is already seated in, or None."""
        assert isinstance(userID, str)
        return self._players_playing.get(userID, None)
    def find_player_game(self, userID):
        """Queue userID (if needed), run matching, and report the outcome.

        Returns either the player's player_game dict or a
        {'result': 'wait', ...} notice with the remaining shortfall.
        """
        assert isinstance(userID, str)
        if userID not in self._players_waiting:
            self._players_waiting.append(userID)
        players_needed_for_next_game = self.make_matches()
        if userID in self._players_waiting:
            #logging.info('find_player_game: %s must wait a little bit longer' % userID)
            return {'result': 'wait', 'players_needed_for_next_game': players_needed_for_next_game}
        player_game = self._players_playing.get(userID, None)
        if not player_game:
            raise Exception('userID %s is not in self._players_playing' % userID)
        return player_game
| Python |
'''Copyright 2011 Google Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
#limitations under the License.'''
import json
import logging
import os
# Load the static weapons catalog once at import time; getVirtualItem()
# resolves itemIDs against this dict.
with open('shared/weapons/Weapons.json', 'r') as f:
    _WEAPONS = json.loads(f.read())
def getVirtualItem(itemID):
    """Return the weapon dict whose 'itemID' matches, else raise Exception."""
    assert itemID
    match = next((w for w in _WEAPONS['weapons'] if w['itemID'] == itemID), None)
    if match is None:
        raise Exception('itemID "%s" does not exist' % itemID)
    return match
| Python |
'''Copyright 2011 Google Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
#limitations under the License.''' | Python |
'''Copyright 2011 Google Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
#limitations under the License.'''
from google.appengine.api import backends
from google.appengine.ext import ndb
import json
import logging
import os
from client import shared
_IS_DEVELOPMENT = os.environ['SERVER_SOFTWARE'].startswith('Development/')
class Config(ndb.Model):
    """Per-origin OAuth/API configuration, keyed by the app's host URL."""
    # Client ID for web applications
    client_id = ndb.StringProperty(indexed=False)
    # OAuth client secret paired with client_id
    client_secret = ndb.StringProperty(indexed=False)
    # Simple API Access
    api_key = ndb.StringProperty(indexed=False)
class User(ndb.Model):
    """Player profile and virtual-economy state, keyed by string userID."""
    # Name shown to other players.
    displayName = ndb.StringProperty(indexed=False)
    # When the account was created.
    createDate = ndb.DateTimeProperty(indexed=False)
    # Virtual-currency balance.
    credits = ndb.IntegerProperty(indexed=False)
    # Lifetime win count.
    numWins = ndb.IntegerProperty(indexed=False)
    # Lifetime loss count.
    numLoss = ndb.IntegerProperty(indexed=False)
    # itemIDs of virtual items this user has unlocked or purchased.
    virtualItems = ndb.StringProperty(repeated=True, indexed=False)
def getConfig(origin):
    """Return the stored Config for origin.

    On the dev server the config is (re)loaded from build/keys-localhost.json
    first, so local runs always have fresh keys.
    """
    if _IS_DEVELOPMENT:
        keys = json.loads(open('build/keys-localhost.json').read())
        setConfig(origin, keys['client_id'], keys['client_secret'], keys['api_key'])
    return Config.get_by_id(str(origin))
def setConfig(origin, client_id, client_secret, api_key):
    """Create or overwrite the Config entity keyed by origin."""
    Config(id=str(origin),
           client_id=client_id,
           client_secret=client_secret,
           api_key=api_key).put()
def getUser(userID):
    """Fetch the User entity for userID, or None if it does not exist."""
    return User.get_by_id(str(userID))
def newUser(userID, displayName):
    """Create, persist, and return a User with starter credits and stats."""
    usr = User(id=str(userID))
    usr.displayName = displayName
    usr.credits = 1000
    usr.numWins = 3
    # Bug fix: the model property is numLoss, not numLosses; the old name
    # did not match any declared property, so the stat was never stored.
    usr.numLoss = 5
    usr.put()
    return usr
#NOTE what are we doing here, really?
#the goal is to have virtual currency, but also allow for purchacing item combos
#called when client asks to unlock an item with credits
def unlockItemForUser(userID, itemID):
    """Spend credits to unlock itemID for userID.

    Returns True on success, None when the user or item is unknown.
    """
    usr = getUser(userID)
    if not usr:
        return None
    vi = shared.getVirtualItem(itemID)
    if not vi:
        return None
    # Do this the hacky way and just push it to the end.
    usr.virtualItems.append(itemID)
    # Bug fixes: virtual items are plain dicts (subscript access, not
    # attributes), and ndb persists with put() -- push() does not exist.
    usr.credits -= vi['priceInCredits']
    usr.put()
    return True
#called during a postback call from the IAP server
def purchaseItemForUser(userID, itemID):
    """Credit a user's account after a verified IAP purchase of itemID.

    Returns True when the item was applied, None otherwise.
    """
    usr = getUser(userID)
    if not usr:
        return None
    vi = shared.getVirtualItem(itemID)
    if not vi:
        return None
    # Bug fixes: items are dicts (subscript access, not attributes), and
    # the credit grant was never persisted -- put() was missing entirely.
    if vi['itemType'] == "credits":
        usr.credits += vi['itemData0']
        usr.put()
        return True
    return None
def userAttemptToBuy(userID, itemID):
    """Attempt to buy itemID with the user's virtual-currency credits.

    Returns a dict with a boolean 'result' plus context for the client
    (error message, or the itemID and remaining credit balance).
    """
    assert userID
    assert itemID
    usr = getUser(userID)
    if not usr:
        return {'result': False, 'message': 'User not found'}
    vi = shared.getVirtualItem(itemID)
    if not vi:
        return {'result': False, 'message': 'Item not found; please check with the admin'}
    # if the user has enough credits for the item, unlock the item
    # (the old unused local 'result = ""' has been removed)
    if usr.credits >= vi['priceInCredits']:
        usr.virtualItems.append(itemID)
        usr.credits -= vi['priceInCredits']
        usr.put()
        return {'result': True, 'itemID': itemID, 'userCredits': usr.credits}
    return {'result': False, 'itemID': itemID, 'userCredits': usr.credits}
| Python |
Subsets and Splits
SQL Console for ajibawa-2023/Python-Code-Large
Provides a useful breakdown of language distribution in the training data, showing which languages have the most samples and helping identify potential imbalances across different language groups.